From d93cde55dd44428e949ac1db7423ccc4375e733f Mon Sep 17 00:00:00 2001
From: Sylvain Lebresne
Date: Thu, 19 Jul 2012 13:49:28 +0200
Subject: [PATCH 001/719] Initial commit; tentative and incomplete skeleton of
 an API

---
 src/com/datastax/driver/core/AuthInfo.java    |   9 ++
 .../datastax/driver/core/BoundStatement.java  |  45 +++++++
 src/com/datastax/driver/core/CQLRow.java      |  52 ++++++++
 src/com/datastax/driver/core/Cluster.java     |  74 ++++++++++++
 src/com/datastax/driver/core/Columns.java     |  82 +++++++++++++
 src/com/datastax/driver/core/DataType.java    |   8 ++
 .../driver/core/PreparedStatement.java        |  25 ++++
 src/com/datastax/driver/core/ResultSet.java   |  64 ++++++++++
 src/com/datastax/driver/core/Session.java     | 111 ++++++++++++++++++
 9 files changed, 470 insertions(+)
 create mode 100644 src/com/datastax/driver/core/AuthInfo.java
 create mode 100644 src/com/datastax/driver/core/BoundStatement.java
 create mode 100644 src/com/datastax/driver/core/CQLRow.java
 create mode 100644 src/com/datastax/driver/core/Cluster.java
 create mode 100644 src/com/datastax/driver/core/Columns.java
 create mode 100644 src/com/datastax/driver/core/DataType.java
 create mode 100644 src/com/datastax/driver/core/PreparedStatement.java
 create mode 100644 src/com/datastax/driver/core/ResultSet.java
 create mode 100644 src/com/datastax/driver/core/Session.java

diff --git a/src/com/datastax/driver/core/AuthInfo.java b/src/com/datastax/driver/core/AuthInfo.java
new file mode 100644
index 00000000000..6675747c470
--- /dev/null
+++ b/src/com/datastax/driver/core/AuthInfo.java
@@ -0,0 +1,9 @@
+package com.datastax.driver.core;
+
+/**
+ * Authentication information to connect to a Cassandra node.
+ *
+ * TODO (and define what this is in particular)
+ */
+public class AuthInfo {
+}
diff --git a/src/com/datastax/driver/core/BoundStatement.java b/src/com/datastax/driver/core/BoundStatement.java
new file mode 100644
index 00000000000..8662e55a87d
--- /dev/null
+++ b/src/com/datastax/driver/core/BoundStatement.java
@@ -0,0 +1,45 @@
+package com.datastax.driver.core;
+
+public class BoundStatement {
+
+    /**
+     * Returns the prepared statement on which this BoundStatement is based.
+     *
+     * @return the prepared statement on which this BoundStatement is based.
+     */
+    public PreparedStatement preparedStatement() {
+        return null;
+    }
+
+    /**
+     * Returns whether all variables have been bound to values in this
+     * BoundStatement.
+     *
+     * @return whether all variables are bound.
+     */
+    public boolean ready() {
+        return false;
+    }
+
+    public BoundStatement bind(Object... values) {
+        return null;
+    }
+
+    public BoundStatement setBool(int i, boolean v) {
+        return null;
+    }
+
+    public BoundStatement setBool(String name, boolean v) {
+        return null;
+    }
+
+    public BoundStatement setInt(int i, int v) {
+        return null;
+    }
+
+    public BoundStatement setInt(String name, int v) {
+        return null;
+    }
+
+    // ...
+}
diff --git a/src/com/datastax/driver/core/CQLRow.java b/src/com/datastax/driver/core/CQLRow.java
new file mode 100644
index 00000000000..995b5f3bc88
--- /dev/null
+++ b/src/com/datastax/driver/core/CQLRow.java
@@ -0,0 +1,52 @@
+package com.datastax.driver.core;
+
+import java.util.Date;
+
+/**
+ * A CQL Row returned in a {@link ResultSet}.
+ */
+public class CQLRow {
+
+    /**
+     * The columns contained in this CQLRow.
+     *
+     * @return the columns contained in this CQLRow.
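+     * <p>
+     * Purely for illustration, a sketch of how a row might be consumed once
+     * this skeleton is implemented (the column names are hypothetical):
+     * <pre>
+     *   CQLRow row = resultSet.fetchOne();
+     *   int id = row.getInt("id");
+     *   Date created = row.getDate("created");
+     * </pre>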
+     */
+    public Columns columns() {
+        return null;
+    }
+
+    public boolean getBool(int i) {
+        return false;
+    }
+
+    public boolean getBool(String name) {
+        return false;
+    }
+
+    public int getInt(int i) {
+        return 0;
+    }
+
+    public int getInt(String name) {
+        return 0;
+    }
+
+    public long getLong(int i) {
+        return 0;
+    }
+
+    public long getLong(String name) {
+        return 0;
+    }
+
+    public Date getDate(int i) {
+        return null;
+    }
+
+    public Date getDate(String name) {
+        return null;
+    }
+
+    // ...
+}
diff --git a/src/com/datastax/driver/core/Cluster.java b/src/com/datastax/driver/core/Cluster.java
new file mode 100644
index 00000000000..e7671b464e3
--- /dev/null
+++ b/src/com/datastax/driver/core/Cluster.java
@@ -0,0 +1,74 @@
+package com.datastax.driver.core;
+
+/**
+ * Information and known state of a Cassandra cluster.
+ *

+ * This is the main entry point of the driver. A simple example of access to a
+ * Cassandra cluster would be:
+ *
+ *   Cluster cluster = Cluster.Builder().addContactPoint("192.168.0.1").build();
+ *   Session session = cluster.connect("db1");
+ *
+ *   for (CQLRow row : session.execute("SELECT * FROM table1"))
+ *       // do something ...
+ *
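+ * As a variation on the example above, several contact points can be
+ * registered (a sketch only; the Builder below is still a TODO and the
+ * addresses are placeholders):
+ *
+ *   Cluster cluster = Cluster.Builder()
+ *                            .addContactPoint("192.168.0.1")
+ *                            .addContactPoint("192.168.0.2")
+ *                            .build();
+ *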

+ * A cluster object maintains a permanent connection to one of the cluster
+ * nodes, which it uses solely to maintain information on the state and current
+ * topology of the cluster. Using that connection, the driver will discover all
+ * the nodes composing the cluster as well as new nodes joining the cluster.
+ * You can disable that connection through the disableStateConnection() method.
+ * This is however discouraged, as it means queries will only ever be executed
+ * against nodes set as contact points. If you want to limit the number of
+ * nodes to which this driver connects, prefer maxConnectedNode().
+ */
+public class Cluster {
+
+    /**
+     * Creates a new session on this cluster.
+     *
+     * @return a new session on this cluster set to no keyspace.
+     */
+    public Session connect() {
+        return null;
+    }
+
+    /**
+     * Creates a new session on this cluster.
+     *
+     * @param authInfo The authorisation credentials to use to connect to
+     * Cassandra nodes.
+     * @return a new session on this cluster set to no keyspace.
+     */
+    public Session connect(AuthInfo authInfo) {
+        return null;
+    }
+
+    /**
+     * Creates a new session on this cluster and sets a keyspace to use.
+     *
+     * @param keyspaceName The name of the keyspace to use for the created
+     * Session. This can be later changed using {@link Session#use}.
+     * @return a new session on this cluster set to keyspace
+     * keyspaceName.
+     */
+    public Session connect(String keyspace) {
+        return null;
+    }
+
+    /**
+     * Creates a new session on this cluster and sets a keyspace to use.
+     *
+     * @param authInfo The authorisation credentials to use to connect to
+     * Cassandra nodes.
+     * @return a new session on this cluster set to keyspace
+     * keyspaceName.
+     */
+    public Session connect(String keyspace, AuthInfo authInfo) {
+        return null;
+    }
+
+    public class Builder {
+        // TODO
+    }
+}
diff --git a/src/com/datastax/driver/core/Columns.java b/src/com/datastax/driver/core/Columns.java
new file mode 100644
index 00000000000..b07c7a19b80
--- /dev/null
+++ b/src/com/datastax/driver/core/Columns.java
@@ -0,0 +1,82 @@
+package com.datastax.driver.core;
+
+/**
+ * Metadata describing the columns returned in a {@link ResultSet} or a
+ * {@link PreparedStatement}.
+ */
+public class Columns {
+
+    /**
+     * Returns the number of columns described by this Columns
+     * instance.
+     *
+     * @return the number of columns described by this metadata.
+     */
+    public int count() {
+        return 0;
+    }
+
+    /**
+     * Returns the name of the ith column in this metadata.
+     *
+     * @return the name of the ith column in this metadata.
+     */
+    public String name(int i) {
+        return null;
+    }
+
+    /**
+     * Returns the type of the ith column in this metadata.
+     *
+     * @return the type of the ith column in this metadata.
+     */
+    public DataType type(int i) {
+        return null;
+    }
+
+    /**
+     * Returns the type of column name in this metadata.
+     *
+     * @return the type of column name in this metadata.
+     */
+    public DataType type(String name) {
+        return null;
+    }
+
+    /**
+     * Returns the keyspace of the ith column in this metadata.
+     *
+     * @return the keyspace of the ith column in this metadata.
+     */
+    public String keyspace(int i) {
+        return null;
+    }
+
+    /**
+     * Returns the keyspace of column name in this metadata.
+     *
+     * @return the keyspace of column name in this metadata.
+     */
+    public String keyspace(String name) {
+        return null;
+    }
+
+    /**
+     * Returns the table of the ith column in this metadata.
+     *
+     * @return the table of the ith column in this metadata.
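+     * <p>
+     * For illustration, the metadata as a whole could be walked as in the
+     * sketch below (every method here is still a stub):
+     * <pre>
+     *   Columns cols = resultSet.columns();
+     *   for (int i = 0; i &lt; cols.count(); i++)
+     *       System.out.println(cols.name(i) + " " + cols.type(i) + " " + cols.table(i));
+     * </pre>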
+     */
+    public String table(int i) {
+        return null;
+    }
+
+    /**
+     * Returns the table of column name in this metadata.
+     *
+     * @return the table of column name in this metadata.
+     */
+    public String table(String name) {
+        return null;
+    }
+
+}
diff --git a/src/com/datastax/driver/core/DataType.java b/src/com/datastax/driver/core/DataType.java
new file mode 100644
index 00000000000..92b5b908a2c
--- /dev/null
+++ b/src/com/datastax/driver/core/DataType.java
@@ -0,0 +1,8 @@
+package com.datastax.driver.core;
+
+/**
+ * Supported data types for columns.
+ */
+public class DataType {
+    // TODO
+}
diff --git a/src/com/datastax/driver/core/PreparedStatement.java b/src/com/datastax/driver/core/PreparedStatement.java
new file mode 100644
index 00000000000..17131c75abb
--- /dev/null
+++ b/src/com/datastax/driver/core/PreparedStatement.java
@@ -0,0 +1,25 @@
+package com.datastax.driver.core;
+
+/**
+ * Represents a prepared statement, a query with bound variables that has been
+ * prepared (pre-parsed) by the database.
+ *

+ * A prepared statement can be executed once concrete values have been provided
+ * for the bound variables. The pair of a prepared statement and values for its
+ * bound variables is a BoundStatement and can be executed by
+ * {@link Session#executePrepared}.
+ */
+public class PreparedStatement {
+
+    public Columns variables() {
+        return null;
+    }
+
+    public BoundStatement bind(Object... values) {
+        return null;
+    }
+
+    public BoundStatement newBoundStatement() {
+        return null;
+    }
+}
diff --git a/src/com/datastax/driver/core/ResultSet.java b/src/com/datastax/driver/core/ResultSet.java
new file mode 100644
index 00000000000..1c4a9bdfab2
--- /dev/null
+++ b/src/com/datastax/driver/core/ResultSet.java
@@ -0,0 +1,64 @@
+package com.datastax.driver.core;
+
+import java.util.Iterator;
+import java.util.List;
+
+/**
+ * The result of a query.
+ */
+public class ResultSet implements Iterable<CQLRow> {
+
+    /**
+     * The columns returned in this ResultSet.
+     *
+     * @return the columns returned in this ResultSet.
+     */
+    public Columns columns() {
+        return null;
+    }
+
+    /**
+     * Tests whether this ResultSet has more results.
+     *
+     * @return whether this ResultSet has more results.
+     */
+    public boolean isExhausted() {
+        return true;
+    }
+
+    /**
+     * Returns the next result from this ResultSet.
+     *
+     * @return the next row in this ResultSet or null if this ResultSet is
+     * exhausted.
+     */
+    public CQLRow fetchOne() {
+        return null;
+    }
+
+    /**
+     * Returns all the remaining rows in this ResultSet as a list.
+     *
+     * @return a list containing the remaining results of this ResultSet. The
+     * returned list is empty if and only if the ResultSet is exhausted.
+     */
+    public List<CQLRow> fetchAll() {
+        return null;
+    }
+
+    /**
+     * An iterator over the rows contained in this ResultSet.
+     *
+     * The {@link Iterator#next} method is equivalent to
+     * calling {@link #fetchOne}. So this iterator will consume results from
+     * this ResultSet and after a full iteration, the ResultSet will be empty.
+     *
+     * The returned iterator does not support the {@link Iterator#remove} method.
+     *
+     * @return an iterator that will consume and return the remaining rows of
+     * this ResultSet.
+     */
+    public Iterator<CQLRow> iterator() {
+        return null;
+    }
+}
diff --git a/src/com/datastax/driver/core/Session.java b/src/com/datastax/driver/core/Session.java
new file mode 100644
index 00000000000..25f455c5633
--- /dev/null
+++ b/src/com/datastax/driver/core/Session.java
@@ -0,0 +1,111 @@
+package com.datastax.driver.core;
+
+/**
+ * A session holds connections to a Cassandra cluster, allowing it to be queried.
+ *
+ * Each session maintains multiple connections to the cluster nodes, and
+ * provides policies to choose which node to use for each query (round-robin on
+ * all nodes of the cluster by default), handles retries for failed queries
+ * (when it makes sense), etc...
+ *
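+ * For illustration, typical use at this stage of the design would resemble
+ * the following sketch (keyspace and query are placeholders):
+ *
+ *   Session session = cluster.connect("db1");
+ *   ResultSet rs = session.execute("SELECT * FROM table1");
+ *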

+ * Session instances are thread-safe and usually a single instance is enough
+ * per application. However, a given session can only be set to one keyspace
+ * at a time, so this is really more like one instance per keyspace used.
+ */
+public class Session {
+
+    /**
+     * Sets the current keyspace to use for this session.
+     *
+     * Note that it is up to the application to synchronize calls to this
+     * method with queries executed against this session.
+     *
+     * @param keyspace the name of the keyspace to set
+     * @return this session.
+     *
+     */
+    public Session use(String keyspace) {
+        return null;
+    }
+
+    /**
+     * Execute the provided query.
+     *
+     * This method blocks until at least some result has been received from the
+     * database. However, for SELECT queries, it does not guarantee that the
+     * result has been received in full. But it does guarantee that some
+     * response has been received from the database, and in particular
+     * guarantees that if the request is invalid, an exception will be thrown
+     * by this method.
+     *
+     * @param query the CQL query to execute
+     * @return the result of the query. That result will never be null but can
+     * be empty and will be for any non SELECT query.
+     */
+    public ResultSet execute(String query) {
+        return null;
+    }
+
+    /**
+     * Execute the provided query asynchronously.
+     *
+     * This method does not block. It returns as soon as the query has been
+     * successfully sent to a Cassandra node. In particular, returning from
+     * this method does not guarantee that the query is valid. Any exception
+     * pertaining to the failure of the query will be thrown by the first
+     * access to the {@link ResultSet}.
+     *
+     * Note that for queries that don't return a result (INSERT, UPDATE and
+     * DELETE), you will need to access the ResultSet (i.e. call any of its
+     * methods) to make sure the query was successful.
+     *
+     * @param query the CQL query to execute
+     * @return the result of the query. That result will never be null but can
+     * be empty and will be for any non SELECT query.
+     */
+    public ResultSet executeAsync(String query) {
+        return null;
+    }
+
+    /**
+     * Prepare the provided query.
+     *
+     * @param query the CQL query to prepare
+     * @return the prepared statement corresponding to query.
+     */
+    public PreparedStatement prepare(String query) {
+        return null;
+    }
+
+    /**
+     * Execute a prepared statement that has had values provided for its bound
+     * variables.
+     *
+     * This method performs like {@link #execute} but for prepared statements.
+     * It blocks until at least some result has been received from the
+     * database.
+     *
+     * @param stmt the prepared statement with values for its bound variables.
+     * @return the result of the query. That result will never be null but can
+     * be empty and will be for any non SELECT query.
+     */
+    public ResultSet executePrepared(BoundStatement stmt) {
+        return null;
+    }
+
+    /**
+     * Execute a prepared statement that has had values provided for its bound
+     * variables asynchronously.
+     *
+     * This method performs like {@link #executeAsync} but for prepared
+     * statements. It returns as soon as the query has been successfully sent to
+     * the database.
+     *
+     * @param stmt the prepared statement with values for its bound variables.
+     * @return the result of the query. That result will never be null but can
+     * be empty and will be for any non SELECT query.
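+     * <p>
+     * For illustration, the whole prepare/bind/execute round-trip might look
+     * like this sketch (the query and values are placeholders):
+     * <pre>
+     *   PreparedStatement ps = session.prepare("INSERT INTO users (id, name) VALUES (?, ?)");
+     *   BoundStatement bs = ps.bind(42, "jdoe");
+     *   session.executePreparedAsync(bs);
+     * </pre>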
+     */
+    public ResultSet executePreparedAsync(BoundStatement stmt) {
+        return null;
+    }
+}

From d3c5a9863e9af91ef9b9a334e6c5c2d5177e0ac5 Mon Sep 17 00:00:00 2001
From: Sylvain Lebresne
Date: Thu, 19 Jul 2012 13:57:20 +0200
Subject: [PATCH 002/719] Adds a README

---
 README | 4 ++++
 TODO   | 8 ++++++++
 2 files changed, 12 insertions(+)
 create mode 100644 README
 create mode 100644 TODO

diff --git a/README b/README
new file mode 100644
index 00000000000..5040263fd02
--- /dev/null
+++ b/README
@@ -0,0 +1,4 @@
+Cassandra Java Driver
+=====================
+
+A work-in-progress Java driver using the binary protocol and the full power of CQL3.
diff --git a/TODO b/TODO
new file mode 100644
index 00000000000..57a41362863
--- /dev/null
+++ b/TODO
@@ -0,0 +1,8 @@
+TODO:
+=====
+
+Everything, but in particular:
+- Build system
+- Make it work
+- Tests
+- Documentation

From 98f8d0a12463ba0ec55cdc2bc23a7141ca8fb924 Mon Sep 17 00:00:00 2001
From: mfiguiere
Date: Thu, 19 Jul 2012 15:55:42 +0200
Subject: [PATCH 003/719] Initialize maven hierarchy

---
 .project                                      | 17 ++++
 .settings/org.eclipse.m2e.core.prefs          |  4 ++
 driver-core/.classpath                        | 26 ++++++
 driver-core/.project                          | 23 +++++
 .../.settings/org.eclipse.jdt.core.prefs      |  5 ++
 .../.settings/org.eclipse.m2e.core.prefs      |  4 ++
 driver-core/pom.xml                           | 23 +++++
 .../com/datastax/driver/core/AuthInfo.java    |  0
 .../datastax/driver/core/BoundStatement.java  |  0
 .../com/datastax/driver/core/CQLRow.java      |  0
 .../com/datastax/driver/core/Cluster.java     |  0
 .../com/datastax/driver/core/Columns.java     |  0
 .../com/datastax/driver/core/DataType.java    |  0
 .../driver/core/PreparedStatement.java        |  0
 .../com/datastax/driver/core/ResultSet.java   |  0
 .../com/datastax/driver/core/Session.java     |  0
 driver-jdbc/.classpath                        | 26 ++++++
 driver-jdbc/.project                          | 23 +++++
 .../.settings/org.eclipse.jdt.core.prefs      |  5 ++
 .../.settings/org.eclipse.m2e.core.prefs      |  4 ++
 driver-jdbc/pom.xml                           | 18 ++++
 pom.xml                                       | 60 +++++++++++++++
 22 files changed, 238 insertions(+)
 create mode 100644 .project
 create mode 100644 .settings/org.eclipse.m2e.core.prefs
 create mode 100644 driver-core/.classpath
 create mode 100644 driver-core/.project
 create mode 100644 driver-core/.settings/org.eclipse.jdt.core.prefs
 create mode 100644 driver-core/.settings/org.eclipse.m2e.core.prefs
 create mode 100644 driver-core/pom.xml
 rename {src => driver-core/src/main/java}/com/datastax/driver/core/AuthInfo.java (100%)
 rename {src => driver-core/src/main/java}/com/datastax/driver/core/BoundStatement.java (100%)
 rename {src => driver-core/src/main/java}/com/datastax/driver/core/CQLRow.java (100%)
 rename {src => driver-core/src/main/java}/com/datastax/driver/core/Cluster.java (100%)
 rename {src => driver-core/src/main/java}/com/datastax/driver/core/Columns.java (100%)
 rename {src => driver-core/src/main/java}/com/datastax/driver/core/DataType.java (100%)
 rename {src => driver-core/src/main/java}/com/datastax/driver/core/PreparedStatement.java (100%)
 rename {src => driver-core/src/main/java}/com/datastax/driver/core/ResultSet.java (100%)
 rename {src => driver-core/src/main/java}/com/datastax/driver/core/Session.java (100%)
 create mode 100644 driver-jdbc/.classpath
 create mode 100644 driver-jdbc/.project
 create mode 100644 driver-jdbc/.settings/org.eclipse.jdt.core.prefs
 create mode 100644 driver-jdbc/.settings/org.eclipse.m2e.core.prefs
 create mode 100644 driver-jdbc/pom.xml
 create mode 100644 pom.xml

diff --git a/.project b/.project
new file mode 100644
index 00000000000..79ff633c046
--- /dev/null
+++ b/.project
@@ -0,0 +1,17 @@
+ + + cassandra-driver-parent + + + + + + org.eclipse.m2e.core.maven2Builder + + + + + + org.eclipse.m2e.core.maven2Nature + + diff --git a/.settings/org.eclipse.m2e.core.prefs b/.settings/org.eclipse.m2e.core.prefs new file mode 100644 index 00000000000..f897a7f1cb2 --- /dev/null +++ b/.settings/org.eclipse.m2e.core.prefs @@ -0,0 +1,4 @@ +activeProfiles= +eclipse.preferences.version=1 +resolveWorkspaceProjects=true +version=1 diff --git a/driver-core/.classpath b/driver-core/.classpath new file mode 100644 index 00000000000..fd7ad7fbda7 --- /dev/null +++ b/driver-core/.classpath @@ -0,0 +1,26 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/driver-core/.project b/driver-core/.project new file mode 100644 index 00000000000..3a2a8174f6f --- /dev/null +++ b/driver-core/.project @@ -0,0 +1,23 @@ + + + cassandra-driver-core + + + + + + org.eclipse.jdt.core.javabuilder + + + + + org.eclipse.m2e.core.maven2Builder + + + + + + org.eclipse.jdt.core.javanature + org.eclipse.m2e.core.maven2Nature + + diff --git a/driver-core/.settings/org.eclipse.jdt.core.prefs b/driver-core/.settings/org.eclipse.jdt.core.prefs new file mode 100644 index 00000000000..60105c1b951 --- /dev/null +++ b/driver-core/.settings/org.eclipse.jdt.core.prefs @@ -0,0 +1,5 @@ +eclipse.preferences.version=1 +org.eclipse.jdt.core.compiler.codegen.targetPlatform=1.6 +org.eclipse.jdt.core.compiler.compliance=1.6 +org.eclipse.jdt.core.compiler.problem.forbiddenReference=warning +org.eclipse.jdt.core.compiler.source=1.6 diff --git a/driver-core/.settings/org.eclipse.m2e.core.prefs b/driver-core/.settings/org.eclipse.m2e.core.prefs new file mode 100644 index 00000000000..f897a7f1cb2 --- /dev/null +++ b/driver-core/.settings/org.eclipse.m2e.core.prefs @@ -0,0 +1,4 @@ +activeProfiles= +eclipse.preferences.version=1 +resolveWorkspaceProjects=true +version=1 diff --git a/driver-core/pom.xml b/driver-core/pom.xml new file mode 100644 index 00000000000..3b0f0e698bc --- /dev/null +++ b/driver-core/pom.xml @@ -0,0 +1,23 @@ + + 4.0.0 + + com.datastax.cassandra + cassandra-driver-parent + 0.1.0-SNAPSHOT + + cassandra-driver-core + jar + Cassandra Java Driver - Core + http://www.datastax.com + + + + org.jboss.netty + netty + 3.2.7.Final + + + + + diff --git a/src/com/datastax/driver/core/AuthInfo.java b/driver-core/src/main/java/com/datastax/driver/core/AuthInfo.java similarity index 100% rename from src/com/datastax/driver/core/AuthInfo.java rename to driver-core/src/main/java/com/datastax/driver/core/AuthInfo.java diff --git a/src/com/datastax/driver/core/BoundStatement.java b/driver-core/src/main/java/com/datastax/driver/core/BoundStatement.java similarity index 100% rename from src/com/datastax/driver/core/BoundStatement.java rename to driver-core/src/main/java/com/datastax/driver/core/BoundStatement.java diff --git a/src/com/datastax/driver/core/CQLRow.java b/driver-core/src/main/java/com/datastax/driver/core/CQLRow.java similarity index 100% rename from src/com/datastax/driver/core/CQLRow.java rename to driver-core/src/main/java/com/datastax/driver/core/CQLRow.java diff --git a/src/com/datastax/driver/core/Cluster.java b/driver-core/src/main/java/com/datastax/driver/core/Cluster.java similarity index 100% rename from src/com/datastax/driver/core/Cluster.java rename to driver-core/src/main/java/com/datastax/driver/core/Cluster.java diff --git a/src/com/datastax/driver/core/Columns.java b/driver-core/src/main/java/com/datastax/driver/core/Columns.java similarity index 100% rename from 
src/com/datastax/driver/core/Columns.java rename to driver-core/src/main/java/com/datastax/driver/core/Columns.java diff --git a/src/com/datastax/driver/core/DataType.java b/driver-core/src/main/java/com/datastax/driver/core/DataType.java similarity index 100% rename from src/com/datastax/driver/core/DataType.java rename to driver-core/src/main/java/com/datastax/driver/core/DataType.java diff --git a/src/com/datastax/driver/core/PreparedStatement.java b/driver-core/src/main/java/com/datastax/driver/core/PreparedStatement.java similarity index 100% rename from src/com/datastax/driver/core/PreparedStatement.java rename to driver-core/src/main/java/com/datastax/driver/core/PreparedStatement.java diff --git a/src/com/datastax/driver/core/ResultSet.java b/driver-core/src/main/java/com/datastax/driver/core/ResultSet.java similarity index 100% rename from src/com/datastax/driver/core/ResultSet.java rename to driver-core/src/main/java/com/datastax/driver/core/ResultSet.java diff --git a/src/com/datastax/driver/core/Session.java b/driver-core/src/main/java/com/datastax/driver/core/Session.java similarity index 100% rename from src/com/datastax/driver/core/Session.java rename to driver-core/src/main/java/com/datastax/driver/core/Session.java diff --git a/driver-jdbc/.classpath b/driver-jdbc/.classpath new file mode 100644 index 00000000000..fd7ad7fbda7 --- /dev/null +++ b/driver-jdbc/.classpath @@ -0,0 +1,26 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/driver-jdbc/.project b/driver-jdbc/.project new file mode 100644 index 00000000000..5ffae0b11a1 --- /dev/null +++ b/driver-jdbc/.project @@ -0,0 +1,23 @@ + + + cassandra-driver-jdbc + + + + + + org.eclipse.jdt.core.javabuilder + + + + + org.eclipse.m2e.core.maven2Builder + + + + + + org.eclipse.jdt.core.javanature + org.eclipse.m2e.core.maven2Nature + + diff --git a/driver-jdbc/.settings/org.eclipse.jdt.core.prefs b/driver-jdbc/.settings/org.eclipse.jdt.core.prefs new file mode 100644 index 00000000000..60105c1b951 --- /dev/null +++ b/driver-jdbc/.settings/org.eclipse.jdt.core.prefs @@ -0,0 +1,5 @@ +eclipse.preferences.version=1 +org.eclipse.jdt.core.compiler.codegen.targetPlatform=1.6 +org.eclipse.jdt.core.compiler.compliance=1.6 +org.eclipse.jdt.core.compiler.problem.forbiddenReference=warning +org.eclipse.jdt.core.compiler.source=1.6 diff --git a/driver-jdbc/.settings/org.eclipse.m2e.core.prefs b/driver-jdbc/.settings/org.eclipse.m2e.core.prefs new file mode 100644 index 00000000000..f897a7f1cb2 --- /dev/null +++ b/driver-jdbc/.settings/org.eclipse.m2e.core.prefs @@ -0,0 +1,4 @@ +activeProfiles= +eclipse.preferences.version=1 +resolveWorkspaceProjects=true +version=1 diff --git a/driver-jdbc/pom.xml b/driver-jdbc/pom.xml new file mode 100644 index 00000000000..be3178d4123 --- /dev/null +++ b/driver-jdbc/pom.xml @@ -0,0 +1,18 @@ + + 4.0.0 + + com.datastax.cassandra + cassandra-driver-parent + 0.1.0-SNAPSHOT + + cassandra-driver-jdbc + jar + Cassandra Java Driver - JDBC + http://www.datastax.com + + + + + + diff --git a/pom.xml b/pom.xml new file mode 100644 index 00000000000..56b37b9ced2 --- /dev/null +++ b/pom.xml @@ -0,0 +1,60 @@ + + 4.0.0 + com.datastax.cassandra + cassandra-driver-parent + pom + 0.1.0-SNAPSHOT + Cassandra Java Driver + http://www.datastax.com + + + driver-core + driver-jdbc + + + + + + org.apache.cassandra + cassandra-all + 1.1.2 + + + + log4j + log4j + 1.2.17 + + + + org.slf4j + slf4j-log4j12 + 1.6.6 + + + + junit + junit + 4.10 + test + + + + + + + maven-compiler-plugin + 2.5.1 + + 1.6 + 1.6 + true + 
true + true + + + + + + From 61f32d0ca1c8d048f2ed5f8d6c1e3f4c23b41572 Mon Sep 17 00:00:00 2001 From: mfiguiere Date: Thu, 19 Jul 2012 16:02:08 +0200 Subject: [PATCH 004/719] Fix settings/classpath --- .project | 17 ------------ .settings/org.eclipse.m2e.core.prefs | 4 --- driver-core/.classpath | 26 ------------------- driver-core/.project | 23 ---------------- .../.settings/org.eclipse.jdt.core.prefs | 5 ---- .../.settings/org.eclipse.m2e.core.prefs | 4 --- driver-jdbc/.classpath | 26 ------------------- driver-jdbc/.project | 23 ---------------- .../.settings/org.eclipse.jdt.core.prefs | 5 ---- .../.settings/org.eclipse.m2e.core.prefs | 4 --- 10 files changed, 137 deletions(-) delete mode 100644 .project delete mode 100644 .settings/org.eclipse.m2e.core.prefs delete mode 100644 driver-core/.classpath delete mode 100644 driver-core/.project delete mode 100644 driver-core/.settings/org.eclipse.jdt.core.prefs delete mode 100644 driver-core/.settings/org.eclipse.m2e.core.prefs delete mode 100644 driver-jdbc/.classpath delete mode 100644 driver-jdbc/.project delete mode 100644 driver-jdbc/.settings/org.eclipse.jdt.core.prefs delete mode 100644 driver-jdbc/.settings/org.eclipse.m2e.core.prefs diff --git a/.project b/.project deleted file mode 100644 index 79ff633c046..00000000000 --- a/.project +++ /dev/null @@ -1,17 +0,0 @@ - - - cassandra-driver-parent - - - - - - org.eclipse.m2e.core.maven2Builder - - - - - - org.eclipse.m2e.core.maven2Nature - - diff --git a/.settings/org.eclipse.m2e.core.prefs b/.settings/org.eclipse.m2e.core.prefs deleted file mode 100644 index f897a7f1cb2..00000000000 --- a/.settings/org.eclipse.m2e.core.prefs +++ /dev/null @@ -1,4 +0,0 @@ -activeProfiles= -eclipse.preferences.version=1 -resolveWorkspaceProjects=true -version=1 diff --git a/driver-core/.classpath b/driver-core/.classpath deleted file mode 100644 index fd7ad7fbda7..00000000000 --- a/driver-core/.classpath +++ /dev/null @@ -1,26 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - diff --git a/driver-core/.project b/driver-core/.project deleted file mode 100644 index 3a2a8174f6f..00000000000 --- a/driver-core/.project +++ /dev/null @@ -1,23 +0,0 @@ - - - cassandra-driver-core - - - - - - org.eclipse.jdt.core.javabuilder - - - - - org.eclipse.m2e.core.maven2Builder - - - - - - org.eclipse.jdt.core.javanature - org.eclipse.m2e.core.maven2Nature - - diff --git a/driver-core/.settings/org.eclipse.jdt.core.prefs b/driver-core/.settings/org.eclipse.jdt.core.prefs deleted file mode 100644 index 60105c1b951..00000000000 --- a/driver-core/.settings/org.eclipse.jdt.core.prefs +++ /dev/null @@ -1,5 +0,0 @@ -eclipse.preferences.version=1 -org.eclipse.jdt.core.compiler.codegen.targetPlatform=1.6 -org.eclipse.jdt.core.compiler.compliance=1.6 -org.eclipse.jdt.core.compiler.problem.forbiddenReference=warning -org.eclipse.jdt.core.compiler.source=1.6 diff --git a/driver-core/.settings/org.eclipse.m2e.core.prefs b/driver-core/.settings/org.eclipse.m2e.core.prefs deleted file mode 100644 index f897a7f1cb2..00000000000 --- a/driver-core/.settings/org.eclipse.m2e.core.prefs +++ /dev/null @@ -1,4 +0,0 @@ -activeProfiles= -eclipse.preferences.version=1 -resolveWorkspaceProjects=true -version=1 diff --git a/driver-jdbc/.classpath b/driver-jdbc/.classpath deleted file mode 100644 index fd7ad7fbda7..00000000000 --- a/driver-jdbc/.classpath +++ /dev/null @@ -1,26 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - diff --git a/driver-jdbc/.project b/driver-jdbc/.project deleted file mode 100644 index 
5ffae0b11a1..00000000000 --- a/driver-jdbc/.project +++ /dev/null @@ -1,23 +0,0 @@ - - - cassandra-driver-jdbc - - - - - - org.eclipse.jdt.core.javabuilder - - - - - org.eclipse.m2e.core.maven2Builder - - - - - - org.eclipse.jdt.core.javanature - org.eclipse.m2e.core.maven2Nature - - diff --git a/driver-jdbc/.settings/org.eclipse.jdt.core.prefs b/driver-jdbc/.settings/org.eclipse.jdt.core.prefs deleted file mode 100644 index 60105c1b951..00000000000 --- a/driver-jdbc/.settings/org.eclipse.jdt.core.prefs +++ /dev/null @@ -1,5 +0,0 @@ -eclipse.preferences.version=1 -org.eclipse.jdt.core.compiler.codegen.targetPlatform=1.6 -org.eclipse.jdt.core.compiler.compliance=1.6 -org.eclipse.jdt.core.compiler.problem.forbiddenReference=warning -org.eclipse.jdt.core.compiler.source=1.6 diff --git a/driver-jdbc/.settings/org.eclipse.m2e.core.prefs b/driver-jdbc/.settings/org.eclipse.m2e.core.prefs deleted file mode 100644 index f897a7f1cb2..00000000000 --- a/driver-jdbc/.settings/org.eclipse.m2e.core.prefs +++ /dev/null @@ -1,4 +0,0 @@ -activeProfiles= -eclipse.preferences.version=1 -resolveWorkspaceProjects=true -version=1 From 9e49b834702db9dee0f945d06a4c0d189d76264a Mon Sep 17 00:00:00 2001 From: mfiguiere Date: Thu, 19 Jul 2012 16:07:12 +0200 Subject: [PATCH 005/719] Add gitignore --- .gitignore | 4 ++++ 1 file changed, 4 insertions(+) create mode 100644 .gitignore diff --git a/.gitignore b/.gitignore new file mode 100644 index 00000000000..f272b1c1b9b --- /dev/null +++ b/.gitignore @@ -0,0 +1,4 @@ +target/ +.settings +.classpath +.project From a23ec4cdee242bf7d0beea7c29ae77643cb54aa7 Mon Sep 17 00:00:00 2001 From: Sylvain Lebresne Date: Thu, 19 Jul 2012 16:18:01 +0200 Subject: [PATCH 006/719] Bits of implementation for Cluster --- src/com/datastax/driver/core/Cluster.java | 126 +++++++++++++++++++++- 1 file changed, 123 insertions(+), 3 deletions(-) diff --git a/src/com/datastax/driver/core/Cluster.java b/src/com/datastax/driver/core/Cluster.java index e7671b464e3..45401f065a6 100644 --- a/src/com/datastax/driver/core/Cluster.java +++ b/src/com/datastax/driver/core/Cluster.java @@ -1,12 +1,18 @@ package com.datastax.driver.core; +import java.net.InetAddress; +import java.net.InetSocketAddress; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.List; + /** * Informations and known state of a Cassandra cluster. *

* This is the main entry point of the driver. A simple example of access to a * Cassandra cluster would be: * - * Cluster cluster = Cluster.Builder().addContactPoint("192.168.0.1").build(); + * Cluster cluster = Cluster.Builder().addContactPoints("192.168.0.1").build(); * Session session = cluster.connect("db1"); * * for (CQLRow row : session.execute("SELECT * FROM table1")) @@ -24,6 +30,26 @@ */ public class Cluster { + private final List contactPoints; + + private Cluster(List contactPoints) + { + this.contactPoints = contactPoints; + } + + /** + * Build a new cluster based on the provided configuration. + * + * Note that for building a cluster programmatically, Cluster.Builder + * provides a slightly less verbose alternative. + * + * @param config the Cluster.Configuration to use + * @return the newly created Cluster instance + */ + public static Cluster buildFrom(Configuration config) { + return new Cluster(config.contactPoints()); + } + /** * Creates a new session on this cluster. * @@ -68,7 +94,101 @@ public Session connect(String keyspace, AuthInfo authInfo) { return null; } - public class Builder { - // TODO + public interface Configuration { + + public List contactPoints(); + } + + public static class Builder { + + private static class Config implements Configuration + { + // TODO: might not be the best default port, look at changing in C* + private static final int DEFAULT_PORT = 8000; + + private List addresses = new ArrayList(); + + public List contactPoints() { + return addresses; + } + } + + private final Config config = new Config(); + + /** + * Adds a contact point. + * + * Contact points are addresses of Cassandra nodes that the driver uses + * to discover the cluster topology. Only one contact point is required + * (the driver will retrieve the address of the other nodes + * automatically), but it is usually a good idea to provide more than + * one contact point, as if that unique contact point is not available, + * the driver won't be able to initialize itself correctly. + * + * @param address the address of the node to connect to + * @param port the port to connect to + * @return this Builder + * + * @throws IllegalArgumentException if the port parameter is outside + * the range of valid port values, or if the hostname parameter is + * null. + * @throws SecurityException if a security manager is present and + * permission to resolve the host name is denied. + */ + public Builder addContactPoint(String address, int port) { + config.addresses.add(new InetSocketAddress(address, port)); + return this; + } + + /** + * Add contact points using the default Cassandra port. + * + * @see addContactPoint for more details on contact points. + * + * @param addresses addresses of the nodes to add as contact point + * @return this Builder + * + * @throws SecurityException if a security manager is present and + * permission to resolve the host name is denied. + */ + public Builder addContactPoints(String... addresses) { + for (String address : addresses) + addContactPoint(address, config.DEFAULT_PORT); + return this; + } + + /** + * Add contact points using the default Cassandra port. + * + * @see addContactPoint for more details on contact points. + * + * @param addresses addresses of the nodes to add as contact point + * @return this Builder + * + * @throws SecurityException if a security manager is present and + * permission to resolve the host name is denied. + */ + public Builder addContactPoints(InetAddress... 
addresses) { + for (InetAddress address : addresses) + config.addresses.add(new InetSocketAddress(address, config.DEFAULT_PORT)); + return this; + } + + /** + * Add contact points. + * + * @see addContactPoint for more details on contact points. + * + * @param sockAddresses the socket addresses of the nodes to add as + * contact point + * @return this Builder + * + * @throws SecurityException if a security manager is present and + * permission to resolve the host name is denied. + */ + public Builder addContactPoints(InetSocketAddress... addresses) { + config.addresses.addAll(Arrays.asList(addresses)); + return this; + } } } From 269f896c4dd568406cd6ba3e63bc0ab57bd7b0a7 Mon Sep 17 00:00:00 2001 From: Sylvain Lebresne Date: Thu, 19 Jul 2012 18:32:44 +0200 Subject: [PATCH 007/719] Start supporting the binary protocol --- .../driver/core/internal/Connection.java | 157 ++++++++++++++++++ .../core/internal/ConnectionException.java | 23 +++ .../core/internal/TransportException.java | 13 ++ 3 files changed, 193 insertions(+) create mode 100644 driver-core/src/main/java/com/datastax/driver/core/internal/Connection.java create mode 100644 driver-core/src/main/java/com/datastax/driver/core/internal/ConnectionException.java create mode 100644 driver-core/src/main/java/com/datastax/driver/core/internal/TransportException.java diff --git a/driver-core/src/main/java/com/datastax/driver/core/internal/Connection.java b/driver-core/src/main/java/com/datastax/driver/core/internal/Connection.java new file mode 100644 index 00000000000..5a5a0d1802a --- /dev/null +++ b/driver-core/src/main/java/com/datastax/driver/core/internal/Connection.java @@ -0,0 +1,157 @@ +package com.datastax.driver.core.internal; + +import org.jboss.netty.channel.Channel; +import org.jboss.netty.channel.Channels; +import org.jboss.netty.channel.ChannelFuture; +import org.jboss.netty.channel.ChannelPipeline; +import org.jboss.netty.channel.ChannelPipelineFactory; + +import org.apache.cassandra.transport.*; + +/** + * A connection to a Cassandra Node. + */ +public class Connection extends org.apache.cassandra.transport.Connection +{ + public final InetSocketAddress address; + + private final ClientBootstrap bootstrap; + private final Channel channel; + private final Manager manager; + + private volatile ChannelFuture lastWriteFuture; + private volatile boolean shutdown; + + /** + * Create a new connection to a Cassandra node. + * + * The connection is open and initialized by the constructor. + * + * @throws ConnectionException if the connection attempts fails. + */ + private Connection(InetSocketAddress address, Manager manager) throws ConnectionException { + this.manager = manager; + this.bootstrap = manager.bootstrap(); + + bootstrap.setPipelineFactory(new PipelineFactory(this)); + + ChannelFuture future = bootstrap.connect(address); + + // Wait until the connection attempt succeeds or fails. 
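+        // The await below is deliberate: Netty's connect() is asynchronous, and
+        // awaitUninterruptibly() blocks the calling thread (ignoring interrupts)
+        // until the attempt completes. getChannel() returns a Channel object even
+        // when the attempt failed, hence the explicit isSuccess() check that follows.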
+ this.channel = future.awaitUninterruptibly().getChannel(); + if (!future.isSuccess()) + { + bootstrap.releaseExternalResources(); + throw new TransportException(address, "Cannot connect", future.getCause()); + } + } + + public Future write(Message.Request request) { + + if (shutdown) + throw new ConnectionException(address, "Connection has been closed"); + + request.attach(this); + inFlight.incrementAndGet(); + try { + + ChannelFuture future = channel.write(request); + future.awaitUninterruptibly(); + if (!future.isSuccess()) + throw new TransportException(address, "Error writting", future.getCause()); + + Message.Response msg = responseHandler.responses.take(); + if (msg instanceof ErrorMessage) + throw new RuntimeException(((ErrorMessage)msg).errorMsg); + return msg; + } finally { + inFlight.decrementAndGet(); + } + } + + public void close() { + + // Make sure all new writes are rejected + shutdown = true; + + try { + // Busy waiting, we just wait for request to be fully written, shouldn't take long + while (inFlight.get() > 0) { + time.sleep(10); + } + } catch (InterruptedException e) { + throw new RuntimeException(e); + } + + channel.close().awaitUninterruptibly(); + bootstrap.releaseExternalResources(); + } + + // Cruft needed because we reuse server side classes, but we don't care about it + public void validateNewMessage(Message.Type type) {}; + public void applyStateTransition(Message.Type requestType, Message.Type responseType) {}; + public ClientState clientState() { return null; }; + + public static class Manager { + + private ClientBoostrap boostrap() { + ClientBoostrap b = new ClientBootstrap(new NioClientSocketChannelFactory(bossExecutor, workerExecutor)); + + // TODO: handle this better (use SocketChannelConfig) + b.setOption("connectTimeoutMillis", 10000); + b.setOption("tcpNoDelay", true); + b.setOption("keepAlive", true); + + return b; + } + + } + + private static class PipelineFactory implements ChannelPipelineFactory + { + // Stateless handlers + private static final Message.ProtocolDecoder messageDecoder = new Message.ProtocolDecoder(); + private static final Message.ProtocolEncoder messageEncoder = new Message.ProtocolEncoder(); + private static final Frame.Decompressor frameDecompressor = new Frame.Decompressor(); + private static final Frame.Compressor frameCompressor = new Frame.Compressor(); + private static final Frame.Encoder frameEncoder = new Frame.Encoder(); + + // One more fallout of using server side classes; not a big deal + private static final org.apache.cassandra.transport.Connection.Connection.Tracker tracker; + static { + tracker = new org.apache.cassandra.transport.Connection.Connection.Tracker() { + public void addConnection(Channel ch, Connection connection) {} + public void closeAll() {} + }; + } + + private final org.apache.cassandra.transport.Connection.Connection.Factory cfactory; + + public PipelineFactory(final Connection connection) { + this.cfactory = new org.apache.cassandra.transport.Connection.Factory() { + public Connection newConnection() { + return connection; + } + }; + } + + public ChannelPipeline getPipeline() throws Exception { + ChannelPipeline pipeline = Channels.pipeline(); + + //pipeline.addLast("debug", new LoggingHandler()); + + pipeline.addLast("frameDecoder", new Frame.Decoder(tracker, cfactory)); + pipeline.addLast("frameEncoder", frameEncoder); + + pipeline.addLast("frameDecompressor", frameDecompressor); + pipeline.addLast("frameCompressor", frameCompressor); + + pipeline.addLast("messageDecoder", messageDecoder); + 
pipeline.addLast("messageEncoder", messageEncoder);
+
+            pipeline.addLast("handler", responseHandler);
+
+            return pipeline;
+        }
+    }
+}
diff --git a/driver-core/src/main/java/com/datastax/driver/core/internal/ConnectionException.java b/driver-core/src/main/java/com/datastax/driver/core/internal/ConnectionException.java
new file mode 100644
index 00000000000..30725a85209
--- /dev/null
+++ b/driver-core/src/main/java/com/datastax/driver/core/internal/ConnectionException.java
@@ -0,0 +1,23 @@
+package com.datastax.driver.core.internal;
+
+public class ConnectionException extends Exception
+{
+    public final InetSocketAddress address;
+
+    public ConnectionException(InetSocketAddress address, String msg, Throwable cause)
+    {
+        super(msg, cause);
+        this.address = address;
+    }
+
+    public ConnectionException(InetSocketAddress address, String msg)
+    {
+        super(msg);
+        this.address = address;
+    }
+
+    @Override
+    public String getMessage() {
+        return String.format("[%s] %s", address, super.getMessage());
+    }
+}
diff --git a/driver-core/src/main/java/com/datastax/driver/core/internal/TransportException.java b/driver-core/src/main/java/com/datastax/driver/core/internal/TransportException.java
new file mode 100644
index 00000000000..66240bb9456
--- /dev/null
+++ b/driver-core/src/main/java/com/datastax/driver/core/internal/TransportException.java
@@ -0,0 +1,13 @@
+package com.datastax.driver.core.internal;
+
+/**
+ * A connection exception that has to do with the transport itself, i.e. one
+ * that suggests the node is down.
+ */
+public class TransportException extends ConnectionException
+{
+    public TransportException(InetSocketAddress address, String msg, Throwable cause)
+    {
+        super(address, msg, cause);
+    }
+}

From c30886091bf40855a0a9706b6583606eab83a1d9 Mon Sep 17 00:00:00 2001
From: Sylvain Lebresne
Date: Fri, 20 Jul 2012 14:39:16 +0200
Subject: [PATCH 008/719] Small updates of the Session API

---
 .../com/datastax/driver/core/CQLQuery.java    |  9 +++++++
 .../com/datastax/driver/core/ResultSet.java   |  5 ++++
 .../com/datastax/driver/core/Session.java     | 25 +++++++++++++++++--
 3 files changed, 37 insertions(+), 2 deletions(-)
 create mode 100644 driver-core/src/main/java/com/datastax/driver/core/CQLQuery.java

diff --git a/driver-core/src/main/java/com/datastax/driver/core/CQLQuery.java b/driver-core/src/main/java/com/datastax/driver/core/CQLQuery.java
new file mode 100644
index 00000000000..efa8f67684e
--- /dev/null
+++ b/driver-core/src/main/java/com/datastax/driver/core/CQLQuery.java
@@ -0,0 +1,9 @@
+package com.datastax.driver.core;
+
+/**
+ * A marker interface for classes representing a CQL query.
+ *
+ * This interface requires no specific method, but the toString() method of a
+ * class implementing CQLQuery must return a CQL query string.
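+ * <p>
+ * A sketch of a possible implementing class (purely illustrative, not part
+ * of the driver):
+ * <pre>
+ *   class SelectAll implements CQLQuery {
+ *       private final String table;
+ *       SelectAll(String table) { this.table = table; }
+ *       public String toString() { return "SELECT * FROM " + table; }
+ *   }
+ * </pre>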
+ */
+public interface CQLQuery {}
diff --git a/driver-core/src/main/java/com/datastax/driver/core/ResultSet.java b/driver-core/src/main/java/com/datastax/driver/core/ResultSet.java
index 1c4a9bdfab2..03d17c4aaac 100644
--- a/driver-core/src/main/java/com/datastax/driver/core/ResultSet.java
+++ b/driver-core/src/main/java/com/datastax/driver/core/ResultSet.java
@@ -61,4 +61,9 @@ public List<CQLRow> fetchAll() {
     public Iterator<CQLRow> iterator() {
         return null;
     }
+
+    public static class Future implements java.util.concurrent.Future
+    {
+        // TODO
+    }
 }
diff --git a/driver-core/src/main/java/com/datastax/driver/core/Session.java b/driver-core/src/main/java/com/datastax/driver/core/Session.java
index 25f455c5633..99f92d88567 100644
--- a/driver-core/src/main/java/com/datastax/driver/core/Session.java
+++ b/driver-core/src/main/java/com/datastax/driver/core/Session.java
@@ -46,6 +46,13 @@ public ResultSet execute(String query) {
         return null;
     }
 
+    /**
+     * @see #execute(String)
+     */
+    public ResultSet execute(CQLQuery query) {
+        return null;
+    }
+
     /**
      * Execute the provided query asynchronously.
      *
      * This method does not block. It returns as soon as the query has been
      * successfully sent to a Cassandra node. In particular, returning from
      * this method does not guarantee that the query is valid. Any exception
      * pertaining to the failure of the query will be thrown by the first
      * access to the {@link ResultSet}.
      *
      * Note that for queries that don't return a result (INSERT, UPDATE and
      * DELETE), you will need to access the ResultSet (i.e. call any of its
      * methods) to make sure the query was successful.
      *
      * @param query the CQL query to execute
      * @return the result of the query. That result will never be null but can
      * be empty and will be for any non SELECT query.
      */
-    public ResultSet executeAsync(String query) {
+    public ResultSet.Future executeAsync(String query) {
         return null;
     }
 
+    /**
+     * @see #executeAsync(String)
+     */
+    public ResultSet.Future executeAsync(CQLQuery query) {
+        return null;
+    }
+
     /**
      * Prepare the provided query.
      *
      * @param query the CQL query to prepare
      * @return the prepared statement corresponding to query.
      */
     public PreparedStatement prepare(String query) {
         return null;
     }
 
+    /**
+     * @see #prepare(String)
+     */
+    public PreparedStatement prepare(CQLQuery query) {
+        return null;
+    }
+
     /**
      * Execute a prepared statement that has had values provided for its bound
      * variables.
      *
      * This method performs like {@link #execute} but for prepared statements.
      * It blocks until at least some result has been received from the
      * database.
      *
      * @param stmt the prepared statement with values for its bound variables.
      * @return the result of the query. That result will never be null but can
      * be empty and will be for any non SELECT query.
*/ - public ResultSet executePreparedAsync(BoundStatement stmt) { + public ResultSet.Future executePreparedAsync(BoundStatement stmt) { return null; } } From 7b7199b51d84c7ddb171dcb943f00e68805ddb10 Mon Sep 17 00:00:00 2001 From: Sylvain Lebresne Date: Fri, 20 Jul 2012 18:11:06 +0200 Subject: [PATCH 009/719] Create connections successfully --- driver-core/pom.xml | 31 ++++ .../com/datastax/driver/core/Cluster.java | 66 ++++++--- .../com/datastax/driver/core/Columns.java | 30 ++-- .../com/datastax/driver/core/ResultSet.java | 2 +- .../com/datastax/driver/core/Session.java | 41 ++++- .../{internal => transport}/Connection.java | 140 ++++++++++++++---- .../ConnectionException.java | 10 +- .../TransportException.java | 9 +- .../driver/core/utils/SimpleFuture.java | 49 ++++++ .../com/datastax/driver/core/SessionTest.java | 33 +++++ pom.xml | 2 +- 11 files changed, 332 insertions(+), 81 deletions(-) rename driver-core/src/main/java/com/datastax/driver/core/{internal => transport}/Connection.java (50%) rename driver-core/src/main/java/com/datastax/driver/core/{internal => transport}/ConnectionException.java (73%) rename driver-core/src/main/java/com/datastax/driver/core/{internal => transport}/TransportException.java (62%) create mode 100644 driver-core/src/main/java/com/datastax/driver/core/utils/SimpleFuture.java create mode 100644 driver-core/src/test/java/com/datastax/driver/core/SessionTest.java diff --git a/driver-core/pom.xml b/driver-core/pom.xml index 3b0f0e698bc..e4c4e63e79c 100644 --- a/driver-core/pom.xml +++ b/driver-core/pom.xml @@ -17,7 +17,38 @@ netty 3.2.7.Final + + + com.google.guava + guava + 12.0 + + + + org.apache.cassandra + cassandra-thrift + 1.2.0-SNAPSHOT + + + + org.apache.thrift + libthrift + 0.7.0 + + + + + org.apache.maven.plugins + maven-surefire-plugin + 2.5 + + false + + + + + diff --git a/driver-core/src/main/java/com/datastax/driver/core/Cluster.java b/driver-core/src/main/java/com/datastax/driver/core/Cluster.java index 45401f065a6..4db018bb201 100644 --- a/driver-core/src/main/java/com/datastax/driver/core/Cluster.java +++ b/driver-core/src/main/java/com/datastax/driver/core/Cluster.java @@ -6,13 +6,15 @@ import java.util.Arrays; import java.util.List; +import com.datastax.driver.core.transport.ConnectionException; + /** * Informations and known state of a Cassandra cluster. *

* This is the main entry point of the driver. A simple example of access to a * Cassandra cluster would be: * - * Cluster cluster = Cluster.Builder().addContactPoints("192.168.0.1").build(); + * Cluster cluster = new Cluster.Builder().addContactPoints("192.168.0.1").build(); * Session session = cluster.connect("db1"); * * for (CQLRow row : session.execute("SELECT * FROM table1")) @@ -32,8 +34,7 @@ public class Cluster { private final List contactPoints; - private Cluster(List contactPoints) - { + private Cluster(List contactPoints) { this.contactPoints = contactPoints; } @@ -41,7 +42,7 @@ private Cluster(List contactPoints) * Build a new cluster based on the provided configuration. * * Note that for building a cluster programmatically, Cluster.Builder - * provides a slightly less verbose alternative. + * provides a slightly less verbose shortcut with {@link Builder#build}. * * @param config the Cluster.Configuration to use * @return the newly created Cluster instance @@ -56,7 +57,12 @@ public static Cluster buildFrom(Configuration config) { * @return a new session on this cluster sets to no keyspace. */ public Session connect() { - return null; + try { + return new Session(contactPoints); + } catch (ConnectionException e) { + // TODO: Figure what exception we want to return (but maybe the ConnectionException is good enough) + throw new RuntimeException(e); + } } /** @@ -74,9 +80,9 @@ public Session connect(AuthInfo authInfo) { * Creates a new session on this cluster and sets a keyspace to use. * * @param keyspaceName The name of the keyspace to use for the created - * Session. This can be later changed using {@link Session#use}. + * {@code Session}. This can be later changed using {@link Session#use}. * @return a new session on this cluster sets to keyspace - * keyspaceName. + * {@code keyspaceName}. */ public Session connect(String keyspace) { return null; @@ -88,7 +94,7 @@ public Session connect(String keyspace) { * @param authInfo The authorisation credentials to use to connect to * Cassandra nodes. * @return a new session on this cluster sets to keyspace - * keyspaceName. + * {@code keyspaceName}. */ public Session connect(String keyspace, AuthInfo authInfo) { return null; @@ -99,22 +105,17 @@ public interface Configuration { public List contactPoints(); } - public static class Builder { + public static class Builder implements Configuration { - private static class Config implements Configuration - { - // TODO: might not be the best default port, look at changing in C* - private static final int DEFAULT_PORT = 8000; + // TODO: might not be the best default port, look at changing in C* + private static final int DEFAULT_PORT = 8000; - private List addresses = new ArrayList(); + private List addresses = new ArrayList(); - public List contactPoints() { - return addresses; - } + public List contactPoints() { + return addresses; } - private final Config config = new Config(); - /** * Adds a contact point. * @@ -136,10 +137,25 @@ public List contactPoints() { * permission to resolve the host name is denied. */ public Builder addContactPoint(String address, int port) { - config.addresses.add(new InetSocketAddress(address, port)); + this.addresses.add(new InetSocketAddress(address, port)); return this; } + /** + * Add a contact point using the default Cassandra port. + * + * @see addContactPoint for more details on contact points. 
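+         * <p>
+         * A sketch of intended use (the address is a placeholder):
+         * <pre>
+         *   Cluster cluster = new Cluster.Builder()
+         *                         .addContactPoint("192.168.0.1")
+         *                         .build();
+         * </pre>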
+ * + * @param address the address of the node to add as contact point + * @return this Builder + * + * @throws SecurityException if a security manager is present and + * permission to resolve the host name is denied. + */ + public Builder addContactPoint(String address) { + return addContactPoint(address, DEFAULT_PORT); + } + /** * Add contact points using the default Cassandra port. * @@ -153,7 +169,7 @@ public Builder addContactPoint(String address, int port) { */ public Builder addContactPoints(String... addresses) { for (String address : addresses) - addContactPoint(address, config.DEFAULT_PORT); + addContactPoint(address, DEFAULT_PORT); return this; } @@ -170,7 +186,7 @@ public Builder addContactPoints(String... addresses) { */ public Builder addContactPoints(InetAddress... addresses) { for (InetAddress address : addresses) - config.addresses.add(new InetSocketAddress(address, config.DEFAULT_PORT)); + this.addresses.add(new InetSocketAddress(address, DEFAULT_PORT)); return this; } @@ -187,8 +203,12 @@ public Builder addContactPoints(InetAddress... addresses) { * permission to resolve the host name is denied. */ public Builder addContactPoints(InetSocketAddress... addresses) { - config.addresses.addAll(Arrays.asList(addresses)); + this.addresses.addAll(Arrays.asList(addresses)); return this; } + + public Cluster build() { + return Cluster.buildFrom(this); + } } } diff --git a/driver-core/src/main/java/com/datastax/driver/core/Columns.java b/driver-core/src/main/java/com/datastax/driver/core/Columns.java index b07c7a19b80..4fef57260c9 100644 --- a/driver-core/src/main/java/com/datastax/driver/core/Columns.java +++ b/driver-core/src/main/java/com/datastax/driver/core/Columns.java @@ -7,7 +7,7 @@ public class Columns { /** - * Returns the number of columns described by this Columns + * Returns the number of columns described by this {@code Columns} * instance. * * @return the number of columns described by this metadata. @@ -17,63 +17,63 @@ public int count() { } /** - * Returns the name of the ith column in this metadata. + * Returns the name of the {@code i}th column in this metadata. * - * @return the name of the ith column in this metadata. + * @return the name of the {@code i}th column in this metadata. */ public String name(int i) { return null; } /** - * Returns the type of the ith column in this metadata. + * Returns the type of the {@code i}th column in this metadata. * - * @return the type of the ith column in this metadata. + * @return the type of the {@code i}th column in this metadata. */ public DataType type(int i) { return null; } /** - * Returns the type of column name in this metadata. + * Returns the type of column {@code name} in this metadata. * - * @return the type of column name in this metadata. + * @return the type of column {@code name} in this metadata. */ public DataType type(String name) { return null; } /** - * Returns the keyspace of the ith column in this metadata. + * Returns the keyspace of the {@code i}th column in this metadata. * - * @return the keyspace of the ith column in this metadata. + * @return the keyspace of the {@code i}th column in this metadata. */ public String keyspace(int i) { return null; } /** - * Returns the keyspace of column name in this metadata. + * Returns the keyspace of column {@code name} in this metadata. * - * @return the keyspace of column name in this metadata. + * @return the keyspace of column {@code name} in this metadata. 
*/ public String keyspace(String name) { return null; } /** - * Returns the table of the ith column in this metadata. + * Returns the table of the {@code i}th column in this metadata. * - * @return the table of the ith column in this metadata. + * @return the table of the {@code i}th column in this metadata. */ public String table(int i) { return null; } /** - * Returns the table of column name in this metadata. + * Returns the table of column {@code name} in this metadata. * - * @return the table of column name in this metadata. + * @return the table of column {@code name} in this metadata. */ public String table(String name) { return null; diff --git a/driver-core/src/main/java/com/datastax/driver/core/ResultSet.java b/driver-core/src/main/java/com/datastax/driver/core/ResultSet.java index 03d17c4aaac..6e0b1a9fae5 100644 --- a/driver-core/src/main/java/com/datastax/driver/core/ResultSet.java +++ b/driver-core/src/main/java/com/datastax/driver/core/ResultSet.java @@ -62,7 +62,7 @@ public Iterator iterator() { return null; } - public static class Future implements java.util.concurrent.Future + public static class Future // implements java.util.concurrent.Future { // TODO } diff --git a/driver-core/src/main/java/com/datastax/driver/core/Session.java b/driver-core/src/main/java/com/datastax/driver/core/Session.java index 99f92d88567..d1487f0d6df 100644 --- a/driver-core/src/main/java/com/datastax/driver/core/Session.java +++ b/driver-core/src/main/java/com/datastax/driver/core/Session.java @@ -1,5 +1,17 @@ package com.datastax.driver.core; +import java.util.List; +import java.net.InetSocketAddress; + +import com.datastax.driver.core.transport.Connection; +import com.datastax.driver.core.transport.ConnectionException; + +import org.apache.cassandra.transport.Message; +import org.apache.cassandra.transport.messages.*; + +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + /** * A session holds connections to a Cassandra cluster, allowing to query it. * @@ -14,6 +26,17 @@ */ public class Session { + private static final Logger logger = LoggerFactory.getLogger(Session.class); + + // TODO: we can do better :) + private final Connection connection; + + // Package protected, only Cluster should construct that. + Session(List addresses) throws ConnectionException { + Connection.Factory factory = new Connection.Factory(addresses.get(0)); + this.connection = factory.open(); + } + /** * Sets the current keyspace to use for this session. * @@ -43,14 +66,24 @@ public Session use(String keyspace) { * be empty and will be for any non SELECT query. */ public ResultSet execute(String query) { - return null; + + // TODO: this is not the real deal, just for tests + try { + QueryMessage msg = new QueryMessage(query); + Connection.Future future = connection.write(msg); + Message.Response response = future.get(); + logger.info("Got " + response); + return null; + } catch (Exception e) { + throw new RuntimeException(e); + } } /** * @see #execute(String) */ public ResultSet execute(CQLQuery query) { - return null; + return execute(query.toString()); } /** @@ -85,7 +118,7 @@ public ResultSet.Future executeAsync(CQLQuery query) { * Prepare the provided query. * * @param query the CQL query to prepare - * @return the prepared statement corresponding to query. + * @return the prepared statement corresponding to {@code query}. 
*/ public PreparedStatement prepare(String query) { return null; @@ -95,7 +128,7 @@ public PreparedStatement prepare(String query) { * @see #prepare(String) */ public PreparedStatement prepare(CQLQuery query) { - return null; + return prepare(query.toString()); } /** diff --git a/driver-core/src/main/java/com/datastax/driver/core/internal/Connection.java b/driver-core/src/main/java/com/datastax/driver/core/transport/Connection.java similarity index 50% rename from driver-core/src/main/java/com/datastax/driver/core/internal/Connection.java rename to driver-core/src/main/java/com/datastax/driver/core/transport/Connection.java index 5a5a0d1802a..059939bedb6 100644 --- a/driver-core/src/main/java/com/datastax/driver/core/internal/Connection.java +++ b/driver-core/src/main/java/com/datastax/driver/core/transport/Connection.java @@ -1,13 +1,19 @@ -package com.datastax.driver.core.internal; +package com.datastax.driver.core.transport; -import org.jboss.netty.channel.Channel; -import org.jboss.netty.channel.Channels; -import org.jboss.netty.channel.ChannelFuture; -import org.jboss.netty.channel.ChannelPipeline; -import org.jboss.netty.channel.ChannelPipelineFactory; +import com.datastax.driver.core.utils.SimpleFuture; +import java.net.InetSocketAddress; +import java.util.concurrent.Executors; +import java.util.concurrent.ExecutorService; +import java.util.concurrent.atomic.AtomicInteger; + +import org.apache.cassandra.service.ClientState; import org.apache.cassandra.transport.*; +import org.jboss.netty.bootstrap.ClientBootstrap; +import org.jboss.netty.channel.*; +import org.jboss.netty.channel.socket.nio.NioClientSocketChannelFactory; + /** * A connection to a Cassandra Node. */ @@ -17,9 +23,10 @@ public class Connection extends org.apache.cassandra.transport.Connection private final ClientBootstrap bootstrap; private final Channel channel; - private final Manager manager; + private final Factory factory; + private final Dispatcher dispatcher = new Dispatcher(); - private volatile ChannelFuture lastWriteFuture; + private AtomicInteger inFlight = new AtomicInteger(0); private volatile boolean shutdown; /** @@ -29,9 +36,10 @@ public class Connection extends org.apache.cassandra.transport.Connection * * @throws ConnectionException if the connection attempts fails. */ - private Connection(InetSocketAddress address, Manager manager) throws ConnectionException { - this.manager = manager; - this.bootstrap = manager.bootstrap(); + private Connection(InetSocketAddress address, Factory factory) throws ConnectionException { + this.address = address; + this.factory = factory; + this.bootstrap = factory.bootstrap(); bootstrap.setPipelineFactory(new PipelineFactory(this)); @@ -46,38 +54,52 @@ private Connection(InetSocketAddress address, Manager manager) throws Connection } } - public Future write(Message.Request request) { - + /** + * Write a request on this connection. 
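+ *
+ * Usage sketch (mirroring what {@code Session.execute} does in this commit):
+ * <pre>
+ *   Connection.Future future = connection.write(new QueryMessage(query));
+ *   Message.Response response = future.get();
+ * </pre>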
+ * + * @param request the request to send + * @return a future on the server response + * + * @throws ConnectionException if the connection is closed + * @throws TransportException if an I/O error while sending the request + */ + public Future write(Message.Request request) throws ConnectionException { if (shutdown) throw new ConnectionException(address, "Connection has been closed"); request.attach(this); - inFlight.incrementAndGet(); + + // We only support synchronous mode so far + if (!inFlight.compareAndSet(0, 1)) + throw new RuntimeException("Busy connection (this should not happen, please open a bug report if you see this)"); + try { - ChannelFuture future = channel.write(request); - future.awaitUninterruptibly(); - if (!future.isSuccess()) - throw new TransportException(address, "Error writting", future.getCause()); + Future future = new Future(this); + + // TODO: This assumes the connection is used synchronously, fix that at some point + dispatcher.setFuture(future); + + ChannelFuture writeFuture = channel.write(request); + writeFuture.awaitUninterruptibly(); + if (!writeFuture.isSuccess()) + throw new TransportException(address, "Error writting", writeFuture.getCause()); + + return future; - Message.Response msg = responseHandler.responses.take(); - if (msg instanceof ErrorMessage) - throw new RuntimeException(((ErrorMessage)msg).errorMsg); - return msg; } finally { inFlight.decrementAndGet(); } } public void close() { - // Make sure all new writes are rejected shutdown = true; try { // Busy waiting, we just wait for request to be fully written, shouldn't take long while (inFlight.get() > 0) { - time.sleep(10); + Thread.sleep(10); } } catch (InterruptedException e) { throw new RuntimeException(e); @@ -92,10 +114,30 @@ public void close() { public void applyStateTransition(Message.Type requestType, Message.Type responseType) {}; public ClientState clientState() { return null; }; - public static class Manager { + public static class Factory { + + private final ExecutorService bossExecutor = Executors.newCachedThreadPool(); + private final ExecutorService workerExecutor = Executors.newCachedThreadPool(); + + private final InetSocketAddress address; + + public Factory(InetSocketAddress address) { + this.address = address; + } + + /** + * Opens a new connection to the node this factory points to. + * + * @return the newly created (and initialized) connection. + * + * @throws ConnectionException if connection attempt fails. 
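+ *
+ * Sketch (this is how {@code Session} obtains its single connection in
+ * this commit):
+ * <pre>
+ *   Connection.Factory factory = new Connection.Factory(address);
+ *   Connection connection = factory.open();
+ * </pre>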
+ */ + public Connection open() throws ConnectionException { + return new Connection(address, this); + } - private ClientBoostrap boostrap() { - ClientBoostrap b = new ClientBootstrap(new NioClientSocketChannelFactory(bossExecutor, workerExecutor)); + private ClientBootstrap bootstrap() { + ClientBootstrap b = new ClientBootstrap(new NioClientSocketChannelFactory(bossExecutor, workerExecutor)); // TODO: handle this better (use SocketChannelConfig) b.setOption("connectTimeoutMillis", 10000); @@ -107,6 +149,38 @@ private ClientBoostrap boostrap() { } + private class Dispatcher extends SimpleChannelUpstreamHandler { + + private volatile Future future; + private volatile Exception exception; + + public void setFuture(Future future) { + this.future = future; + } + + @Override + public void messageReceived(ChannelHandlerContext ctx, MessageEvent e) { + // TODO: we should do something better than just throwing an exception + if (future == null) + throw new RuntimeException("Not future set"); + + if (!(e.getMessage() instanceof Message.Response)) { + future.setException(new TransportException(address, "Unexpected message received: " + e.getMessage())); + } else { + future.set((Message.Response)e.getMessage()); + } + future = null; + } + } + + public static class Future extends SimpleFuture { + private final Connection connection; + + public Future(Connection connection) { + this.connection = connection; + } + } + private static class PipelineFactory implements ChannelPipelineFactory { // Stateless handlers @@ -117,17 +191,19 @@ private static class PipelineFactory implements ChannelPipelineFactory private static final Frame.Encoder frameEncoder = new Frame.Encoder(); // One more fallout of using server side classes; not a big deal - private static final org.apache.cassandra.transport.Connection.Connection.Tracker tracker; + private static final org.apache.cassandra.transport.Connection.Tracker tracker; static { - tracker = new org.apache.cassandra.transport.Connection.Connection.Tracker() { - public void addConnection(Channel ch, Connection connection) {} + tracker = new org.apache.cassandra.transport.Connection.Tracker() { + public void addConnection(Channel ch, org.apache.cassandra.transport.Connection connection) {} public void closeAll() {} }; } - private final org.apache.cassandra.transport.Connection.Connection.Factory cfactory; + private final Connection connection; + private final org.apache.cassandra.transport.Connection.Factory cfactory; public PipelineFactory(final Connection connection) { + this.connection = connection; this.cfactory = new org.apache.cassandra.transport.Connection.Factory() { public Connection newConnection() { return connection; @@ -149,7 +225,7 @@ public ChannelPipeline getPipeline() throws Exception { pipeline.addLast("messageDecoder", messageDecoder); pipeline.addLast("messageEncoder", messageEncoder); - pipeline.addLast("handler", responseHandler); + pipeline.addLast("dispatcher", connection.dispatcher); return pipeline; } diff --git a/driver-core/src/main/java/com/datastax/driver/core/internal/ConnectionException.java b/driver-core/src/main/java/com/datastax/driver/core/transport/ConnectionException.java similarity index 73% rename from driver-core/src/main/java/com/datastax/driver/core/internal/ConnectionException.java rename to driver-core/src/main/java/com/datastax/driver/core/transport/ConnectionException.java index 30725a85209..4797408ede1 100644 --- a/driver-core/src/main/java/com/datastax/driver/core/internal/ConnectionException.java +++ 
b/driver-core/src/main/java/com/datastax/driver/core/transport/ConnectionException.java @@ -1,4 +1,6 @@ -package com.datastax.driver.core.internal; +package com.datastax.driver.core.transport; + +import java.net.InetSocketAddress; public class ConnectionException extends Exception { @@ -6,18 +8,18 @@ public class ConnectionException extends Exception public ConnectionException(InetSocketAddress address, String msg, Throwable cause) { - this(msg, cause); + super(msg, cause); this.address = address; } public ConnectionException(InetSocketAddress address, String msg) { - this(msg); + super(msg); this.address = address; } @Override - public getMessage() { + public String getMessage() { return String.format("[%s] %s", address, super.getMessage()); } } diff --git a/driver-core/src/main/java/com/datastax/driver/core/internal/TransportException.java b/driver-core/src/main/java/com/datastax/driver/core/transport/TransportException.java similarity index 62% rename from driver-core/src/main/java/com/datastax/driver/core/internal/TransportException.java rename to driver-core/src/main/java/com/datastax/driver/core/transport/TransportException.java index 66240bb9456..1f7b123966b 100644 --- a/driver-core/src/main/java/com/datastax/driver/core/internal/TransportException.java +++ b/driver-core/src/main/java/com/datastax/driver/core/transport/TransportException.java @@ -1,4 +1,6 @@ -package com.datastax.driver.core.internal; +package com.datastax.driver.core.transport; + +import java.net.InetSocketAddress; /** * A connection exception that has to do with the transport itself, i.e. that @@ -10,4 +12,9 @@ public TransportException(InetSocketAddress address, String msg, Throwable cause { super(address, msg, cause); } + + public TransportException(InetSocketAddress address, String msg) + { + super(address, msg); + } } diff --git a/driver-core/src/main/java/com/datastax/driver/core/utils/SimpleFuture.java b/driver-core/src/main/java/com/datastax/driver/core/utils/SimpleFuture.java new file mode 100644 index 00000000000..8ce8f02d24c --- /dev/null +++ b/driver-core/src/main/java/com/datastax/driver/core/utils/SimpleFuture.java @@ -0,0 +1,49 @@ +package com.datastax.driver.core.utils; + +import com.google.common.util.concurrent.AbstractFuture; + +/** + * A simple future that can be set to a value. + * + * Note: this is equivalent to guava SettableFuture, but the latter is a final + * class which is dumb. + */ +public class SimpleFuture extends AbstractFuture { + + /** + * Creates a new {@code SimpleFuture}. + */ + public static SimpleFuture create() { + return new SimpleFuture(); + } + + protected SimpleFuture() {} + + /** + * Sets the value of this future. This method will return {@code true} if + * the value was successfully set, or {@code false} if the future has already + * been set or cancelled. + * + * @param value the value the future should hold. + * @return true if the value was successfully set. + */ + @Override + public boolean set(V value) { + return super.set(value); + } + + /** + * Sets the future to having failed with the given exception. This exception + * will be wrapped in an {@code ExecutionException} and thrown from the {@code + * get} methods. This method will return {@code true} if the exception was + * successfully set, or {@code false} if the future has already been set or + * cancelled. + * + * @param throwable the exception the future should hold. + * @return true if the exception was successfully set. 
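+ *
+ * Illustrative sketch only (the names below are not part of this patch): a
+ * producer completes the future while a consumer blocks on {@code get()}:
+ * <pre>
+ *   SimpleFuture future = SimpleFuture.create();
+ *   // producer: future.set(response), or future.setException(error) on failure
+ *   // consumer: future.get() blocks until one of the two happens
+ * </pre>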
+ */ + @Override + public boolean setException(Throwable throwable) { + return super.setException(throwable); + } +} diff --git a/driver-core/src/test/java/com/datastax/driver/core/SessionTest.java b/driver-core/src/test/java/com/datastax/driver/core/SessionTest.java new file mode 100644 index 00000000000..ba5c2126c54 --- /dev/null +++ b/driver-core/src/test/java/com/datastax/driver/core/SessionTest.java @@ -0,0 +1,33 @@ +package com.datastax.driver.core; + +import org.junit.BeforeClass; +import org.junit.Test; +import static junit.framework.Assert.*; + +import org.apache.log4j.ConsoleAppender; +import org.apache.log4j.Level; +import org.apache.log4j.Logger; +import org.apache.log4j.PatternLayout; + +public class SessionTest { + + // I really think we should make sure the library doesn't complain about + // log4j by default, but for now let's deal with it locally + @BeforeClass + public static void classSetUp() { + Logger rootLogger = Logger.getRootLogger(); + if (!rootLogger.getAllAppenders().hasMoreElements()) { + rootLogger.setLevel(Level.INFO); + rootLogger.addAppender(new ConsoleAppender(new PatternLayout("%-5p [%t]: %m%n"))); + } + } + + @Test + public void SimpleExecuteTest() throws Exception { + + Cluster cluster = new Cluster.Builder().addContactPoint("localhost").build(); + Session session = cluster.connect(); + + session.execute("SELECT * FROM system.local"); + } +} diff --git a/pom.xml b/pom.xml index 56b37b9ced2..900cf8120f7 100644 --- a/pom.xml +++ b/pom.xml @@ -18,7 +18,7 @@ org.apache.cassandra cassandra-all - 1.1.2 + 1.2.0-SNAPSHOT From c5e785bbaf4ca7e0a245ee49b14a6daf837897e8 Mon Sep 17 00:00:00 2001 From: Sylvain Lebresne Date: Fri, 20 Jul 2012 18:26:22 +0200 Subject: [PATCH 010/719] Initialize connections --- .../driver/core/transport/Connection.java | 36 ++++++++++++++++++- 1 file changed, 35 insertions(+), 1 deletion(-) diff --git a/driver-core/src/main/java/com/datastax/driver/core/transport/Connection.java b/driver-core/src/main/java/com/datastax/driver/core/transport/Connection.java index 059939bedb6..1f548733c8a 100644 --- a/driver-core/src/main/java/com/datastax/driver/core/transport/Connection.java +++ b/driver-core/src/main/java/com/datastax/driver/core/transport/Connection.java @@ -3,12 +3,15 @@ import com.datastax.driver.core.utils.SimpleFuture; import java.net.InetSocketAddress; +import java.util.Collections; +import java.util.concurrent.ExecutionException; import java.util.concurrent.Executors; import java.util.concurrent.ExecutorService; import java.util.concurrent.atomic.AtomicInteger; import org.apache.cassandra.service.ClientState; import org.apache.cassandra.transport.*; +import org.apache.cassandra.transport.messages.*; import org.jboss.netty.bootstrap.ClientBootstrap; import org.jboss.netty.channel.*; @@ -19,6 +22,9 @@ */ public class Connection extends org.apache.cassandra.transport.Connection { + // TODO: that doesn't belong here + private static final String CQL_VERSION = "3.0.0"; + public final InetSocketAddress address; private final ClientBootstrap bootstrap; @@ -34,7 +40,8 @@ public class Connection extends org.apache.cassandra.transport.Connection * * The connection is open and initialized by the constructor. * - * @throws ConnectionException if the connection attempts fails. + * @throws ConnectionException if the connection attempts fails or is + * refused by the server. 
*/ private Connection(InetSocketAddress address, Factory factory) throws ConnectionException { this.address = address; @@ -52,6 +59,33 @@ private Connection(InetSocketAddress address, Factory factory) throws Connection bootstrap.releaseExternalResources(); throw new TransportException(address, "Cannot connect", future.getCause()); } + + initializeTransport(); + } + + private void initializeTransport() throws ConnectionException { + + // TODO: we will need to get fancy about handling protocol version at + // some point, but keep it simple for now. + // TODO: we need to allow setting the compression to use + StartupMessage startup = new StartupMessage(CQL_VERSION, Collections.emptyMap()); + try { + Message.Response response = write(startup).get(); + switch (response.type) { + case READY: + break; + case ERROR: + throw new TransportException(address, String.format("Error initializing connection: %s", ((ErrorMessage)response).errorMsg)); + case AUTHENTICATE: + throw new TransportException(address, "Authentication required but not yet supported"); + default: + throw new TransportException(address, String.format("Unexpected %s response message from server to a STARTUP message", response.type)); + } + } catch (ExecutionException e) { + throw new ConnectionException(address, "Unexpected error during transport initialization", e.getCause()); + } catch (InterruptedException e) { + throw new RuntimeException(); + } } /** From 2a1b81d7a672385b3d4bc77673d1b8aebca4cf86 Mon Sep 17 00:00:00 2001 From: Sylvain Lebresne Date: Mon, 23 Jul 2012 15:03:21 +0200 Subject: [PATCH 011/719] Implements most parts of ResultSet --- .../java/com/datastax/driver/core/CQLRow.java | 228 +++++++++++++++++- .../com/datastax/driver/core/Columns.java | 122 +++++++++- .../com/datastax/driver/core/DataType.java | 114 ++++++++- .../com/datastax/driver/core/ResultSet.java | 75 +++++- .../com/datastax/driver/core/Session.java | 10 +- .../datastax/driver/core/transport/Codec.java | 86 +++++++ .../com/datastax/driver/core/SessionTest.java | 5 +- 7 files changed, 608 insertions(+), 32 deletions(-) create mode 100644 driver-core/src/main/java/com/datastax/driver/core/transport/Codec.java diff --git a/driver-core/src/main/java/com/datastax/driver/core/CQLRow.java b/driver-core/src/main/java/com/datastax/driver/core/CQLRow.java index 995b5f3bc88..ebd4982a792 100644 --- a/driver-core/src/main/java/com/datastax/driver/core/CQLRow.java +++ b/driver-core/src/main/java/com/datastax/driver/core/CQLRow.java @@ -1,52 +1,258 @@ package com.datastax.driver.core; -import java.util.Date; +import java.math.BigDecimal; +import java.math.BigInteger; +import java.nio.ByteBuffer; +import java.util.*; + +import com.datastax.driver.core.codec.Codec; + +import org.apache.cassandra.db.marshal.*; /** * A CQL Row returned in a {@link ResultSet}. */ public class CQLRow { + private final Columns metadata; + private final List data; + + private CQLRow(Columns metadata, List data) { + this.metadata = metadata; + this.data = data; + } + + static CQLRow fromData(Columns metadata, List data) { + if (data == null) + return null; + + return new CQLRow(metadata, data); + } + /** * The columns contains in this CQLRow. * * @return the columns contained in this CQLRow. 
 */
 public Columns columns() {
- return null;
+ return metadata;
+ }
+
+ public boolean isNull(int i) {
+ checkBounds(i);
+ return data.get(i) == null;
+ }
+
+ public boolean isNull(String name) {
+ return isNull(metadata.getIdx(name));
 }

+ /**
+ * Returns the {@code i}th value of this row as a boolean.
+ */
 public boolean getBool(int i) {
- return false;
+ checkType(i, DataType.Native.BOOLEAN);
+
+ ByteBuffer value = data.get(i);
+ if (value == null || value.remaining() == 0)
+ return false;
+
+ return BooleanType.instance.compose(value);
 }

 public boolean getBool(String name) {
- return false;
+ return getBool(metadata.getIdx(name));
 }

 public int getInt(int i) {
- return 0;
+ checkType(i, DataType.Native.INT);
+
+ ByteBuffer value = data.get(i);
+ if (value == null || value.remaining() == 0)
+ return 0;
+
+ return Int32Type.instance.compose(value);
 }

 public int getInt(String name) {
- return 0;
+ return getInt(metadata.getIdx(name));
 }

 public long getLong(int i) {
- return 0;
+ DataType type = checkType(i, DataType.Native.BIGINT,
+ DataType.Native.TIMESTAMP,
+ DataType.Native.INT,
+ DataType.Native.COUNTER);
+
+ ByteBuffer value = data.get(i);
+ if (value == null || value.remaining() == 0)
+ return 0L;
+
+ return type == DataType.Native.INT
+ ? (long)Int32Type.instance.compose(value)
+ : LongType.instance.compose(value);
 }

 public long getLong(String name) {
- return 0;
+ return getLong(metadata.getIdx(name));
 }

 public Date getDate(int i) {
- return null;
+ checkType(i, DataType.Native.TIMESTAMP);
+
+ ByteBuffer value = data.get(i);
+ if (value == null || value.remaining() == 0)
+ return null;
+
+ return DateType.instance.compose(value);
 }

 public Date getDate(String name) {
- return null;
+ return getDate(metadata.getIdx(name));
+ }
+
+ public float getFloat(int i) {
+ checkType(i, DataType.Native.FLOAT);
+
+ ByteBuffer value = data.get(i);
+ if (value == null || value.remaining() == 0)
+ return 0.0f;
+
+ return FloatType.instance.compose(value);
+ }
+
+ public float getFloat(String name) {
+ return getFloat(metadata.getIdx(name));
+ }
+
+ public double getDouble(int i) {
+ DataType type = checkType(i, DataType.Native.DOUBLE,
+ DataType.Native.FLOAT);
+
+ ByteBuffer value = data.get(i);
+ if (value == null || value.remaining() == 0)
+ return 0.0;
+
+ return type == DataType.Native.FLOAT
+ ? (double)FloatType.instance.compose(value)
+ : DoubleType.instance.compose(value);
+ }
+
+ public ByteBuffer getByteBuffer(int i) {
+ checkBounds(i);
+
+ ByteBuffer value = data.get(i);
+ if (value == null)
+ return null;
+
+ return value.duplicate();
 }

- // ...
+ public ByteBuffer getByteBuffer(String name) {
+ return getByteBuffer(metadata.getIdx(name));
+ }
+
+ public byte[] getBytes(int i) {
+ ByteBuffer bb = getByteBuffer(i);
+ byte[] result = new byte[bb.remaining()];
+ bb.get(result);
+ return result;
+ }
+
+ public byte[] getBytes(String name) {
+ return getBytes(metadata.getIdx(name));
+ }
+
+ public String getString(int i) {
+ DataType type = checkType(i, DataType.Native.VARCHAR,
+ DataType.Native.TEXT,
+ DataType.Native.ASCII);
+
+ ByteBuffer value = data.get(i);
+ if (value == null)
+ return null;
+
+ return type == DataType.Native.ASCII
+ ?
AsciiType.instance.compose(value) + : UTF8Type.instance.compose(value); + } + + public String getString(String name) { + return getString(metadata.getIdx(name)); + } + + public BigInteger getVarInt(int i) { + checkType(i, DataType.Native.VARINT); + + ByteBuffer value = data.get(i); + if (value == null || value.remaining() == 0) + return null; + + return IntegerType.instance.compose(value); + } + + public BigInteger getVarInt(String name) { + return getVarInt(metadata.getIdx(name)); + } + + public BigDecimal getDecimal(int i) { + checkType(i, DataType.Native.DECIMAL); + + ByteBuffer value = data.get(i); + if (value == null || value.remaining() == 0) + return null; + + return DecimalType.instance.compose(value); + } + + public BigDecimal getDecimal(String name) { + return getDecimal(metadata.getIdx(name)); + } + + public UUID getUUID(int i) { + DataType type = checkType(i, DataType.Native.UUID, DataType.Native.TIMEUUID); + + ByteBuffer value = data.get(i); + if (value == null || value.remaining() == 0) + return null; + + return type == DataType.Native.UUID + ? UUIDType.instance.compose(value) + : TimeUUIDType.instance.compose(value); + } + + public UUID getUUID(String name) { + return getUUID(metadata.getIdx(name)); + } + + private DataType checkType(int i, DataType.Native... types) { + DataType defined = metadata.type(i); + for (DataType.Native type : types) + if (type == defined) + return type; + + // TODO: change that exception + throw new IllegalArgumentException(String.format("Column %s is of type %s", metadata.name(i), defined)); + } + + private void checkBounds(int i) { + if (i < 0 || i >= metadata.count()) + throw new ArrayIndexOutOfBoundsException(i); + } + + @Override + public String toString() { + StringBuilder sb = new StringBuilder(); + sb.append("CQLRow["); + for (int i = 0; i < metadata.count(); i++) { + if (i != 0) + sb.append(", "); + ByteBuffer bb = data.get(i); + if (bb == null) + sb.append("NULL"); + else + sb.append(Codec.getCodec(metadata.type(i)).getString(bb)); + } + sb.append("]"); + return sb.toString(); + } } diff --git a/driver-core/src/main/java/com/datastax/driver/core/Columns.java b/driver-core/src/main/java/com/datastax/driver/core/Columns.java index 4fef57260c9..77ce6ac7a88 100644 --- a/driver-core/src/main/java/com/datastax/driver/core/Columns.java +++ b/driver-core/src/main/java/com/datastax/driver/core/Columns.java @@ -1,10 +1,30 @@ package com.datastax.driver.core; +import java.util.*; + +import com.datastax.driver.core.codec.Codec; + +import org.apache.cassandra.cql3.ColumnSpecification; + /** * Metadata describing the columns returned in a {@link ResultSet} or a * {@link PreparedStatement}. */ -public class Columns { +public class Columns implements Iterable { + + static final Columns EMPTY = new Columns(new Definition[0]); + + private final Definition[] byIdx; + private final Map byName; + + Columns(Definition[] defs) { + + this.byIdx = defs; + this.byName = new HashMap(defs.length); + + for (int i = 0; i < defs.length; i++) + this.byName.put(defs[i].name, i); + } /** * Returns the number of columns described by this {@code Columns} @@ -13,70 +33,154 @@ public class Columns { * @return the number of columns described by this metadata. */ public int count() { - return 0; + return byIdx.length; + } + + /** + * Returns whether this metadata contains a given column. + * + * @return {@code true} if this metadata contains the column named {@code name}, + * {@code false} otherwise. 
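+ *
+ * For example (sketch): {@code rs.columns().contains("k")} tells whether the
+ * {@link ResultSet} {@code rs} has a column named {@code k}.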
+ */ + public boolean contains(String name) { + return byName.containsKey(name); + } + + /** + * Returns an iterator over the {@link Definition} contained in this metadata. + * + * The order of the iterator will be the one of this metadata. + * + * @return an iterator over the {@link Definition} contained in this metadata. + */ + public Iterator iterator() { + return Arrays.asList(byIdx).iterator(); + } + + /** + * Returns a list containing all the definitions of this metadata in order. + * + * @return a list of the {@link Definition} contained in this metadata. + */ + public List asList() { + return Arrays.asList(byIdx); } /** * Returns the name of the {@code i}th column in this metadata. * * @return the name of the {@code i}th column in this metadata. + * + * @throws IndexOutOfBoundsException if {@code i < 0} or {@code i >= count()} */ public String name(int i) { - return null; + return byIdx[i].name; } /** * Returns the type of the {@code i}th column in this metadata. * * @return the type of the {@code i}th column in this metadata. + * + * @throws IndexOutOfBoundsException if {@code i < 0} or {@code i >= count()} */ public DataType type(int i) { - return null; + return byIdx[i].type; } /** * Returns the type of column {@code name} in this metadata. * * @return the type of column {@code name} in this metadata. + * + * @throws IllegalArgumentException if {@code name} is not one of the columns in this metadata. */ public DataType type(String name) { - return null; + return type(getIdx(name)); } /** * Returns the keyspace of the {@code i}th column in this metadata. * * @return the keyspace of the {@code i}th column in this metadata. + * + * @throws IndexOutOfBoundsException if {@code i < 0} or {@code i >= count()} */ public String keyspace(int i) { - return null; + return byIdx[i].keyspace; } /** * Returns the keyspace of column {@code name} in this metadata. * * @return the keyspace of column {@code name} in this metadata. + * + * @throws IllegalArgumentException if {@code name} is not one of the columns in this metadata. */ public String keyspace(String name) { - return null; + return keyspace(getIdx(name)); } /** * Returns the table of the {@code i}th column in this metadata. * * @return the table of the {@code i}th column in this metadata. + * + * @throws IndexOutOfBoundsException if {@code i < 0} or {@code i >= count()} */ public String table(int i) { - return null; + return byIdx[i].table; } /** * Returns the table of column {@code name} in this metadata. * * @return the table of column {@code name} in this metadata. + * + * @throws IllegalArgumentException if {@code name} is not one of the columns in this metadata. 
*/ public String table(String name) { - return null; + return table(getIdx(name)); } + public String toString() { + StringBuilder sb = new StringBuilder(); + sb.append("Columns["); + for (int i = 0; i < count(); i++) { + if (i != 0) + sb.append(", "); + Definition def = byIdx[i]; + sb.append(def.name).append("(").append(def.type).append(")"); + } + sb.append("]"); + return sb.toString(); + } + + int getIdx(String name) { + Integer idx = byName.get(name); + if (idx == null) + throw new IllegalArgumentException(name + " is not a column defined in this metadata"); + + return idx; + } + + public static class Definition { + + public final String keyspace; + public final String table; + public final String name; + public final DataType type; + + private Definition(String keyspace, String table, String name, DataType type) { + + this.keyspace = keyspace; + this.table = table; + this.name = name; + this.type = type; + } + + static Definition fromTransportSpecification(ColumnSpecification spec) { + return new Definition(spec.ksName, spec.cfName, spec.name.toString(), Codec.rawTypeToDataType(spec.type)); + } + } } diff --git a/driver-core/src/main/java/com/datastax/driver/core/DataType.java b/driver-core/src/main/java/com/datastax/driver/core/DataType.java index 92b5b908a2c..0a1c0dda1e0 100644 --- a/driver-core/src/main/java/com/datastax/driver/core/DataType.java +++ b/driver-core/src/main/java/com/datastax/driver/core/DataType.java @@ -3,6 +3,116 @@ /** * Supported data types for columns. */ -public class DataType { - // TODO +public interface DataType { + + /** + * The three kind of type supported by Cassandra. + * + * The {@code NATIVE} types supported by Cassandra are described in the + * CQL documentation, + * and more information on such type can be obtained using the {#asNative} + * method. + * + * The {@code COLLECTION} types the maps, lists and sets. More information + * on such type can be obtained using the {#asCollection} method. + * + * The {@code CUSTOM} types are user defined types. More information on + * such type can be obtained using the {#asCustom} method. + */ + public enum Kind { NATIVE, COLLECTION, CUSTOM } + + /** + * Returns this type {@link Kind}. + * + * @return this type {@link Kind}. + */ + public Kind kind(); + + /** + * Returns this type as a {@link Native} type. + * + * @return this type as a {@link Native} type. + * + * @throws IllegalStateException if this type is not a {@link Native} type. + * You should use {@link #kind} to check if this type is a native one + * before calling this method. + */ + public Native asNative(); + + /** + * Returns this type as a {@link Collection} type. + * + * @return this type as a {@link Collection} type. + * + * @throws IllegalStateException if this type is not a {@link Collection} + * type. You should use {@link #kind} to check if this type is a collection + * one before calling this method. + */ + public Collection asCollection(); + + /** + * Returns this type as a {@link Custom} type. + * + * @return this type as a {@link Custom} type. + * + * @throws IllegalStateException if this type is not a {@link Custom} type. + * You should use {@link #kind} to check if this type is a custom one + * before calling this method. + */ + public Custom asCustom(); + + /** + * Native types supported by cassandra. 
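+ *
+ * Sketch (assumes a {@code Columns} instance {@code cols}; checking the
+ * first column's type):
+ * <pre>
+ *   if (cols.type(0).kind() == DataType.Kind.NATIVE
+ *       && cols.type(0).asNative() == DataType.Native.INT) { ... }
+ * </pre>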
+ */ + public enum Native implements DataType { + + ASCII, + BIGINT, + BLOB, + BOOLEAN, + COUNTER, + DECIMAL, + DOUBLE, + FLOAT, + INET, + INT, + TEXT, + TIMESTAMP, + UUID, + VARCHAR, + VARINT, + TIMEUUID; + + public Kind kind() { return Kind.NATIVE; } + + public Native asNative() { return this; } + public Collection asCollection() { throw new IllegalStateException("Not a collection type, but a native one"); } + public Custom asCustom() { throw new IllegalStateException("Not a custom type, but a native one"); } + } + + /** + * A collection type (lists, sets and maps). + */ + public static class Collection implements DataType { + // TODO + + public Kind kind() { return Kind.COLLECTION; } + + public Native asNative() { throw new IllegalStateException("Not a native type, but a collection one"); } + public Collection asCollection() { return this; } + public Custom asCustom() { throw new IllegalStateException("Not a custom type, but a collection one"); } + } + + /** + * A used defined custom type. + */ + public static class Custom implements DataType { + // TODO + + public Kind kind() { return Kind.CUSTOM; } + + public Native asNative() { throw new IllegalStateException("Not a native type, but a custom one"); } + public Collection asCollection() { throw new IllegalStateException("Not a collection type, but a custom one"); } + public Custom asCustom() { return this; } + } } diff --git a/driver-core/src/main/java/com/datastax/driver/core/ResultSet.java b/driver-core/src/main/java/com/datastax/driver/core/ResultSet.java index 6e0b1a9fae5..1c137b0d9e3 100644 --- a/driver-core/src/main/java/com/datastax/driver/core/ResultSet.java +++ b/driver-core/src/main/java/com/datastax/driver/core/ResultSet.java @@ -1,20 +1,53 @@ package com.datastax.driver.core; -import java.util.Iterator; -import java.util.List; +import java.nio.ByteBuffer; +import java.util.*; + +import org.apache.cassandra.cql3.ColumnSpecification; +import org.apache.cassandra.transport.messages.ResultMessage; /** * The result of a query. + * + * Note that this class is not thread-safe. */ public class ResultSet implements Iterable { + private static final ResultSet EMPTY = new ResultSet(Columns.EMPTY, new ArrayDeque(0)); + + private final Columns metadata; + private final Queue> rows; + + private ResultSet(Columns metadata, Queue> rows) { + + this.metadata = metadata; + this.rows = rows; + } + + static ResultSet fromMessage(ResultMessage msg) { + + // TODO: turn that into a switch (need to expose the message kind in C*) + if (msg instanceof ResultMessage.Void) { + return EMPTY; + } else if (msg instanceof ResultMessage.Rows) { + ResultMessage.Rows r = (ResultMessage.Rows)msg; + Columns.Definition[] defs = new Columns.Definition[r.result.metadata.names.size()]; + for (int i = 0; i < defs.length; i++) + defs[i] = Columns.Definition.fromTransportSpecification(r.result.metadata.names.get(i)); + + return new ResultSet(new Columns(defs), new ArrayDeque(r.result.rows)); + } else { + throw new IllegalArgumentException("Cannot create a ResultSet from " + msg); + } + } + /** * The columns returned in this ResultSet. * * @return the columns returned in this ResultSet. */ public Columns columns() { - return null; + return metadata; } /** @@ -23,7 +56,7 @@ public Columns columns() { * @return whether this ResultSet has more results. */ public boolean isExhausted() { - return true; + return rows.isEmpty(); } /** @@ -33,7 +66,7 @@ public boolean isExhausted() { * exhausted. 
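 *
 * For example (a sketch of draining the remaining rows; {@code rs} is a
 * {@code ResultSet}):
 * <pre>
 *   CQLRow row;
 *   while ((row = rs.fetchOne()) != null) {
 *       // process row
 *   }
 * </pre>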
*/ public CQLRow fetchOne() { - return null; + return CQLRow.fromData(metadata, rows.poll()); } /** @@ -43,7 +76,13 @@ public CQLRow fetchOne() { * returned list is empty if and only the ResultSet is exhausted. */ public List fetchAll() { - return null; + if (isExhausted()) + return Collections.emptyList(); + + List result = new ArrayList(rows.size()); + for (CQLRow row : this) + result.add(row); + return result; } /** @@ -59,7 +98,29 @@ public List fetchAll() { * this ResultSet. */ public Iterator iterator() { - return null; + + return new Iterator() { + + public boolean hasNext() { + return !rows.isEmpty(); + } + + public CQLRow next() { + return CQLRow.fromData(metadata, rows.poll()); + } + + public void remove() { + throw new UnsupportedOperationException(); + } + }; + } + + @Override + public String toString() { + StringBuilder sb = new StringBuilder(); + sb.append("ResultSet[ exhausted: ").append(isExhausted()); + sb.append(", ").append(metadata).append("]"); + return sb.toString(); } public static class Future // implements java.util.concurrent.Future diff --git a/driver-core/src/main/java/com/datastax/driver/core/Session.java b/driver-core/src/main/java/com/datastax/driver/core/Session.java index d1487f0d6df..8b12af90c5e 100644 --- a/driver-core/src/main/java/com/datastax/driver/core/Session.java +++ b/driver-core/src/main/java/com/datastax/driver/core/Session.java @@ -72,8 +72,14 @@ public ResultSet execute(String query) { QueryMessage msg = new QueryMessage(query); Connection.Future future = connection.write(msg); Message.Response response = future.get(); - logger.info("Got " + response); - return null; + + if (response.type == Message.Type.RESULT) { + return ResultSet.fromMessage((ResultMessage)response); + } + else { + logger.info("Got " + response); + return null; + } } catch (Exception e) { throw new RuntimeException(e); } diff --git a/driver-core/src/main/java/com/datastax/driver/core/transport/Codec.java b/driver-core/src/main/java/com/datastax/driver/core/transport/Codec.java new file mode 100644 index 00000000000..7e5cf376825 --- /dev/null +++ b/driver-core/src/main/java/com/datastax/driver/core/transport/Codec.java @@ -0,0 +1,86 @@ +package com.datastax.driver.core.codec; + +import java.util.HashMap; +import java.util.Map; + +import com.datastax.driver.core.DataType; + +import org.apache.cassandra.cql3.ColumnSpecification; +import org.apache.cassandra.db.marshal.*; + +/** + * Static method to code/decode serialized data given their types. 
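+ *
+ * Sketch (native types only; collections and custom types are still TODO
+ * below):
+ * <pre>
+ *   AbstractType t = Codec.getCodec(DataType.Native.INT); // Int32Type.instance
+ *   Integer v = (Integer)t.compose(bytes);                // bytes: a serialized ByteBuffer
+ * </pre>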
+ */ +public class Codec { + + private static Map, DataType.Native> rawNativeMap = new HashMap, DataType.Native>(); + static { + rawNativeMap.put(AsciiType.instance, DataType.Native.ASCII); + rawNativeMap.put(LongType.instance, DataType.Native.BIGINT); + rawNativeMap.put(BytesType.instance, DataType.Native.BLOB); + rawNativeMap.put(BooleanType.instance, DataType.Native.BOOLEAN); + rawNativeMap.put(CounterColumnType.instance, DataType.Native.COUNTER); + rawNativeMap.put(DecimalType.instance, DataType.Native.DECIMAL); + rawNativeMap.put(DoubleType.instance, DataType.Native.DOUBLE); + rawNativeMap.put(FloatType.instance, DataType.Native.FLOAT); + rawNativeMap.put(InetAddressType.instance, DataType.Native.INET); + rawNativeMap.put(Int32Type.instance, DataType.Native.INT); + rawNativeMap.put(UTF8Type.instance, DataType.Native.TEXT); + rawNativeMap.put(DateType.instance, DataType.Native.TIMESTAMP); + rawNativeMap.put(UUIDType.instance, DataType.Native.UUID); + rawNativeMap.put(UTF8Type.instance, DataType.Native.VARCHAR); + rawNativeMap.put(IntegerType.instance, DataType.Native.VARINT); + rawNativeMap.put(TimeUUIDType.instance, DataType.Native.TIMEUUID); + } + + private Codec() {} + + public static AbstractType getCodec(DataType type) { + switch (type.kind()) { + case NATIVE: return nativeCodec(type.asNative()); + case COLLECTION: return collectionCodec(type.asCollection()); + case CUSTOM: return customCodec(type.asCustom()); + default: throw new RuntimeException("Unknow data type kind"); + } + } + + private static AbstractType nativeCodec(DataType.Native type) { + + switch (type) { + case ASCII: return AsciiType.instance; + case BIGINT: return LongType.instance; + case BLOB: return BytesType.instance; + case BOOLEAN: return BooleanType.instance; + case COUNTER: return CounterColumnType.instance; + case DECIMAL: return DecimalType.instance; + case DOUBLE: return DoubleType.instance; + case FLOAT: return FloatType.instance; + case INET: return InetAddressType.instance; + case INT: return Int32Type.instance; + case TEXT: return UTF8Type.instance; + case TIMESTAMP: return DateType.instance; + case UUID: return UUIDType.instance; + case VARCHAR: return UTF8Type.instance; + case VARINT: return IntegerType.instance; + case TIMEUUID: return TimeUUIDType.instance; + default: throw new RuntimeException("Unknown native type"); + } + } + + private static AbstractType collectionCodec(DataType.Collection type) { + return null; + } + + private static AbstractType customCodec(DataType.Custom type) { + return null; + } + + public static DataType rawTypeToDataType(AbstractType rawType) { + DataType type = rawNativeMap.get(rawType); + if (type != null) + return type; + + // TODO: handle collections and custom + return null; + } +} diff --git a/driver-core/src/test/java/com/datastax/driver/core/SessionTest.java b/driver-core/src/test/java/com/datastax/driver/core/SessionTest.java index ba5c2126c54..b870d00a172 100644 --- a/driver-core/src/test/java/com/datastax/driver/core/SessionTest.java +++ b/driver-core/src/test/java/com/datastax/driver/core/SessionTest.java @@ -28,6 +28,9 @@ public void SimpleExecuteTest() throws Exception { Cluster cluster = new Cluster.Builder().addContactPoint("localhost").build(); Session session = cluster.connect(); - session.execute("SELECT * FROM system.local"); + ResultSet rs = session.execute("SELECT * FROM system.local"); + System.out.println(rs.columns().toString()); + for (CQLRow row : rs) + System.out.println(row.toString()); } } From 8a6fa746e4fbb2789bce6bc2e38338e7a6f85809 Mon 
Sep 17 00:00:00 2001 From: Sylvain Lebresne Date: Mon, 23 Jul 2012 15:58:08 +0200 Subject: [PATCH 012/719] Fix small race and don't crash on response from a USE query --- .../com/datastax/driver/core/ResultSet.java | 24 +++++++------- .../com/datastax/driver/core/Session.java | 9 ++++- .../driver/core/transport/Connection.java | 16 ++++++--- .../com/datastax/driver/core/SessionTest.java | 33 ++++++++++++++++--- 4 files changed, 60 insertions(+), 22 deletions(-) diff --git a/driver-core/src/main/java/com/datastax/driver/core/ResultSet.java b/driver-core/src/main/java/com/datastax/driver/core/ResultSet.java index 1c137b0d9e3..a414570e098 100644 --- a/driver-core/src/main/java/com/datastax/driver/core/ResultSet.java +++ b/driver-core/src/main/java/com/datastax/driver/core/ResultSet.java @@ -26,18 +26,18 @@ private ResultSet(Columns metadata, Queue> rows) { static ResultSet fromMessage(ResultMessage msg) { - // TODO: turn that into a switch (need to expose the message kind in C*) - if (msg instanceof ResultMessage.Void) { - return EMPTY; - } else if (msg instanceof ResultMessage.Rows) { - ResultMessage.Rows r = (ResultMessage.Rows)msg; - Columns.Definition[] defs = new Columns.Definition[r.result.metadata.names.size()]; - for (int i = 0; i < defs.length; i++) - defs[i] = Columns.Definition.fromTransportSpecification(r.result.metadata.names.get(i)); - - return new ResultSet(new Columns(defs), new ArrayDeque(r.result.rows)); - } else { - throw new IllegalArgumentException("Cannot create a ResultSet from " + msg); + switch (msg.kind) { + case VOID: + return EMPTY; + case ROWS: + ResultMessage.Rows r = (ResultMessage.Rows)msg; + Columns.Definition[] defs = new Columns.Definition[r.result.metadata.names.size()]; + for (int i = 0; i < defs.length; i++) + defs[i] = Columns.Definition.fromTransportSpecification(r.result.metadata.names.get(i)); + + return new ResultSet(new Columns(defs), new ArrayDeque(r.result.rows)); + default: + throw new IllegalArgumentException("Cannot create a ResultSet from " + msg); } } diff --git a/driver-core/src/main/java/com/datastax/driver/core/Session.java b/driver-core/src/main/java/com/datastax/driver/core/Session.java index 8b12af90c5e..2b93ce52654 100644 --- a/driver-core/src/main/java/com/datastax/driver/core/Session.java +++ b/driver-core/src/main/java/com/datastax/driver/core/Session.java @@ -74,7 +74,14 @@ public ResultSet execute(String query) { Message.Response response = future.get(); if (response.type == Message.Type.RESULT) { - return ResultSet.fromMessage((ResultMessage)response); + ResultMessage rmsg = (ResultMessage)response; + switch (rmsg.kind) { + case VOID: + case ROWS: + return ResultSet.fromMessage(rmsg); + } + logger.info("Got " + response); + return null; } else { logger.info("Got " + response); diff --git a/driver-core/src/main/java/com/datastax/driver/core/transport/Connection.java b/driver-core/src/main/java/com/datastax/driver/core/transport/Connection.java index 1f548733c8a..cdaf4713dc9 100644 --- a/driver-core/src/main/java/com/datastax/driver/core/transport/Connection.java +++ b/driver-core/src/main/java/com/datastax/driver/core/transport/Connection.java @@ -117,7 +117,10 @@ public Future write(Message.Request request) throws ConnectionException { ChannelFuture writeFuture = channel.write(request); writeFuture.awaitUninterruptibly(); if (!writeFuture.isSuccess()) + { + dispatcher.setFuture(null); throw new TransportException(address, "Error writting", writeFuture.getCause()); + } return future; @@ -186,7 +189,6 @@ private ClientBootstrap 
bootstrap() { private class Dispatcher extends SimpleChannelUpstreamHandler { private volatile Future future; - private volatile Exception exception; public void setFuture(Future future) { this.future = future; @@ -196,14 +198,18 @@ public void setFuture(Future future) { public void messageReceived(ChannelHandlerContext ctx, MessageEvent e) { // TODO: we should do something better than just throwing an exception if (future == null) - throw new RuntimeException("Not future set"); + throw new RuntimeException(String.format("Received %s but no future set", e.getMessage())); + + // As soon as we set the value to the currently set future, a new write could + // be started, so reset the local variable to null *before* setting the future for this query. + Future current = future; + future = null; if (!(e.getMessage() instanceof Message.Response)) { - future.setException(new TransportException(address, "Unexpected message received: " + e.getMessage())); + current.setException(new TransportException(address, "Unexpected message received: " + e.getMessage())); } else { - future.set((Message.Response)e.getMessage()); + current.set((Message.Response)e.getMessage()); } - future = null; } } diff --git a/driver-core/src/test/java/com/datastax/driver/core/SessionTest.java b/driver-core/src/test/java/com/datastax/driver/core/SessionTest.java index b870d00a172..619b95e2f79 100644 --- a/driver-core/src/test/java/com/datastax/driver/core/SessionTest.java +++ b/driver-core/src/test/java/com/datastax/driver/core/SessionTest.java @@ -1,5 +1,7 @@ package com.datastax.driver.core; +import java.util.*; + import org.junit.BeforeClass; import org.junit.Test; import static junit.framework.Assert.*; @@ -28,9 +30,32 @@ public void SimpleExecuteTest() throws Exception { Cluster cluster = new Cluster.Builder().addContactPoint("localhost").build(); Session session = cluster.connect(); - ResultSet rs = session.execute("SELECT * FROM system.local"); - System.out.println(rs.columns().toString()); - for (CQLRow row : rs) - System.out.println(row.toString()); + ResultSet rs; + + session.execute("CREATE KEYSPACE test_ks WITH strategy_class = SimpleStrategy AND strategy_options:replication_factor = 1"); + session.execute("USE test_ks"); + session.execute("CREATE TABLE test (k text PRIMARY KEY, i int, f float)"); + + rs = session.execute("INSERT INTO test (k, i, f) VALUES ('foo', 0, 0.2)"); + assertTrue(rs.isExhausted()); + + rs = session.execute("INSERT INTO test (k, i, f) VALUES ('bar', 1, 3.4)"); + assertTrue(rs.isExhausted()); + + rs = session.execute("SELECT * FROM test"); + List l = rs.fetchAll(); + assertEquals(2, l.size()); + + CQLRow r; + r = l.get(0); + assertEquals("bar", r.getString(0)); + assertEquals("bar", r.getString("k")); + assertEquals(1, r.getInt("i")); + assertEquals(3.4, r.getFloat("f"), 0.01); + + r = l.get(1); + assertEquals("foo", r.getString("k")); + assertEquals(0, r.getInt("i")); + assertEquals(0.2, r.getFloat("f"), 0.01); } } From d8863fe76aadfc11832fdd70fba61d16f3a8cf6b Mon Sep 17 00:00:00 2001 From: Sylvain Lebresne Date: Mon, 23 Jul 2012 18:24:31 +0200 Subject: [PATCH 013/719] Minor fixes to the javadoc --- .../com/datastax/driver/core/Cluster.java | 26 ++++++++++++++----- .../com/datastax/driver/core/ResultSet.java | 8 +++--- .../com/datastax/driver/core/Session.java | 26 ++++++++++++++++--- 3 files changed, 46 insertions(+), 14 deletions(-) diff --git a/driver-core/src/main/java/com/datastax/driver/core/Cluster.java b/driver-core/src/main/java/com/datastax/driver/core/Cluster.java index 
4db018bb201..ab594a3f652 100644 --- a/driver-core/src/main/java/com/datastax/driver/core/Cluster.java +++ b/driver-core/src/main/java/com/datastax/driver/core/Cluster.java @@ -79,7 +79,7 @@ public Session connect(AuthInfo authInfo) { /** * Creates a new session on this cluster and sets a keyspace to use. * - * @param keyspaceName The name of the keyspace to use for the created + * @param keyspace The name of the keyspace to use for the created * {@code Session}. This can be later changed using {@link Session#use}. * @return a new session on this cluster sets to keyspace * {@code keyspaceName}. @@ -144,13 +144,16 @@ public Builder addContactPoint(String address, int port) { /** * Add a contact point using the default Cassandra port. * - * @see addContactPoint for more details on contact points. + * See {@link Builder#addContactPoint} for more details on contact + * points. * * @param address the address of the node to add as contact point * @return this Builder * * @throws SecurityException if a security manager is present and * permission to resolve the host name is denied. + * + * @see Builder#addContactPoint */ public Builder addContactPoint(String address) { return addContactPoint(address, DEFAULT_PORT); @@ -159,13 +162,16 @@ public Builder addContactPoint(String address) { /** * Add contact points using the default Cassandra port. * - * @see addContactPoint for more details on contact points. + * See {@link Builder#addContactPoint} for more details on contact + * points. * * @param addresses addresses of the nodes to add as contact point * @return this Builder * * @throws SecurityException if a security manager is present and * permission to resolve the host name is denied. + * + * @see Builder#addContactPoint */ public Builder addContactPoints(String... addresses) { for (String address : addresses) @@ -176,13 +182,16 @@ public Builder addContactPoints(String... addresses) { /** * Add contact points using the default Cassandra port. * - * @see addContactPoint for more details on contact points. + * See {@link Builder#addContactPoint} for more details on contact + * points. * * @param addresses addresses of the nodes to add as contact point * @return this Builder * * @throws SecurityException if a security manager is present and * permission to resolve the host name is denied. + * + * @see Builder#addContactPoint */ public Builder addContactPoints(InetAddress... addresses) { for (InetAddress address : addresses) @@ -193,14 +202,17 @@ public Builder addContactPoints(InetAddress... addresses) { /** * Add contact points. * - * @see addContactPoint for more details on contact points. + * See {@link Builder#addContactPoint} for more details on contact + * points. * - * @param sockAddresses the socket addresses of the nodes to add as - * contact point + * @param addresses the socket addresses of the nodes to add as contact + * point * @return this Builder * * @throws SecurityException if a security manager is present and * permission to resolve the host name is denied. + * + * @see Builder#addContactPoint */ public Builder addContactPoints(InetSocketAddress... 
addresses) {
 this.addresses.addAll(Arrays.asList(addresses));
diff --git a/driver-core/src/main/java/com/datastax/driver/core/ResultSet.java b/driver-core/src/main/java/com/datastax/driver/core/ResultSet.java
index a414570e098..ccf9771af1b 100644
--- a/driver-core/src/main/java/com/datastax/driver/core/ResultSet.java
+++ b/driver-core/src/main/java/com/datastax/driver/core/ResultSet.java
@@ -88,11 +88,11 @@ public List fetchAll() {
 /**
 * An iterator over the rows contained in this ResultSet.
 *
- * The {@link Iterator.next iterator next()} method is equivalent to
- * calling {@link fetchOne}. So this iterator will consume results from
- * this ResultSet and after a full iteration, the ResultSet will be empty.
+ * The {@link Iterator#next} method is equivalent to calling {@link #fetchOne}.
+ * So this iterator will consume results from this ResultSet and after a
+ * full iteration, the ResultSet will be empty.
 *
- * The returned iterator does not support the {@link Iterato.remove} method.
+ * The returned iterator does not support the {@link Iterator#remove} method.
 *
 * @return an iterator that will consume and return the remaining rows of
 * this ResultSet.
diff --git a/driver-core/src/main/java/com/datastax/driver/core/Session.java b/driver-core/src/main/java/com/datastax/driver/core/Session.java
index 2b93ce52654..86037ec85c0 100644
--- a/driver-core/src/main/java/com/datastax/driver/core/Session.java
+++ b/driver-core/src/main/java/com/datastax/driver/core/Session.java
@@ -45,7 +45,6 @@ public class Session {
 *
 * @param keyspace the name of the keyspace to set
 * @return this session.
- *
 */
 public Session use(String keyspace) {
 return null;
@@ -93,6 +92,14 @@ public ResultSet execute(String query) {
 }

 /**
+ * Execute the provided query.
+ *
+ * This method works exactly as {@link #execute(String)}.
+ *
+ * @param query the CQL query to execute
+ * @return the result of the query. That result will never be null but can
+ * be empty and will be for any non SELECT query.
+ *
 * @see #execute(String)
 */
 public ResultSet execute(CQLQuery query) {
@@ -121,6 +128,14 @@ public ResultSet.Future executeAsync(String query) {
 }

 /**
+ * Execute the provided query asynchronously.
+ *
+ * This method works exactly as {@link #executeAsync(String)}.
+ *
+ * @param query the CQL query to execute
+ * @return the result of the query. That result will never be null but can
+ * be empty and will be for any non SELECT query.
+ *
 * @see #executeAsync(String)
 */
 public ResultSet.Future executeAsync(CQLQuery query) {
@@ -138,6 +153,11 @@ public PreparedStatement prepare(String query) {
 }

 /**
+ * Prepare the provided query.
+ *
+ * @param query the CQL query to prepare
+ * @return the prepared statement corresponding to {@code query}.
+ *
 * @see #prepare(String)
 */
 public PreparedStatement prepare(CQLQuery query) {
@@ -148,7 +168,7 @@ public PreparedStatement prepare(CQLQuery query) {
 * Execute a prepared statement that had values provided for its bound
 * variables.
 *
- * This method performs like {@link execute} but for prepared statements.
+ * This method performs like {@link #execute} but for prepared statements.
 * It blocks until at least some result has been received from the
 * database.
 *
@@ -164,7 +184,7 @@ public ResultSet executePrepared(BoundStatement stmt) {
 * Execute a prepared statement that had values provided for its bound
 * variables asynchronously.
 *
- * This method performs like {@link executeAsync} but for prepared
+ * This method performs like {@link #executeAsync} but for prepared
 * statements. It returns as soon as the query has been successfully sent to
 * the database.
 *

From af18136ea59eb77e7e76e90fa63b01e540a882c6 Mon Sep 17 00:00:00 2001
From: Sylvain Lebresne
Date: Tue, 24 Jul 2012 12:47:40 +0200
Subject: [PATCH 014/719] Implements (most of) prepared statements

---
 .../datastax/driver/core/BoundStatement.java  | 211 +++++++++++++++++-
 .../java/com/datastax/driver/core/CQLRow.java |  51 ++---
 .../com/datastax/driver/core/Columns.java     |  15 ++
 .../driver/core/PreparedStatement.java        |  31 ++-
 .../com/datastax/driver/core/Session.java     |  52 ++++-
 .../com/datastax/driver/core/SessionTest.java |  43 +++-
 6 files changed, 356 insertions(+), 47 deletions(-)

diff --git a/driver-core/src/main/java/com/datastax/driver/core/BoundStatement.java b/driver-core/src/main/java/com/datastax/driver/core/BoundStatement.java
index 8662e55a87d..89bc0e9fdc6 100644
--- a/driver-core/src/main/java/com/datastax/driver/core/BoundStatement.java
+++ b/driver-core/src/main/java/com/datastax/driver/core/BoundStatement.java
@@ -1,14 +1,38 @@
 package com.datastax.driver.core;

+import java.math.BigDecimal;
+import java.math.BigInteger;
+import java.nio.ByteBuffer;
+import java.util.Date;
+import java.util.UUID;
+
+import org.apache.cassandra.db.marshal.*;
+
+/**
+ * A prepared statement with values bound to the bind variables.
+ *
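+ * Usage sketch (hypothetical: assumes a statement with a text and an int
+ * variable, and a {@code BoundStatement} obtained for it):
+ * <pre>
+ *   bound.setString(0, "foo")
+ *        .setInt(1, 42);
+ *   session.executePrepared(bound);
+ * </pre>
+ *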
+ * Once a BoundStatement has values for all the variables of the {@link PreparedStatement} + * it has been created from, it can executed through {@link Session#executePrepared}. + */ public class BoundStatement { + final PreparedStatement statement; + final ByteBuffer[] values; + private int remaining; + + BoundStatement(PreparedStatement statement) { + this.statement = statement; + this.values = new ByteBuffer[statement.variables().count()]; + this.remaining = values.length; + } + /** * Returns the prepared statement on which this BoundStatement is based. * * @return the prepared statement on which this BoundStatement is based. */ public PreparedStatement preparedStatement() { - return null; + return statement; } /** @@ -18,7 +42,16 @@ public PreparedStatement preparedStatement() { * @return whether all variables are bound. */ public boolean ready() { - return false; + return remaining == 0; + } + + public boolean isSet(int i) { + metadata().checkBounds(i); + return values[i] != null; + } + + public boolean isSet(String name) { + return isSet(metadata().getIdx(name)); } public BoundStatement bind(Object... values) { @@ -26,20 +59,184 @@ public BoundStatement bind(Object... values) { } public BoundStatement setBool(int i, boolean v) { - return null; + metadata().checkType(i, DataType.Native.BOOLEAN); + return setValue(i, BooleanType.instance.decompose(v)); } public BoundStatement setBool(String name, boolean v) { - return null; + return setBool(metadata().getIdx(name), v); } public BoundStatement setInt(int i, int v) { - return null; + DataType.Native type = metadata().checkType(i, DataType.Native.INT, + DataType.Native.TIMESTAMP, + DataType.Native.BIGINT, + DataType.Native.COUNTER, + DataType.Native.VARINT); + + switch (type) { + case INT: + return setValue(i, Int32Type.instance.decompose(v)); + case TIMESTAMP: + case BIGINT: + case COUNTER: + return setValue(i, LongType.instance.decompose((long)v)); + case VARINT: + return setValue(i, IntegerType.instance.decompose(BigInteger.valueOf((long)v))); + default: + throw new AssertionError(); + } } public BoundStatement setInt(String name, int v) { - return null; + return setInt(metadata().getIdx(name), v); + } + + public BoundStatement setLong(int i, long v) { + DataType.Native type = metadata().checkType(i, DataType.Native.BIGINT, + DataType.Native.TIMESTAMP, + DataType.Native.COUNTER, + DataType.Native.VARINT); + + switch (type) { + case TIMESTAMP: + case BIGINT: + case COUNTER: + return setValue(i, LongType.instance.decompose(v)); + case VARINT: + return setValue(i, IntegerType.instance.decompose(BigInteger.valueOf(v))); + default: + throw new AssertionError(); + } + } + + public BoundStatement setLong(String name, long v) { + return setLong(metadata().getIdx(name), v); + } + + public BoundStatement setDate(int i, Date v) { + metadata().checkType(i, DataType.Native.TIMESTAMP); + return setValue(i, DateType.instance.decompose(v)); + } + + public BoundStatement setDate(String name, Date v) { + return setDate(metadata().getIdx(name), v); + } + + public BoundStatement setFloat(int i, float v) { + DataType.Native type = metadata().checkType(i, DataType.Native.FLOAT, + DataType.Native.DOUBLE, + DataType.Native.DECIMAL); + + switch (type) { + case FLOAT: + return setValue(i, FloatType.instance.decompose(v)); + case DOUBLE: + return setValue(i, DoubleType.instance.decompose((double)v)); + case DECIMAL: + return setValue(i, DecimalType.instance.decompose(BigDecimal.valueOf((double)v))); + default: + throw new AssertionError(); + } + } + + public 
BoundStatement setFloat(String name, float v) { + return setFloat(metadata().getIdx(name), v); + } + + public BoundStatement setDouble(int i, double v) { + DataType.Native type = metadata().checkType(i, DataType.Native.DOUBLE, + DataType.Native.DECIMAL); + switch (type) { + case DOUBLE: + return setValue(i, DoubleType.instance.decompose(v)); + case DECIMAL: + return setValue(i, DecimalType.instance.decompose(BigDecimal.valueOf(v))); + default: + throw new AssertionError(); + } } - // ... + public BoundStatement setDouble(String name, double v) { + return setDouble(metadata().getIdx(name), v); + } + + public BoundStatement setString(int i, String v) { + DataType.Native type = metadata().checkType(i, DataType.Native.VARCHAR, + DataType.Native.TEXT, + DataType.Native.ASCII); + switch (type) { + case ASCII: + return setValue(i, AsciiType.instance.decompose(v)); + case TEXT: + case VARCHAR: + return setValue(i, UTF8Type.instance.decompose(v)); + default: + throw new AssertionError(); + } + } + + public BoundStatement setString(String name, String v) { + return setString(metadata().getIdx(name), v); + } + + public BoundStatement setByteBuffer(int i, ByteBuffer v) { + return setValue(i, v.duplicate()); + } + + public BoundStatement setByteBuffer(String name, ByteBuffer v) { + return setByteBuffer(metadata().getIdx(name), v); + } + + public BoundStatement setBytes(int i, byte[] v) { + return setValue(i, ByteBuffer.wrap(v)); + } + + public BoundStatement setBytes(String name, byte[] v) { + return setBytes(metadata().getIdx(name), v); + } + + public BoundStatement setVarInt(int i, BigInteger v) { + metadata().checkType(i, DataType.Native.VARINT); + return setValue(i, IntegerType.instance.decompose(v)); + } + + public BoundStatement setVarInt(String name, BigInteger v) { + return setVarInt(metadata().getIdx(name), v); + } + + public BoundStatement setDecimal(int i, BigDecimal v) { + metadata().checkType(i, DataType.Native.DECIMAL); + return setValue(i, DecimalType.instance.decompose(v)); + } + + public BoundStatement setDecimal(String name, BigDecimal v) { + return setDecimal(metadata().getIdx(name), v); + } + + public BoundStatement setUUID(int i, UUID v) { + DataType.Native type = metadata().checkType(i, DataType.Native.UUID, + DataType.Native.TIMEUUID); + + return type == DataType.Native.UUID + ? 
setValue(i, UUIDType.instance.decompose(v)) + : setValue(i, TimeUUIDType.instance.decompose(v)); + } + + public BoundStatement setUUID(String name, UUID v) { + return setUUID(metadata().getIdx(name), v); + } + + private Columns metadata() { + return statement.metadata; + } + + private BoundStatement setValue(int i, ByteBuffer value) { + ByteBuffer previous = values[i]; + values[i] = value; + + if (previous == null) + remaining--; + return this; + } } diff --git a/driver-core/src/main/java/com/datastax/driver/core/CQLRow.java b/driver-core/src/main/java/com/datastax/driver/core/CQLRow.java index ebd4982a792..a2339a3adac 100644 --- a/driver-core/src/main/java/com/datastax/driver/core/CQLRow.java +++ b/driver-core/src/main/java/com/datastax/driver/core/CQLRow.java @@ -39,7 +39,7 @@ public Columns columns() { } public boolean isNull(int i) { - checkBounds(i); + metadata.checkBounds(i); return data.get(i) != null; } @@ -50,7 +50,7 @@ public boolean isNull(String name) { /** */ public boolean getBool(int i) { - checkType(i, DataType.Native.BOOLEAN); + metadata.checkType(i, DataType.Native.BOOLEAN); ByteBuffer value = data.get(i); if (value == null || value.remaining() == 0) @@ -64,7 +64,7 @@ public boolean getBool(String name) { } public int getInt(int i) { - checkType(i, DataType.Native.INT); + metadata.checkType(i, DataType.Native.INT); ByteBuffer value = data.get(i); if (value == null || value.remaining() == 0) @@ -78,10 +78,10 @@ public int getInt(String name) { } public long getLong(int i) { - DataType type = checkType(i, DataType.Native.BIGINT, - DataType.Native.TIMESTAMP, - DataType.Native.INT, - DataType.Native.COUNTER); + DataType type = metadata.checkType(i, DataType.Native.BIGINT, + DataType.Native.TIMESTAMP, + DataType.Native.INT, + DataType.Native.COUNTER); ByteBuffer value = data.get(i); if (value == null || value.remaining() == 0) @@ -97,7 +97,7 @@ public long getLong(String name) { } public Date getDate(int i) { - checkType(i, DataType.Native.TIMESTAMP); + metadata.checkType(i, DataType.Native.TIMESTAMP); ByteBuffer value = data.get(i); if (value == null || value.remaining() == 0) @@ -111,7 +111,7 @@ public Date getDate(String name) { } public float getFloat(int i) { - checkType(i, DataType.Native.FLOAT); + metadata.checkType(i, DataType.Native.FLOAT); ByteBuffer value = data.get(i); if (value == null || value.remaining() == 0) @@ -125,8 +125,8 @@ public float getFloat(String name) { } public double getDouble(int i) { - DataType type = checkType(i, DataType.Native.DOUBLE, - DataType.Native.FLOAT); + DataType type = metadata.checkType(i, DataType.Native.DOUBLE, + DataType.Native.FLOAT); ByteBuffer value = data.get(i); if (value == null || value.remaining() == 0) @@ -138,7 +138,7 @@ public double getDouble(int i) { } public ByteBuffer getByteBuffer(int i) { - checkBounds(i); + metadata.checkBounds(i); ByteBuffer value = data.get(i); if (value == null) @@ -163,9 +163,9 @@ public byte[] getBytes(String name) { } public String getString(int i) { - DataType type = checkType(i, DataType.Native.VARCHAR, - DataType.Native.TEXT, - DataType.Native.ASCII); + DataType type = metadata.checkType(i, DataType.Native.VARCHAR, + DataType.Native.TEXT, + DataType.Native.ASCII); ByteBuffer value = data.get(i); if (value == null) @@ -181,7 +181,7 @@ public String getString(String name) { } public BigInteger getVarInt(int i) { - checkType(i, DataType.Native.VARINT); + metadata.checkType(i, DataType.Native.VARINT); ByteBuffer value = data.get(i); if (value == null || value.remaining() == 0) @@ -195,7 
+195,7 @@ public BigInteger getVarInt(String name) {
     }
 
     public BigDecimal getDecimal(int i) {
-        checkType(i, DataType.Native.DECIMAL);
+        metadata.checkType(i, DataType.Native.DECIMAL);
 
         ByteBuffer value = data.get(i);
         if (value == null || value.remaining() == 0)
@@ -209,7 +209,7 @@ public BigDecimal getDecimal(String name) {
     }
 
     public UUID getUUID(int i) {
-        DataType type = checkType(i, DataType.Native.UUID, DataType.Native.TIMEUUID);
+        DataType type = metadata.checkType(i, DataType.Native.UUID, DataType.Native.TIMEUUID);
 
         ByteBuffer value = data.get(i);
         if (value == null || value.remaining() == 0)
@@ -224,21 +224,6 @@ public UUID getUUID(String name) {
         return getUUID(metadata.getIdx(name));
     }
 
-    private DataType checkType(int i, DataType.Native... types) {
-        DataType defined = metadata.type(i);
-        for (DataType.Native type : types)
-            if (type == defined)
-                return type;
-
-        // TODO: change that exception
-        throw new IllegalArgumentException(String.format("Column %s is of type %s", metadata.name(i), defined));
-    }
-
-    private void checkBounds(int i) {
-        if (i < 0 || i >= metadata.count())
-            throw new ArrayIndexOutOfBoundsException(i);
-    }
-
     @Override
     public String toString() {
         StringBuilder sb = new StringBuilder();
diff --git a/driver-core/src/main/java/com/datastax/driver/core/Columns.java b/driver-core/src/main/java/com/datastax/driver/core/Columns.java
index 77ce6ac7a88..135db4edcb0 100644
--- a/driver-core/src/main/java/com/datastax/driver/core/Columns.java
+++ b/driver-core/src/main/java/com/datastax/driver/core/Columns.java
@@ -164,6 +164,21 @@ int getIdx(String name) {
         return idx;
     }
 
+    void checkBounds(int i) {
+        if (i < 0 || i >= count())
+            throw new ArrayIndexOutOfBoundsException(i);
+    }
+
+    DataType.Native checkType(int i, DataType.Native... types) {
+        DataType defined = type(i);
+        for (DataType.Native type : types)
+            if (type == defined)
+                return type;
+
+        // TODO: change that exception
+        throw new IllegalArgumentException(String.format("Column %s is of type %s", name(i), defined));
+    }
+
     public static class Definition {
 
         public final String keyspace;
diff --git a/driver-core/src/main/java/com/datastax/driver/core/PreparedStatement.java b/driver-core/src/main/java/com/datastax/driver/core/PreparedStatement.java
index 17131c75abb..087e6d7d017 100644
--- a/driver-core/src/main/java/com/datastax/driver/core/PreparedStatement.java
+++ b/driver-core/src/main/java/com/datastax/driver/core/PreparedStatement.java
@@ -1,5 +1,7 @@
 package com.datastax.driver.core;
 
+import org.apache.cassandra.transport.messages.ResultMessage;
+
 /**
  * Represents a prepared statement, a query with bound variables that has been
  * prepared (pre-parsed) by the database.
@@ -11,15 +13,38 @@
  */
 public class PreparedStatement {
 
+    final Columns metadata;
+    final int id;
+
+    private PreparedStatement(Columns metadata, int id) {
+        this.metadata = metadata;
+        this.id = id;
+    }
+
+    static PreparedStatement fromMessage(ResultMessage.Prepared msg) {
+
+        Columns.Definition[] defs = new Columns.Definition[msg.metadata.names.size()];
+        for (int i = 0; i < defs.length; i++)
+            defs[i] = Columns.Definition.fromTransportSpecification(msg.metadata.names.get(i));
+
+        return new PreparedStatement(new Columns(defs), msg.statementId);
+    }
+
+    /**
+     * Returns metadata on the bound variables of this prepared statement.
+     *
+     * @return the variables bound in this prepared statement.
+     */
     public Columns variables() {
-        return null;
+        return metadata;
    }
 
    public BoundStatement bind(Object...
values) { - return null; + BoundStatement bs = new BoundStatement(this); + return bs.bind(values); } public BoundStatement newBoundStatement() { - return null; + return new BoundStatement(this); } } diff --git a/driver-core/src/main/java/com/datastax/driver/core/Session.java b/driver-core/src/main/java/com/datastax/driver/core/Session.java index 86037ec85c0..87f38ece0b2 100644 --- a/driver-core/src/main/java/com/datastax/driver/core/Session.java +++ b/driver-core/src/main/java/com/datastax/driver/core/Session.java @@ -1,5 +1,6 @@ package com.datastax.driver.core; +import java.util.Arrays; import java.util.List; import java.net.InetSocketAddress; @@ -149,7 +150,29 @@ public ResultSet.Future executeAsync(CQLQuery query) { * @return the prepared statement corresponding to {@code query}. */ public PreparedStatement prepare(String query) { - return null; + + // TODO: this is not the real deal, just for tests + try { + PrepareMessage msg = new PrepareMessage(query); + Connection.Future future = connection.write(msg); + Message.Response response = future.get(); + + if (response.type == Message.Type.RESULT) { + ResultMessage rmsg = (ResultMessage)response; + switch (rmsg.kind) { + case PREPARED: + return PreparedStatement.fromMessage((ResultMessage.Prepared)rmsg); + } + logger.info("Got " + response); + return null; + } + else { + logger.info("Got " + response); + return null; + } + } catch (Exception e) { + throw new RuntimeException(e); + } } /** @@ -177,7 +200,32 @@ public PreparedStatement prepare(CQLQuery query) { * be empty and will be for any non SELECT query. */ public ResultSet executePrepared(BoundStatement stmt) { - return null; + // TODO: this is not the real deal, just for tests + try { + if (!stmt.ready()) + throw new IllegalArgumentException("Provided statement has some variables not bound to values"); + + ExecuteMessage msg = new ExecuteMessage(stmt.statement.id, Arrays.asList(stmt.values)); + Connection.Future future = connection.write(msg); + Message.Response response = future.get(); + + if (response.type == Message.Type.RESULT) { + ResultMessage rmsg = (ResultMessage)response; + switch (rmsg.kind) { + case VOID: + case ROWS: + return ResultSet.fromMessage(rmsg); + } + logger.info("Got " + response); + return null; + } + else { + logger.info("Got " + response); + return null; + } + } catch (Exception e) { + throw new RuntimeException(e); + } } /** diff --git a/driver-core/src/test/java/com/datastax/driver/core/SessionTest.java b/driver-core/src/test/java/com/datastax/driver/core/SessionTest.java index 619b95e2f79..3c9883b9af2 100644 --- a/driver-core/src/test/java/com/datastax/driver/core/SessionTest.java +++ b/driver-core/src/test/java/com/datastax/driver/core/SessionTest.java @@ -30,12 +30,12 @@ public void SimpleExecuteTest() throws Exception { Cluster cluster = new Cluster.Builder().addContactPoint("localhost").build(); Session session = cluster.connect(); - ResultSet rs; - session.execute("CREATE KEYSPACE test_ks WITH strategy_class = SimpleStrategy AND strategy_options:replication_factor = 1"); session.execute("USE test_ks"); session.execute("CREATE TABLE test (k text PRIMARY KEY, i int, f float)"); + ResultSet rs; + rs = session.execute("INSERT INTO test (k, i, f) VALUES ('foo', 0, 0.2)"); assertTrue(rs.isExhausted()); @@ -58,4 +58,43 @@ public void SimpleExecuteTest() throws Exception { assertEquals(0, r.getInt("i")); assertEquals(0.2, r.getFloat("f"), 0.01); } + + @Test + public void PreparedStatementTest() throws Exception { + + Cluster cluster = new 
Cluster.Builder().addContactPoint("localhost").build(); + Session session = cluster.connect(); + + session.execute("CREATE KEYSPACE test_ks WITH strategy_class = SimpleStrategy AND strategy_options:replication_factor = 1"); + session.execute("USE test_ks"); + session.execute("CREATE TABLE test_2 (k text, i int, f float, PRIMARY KEY(k, i))"); + + PreparedStatement insertStmt = session.prepare("INSERT INTO test_2 (k, i, f) VALUES (?, ?, ?)"); + PreparedStatement selectStmt = session.prepare("SELECT * FROM test_2 WHERE k = ?"); + + ResultSet rs; + BoundStatement bs; + + bs = insertStmt.newBoundStatement().setString(0, "prep").setInt("i", 1).setFloat(2, 0.1f); + rs = session.executePrepared(bs); + + bs = insertStmt.newBoundStatement().setString(0, "prep").setFloat("f", 0.2f).setInt(1, 2); + rs = session.executePrepared(bs); + + bs = selectStmt.newBoundStatement().setString("k", "prep"); + rs = session.executePrepared(bs); + List l = rs.fetchAll(); + assertEquals(2, l.size()); + + CQLRow r; + r = l.get(0); + assertEquals("prep", r.getString(0)); + assertEquals(1, r.getInt("i")); + assertEquals(0.1, r.getFloat("f"), 0.01); + + r = l.get(1); + assertEquals("prep", r.getString("k")); + assertEquals(2, r.getInt("i")); + assertEquals(0.2, r.getFloat("f"), 0.01); + } } From 880a29b735a93f7aa08a3316e5c0a89ad2e50565 Mon Sep 17 00:00:00 2001 From: Sylvain Lebresne Date: Tue, 24 Jul 2012 14:56:16 +0200 Subject: [PATCH 015/719] Fix package name --- driver-core/src/main/java/com/datastax/driver/core/CQLRow.java | 2 +- driver-core/src/main/java/com/datastax/driver/core/Columns.java | 2 +- .../src/main/java/com/datastax/driver/core/transport/Codec.java | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/driver-core/src/main/java/com/datastax/driver/core/CQLRow.java b/driver-core/src/main/java/com/datastax/driver/core/CQLRow.java index a2339a3adac..4c3bab1d8db 100644 --- a/driver-core/src/main/java/com/datastax/driver/core/CQLRow.java +++ b/driver-core/src/main/java/com/datastax/driver/core/CQLRow.java @@ -5,7 +5,7 @@ import java.nio.ByteBuffer; import java.util.*; -import com.datastax.driver.core.codec.Codec; +import com.datastax.driver.core.transport.Codec; import org.apache.cassandra.db.marshal.*; diff --git a/driver-core/src/main/java/com/datastax/driver/core/Columns.java b/driver-core/src/main/java/com/datastax/driver/core/Columns.java index 135db4edcb0..54b164eb682 100644 --- a/driver-core/src/main/java/com/datastax/driver/core/Columns.java +++ b/driver-core/src/main/java/com/datastax/driver/core/Columns.java @@ -2,7 +2,7 @@ import java.util.*; -import com.datastax.driver.core.codec.Codec; +import com.datastax.driver.core.transport.Codec; import org.apache.cassandra.cql3.ColumnSpecification; diff --git a/driver-core/src/main/java/com/datastax/driver/core/transport/Codec.java b/driver-core/src/main/java/com/datastax/driver/core/transport/Codec.java index 7e5cf376825..38950c5cf6e 100644 --- a/driver-core/src/main/java/com/datastax/driver/core/transport/Codec.java +++ b/driver-core/src/main/java/com/datastax/driver/core/transport/Codec.java @@ -1,4 +1,4 @@ -package com.datastax.driver.core.codec; +package com.datastax.driver.core.transport; import java.util.HashMap; import java.util.Map; From 46212ffcaf0d1d00c98889240d89ab53944b0a02 Mon Sep 17 00:00:00 2001 From: Sylvain Lebresne Date: Tue, 24 Jul 2012 14:52:47 +0200 Subject: [PATCH 016/719] Basic pooling and host conviction/retry policy --- .../com/datastax/driver/core/Cluster.java | 58 +++-- .../driver/core/ConvictionPolicy.java | 16 ++ 
 .../java/com/datastax/driver/core/Host.java   |  60 +++++
 .../driver/core/LoadBalancingPolicy.java      |  11 +
 .../driver/core/PreparedStatement.java        |  23 +-
 .../driver/core/ReconnectionHandler.java      |  53 +++++
 .../com/datastax/driver/core/ResultSet.java   |   8 +-
 .../driver/core/RoundRobinPolicy.java         |  81 +++++++
 .../com/datastax/driver/core/Session.java     | 221 +++++++++++------
 .../driver/core/pool/HealthMonitor.java       |  55 +++++
 .../driver/core/pool/HostConnectionPool.java  | 225 ++++++++++++++++++
 .../driver/core/transport/Connection.java     | 212 +++++++++++++----
 .../core/utils/SimpleConvictionPolicy.java    |  30 +++
 .../com/datastax/driver/core/SessionTest.java | 127 +++++-----
 14 files changed, 981 insertions(+), 199 deletions(-)
 create mode 100644 driver-core/src/main/java/com/datastax/driver/core/ConvictionPolicy.java
 create mode 100644 driver-core/src/main/java/com/datastax/driver/core/Host.java
 create mode 100644 driver-core/src/main/java/com/datastax/driver/core/LoadBalancingPolicy.java
 create mode 100644 driver-core/src/main/java/com/datastax/driver/core/ReconnectionHandler.java
 create mode 100644 driver-core/src/main/java/com/datastax/driver/core/RoundRobinPolicy.java
 create mode 100644 driver-core/src/main/java/com/datastax/driver/core/pool/HealthMonitor.java
 create mode 100644 driver-core/src/main/java/com/datastax/driver/core/pool/HostConnectionPool.java
 create mode 100644 driver-core/src/main/java/com/datastax/driver/core/utils/SimpleConvictionPolicy.java

diff --git a/driver-core/src/main/java/com/datastax/driver/core/Cluster.java b/driver-core/src/main/java/com/datastax/driver/core/Cluster.java
index ab594a3f652..2c1d3313369 100644
--- a/driver-core/src/main/java/com/datastax/driver/core/Cluster.java
+++ b/driver-core/src/main/java/com/datastax/driver/core/Cluster.java
@@ -2,11 +2,15 @@
 
 import java.net.InetAddress;
 import java.net.InetSocketAddress;
-import java.util.ArrayList;
-import java.util.Arrays;
-import java.util.List;
+import java.util.*;
+import java.util.concurrent.*;
 
+import com.datastax.driver.core.transport.Connection;
 import com.datastax.driver.core.transport.ConnectionException;
+import com.datastax.driver.core.utils.SimpleConvictionPolicy;
+
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 /**
  * Information and known state of a Cassandra cluster.
@@ -30,12 +34,37 @@
  * against nodes set as contact points. If you want to limit the number of nodes
 * to which this driver connects, prefer maxConnectedNode().
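
With this commit, each contact point is wrapped in a Host object that carries its own HealthMonitor and ConvictionPolicy, but the public entry point is unchanged. A minimal usage sketch, using only the calls exercised by SessionTest in this series (the address is illustrative):

    Cluster cluster = new Cluster.Builder()
        .addContactPoint("localhost")
        .build();
    Session session = cluster.connect();
    session.execute("USE test_ks");
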
 */
-public class Cluster {
+public class Cluster implements Host.StateListener {
+
+    private static final Logger logger = LoggerFactory.getLogger(Cluster.class);
+
+    private final List<Host> contactPoints;
+
+    private final Set<Session> sessions = new CopyOnWriteArraySet<Session>();
 
+    final Connection.Factory connectionFactory = new Connection.Factory();
 
-    private final List<InetSocketAddress> contactPoints;
+    private final ScheduledExecutorService scheduledExecutor = Executors.newScheduledThreadPool(2);
+
+    // TODO: Make that configurable
+    private final ConvictionPolicy.Factory convictionPolicyFactory = new SimpleConvictionPolicy.Factory();
 
     private Cluster(List<InetSocketAddress> contactPoints) {
-        this.contactPoints = contactPoints;
+        this.contactPoints = new ArrayList<Host>(contactPoints.size());
+        for (InetSocketAddress address : contactPoints)
+            this.contactPoints.add(new Host(address, convictionPolicyFactory));
+    }
+
+    public void onUp(Host host) {
+        // Nothing specific
+        // TODO: We should register reconnection attempts, to avoid starting two of
+        // them and, if this method is called by other means than the
+        // reconnection handler (like C* tells us it's up), cancel the latter
+    }
+
+    public void onDown(Host host) {
+        // Note: we'll basically waste the first successful reconnection that way, but it's probably not a big deal
+        logger.debug(String.format("%s is down, scheduling connection retries", host));
+        new ReconnectionHandler(host, scheduledExecutor, connectionFactory).start();
     }
 
     /**
@@ -57,12 +86,12 @@ public static Cluster buildFrom(Configuration config) {
      * @return a new session on this cluster set to no keyspace.
      */
     public Session connect() {
-        try {
-            return new Session(contactPoints);
-        } catch (ConnectionException e) {
-            // TODO: Figure what exception we want to return (but maybe the ConnectionException is good enough)
-            throw new RuntimeException(e);
-        }
+        for (Host host : contactPoints)
+            host.monitor().register(this);
+
+        Session session = new Session(this, contactPoints);
+        sessions.add(session);
+        return session;
     }
 
     /**
@@ -73,6 +102,7 @@ public Session connect() {
      * @return a new session on this cluster set to no keyspace.
      */
     public Session connect(AuthInfo authInfo) {
+        // TODO
         return null;
     }
 
@@ -85,7 +115,7 @@
      * {@code keyspaceName}.
      */
     public Session connect(String keyspace) {
-        return null;
+        return connect().use(keyspace);
     }
 
     /**
@@ -97,7 +127,7 @@
     * {@code keyspaceName}.
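
Cluster registers itself with each host's monitor so that onDown can schedule a ReconnectionHandler. Any other component can observe the same up/down transitions; a hypothetical listener for illustration only (the class name and bodies are not part of the patch):

    class LoggingListener implements Host.StateListener {
        public void onUp(Host host) {
            System.out.println(host + " came back up");
        }
        public void onDown(Host host) {
            System.out.println(host + " was marked down");
        }
    }

It would be registered the same way Cluster.connect() registers itself: host.monitor().register(new LoggingListener());
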
      */
     public Session connect(String keyspace, AuthInfo authInfo) {
-        return null;
+        return connect(authInfo).use(keyspace);
     }
 
     public interface Configuration {
diff --git a/driver-core/src/main/java/com/datastax/driver/core/ConvictionPolicy.java b/driver-core/src/main/java/com/datastax/driver/core/ConvictionPolicy.java
new file mode 100644
index 00000000000..5ce39aab17f
--- /dev/null
+++ b/driver-core/src/main/java/com/datastax/driver/core/ConvictionPolicy.java
@@ -0,0 +1,16 @@
+package com.datastax.driver.core;
+
+import com.datastax.driver.core.transport.ConnectionException;
+
+public interface ConvictionPolicy {
+
+    // Return whether to mark the node down
+    public boolean addFailure(ConnectionException exception);
+
+    // Return whether to mark the node down
+    public boolean addFailureFromExternalDetector();
+
+    public interface Factory {
+        public ConvictionPolicy create(Host host);
+    }
+}
diff --git a/driver-core/src/main/java/com/datastax/driver/core/Host.java b/driver-core/src/main/java/com/datastax/driver/core/Host.java
new file mode 100644
index 00000000000..446b55144d2
--- /dev/null
+++ b/driver-core/src/main/java/com/datastax/driver/core/Host.java
@@ -0,0 +1,60 @@
+package com.datastax.driver.core;
+
+import java.net.InetSocketAddress;
+
+import com.datastax.driver.core.pool.HealthMonitor;
+import com.datastax.driver.core.transport.ConnectionException;
+
+/**
+ * A Cassandra node.
+ */
+public class Host {
+
+    private final InetSocketAddress address;
+    private final HealthMonitor monitor;
+
+    Host(InetSocketAddress address, ConvictionPolicy.Factory policy) {
+        if (address == null || policy == null)
+            throw new NullPointerException();
+
+        this.address = address;
+        this.monitor = new HealthMonitor(this, policy.create(this));
+    }
+
+    public InetSocketAddress getAddress() {
+        return address;
+    }
+
+    public HealthMonitor monitor() {
+        return monitor;
+    }
+
+    @Override
+    public final int hashCode() {
+        return address.hashCode();
+    }
+
+    @Override
+    public final boolean equals(Object o) {
+        if (!(o instanceof Host))
+            return false;
+
+        return address.equals(((Host)o).address);
+    }
+
+    @Override
+    public String toString() {
+        return address.toString();
+    }
+
+    // TODO: see if we can make that package protected (we should at least not
+    // make the HealthMonitor interface public itself, but only the
+    // AbstractHealthMonitor abstract methods). The rationale being that we don't
+    // want users to call onUp and onDown themselves.
+     */
+    public interface StateListener {
+
+        public void onUp(Host host);
+        public void onDown(Host host);
+
+    }
+}
diff --git a/driver-core/src/main/java/com/datastax/driver/core/LoadBalancingPolicy.java b/driver-core/src/main/java/com/datastax/driver/core/LoadBalancingPolicy.java
new file mode 100644
index 00000000000..65cea1bf6b0
--- /dev/null
+++ b/driver-core/src/main/java/com/datastax/driver/core/LoadBalancingPolicy.java
@@ -0,0 +1,11 @@
+package com.datastax.driver.core;
+
+import java.util.Collection;
+import java.util.Iterator;
+
+public interface LoadBalancingPolicy extends Host.StateListener {
+
+    public void initialize(Collection<Host> hosts);
+
+    public Iterator<Host> newQueryPlan();
+}
diff --git a/driver-core/src/main/java/com/datastax/driver/core/PreparedStatement.java b/driver-core/src/main/java/com/datastax/driver/core/PreparedStatement.java
index 087e6d7d017..d1b89ca7b3b 100644
--- a/driver-core/src/main/java/com/datastax/driver/core/PreparedStatement.java
+++ b/driver-core/src/main/java/com/datastax/driver/core/PreparedStatement.java
@@ -21,13 +21,22 @@ private PreparedStatement(Columns metadata, int id) {
         this.id = id;
     }
 
-    static PreparedStatement fromMessage(ResultMessage.Prepared msg) {
-
-        Columns.Definition[] defs = new Columns.Definition[msg.metadata.names.size()];
-        for (int i = 0; i < defs.length; i++)
-            defs[i] = Columns.Definition.fromTransportSpecification(msg.metadata.names.get(i));
-
-        return new PreparedStatement(new Columns(defs), msg.statementId);
+    static PreparedStatement fromMessage(ResultMessage msg) {
+        switch (msg.kind) {
+            case PREPARED:
+                ResultMessage.Prepared pmsg = (ResultMessage.Prepared)msg;
+                Columns.Definition[] defs = new Columns.Definition[pmsg.metadata.names.size()];
+                for (int i = 0; i < defs.length; i++)
+                    defs[i] = Columns.Definition.fromTransportSpecification(pmsg.metadata.names.get(i));
+
+                return new PreparedStatement(new Columns(defs), pmsg.statementId);
+            case VOID:
+            case ROWS:
+            case SET_KEYSPACE:
+                throw new RuntimeException("ResultSet received when a prepared statement was expected");
+            default:
+                throw new AssertionError();
+        }
     }
 
     /**
diff --git a/driver-core/src/main/java/com/datastax/driver/core/ReconnectionHandler.java b/driver-core/src/main/java/com/datastax/driver/core/ReconnectionHandler.java
new file mode 100644
index 00000000000..c048e576f00
--- /dev/null
+++ b/driver-core/src/main/java/com/datastax/driver/core/ReconnectionHandler.java
@@ -0,0 +1,53 @@
+package com.datastax.driver.core;
+
+import java.util.concurrent.ScheduledExecutorService;
+import java.util.concurrent.TimeUnit;
+
+import com.datastax.driver.core.transport.Connection;
+import com.datastax.driver.core.transport.ConnectionException;
+
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+public class ReconnectionHandler implements Runnable {
+
+    private static final Logger logger = LoggerFactory.getLogger(ReconnectionHandler.class);
+
+    private final Host host;
+    private final ScheduledExecutorService executor;
+    private final Connection.Factory factory;
+
+    // The next delay in milliseconds
+    // TODO: implement something better than "every 3 seconds"
+    private int nextDelay = 3000;
+
+    public ReconnectionHandler(Host host, ScheduledExecutorService executor, Connection.Factory factory) {
+        this.host = host;
+        this.executor = executor;
+        this.factory = factory;
+    }
+
+    public void start() {
+        executor.schedule(this, nextDelay, TimeUnit.MILLISECONDS);
+    }
+
+    public void run() {
+        try {
+            factory.open(host);
+            // If we're successful, the node is up and
ready + logger.debug(String.format("Successful connection to %s, setting host UP", host)); + host.monitor().reset(); + } catch (ConnectionException e) { + // TODO: log the failure and implement some better policy of retry + scheduleRetry(); + } catch (Exception e) { + // TODO: log that something is wrong + scheduleRetry(); + } + } + + private void scheduleRetry() { + logger.debug(String.format("Failed connection to %s, scheduling retry in %d milliseconds", host, nextDelay)); + executor.schedule(this, nextDelay, TimeUnit.MILLISECONDS); + } +} diff --git a/driver-core/src/main/java/com/datastax/driver/core/ResultSet.java b/driver-core/src/main/java/com/datastax/driver/core/ResultSet.java index ccf9771af1b..10580a4c8c9 100644 --- a/driver-core/src/main/java/com/datastax/driver/core/ResultSet.java +++ b/driver-core/src/main/java/com/datastax/driver/core/ResultSet.java @@ -25,7 +25,6 @@ private ResultSet(Columns metadata, Queue> rows) { } static ResultSet fromMessage(ResultMessage msg) { - switch (msg.kind) { case VOID: return EMPTY; @@ -36,8 +35,13 @@ static ResultSet fromMessage(ResultMessage msg) { defs[i] = Columns.Definition.fromTransportSpecification(r.result.metadata.names.get(i)); return new ResultSet(new Columns(defs), new ArrayDeque(r.result.rows)); + case SET_KEYSPACE: + // TODO: we might want to do more with such result + return EMPTY; + case PREPARED: + throw new RuntimeException("Prepared statement received when a ResultSet was expected"); default: - throw new IllegalArgumentException("Cannot create a ResultSet from " + msg); + throw new AssertionError(); } } diff --git a/driver-core/src/main/java/com/datastax/driver/core/RoundRobinPolicy.java b/driver-core/src/main/java/com/datastax/driver/core/RoundRobinPolicy.java new file mode 100644 index 00000000000..d962991af41 --- /dev/null +++ b/driver-core/src/main/java/com/datastax/driver/core/RoundRobinPolicy.java @@ -0,0 +1,81 @@ +package com.datastax.driver.core; + +import java.util.*; +import java.util.concurrent.atomic.AtomicInteger; + +public class RoundRobinPolicy implements LoadBalancingPolicy { + + private volatile Host[] liveHosts; + private final AtomicInteger index = new AtomicInteger(); + + public void initialize(Collection hosts) { + this.liveHosts = hosts.toArray(new Host[hosts.size()]); + this.index.set(new Random().nextInt(hosts.size())); + } + + public Iterator newQueryPlan() { + + final Host[] hosts = liveHosts; + final int startIdx = index.getAndIncrement(); + + // Overflow protection; not theoretically thread safe but should be good enough + if (startIdx > Integer.MAX_VALUE - 10000) + index.set(0); + + return new Iterator() { + + private int idx = startIdx; + private int remaining = hosts.length; + + public boolean hasNext() { + return remaining > 0; + } + + public Host next() { + Host h = hosts[idx++ % hosts.length]; + remaining--; + return h; + } + + public void remove() { + throw new UnsupportedOperationException(); + } + }; + } + + public synchronized void onUp(Host host) { + + for (Host h : liveHosts) + if (h.equals(host)) + return; + + Host[] newHosts = new Host[liveHosts.length + 1]; + System.arraycopy(liveHosts, 0, newHosts, 0, liveHosts.length); + newHosts[newHosts.length - 1] = host; + liveHosts = newHosts; + } + + public synchronized void onDown(Host host) { + int idx = -1; + for (int i = 0; i < liveHosts.length; i++) { + if (liveHosts[i].equals(host)) { + idx = i; + break; + } + } + + if (idx == -1) + return; + + Host[] newHosts = new Host[liveHosts.length - 1]; + if (idx == 0) { + 
System.arraycopy(liveHosts, 1, newHosts, 0, newHosts.length); + } else if (idx == liveHosts.length - 1) { + System.arraycopy(liveHosts, 0, newHosts, 0, newHosts.length); + } else { + System.arraycopy(liveHosts, 0, newHosts, 0, idx); + System.arraycopy(liveHosts, idx + 1, newHosts, idx, liveHosts.length - idx - 1); + } + liveHosts = newHosts; + } +} diff --git a/driver-core/src/main/java/com/datastax/driver/core/Session.java b/driver-core/src/main/java/com/datastax/driver/core/Session.java index 87f38ece0b2..0b7bcbb8452 100644 --- a/driver-core/src/main/java/com/datastax/driver/core/Session.java +++ b/driver-core/src/main/java/com/datastax/driver/core/Session.java @@ -1,9 +1,10 @@ package com.datastax.driver.core; -import java.util.Arrays; -import java.util.List; import java.net.InetSocketAddress; +import java.util.*; +import java.util.concurrent.*; +import com.datastax.driver.core.pool.HostConnectionPool; import com.datastax.driver.core.transport.Connection; import com.datastax.driver.core.transport.ConnectionException; @@ -29,13 +30,11 @@ public class Session { private static final Logger logger = LoggerFactory.getLogger(Session.class); - // TODO: we can do better :) - private final Connection connection; + private final Manager manager; // Package protected, only Cluster should construct that. - Session(List addresses) throws ConnectionException { - Connection.Factory factory = new Connection.Factory(addresses.get(0)); - this.connection = factory.open(); + Session(Cluster cluster, List hosts) { + this.manager = new Manager(cluster, hosts); } /** @@ -48,7 +47,8 @@ public class Session { * @return this session. */ public Session use(String keyspace) { - return null; + manager.setKeyspace(keyspace); + return this; } /** @@ -66,27 +66,9 @@ public Session use(String keyspace) { * be empty and will be for any non SELECT query. */ public ResultSet execute(String query) { - - // TODO: this is not the real deal, just for tests + // TODO: Deal with exceptions try { - QueryMessage msg = new QueryMessage(query); - Connection.Future future = connection.write(msg); - Message.Response response = future.get(); - - if (response.type == Message.Type.RESULT) { - ResultMessage rmsg = (ResultMessage)response; - switch (rmsg.kind) { - case VOID: - case ROWS: - return ResultSet.fromMessage(rmsg); - } - logger.info("Got " + response); - return null; - } - else { - logger.info("Got " + response); - return null; - } + return toResultSet(manager.execute(new QueryMessage(query))); } catch (Exception e) { throw new RuntimeException(e); } @@ -150,26 +132,9 @@ public ResultSet.Future executeAsync(CQLQuery query) { * @return the prepared statement corresponding to {@code query}. 
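
The prepare/bind/execute cycle these Session changes enable is exercised by the PreparedStatementTest earlier in this series; condensed to its core (values taken from that test):

    PreparedStatement insert = session.prepare("INSERT INTO test_2 (k, i, f) VALUES (?, ?, ?)");
    BoundStatement bs = insert.newBoundStatement()
        .setString(0, "prep")   // bind by index
        .setInt("i", 1)         // or bind by name
        .setFloat(2, 0.1f);
    ResultSet rs = session.executePrepared(bs);
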
      */
     public PreparedStatement prepare(String query) {
-
-        // TODO: this is not the real deal, just for tests
+        // TODO: Deal with exceptions
         try {
-            PrepareMessage msg = new PrepareMessage(query);
-            Connection.Future future = connection.write(msg);
-            Message.Response response = future.get();
-
-            if (response.type == Message.Type.RESULT) {
-                ResultMessage rmsg = (ResultMessage)response;
-                switch (rmsg.kind) {
-                    case PREPARED:
-                        return PreparedStatement.fromMessage((ResultMessage.Prepared)rmsg);
-                }
-                logger.info("Got " + response);
-                return null;
-            }
-            else {
-                logger.info("Got " + response);
-                return null;
-            }
+            return toPreparedStatement(manager.execute(new PrepareMessage(query)));
         } catch (Exception e) {
             throw new RuntimeException(e);
         }
@@ -200,32 +165,9 @@ public PreparedStatement prepare(CQLQuery query) {
      * be empty and will be for any non SELECT query.
      */
     public ResultSet executePrepared(BoundStatement stmt) {
-        // TODO: this is not the real deal, just for tests
+        // TODO: Deal with exceptions
         try {
-            if (!stmt.ready())
-                throw new IllegalArgumentException("Provided statement has some variables not bound to values");
-
-            ExecuteMessage msg = new ExecuteMessage(stmt.statement.id, Arrays.asList(stmt.values));
-            Connection.Future future = connection.write(msg);
-            Message.Response response = future.get();
-
-            if (response.type == Message.Type.RESULT) {
-                ResultMessage rmsg = (ResultMessage)response;
-                switch (rmsg.kind) {
-                    case VOID:
-                    case ROWS:
-                        return ResultSet.fromMessage(rmsg);
-                }
-                logger.info("Got " + response);
-                return null;
-            }
-            else {
-                logger.info("Got " + response);
-                return null;
-            }
+            return toResultSet(manager.execute(new ExecuteMessage(stmt.statement.id, Arrays.asList(stmt.values))));
         } catch (Exception e) {
             throw new RuntimeException(e);
         }
@@ -243,4 +188,142 @@ public ResultSet executePrepared(BoundStatement stmt) {
     public ResultSet.Future executePreparedAsync(BoundStatement stmt) {
         return null;
     }
+
+    private ResultSet toResultSet(Connection.Future future) {
+        try {
+            Message.Response response = future.get();
+            switch (response.type) {
+                case RESULT:
+                    return ResultSet.fromMessage((ResultMessage)response);
+                case ERROR:
+                    // TODO: handle errors
+                    logger.info("Got " + response);
+                    return null;
+                default:
+                    // TODO: handle errors (set the connection to defunct as this means it is in a bad state)
+                    logger.info("Got " + response);
+                    return null;
+            }
+        } catch (Exception e) {
+            // TODO: do better
+            throw new RuntimeException(e);
+        }
+    }
+
+    private PreparedStatement toPreparedStatement(Connection.Future future) {
+        try {
+            Message.Response response = future.get();
+            switch (response.type) {
+                case RESULT:
+                    return PreparedStatement.fromMessage((ResultMessage)response);
+                case ERROR:
+                    // TODO: handle errors
+                    logger.info("Got " + response);
+                    return null;
+                default:
+                    // TODO: handle errors (set the connection to defunct as this means it is in a bad state)
+                    logger.info("Got " + response);
+                    return null;
+            }
+        } catch (Exception e) {
+            // TODO: do better
+            throw new RuntimeException(e);
+        }
+    }
+
+    private static class Manager implements Host.StateListener {
+
+        private final Cluster cluster;
+
+        private final ConcurrentMap<Host, HostConnectionPool> pools;
+        private final LoadBalancingPolicy loadBalancer;
+
+        private final HostConnectionPool.Configuration poolsConfiguration;
+
+        // TODO: Make that configurable
+        private final long DEFAULT_CONNECTION_TIMEOUT = 3000;
+
+        public Manager(Cluster cluster, List<Host> hosts) {
+            this.cluster = cluster;
+
+            // TODO: consider the use of NonBlockingHashMap
+            this.pools = new ConcurrentHashMap<Host, HostConnectionPool>(hosts.size());
+            this.loadBalancer = new RoundRobinPolicy();
+            this.poolsConfiguration = new HostConnectionPool.Configuration();
+
+            loadBalancer.initialize(hosts);
+
+            for (Host host : hosts) {
+                logger.debug("Adding new host " + host);
+                host.monitor().register(this);
+
+                addHost(host);
+                // If we fail to connect, the pool will be shutdown right away
+                if (pools.get(host).isShutdown()) {
+                    logger.debug("Cannot connect to " + host);
+                    pools.remove(host);
+                }
+            }
+        }
+
+        private HostConnectionPool addHost(Host host) {
+            return pools.put(host, new HostConnectionPool(host, cluster.connectionFactory, poolsConfiguration));
+        }
+
+        public void onUp(Host host) {
+            HostConnectionPool previous = addHost(host);
+            loadBalancer.onUp(host);
+
+            // This should not be necessary but it's harmless
+            if (previous != null)
+                previous.shutdown();
+        }
+
+        public void onDown(Host host) {
+            loadBalancer.onDown(host);
+            HostConnectionPool pool = pools.remove(host);
+
+            // This should not be necessary but it's harmless
+            if (pool != null)
+                pool.shutdown();
+        }
+
+        public void setKeyspace(String keyspace) {
+            poolsConfiguration.setKeyspace(keyspace);
+        }
+
+        /**
+         * Execute the provided request.
+         *
+         * This method will find a suitable node to connect to using the {@link LoadBalancingPolicy}
+         * and handle host failover.
+         *
+         * @return a future on the response to the request.
+         */
+        public Connection.Future execute(Message.Request msg) {
+
+            Iterator<Host> plan = loadBalancer.newQueryPlan();
+            while (plan.hasNext()) {
+                Host host = plan.next();
+                HostConnectionPool pool = pools.get(host);
+                if (pool == null || pool.isShutdown())
+                    continue;
+
+                try {
+                    Connection connection = pool.borrowConnection(DEFAULT_CONNECTION_TIMEOUT, TimeUnit.MILLISECONDS);
+                    try {
+                        return connection.write(msg);
+                    } finally {
+                        pool.returnConnection(connection);
+                    }
+                } catch (ConnectionException e) {
+                    logger.trace("Error: " + e.getMessage());
+                    // If we have any problem with the connection, just move to the next node.
+                    // If that happens during the write of the request, the pool acts on the error during returnConnection.
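
The query plan consumed by this loop comes from RoundRobinPolicy.newQueryPlan() above, which walks the live-host array from a rotating start index. The rotation reduced to a self-contained sketch, with strings standing in for Host objects (values illustrative):

    String[] hosts = { "a", "b", "c" };
    int start = 1; // what index.getAndIncrement() might have returned
    for (int i = 0; i < hosts.length; i++)
        System.out.println(hosts[(start + i) % hosts.length]); // prints b, c, a
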
+                }
+            }
+            // TODO: Change that to a "NoAvailableHostException"
+            throw new RuntimeException();
+        }
+    }
 }
diff --git a/driver-core/src/main/java/com/datastax/driver/core/pool/HealthMonitor.java b/driver-core/src/main/java/com/datastax/driver/core/pool/HealthMonitor.java
new file mode 100644
index 00000000000..01cf54de514
--- /dev/null
+++ b/driver-core/src/main/java/com/datastax/driver/core/pool/HealthMonitor.java
@@ -0,0 +1,55 @@
+package com.datastax.driver.core.pool;
+
+import java.util.Set;
+import java.util.concurrent.CopyOnWriteArraySet;
+
+import com.datastax.driver.core.ConvictionPolicy;
+import com.datastax.driver.core.Host;
+import com.datastax.driver.core.transport.ConnectionException;
+
+public class HealthMonitor {
+
+    private final Host host;
+    private final ConvictionPolicy policy;
+
+    private Set<Host.StateListener> listeners = new CopyOnWriteArraySet<Host.StateListener>();
+    private volatile boolean isUp;
+
+    public HealthMonitor(Host host, ConvictionPolicy policy) {
+        this.host = host;
+        this.isUp = true;
+        this.policy = policy;
+    }
+
+    public void register(Host.StateListener listener) {
+        listeners.add(listener);
+    }
+
+    public void unregister(Host.StateListener listener) {
+        listeners.remove(listener);
+    }
+
+    public boolean isUp() {
+        return isUp;
+    }
+
+    boolean signalConnectionFailure(ConnectionException exception) {
+        boolean isDown = policy.addFailure(exception);
+        if (isDown)
+            setDown();
+        return isDown;
+    }
+
+    // TODO: Should we bother making sure that multiple calls to this don't inform the listeners twice?
+    private void setDown() {
+        isUp = false;
+        for (Host.StateListener listener : listeners)
+            listener.onDown(host);
+    }
+
+    public void reset() {
+        isUp = true;
+        for (Host.StateListener listener : listeners)
+            listener.onUp(host);
+    }
+}
diff --git a/driver-core/src/main/java/com/datastax/driver/core/pool/HostConnectionPool.java b/driver-core/src/main/java/com/datastax/driver/core/pool/HostConnectionPool.java
new file mode 100644
index 00000000000..7247aedaa58
--- /dev/null
+++ b/driver-core/src/main/java/com/datastax/driver/core/pool/HostConnectionPool.java
@@ -0,0 +1,225 @@
+package com.datastax.driver.core.pool;
+
+import java.util.concurrent.*;
+import java.util.concurrent.atomic.AtomicBoolean;
+import java.util.concurrent.atomic.AtomicInteger;
+
+import com.datastax.driver.core.Host;
+import com.datastax.driver.core.transport.*;
+
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+public class HostConnectionPool {
+
+    private static final Logger logger = LoggerFactory.getLogger(HostConnectionPool.class);
+
+    private final Host host;
+    private final Connection.Factory factory;
+
+    private final AtomicBoolean isShutdown = new AtomicBoolean();
+
+    private final BlockingQueue<Connection> available = new LinkedBlockingQueue<Connection>();
+
+    private final AtomicInteger open = new AtomicInteger(0);
+    private final AtomicInteger borrowed = new AtomicInteger(0);
+    private final AtomicInteger waitingThreads = new AtomicInteger(0);
+
+    private final Configuration configuration;
+
+    // TODO: We could share that executor across pools
+    private final ExecutorService openExecutor = Executors.newCachedThreadPool();
+    private final Runnable newConnectionTask;
+
+    public HostConnectionPool(Host host, Connection.Factory factory, Configuration configuration) {
+        this.host = host;
+        this.factory = factory;
+        this.configuration = configuration;
+
+        this.newConnectionTask = new Runnable() {
+            public void run() {
+                // If, when we execute, there are still waiting threads, create a connection
+                if (waitingThreads.get() > 0)
+                    addConnectionIfUnderMaximum();
+            }
+        };
+
+        // Create initial core connections
+        for (int i = 0; i < configuration.coreConnections; i++)
+            if (!addConnection())
+                break;
+    }
+
+    public Connection borrowConnection(long timeout, TimeUnit unit) throws ConnectionException {
+        if (isShutdown.get())
+            // TODO: have a specific exception
+            throw new ConnectionException(host.getAddress(), "Pool is shutdown");
+
+        Connection connection = available.poll();
+        if (connection == null)
+        {
+            // Request the opening of a connection, unless we already know there are too many
+            if (open.get() < configuration.maxConnections)
+                openExecutor.submit(newConnectionTask);
+
+            connection = waitForConnection(timeout, unit);
+        }
+
+        borrowed.incrementAndGet();
+        connection.setKeyspace(configuration.keyspace);
+        return connection;
+    }
+
+    private boolean addConnectionIfUnderMaximum() {
+
+        // First, make sure we don't cross the allowed limit of open connections
+        for (;;) {
+            int opened = open.get();
+            if (opened >= configuration.maxConnections)
+                return false;
+
+            if (open.compareAndSet(opened, opened + 1))
+                break;
+        }
+        return addConnection();
+    }
+
+    private boolean addConnection() {
+        try {
+            available.offer(factory.open(host));
+
+            if (isShutdown.get()) {
+                discardAvailableConnections();
+                return false;
+            } else {
+                return true;
+            }
+        } catch (ConnectionException e) {
+            logger.debug("Connection error to " + host + ", signaling monitor");
+            if (host.monitor().signalConnectionFailure(e))
+                shutdown();
+            return false;
+        }
+    }
+
+    // This is guaranteed to either return a connection or throw an exception
+    private Connection waitForConnection(long timeout, TimeUnit unit) throws ConnectionException {
+        waitingThreads.incrementAndGet();
+        try {
+            Connection connection = available.poll(timeout, unit);
+            if (connection == null)
+                // TODO: maybe create a special exception for that
+                throw new ConnectionException(host.getAddress(), "No free connection available");
+            return connection;
+        } catch (InterruptedException e) {
+            throw new RuntimeException();
+        } finally {
+            waitingThreads.decrementAndGet();
+        }
+    }
+
+    public void returnConnection(Connection connection) {
+        borrowed.decrementAndGet();
+
+        if (connection.isDefunct()) {
+            if (host.monitor().signalConnectionFailure(connection.lastException()))
+                shutdown();
+            // TODO: make the close async
+            connection.close();
+            open.decrementAndGet();
+            return;
+        }
+
+        // Return the connection as available if we have <= core connections opened, or if we have waiting threads.
+        // Otherwise, close it (but if some other thread beats us at closing, keep available)
+        if (waitingThreads.get() > 0 || open.get() <= configuration.coreConnections || !closeConnectionIfIdle(connection)) {
+            available.offer(connection);
+
+            // Sanity check
+            if (isShutdown.get())
+                discardAvailableConnections();
+        }
+    }
+
+    public boolean closeConnectionIfIdle(Connection connection) {
+        for (;;) {
+            int opened = open.get();
+            if (opened <= configuration.coreConnections) {
+                return false;
+            }
+            assert opened > 0;
+            if (open.compareAndSet(opened, opened - 1))
+                break;
+        }
+        // TODO: maybe we should do the close asynchronously?
+        connection.close();
+        return true;
+    }
+
+    // Open connections if there are fewer than core, and close some if there are more than core and some are idle
+    public void ensureCoreSize() {
+        int opened = open.get();
+        if (opened < configuration.coreConnections) {
+            while (addConnectionIfUnderMaximum());
+        } else {
+            Connection connection = available.poll();
+            while (connection != null && closeConnectionIfIdle(connection))
+                connection = available.poll();
+        }
+    }
+
+    public boolean isShutdown() {
+        return isShutdown.get();
+    }
+
+    public void shutdown() {
+        if (!isShutdown.compareAndSet(false, true))
+            return;
+
+        logger.debug("Shutting down pool");
+
+        // TODO: we can have threads waiting for connection on the queue.
+        // It would be nice to be able to wake them up here (otherwise they
+        // will have to wait for the timeout). One option would be to feed some
+        // fake connection objects to available that borrow would recognize
+
+        discardAvailableConnections();
+    }
+
+    private void discardAvailableConnections() {
+        while (!available.isEmpty()) {
+            // TODO: If we make the close async, wait for it here
+            available.poll().close();
+            open.decrementAndGet();
+        }
+    }
+
+    public static class Configuration {
+
+        private volatile String keyspace;
+
+        private volatile int coreConnections = 2;
+        private volatile int maxConnections = 100;
+
+        public void setKeyspace(String keyspace) {
+            this.keyspace = keyspace;
+        }
+
+        public void setCoreConnections(int value) {
+            coreConnections = value;
+        }
+
+        public int getCoreConnections() {
+            return coreConnections;
+        }
+
+        public void setMaxConnections(int value) {
+            maxConnections = value;
+        }
+
+        public int getMaxConnections() {
+            return maxConnections;
+        }
+    }
+}
diff --git a/driver-core/src/main/java/com/datastax/driver/core/transport/Connection.java b/driver-core/src/main/java/com/datastax/driver/core/transport/Connection.java
index cdaf4713dc9..5a019cd266e 100644
--- a/driver-core/src/main/java/com/datastax/driver/core/transport/Connection.java
+++ b/driver-core/src/main/java/com/datastax/driver/core/transport/Connection.java
@@ -1,13 +1,13 @@
 package com.datastax.driver.core.transport;
 
-import com.datastax.driver.core.utils.SimpleFuture;
-
 import java.net.InetSocketAddress;
 import java.util.Collections;
-import java.util.concurrent.ExecutionException;
-import java.util.concurrent.Executors;
-import java.util.concurrent.ExecutorService;
+import java.util.concurrent.*;
 import java.util.concurrent.atomic.AtomicInteger;
+import java.util.concurrent.atomic.AtomicReference;
+
+import com.datastax.driver.core.Host;
+import com.datastax.driver.core.utils.SimpleFuture;
 
 import org.apache.cassandra.service.ClientState;
 import org.apache.cassandra.transport.*;
@@ -16,16 +16,21 @@
 import org.jboss.netty.bootstrap.ClientBootstrap;
 import org.jboss.netty.channel.*;
 import org.jboss.netty.channel.socket.nio.NioClientSocketChannelFactory;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 /**
 * A connection to a Cassandra Node.
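
Callers are expected to drive this pool with the borrow/try/finally/return pattern that Session.Manager.execute uses above; in isolation it looks like the following sketch (the pool and request variables and the surrounding method are assumed):

    Connection c = pool.borrowConnection(3000, TimeUnit.MILLISECONDS);
    try {
        return c.write(request);
    } finally {
        pool.returnConnection(c); // also detects and discards defunct connections
    }
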
 */
 public class Connection extends org.apache.cassandra.transport.Connection
 {
+    private static final Logger logger = LoggerFactory.getLogger(Connection.class);
+
     // TODO: that doesn't belong here
     private static final String CQL_VERSION = "3.0.0";
 
     public final InetSocketAddress address;
+    private final String name;
 
     private final ClientBootstrap bootstrap;
     private final Channel channel;
@@ -33,7 +38,12 @@ public class Connection extends org.apache.cassandra.transport.Connection
     private final Dispatcher dispatcher = new Dispatcher();
 
     private AtomicInteger inFlight = new AtomicInteger(0);
-    private volatile boolean shutdown;
+    private volatile boolean isClosed;
+    private volatile String keyspace;
+
+    private volatile boolean isDefunct;
+    private volatile ConnectionException exception;
+
     /**
      * Create a new connection to a Cassandra node.
@@ -43,24 +53,38 @@
      * @throws ConnectionException if the connection attempt fails or is
      * refused by the server.
      */
-    private Connection(InetSocketAddress address, Factory factory) throws ConnectionException {
+    private Connection(String name, InetSocketAddress address, Factory factory) throws ConnectionException {
         this.address = address;
         this.factory = factory;
+        this.name = name;
 
         this.bootstrap = factory.bootstrap();
         bootstrap.setPipelineFactory(new PipelineFactory(this));
 
         ChannelFuture future = bootstrap.connect(address);
-        // Wait until the connection attempt succeeds or fails.
-        this.channel = future.awaitUninterruptibly().getChannel();
-        if (!future.isSuccess())
-        {
-            bootstrap.releaseExternalResources();
-            throw new TransportException(address, "Cannot connect", future.getCause());
+        inFlight.incrementAndGet();
+        try {
+            // Wait until the connection attempt succeeds or fails.
+            this.channel = future.awaitUninterruptibly().getChannel();
+            if (!future.isSuccess())
+            {
+                logger.debug(String.format("[%s] Error connecting to %s%s", name, address, extractMessage(future.getCause())));
+                throw new TransportException(address, "Cannot connect", future.getCause());
+            }
+        } finally {
+            inFlight.decrementAndGet();
         }
 
+        logger.trace(String.format("[%s] Connection opened successfully", name));
         initializeTransport();
+        logger.trace(String.format("[%s] Transport initialized and ready", name));
+    }
+
+    private static String extractMessage(Throwable t) {
+        if (t == null || t.getMessage() == null || t.getMessage().isEmpty())
+            return "";
+        return " (" + t.getMessage() + ")";
     }
 
     private void initializeTransport() throws ConnectionException {
@@ -75,16 +99,54 @@ private void initializeTransport() throws ConnectionException {
                 case READY:
                     break;
                 case ERROR:
-                    throw new TransportException(address, String.format("Error initializing connection: %s", ((ErrorMessage)response).errorMsg));
+                    throw defunct(new TransportException(address, String.format("Error initializing connection: %s", ((ErrorMessage)response).errorMsg)));
                 case AUTHENTICATE:
                     throw new TransportException(address, "Authentication required but not yet supported");
                 default:
-                    throw new TransportException(address, String.format("Unexpected %s response message from server to a STARTUP message", response.type));
+                    throw defunct(new TransportException(address, String.format("Unexpected %s response message from server to a STARTUP message", response.type)));
             }
         } catch (ExecutionException e) {
-            throw new ConnectionException(address, "Unexpected error during transport initialization", e.getCause());
+            throw defunct(new ConnectionException(address, "Unexpected error during transport initialization", e.getCause()));
+        } catch (InterruptedException e) {
+            throw new RuntimeException(e);
+        }
+    }
+
+    public boolean isDefunct() {
+        return isDefunct;
+    }
+
+    public ConnectionException lastException() {
+        return exception;
+    }
+
+    private ConnectionException defunct(ConnectionException e) {
+        exception = e;
+        isDefunct = true;
+        return e;
+    }
+
+    public String keyspace() {
+        return keyspace;
+    }
+
+    public void setKeyspace(String keyspace) throws ConnectionException {
+        if (keyspace == null)
+            return;
+
+        if (this.keyspace != null && this.keyspace.equals(keyspace))
+            return;
+
+        try {
+            logger.trace(String.format("[%s] Setting keyspace %s", name, keyspace));
+            write(new QueryMessage("USE " + keyspace)).get();
+            this.keyspace = keyspace;
+        } catch (ConnectionException e) {
+            throw defunct(e);
+        } catch (ExecutionException e) {
+            throw defunct(new ConnectionException(address, "Error while setting keyspace", e));
        } catch (InterruptedException e) {
-            throw new RuntimeException();
+            throw new RuntimeException(e);
        }
    }
 
@@ -98,7 +160,10 @@ private void initializeTransport() throws ConnectionException {
      * @throws TransportException if an I/O error occurs while sending the request
      */
     public Future write(Message.Request request) throws ConnectionException {
-        if (shutdown)
+        if (isDefunct)
+            throw new ConnectionException(address, "Write attempt on defunct connection");
+
+        if (isClosed)
             throw new ConnectionException(address, "Connection has been closed");
 
         request.attach(this);
@@ -112,16 +177,26 @@ public Future write(Message.Request request) throws ConnectionException {
             Future future = new Future(this);
             // TODO: This assumes the connection is used synchronously, fix that at some point
-            dispatcher.setFuture(future);
+            dispatcher.futureRef.set(future);
 
+            logger.trace(String.format("[%s] writing request %s", name, request));
 
             ChannelFuture writeFuture = channel.write(request);
             writeFuture.awaitUninterruptibly();
             if (!writeFuture.isSuccess())
             {
-                dispatcher.setFuture(null);
-                throw new TransportException(address, "Error writting", writeFuture.getCause());
+                logger.debug(String.format("[%s] Error writing request %s", name, request));
+
+                ConnectionException ce;
+                if (writeFuture.getCause() instanceof java.nio.channels.ClosedChannelException) {
+                    ce = new TransportException(address, "Error writing: Closed channel");
+                } else {
+                    ce = new TransportException(address, "Error writing", writeFuture.getCause());
+                }
+                dispatcher.futureRef.set(null);
+                throw defunct(ce);
             }
 
+            logger.trace(String.format("[%s] request sent successfully", name));
             return future;
         } finally {
@@ -130,20 +205,27 @@
     }
 
     public void close() {
-        // Make sure all new writes are rejected
-        shutdown = true;
+        if (isClosed)
+            return;
 
-        try {
-            // Busy waiting, we just wait for request to be fully written, shouldn't take long
-            while (inFlight.get() > 0) {
-                Thread.sleep(10);
+        // TODO: put that at trace level
+        logger.debug(String.format("[%s] closing connection", name));
+
+        // Make sure all new writes are rejected
+        isClosed = true;
+
+        if (!isDefunct) {
+            try {
+                // Busy waiting, we just wait for request to be fully written, shouldn't take long
+                while (inFlight.get() > 0)
+                    Thread.sleep(10);
+            } catch (InterruptedException e) {
+                throw new RuntimeException(e);
             }
-        } catch (InterruptedException e) {
-            throw new RuntimeException(e);
         }
 
         channel.close().awaitUninterruptibly();
-        bootstrap.releaseExternalResources();
+        // Note: we must not call releaseExternalResources, because this shuts down the executors, which are shared
     }
 
     // Cruft needed because we reuse server side classes, but we don't care about it
     public void applyStateTransition(Message.Type requestType, Message.Type responseType) {};
     public ClientState clientState() { return null; };
 
+    // TODO: We shouldn't need one factory per host. We should just have one
+    // global factory that allows setting the connection parameters, and use that everywhere
     public static class Factory {
 
+        // TODO: We could share those amongst factories
         private final ExecutorService bossExecutor = Executors.newCachedThreadPool();
         private final ExecutorService workerExecutor = Executors.newCachedThreadPool();
 
-        private final InetSocketAddress address;
-
-        public Factory(InetSocketAddress address) {
-            this.address = address;
-        }
+        private final ConcurrentMap<Host, AtomicInteger> idGenerators = new ConcurrentHashMap<Host, AtomicInteger>();
 
         /**
          * Opens a new connection to the node this factory points to.
         *
         * @throws ConnectionException if connection attempt fails.
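
Putting the Connection pieces together, a direct (normally pool-mediated) use of the factory might look as follows; QueryMessage comes from the reused org.apache.cassandra.transport classes, and the host variable is assumed to exist:

    Connection.Factory factory = new Connection.Factory();
    Connection conn = factory.open(host);   // throws ConnectionException on failure
    conn.setKeyspace("test_ks");            // issues "USE test_ks" only when the keyspace changes
    Connection.Future f = conn.write(new QueryMessage("SELECT * FROM test"));
    Message.Response response = f.get();    // blocks until the node replies
    conn.close();
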
*/ - public Connection open() throws ConnectionException { - return new Connection(address, this); + public Connection open(Host host) throws ConnectionException { + InetSocketAddress address = host.getAddress(); + String name = address.toString() + "-" + getIdGenerator(host).getAndIncrement(); + return new Connection(name, address, this); + } + + private AtomicInteger getIdGenerator(Host host) { + AtomicInteger g = idGenerators.get(host); + if (g == null) { + g = new AtomicInteger(0); + AtomicInteger old = idGenerators.putIfAbsent(host, g); + if (old != null) + g = old; + } + return g; } private ClientBootstrap bootstrap() { @@ -183,34 +277,49 @@ private ClientBootstrap bootstrap() { return b; } - } private class Dispatcher extends SimpleChannelUpstreamHandler { - private volatile Future future; - - public void setFuture(Future future) { - this.future = future; - } + private final AtomicReference futureRef = new AtomicReference(); @Override public void messageReceived(ChannelHandlerContext ctx, MessageEvent e) { + logger.trace(String.format("[%s] received ", e.getMessage())); + + // As soon as we set the value to the currently set future, a new write could + // be started, so reset the futureRef *before* setting the future for this query. + Future future = futureRef.getAndSet(null); + // TODO: we should do something better than just throwing an exception if (future == null) throw new RuntimeException(String.format("Received %s but no future set", e.getMessage())); - // As soon as we set the value to the currently set future, a new write could - // be started, so reset the local variable to null *before* setting the future for this query. - Future current = future; - future = null; - if (!(e.getMessage() instanceof Message.Response)) { - current.setException(new TransportException(address, "Unexpected message received: " + e.getMessage())); + logger.debug(String.format("[%s] Received unexpected message: %s", name, e.getMessage())); + ConnectionException ce = new TransportException(address, "Unexpected message received: " + e.getMessage()); + defunct(ce); + future.setException(ce); } else { - current.set((Message.Response)e.getMessage()); + future.set((Message.Response)e.getMessage()); } } + + @Override + public void exceptionCaught(ChannelHandlerContext ctx, ExceptionEvent e) { + logger.trace(String.format("[%s] connection error", name), e.getCause()); + + // Ignore exception while writting, this will be handled by write() directly + if (inFlight.get() > 0) + return; + + ConnectionException ce = new TransportException(address, "Unexpected exception triggered", e.getCause()); + defunct(ce); + + Future future = futureRef.getAndSet(null); + if (future != null) + future.setException(ce); + } } public static class Future extends SimpleFuture { @@ -221,8 +330,7 @@ public Future(Connection connection) { } } - private static class PipelineFactory implements ChannelPipelineFactory - { + private static class PipelineFactory implements ChannelPipelineFactory { // Stateless handlers private static final Message.ProtocolDecoder messageDecoder = new Message.ProtocolDecoder(); private static final Message.ProtocolEncoder messageEncoder = new Message.ProtocolEncoder(); diff --git a/driver-core/src/main/java/com/datastax/driver/core/utils/SimpleConvictionPolicy.java b/driver-core/src/main/java/com/datastax/driver/core/utils/SimpleConvictionPolicy.java new file mode 100644 index 00000000000..77b13f320ac --- /dev/null +++ b/driver-core/src/main/java/com/datastax/driver/core/utils/SimpleConvictionPolicy.java 
@@ -0,0 +1,30 @@ +package com.datastax.driver.core.utils; + +import com.datastax.driver.core.ConvictionPolicy; +import com.datastax.driver.core.Host; +import com.datastax.driver.core.transport.ConnectionException; + +public class SimpleConvictionPolicy implements ConvictionPolicy { + + private final Host host; + + private SimpleConvictionPolicy(Host host) { + this.host = host; + } + + public boolean addFailure(ConnectionException exception) { + // TODO: be kinder + return true; + } + + public boolean addFailureFromExternalDetector() { + return true; + } + + public static class Factory implements ConvictionPolicy.Factory { + + public ConvictionPolicy create(Host host) { + return new SimpleConvictionPolicy(host); + } + } +} diff --git a/driver-core/src/test/java/com/datastax/driver/core/SessionTest.java b/driver-core/src/test/java/com/datastax/driver/core/SessionTest.java index 3c9883b9af2..5e1dfc0b601 100644 --- a/driver-core/src/test/java/com/datastax/driver/core/SessionTest.java +++ b/driver-core/src/test/java/com/datastax/driver/core/SessionTest.java @@ -19,82 +19,99 @@ public class SessionTest { public static void classSetUp() { Logger rootLogger = Logger.getRootLogger(); if (!rootLogger.getAllAppenders().hasMoreElements()) { - rootLogger.setLevel(Level.INFO); + rootLogger.setLevel(Level.DEBUG); rootLogger.addAppender(new ConsoleAppender(new PatternLayout("%-5p [%t]: %m%n"))); } } - @Test - public void SimpleExecuteTest() throws Exception { + //@Test + //public void MultiNodeExecuteTest() throws Exception { - Cluster cluster = new Cluster.Builder().addContactPoint("localhost").build(); - Session session = cluster.connect(); + // Cluster cluster = new Cluster.Builder().addContactPoints("127.0.0.1", "127.0.0.2").build(); + // Session session = cluster.connect(); - session.execute("CREATE KEYSPACE test_ks WITH strategy_class = SimpleStrategy AND strategy_options:replication_factor = 1"); - session.execute("USE test_ks"); - session.execute("CREATE TABLE test (k text PRIMARY KEY, i int, f float)"); + // session.execute("CREATE KEYSPACE test_ks WITH strategy_class = SimpleStrategy AND strategy_options:replication_factor = 1"); + // session.use("test_ks"); + // session.execute("CREATE TABLE test (k text PRIMARY KEY, i int, f float)"); - ResultSet rs; + // ResultSet rs; - rs = session.execute("INSERT INTO test (k, i, f) VALUES ('foo', 0, 0.2)"); - assertTrue(rs.isExhausted()); + // rs = session.execute("INSERT INTO test (k, i, f) VALUES ('foo', 0, 0.2)"); + // assertTrue(rs.isExhausted()); - rs = session.execute("INSERT INTO test (k, i, f) VALUES ('bar', 1, 3.4)"); - assertTrue(rs.isExhausted()); + // rs = session.execute("INSERT INTO test (k, i, f) VALUES ('bar', 1, 3.4)"); + // assertTrue(rs.isExhausted()); - rs = session.execute("SELECT * FROM test"); - List l = rs.fetchAll(); - assertEquals(2, l.size()); + // rs = session.execute("SELECT * FROM test"); + // List l = rs.fetchAll(); + // assertEquals(2, l.size()); - CQLRow r; - r = l.get(0); - assertEquals("bar", r.getString(0)); - assertEquals("bar", r.getString("k")); - assertEquals(1, r.getInt("i")); - assertEquals(3.4, r.getFloat("f"), 0.01); + // CQLRow r; + // r = l.get(0); + // assertEquals("bar", r.getString(0)); + // assertEquals("bar", r.getString("k")); + // assertEquals(1, r.getInt("i")); + // assertEquals(3.4, r.getFloat("f"), 0.01); - r = l.get(1); - assertEquals("foo", r.getString("k")); - assertEquals(0, r.getInt("i")); - assertEquals(0.2, r.getFloat("f"), 0.01); - } + // r = l.get(1); + // assertEquals("foo", 
r.getString("k")); + // assertEquals(0, r.getInt("i")); + // assertEquals(0.2, r.getFloat("f"), 0.01); + //} - @Test - public void PreparedStatementTest() throws Exception { + //@Test + //public void PreparedStatementTest() throws Exception { - Cluster cluster = new Cluster.Builder().addContactPoint("localhost").build(); - Session session = cluster.connect(); + // Cluster cluster = new Cluster.Builder().addContactPoint("localhost").build(); + // Session session = cluster.connect(); + + // session.execute("CREATE KEYSPACE test_ks WITH strategy_class = SimpleStrategy AND strategy_options:replication_factor = 1"); + // session.use("test_ks"); + // session.execute("CREATE TABLE test_2 (k text, i int, f float, PRIMARY KEY(k, i))"); - session.execute("CREATE KEYSPACE test_ks WITH strategy_class = SimpleStrategy AND strategy_options:replication_factor = 1"); - session.execute("USE test_ks"); - session.execute("CREATE TABLE test_2 (k text, i int, f float, PRIMARY KEY(k, i))"); + // PreparedStatement insertStmt = session.prepare("INSERT INTO test_2 (k, i, f) VALUES (?, ?, ?)"); + // PreparedStatement selectStmt = session.prepare("SELECT * FROM test_2 WHERE k = ?"); - PreparedStatement insertStmt = session.prepare("INSERT INTO test_2 (k, i, f) VALUES (?, ?, ?)"); - PreparedStatement selectStmt = session.prepare("SELECT * FROM test_2 WHERE k = ?"); + // ResultSet rs; + // BoundStatement bs; - ResultSet rs; - BoundStatement bs; + // bs = insertStmt.newBoundStatement().setString(0, "prep").setInt("i", 1).setFloat(2, 0.1f); + // rs = session.executePrepared(bs); - bs = insertStmt.newBoundStatement().setString(0, "prep").setInt("i", 1).setFloat(2, 0.1f); - rs = session.executePrepared(bs); + // bs = insertStmt.newBoundStatement().setString(0, "prep").setFloat("f", 0.2f).setInt(1, 2); + // rs = session.executePrepared(bs); - bs = insertStmt.newBoundStatement().setString(0, "prep").setFloat("f", 0.2f).setInt(1, 2); - rs = session.executePrepared(bs); + // bs = selectStmt.newBoundStatement().setString("k", "prep"); + // rs = session.executePrepared(bs); + // List l = rs.fetchAll(); + // assertEquals(2, l.size()); - bs = selectStmt.newBoundStatement().setString("k", "prep"); - rs = session.executePrepared(bs); - List l = rs.fetchAll(); - assertEquals(2, l.size()); + // CQLRow r; + // r = l.get(0); + // assertEquals("prep", r.getString(0)); + // assertEquals(1, r.getInt("i")); + // assertEquals(0.1, r.getFloat("f"), 0.01); - CQLRow r; - r = l.get(0); - assertEquals("prep", r.getString(0)); - assertEquals(1, r.getInt("i")); - assertEquals(0.1, r.getFloat("f"), 0.01); + // r = l.get(1); + // assertEquals("prep", r.getString("k")); + // assertEquals(2, r.getInt("i")); + // assertEquals(0.2, r.getFloat("f"), 0.01); + //} - r = l.get(1); - assertEquals("prep", r.getString("k")); - assertEquals(2, r.getInt("i")); - assertEquals(0.2, r.getFloat("f"), 0.01); + @Test + public void MultiNodeContinuousExecuteTest() throws Exception { + + Cluster cluster = new Cluster.Builder().addContactPoints("127.0.0.1", "127.0.0.2").build(); + Session session = cluster.connect(); + + session.execute("CREATE KEYSPACE test_ks WITH strategy_class = SimpleStrategy AND strategy_options:replication_factor = 2"); + session.use("test_ks"); + session.execute("CREATE TABLE test (k text PRIMARY KEY, i int, f float)"); + + for (int i = 0; i < 10000; ++i) { + System.out.println(">> " + i); + session.execute(String.format("INSERT INTO test (k, i, f) VALUES ('k%d', %d, %d.2)", i, i, i)); + Thread.currentThread().sleep(1000); + } } } From 
d93ebabf469cfe25de2b70c03284fc637d341923 Mon Sep 17 00:00:00 2001 From: Sylvain Lebresne Date: Mon, 30 Jul 2012 17:02:15 +0200 Subject: [PATCH 017/719] Cleanups and additional javadoc --- .../com/datastax/driver/core/Cluster.java | 113 ++++++++++----- .../driver/core/ConvictionPolicy.java | 25 +++- .../java/com/datastax/driver/core/Host.java | 136 +++++++++++++++++- .../driver/core/LoadBalancingPolicy.java | 40 +++++- .../com/datastax/driver/core/Session.java | 14 +- .../driver/core/pool/HealthMonitor.java | 55 ------- .../driver/core/pool/HostConnectionPool.java | 9 +- .../core/{ => utils}/RoundRobinPolicy.java | 22 ++- .../core/utils/SimpleConvictionPolicy.java | 2 + 9 files changed, 306 insertions(+), 110 deletions(-) delete mode 100644 driver-core/src/main/java/com/datastax/driver/core/pool/HealthMonitor.java rename driver-core/src/main/java/com/datastax/driver/core/{ => utils}/RoundRobinPolicy.java (81%) diff --git a/driver-core/src/main/java/com/datastax/driver/core/Cluster.java b/driver-core/src/main/java/com/datastax/driver/core/Cluster.java index 2c1d3313369..36ba79ca1b4 100644 --- a/driver-core/src/main/java/com/datastax/driver/core/Cluster.java +++ b/driver-core/src/main/java/com/datastax/driver/core/Cluster.java @@ -8,6 +8,7 @@ import com.datastax.driver.core.transport.Connection; import com.datastax.driver.core.transport.ConnectionException; import com.datastax.driver.core.utils.SimpleConvictionPolicy; +import com.datastax.driver.core.utils.RoundRobinPolicy; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -17,54 +18,32 @@ *

* This is the main entry point of the driver. A simple example of access to a * Cassandra cluster would be: - * + *

  *   Cluster cluster = new Cluster.Builder().addContactPoints("192.168.0.1").build();
  *   Session session = cluster.connect("db1");
  *
  *   for (CQLRow row : session.execute("SELECT * FROM table1"))
  *       // do something ...
- * 
+ * 
*

* A cluster object maintains a permanent connection to one of the cluster node * that it uses solely to maintain informations on the state and current * topology of the cluster. Using the connection, the driver will discover all * the nodes composing the cluster as well as new nodes joining the cluster. - * You can disable that connection through the disableStateConnection() method. - * This is however discouraged as it means queries will only ever be executed - * against node set as contact point. If you want to limit the number of nodes - * to which this driver connects to, prefer maxConnectedNode(). */ -public class Cluster implements Host.StateListener { +public class Cluster { private static final Logger logger = LoggerFactory.getLogger(Cluster.class); - private final List contactPoints; - - private final Set sessions = new CopyOnWriteArraySet(); - final Connection.Factory connectionFactory = new Connection.Factory(); - - private final ScheduledExecutorService scheduledExecutor = Executors.newScheduledThreadPool(2); - // TODO: Make that configurable private final ConvictionPolicy.Factory convictionPolicyFactory = new SimpleConvictionPolicy.Factory(); + final Manager manager; private Cluster(List contactPoints) { - this.contactPoints = new ArrayList(contactPoints.size()); + List hosts = new ArrayList(contactPoints.size()); for (InetSocketAddress address : contactPoints) - this.contactPoints.add(new Host(address, convictionPolicyFactory)); - } - - public void onUp(Host host) { - // Nothing specific - // TODO: We should register reconnection attempts, to avoid starting two of - // them and if this method is called by other means that the - // reconnection handler (like C* tells us it's up), cancel the latter - } - - public void onDown(Host host) { - // Note: we'll basically waste the first successful reconnection that way, but it's probably not a big deal - logger.debug(String.format("%s is down, scheduling connection retries", host)); - new ReconnectionHandler(host, scheduledExecutor, connectionFactory).start(); + hosts.add(new Host(address, convictionPolicyFactory)); + this.manager = new Manager(hosts); } /** @@ -86,12 +65,7 @@ public static Cluster buildFrom(Configuration config) { * @return a new session on this cluster sets to no keyspace. */ public Session connect() { - for (Host host : contactPoints) - host.monitor().register(this); - - Session session = new Session(this, contactPoints); - sessions.add(session); - return session; + return manager.newSession(); } /** @@ -130,11 +104,23 @@ public Session connect(String keyspace, AuthInfo authInfo) { return connect(authInfo).use(keyspace); } + /** + * Configuration for {@link Cluster} instances. + */ public interface Configuration { + /** + * Returns the initial Cassandra hosts to connect to. + * + * @return the initial Cassandra contact points. See {@link Builder#addContactPoint} + * for more details on contact points. + */ public List contactPoints(); } + /** + * Helper class to build {@link Cluster} instances. + */ public static class Builder implements Configuration { // TODO: might not be the best default port, look at changing in C* @@ -253,4 +239,61 @@ public Cluster build() { return Cluster.buildFrom(this); } } + + /** + * The sessions and hosts managed by this a Cluster instance. + * + * Note: the reason we create a Manager object separate from Cluster is + * that Manager is not publicly visible. For instance, we wouldn't want + * user to be able to call the {@link #onUp} and {@link #onDown} methods. 
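Since Builder implements Configuration directly, buildFrom() also serves as the extension point for configuration that does not come from a Builder at all. A sketch of a hand-rolled Configuration, assuming only the contactPoints() method this patch defines (the port below is a placeholder, since the default port is still a TODO in the Builder):

    import java.net.InetSocketAddress;
    import java.util.Arrays;
    import java.util.List;

    import com.datastax.driver.core.Cluster;

    // Sketch: a fixed, code-defined configuration instead of a Builder.
    class StaticConfiguration implements Cluster.Configuration {
        public List<InetSocketAddress> contactPoints() {
            return Arrays.asList(new InetSocketAddress("192.168.0.1", 9042));
        }
    }

    // usage: Cluster cluster = Cluster.buildFrom(new StaticConfiguration());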
+ */ + class Manager implements Host.StateListener { + + // Initial contacts point + private final List contactPoints; + + private final Set sessions = new CopyOnWriteArraySet(); + + final Connection.Factory connectionFactory = new Connection.Factory(); + + // TODO: make configurable + final LoadBalancingPolicy.Factory loadBalancingFactory = new RoundRobinPolicy.Factory(); + + private final ScheduledExecutorService scheduledExecutor = Executors.newScheduledThreadPool(2); + + private Manager(List contactPoints) { + this.contactPoints = contactPoints; + + // TODO: this probably belong some place else + for (Host host : contactPoints) + host.monitor().register(this); + } + + private Session newSession() { + Session session = new Session(Cluster.this, contactPoints); + sessions.add(session); + return session; + } + + public void onUp(Host host) { + // Nothing specific + // TODO: We should register reconnection attempts, to avoid starting two of + // them and if this method is called by other means that the + // reconnection handler (like C* tells us it's up), cancel the latter + } + + public void onDown(Host host) { + // Note: we'll basically waste the first successful reconnection that way, but it's probably not a big deal + logger.debug(String.format("%s is down, scheduling connection retries", host)); + new ReconnectionHandler(host, scheduledExecutor, connectionFactory).start(); + } + + public void onAdd(Host host) { + // TODO + } + + public void onRemove(Host host) { + // TODO + } + } } diff --git a/driver-core/src/main/java/com/datastax/driver/core/ConvictionPolicy.java b/driver-core/src/main/java/com/datastax/driver/core/ConvictionPolicy.java index 5ce39aab17f..38bbe88a074 100644 --- a/driver-core/src/main/java/com/datastax/driver/core/ConvictionPolicy.java +++ b/driver-core/src/main/java/com/datastax/driver/core/ConvictionPolicy.java @@ -2,15 +2,34 @@ import com.datastax.driver.core.transport.ConnectionException; +/** + * The policy with which to decide whether a host should be considered down. + */ public interface ConvictionPolicy { - // Return wether to mark the node down + /** + * Called when a connection error occurs on a connection to the host this policy applies to. + * + * @return {@code true} if the host should be considered down. + */ public boolean addFailure(ConnectionException exception); - // Return wether to mark the node down - public boolean addFailureFromExternalDetector(); + /** + * Called when the host has been detected up. + */ + public void reset(); + /** + * Simple factory interface to allow creating {@link ConvictionPolicy} instances. + */ public interface Factory { + + /** + * Creates a new ConvictionPolicy instance for {@code host}. + * + * @param host the host this policy applies to + * @return the newly created {@link ConvictionPolicy} instance. + */ public ConvictionPolicy create(Host host); } } diff --git a/driver-core/src/main/java/com/datastax/driver/core/Host.java b/driver-core/src/main/java/com/datastax/driver/core/Host.java index 446b55144d2..45e405baa8e 100644 --- a/driver-core/src/main/java/com/datastax/driver/core/Host.java +++ b/driver-core/src/main/java/com/datastax/driver/core/Host.java @@ -1,12 +1,15 @@ package com.datastax.driver.core; import java.net.InetSocketAddress; +import java.util.Set; +import java.util.concurrent.CopyOnWriteArraySet; -import com.datastax.driver.core.pool.HealthMonitor; import com.datastax.driver.core.transport.ConnectionException; /** * A Cassandra node. 
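The slimmed-down ConvictionPolicy contract above (addFailure() deciding whether to mark the node down, reset() clearing state once the node is back up) leaves room for policies smarter than SimpleConvictionPolicy's convict-on-first-error. A hypothetical threshold-based variant, sketched against the interfaces as they stand in this patch:

    import java.util.concurrent.atomic.AtomicInteger;

    import com.datastax.driver.core.ConvictionPolicy;
    import com.datastax.driver.core.Host;
    import com.datastax.driver.core.transport.ConnectionException;

    // Sketch only: convicts a host after a fixed number of accumulated failures.
    public class ThresholdConvictionPolicy implements ConvictionPolicy {

        private final AtomicInteger failures = new AtomicInteger();
        private final int threshold;

        private ThresholdConvictionPolicy(int threshold) {
            this.threshold = threshold;
        }

        public boolean addFailure(ConnectionException exception) {
            // Mark the host down once enough errors have accumulated.
            return failures.incrementAndGet() >= threshold;
        }

        public void reset() {
            // Host detected up again: forget past failures.
            failures.set(0);
        }

        public static class Factory implements ConvictionPolicy.Factory {
            public ConvictionPolicy create(Host host) {
                return new ThresholdConvictionPolicy(3);
            }
        }
    }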
+ * + * This class keeps the information the driver maintains on a given Cassandra node. */ public class Host { @@ -18,13 +21,27 @@ public class Host { throw new NullPointerException(); this.address = address; - this.monitor = new HealthMonitor(this, policy.create(this)); + this.monitor = new HealthMonitor(policy.create(this)); } + /** + * Returns the node address. + * + * @return the node {@link InetSocketAddress}. + */ public InetSocketAddress getAddress() { return address; } + /** + * Returns the health monitor for this host. + * + * The health monitor keeps track of the known host state (up or down). A + * class implementing {@link Host.StateListener} can also register against + * the health monitor to be notified when this node is detected down/up. + * + * @return the host {@link HealthMonitor}. + */ public HealthMonitor monitor() { return monitor; } @@ -47,14 +64,121 @@ public String toString() { return address.toString(); } - // TODO: see if we can make that package protected (we should at leat not - // make the HealthMonitor interface public itself, but only the - // AbstractHealthMonitor abstract methods). The rational being that we don't - // want user to call onUp and onDown themselves. + /** + * Tracks the health of a node and notifies listeners when a host is considered up or down. + */ + public class HealthMonitor { + + private final ConvictionPolicy policy; + + private Set listeners = new CopyOnWriteArraySet(); + private volatile boolean isUp; + + // This is a hack (I did not find a much cleaner option) to not expose + // signalConnectionFailure publicly but still be able to call it + // from other packages (typically from HostConnectionPool). + final Signaler signaler; + + HealthMonitor(ConvictionPolicy policy) { + this.policy = policy; + this.isUp = true; + this.signaler = new Signaler(); + } + + /** + * Register the provided listener to be notified on up/down events. + * + * Registering the same listener multiple times is a no-op. + * + * @param listener the new {@link Host.StateListener} to register. + */ + public void register(StateListener listener) { + listeners.add(listener); + } + + /** + * Unregister the provided listener. + * + * This method is a no-op if {@code listener} hadn't previously been + * registered against this monitor. + * + * @param listener the {@link Host.StateListener} to unregister. + */ + public void unregister(StateListener listener) { + listeners.remove(listener); + } + + /** + * Returns whether the host is considered up by this monitor. + * + * @return whether the node is considered up. + */ + public boolean isUp() { + return isUp; + } + + // TODO: Should we bother making sure that multiple calls to this don't inform the listeners twice? + private void setDown() { + isUp = false; + for (Host.StateListener listener : listeners) + listener.onDown(Host.this); + } + + /** + * Reset the monitor, setting the host as up and informing the + * registered listeners that the node is up. + */ + public void reset() { + isUp = true; + policy.reset(); + for (Host.StateListener listener : listeners) + listener.onUp(Host.this); + } + + public class Signaler { + public boolean signalConnectionFailure(ConnectionException exception) { + boolean isDown = policy.addFailure(exception); + if (isDown) + setDown(); + return isDown; + } + } + } + + /** + * Interface for listeners that are interested in host add, up, down and + * remove events. + */ public interface StateListener { + /** + * Called when a new node is added to the cluster. 
+ * + * The newly added node should be considered up. + * + * @param host the host that has been newly added. + */ + public void onAdd(Host host); + + /** + * Called when a node is detected up. + * + * @param host the host that has been detected up. + */ public void onUp(Host host); + + /** + * Called when a node is detected down. + * + * @param host the host that has been detected down. + */ public void onDown(Host host); + /** + * Called when a node is removed from the cluster. + * + * @param host the removed host. + */ + public void onRemove(Host host); } } diff --git a/driver-core/src/main/java/com/datastax/driver/core/LoadBalancingPolicy.java b/driver-core/src/main/java/com/datastax/driver/core/LoadBalancingPolicy.java index 65cea1bf6b0..94e6babb122 100644 --- a/driver-core/src/main/java/com/datastax/driver/core/LoadBalancingPolicy.java +++ b/driver-core/src/main/java/com/datastax/driver/core/LoadBalancingPolicy.java @@ -3,9 +3,45 @@ import java.util.Collection; import java.util.Iterator; +/** + * The policy that decides which Cassandra hosts to contact for each new query. + * + * The main method to implement is {@link LoadBalancingPolicy#newQueryPlan} and + * is used for each query to find which host to query, and which hosts to use as + * failover. + * + * The {@code LoadBalancingPolicy} is a {@link Host.StateListener} and is thus + * informed of host up/down events. For efficiency purposes, the policy is + * expected to exclude down hosts from query plans. + */ public interface LoadBalancingPolicy extends Host.StateListener { - public void initialize(Collection hosts); - + /** + * Returns the hosts to use for a given query. + * + * Each new query will call this method. The first host in the result will + * then be used to perform the query. In the event of a connection problem + * (the queried host is down or appears to be so), the next host will be + * used. If all hosts of the returned {@code Iterator} are down, the query + * will fail. + * + * @return an iterator of Host. The query is tried against the hosts + * returned by this iterator in order, until the query has been sent + * successfully to one of the hosts. + */ public Iterator newQueryPlan(); + + /** + * Simple factory interface to allow creating {@link LoadBalancingPolicy} instances. + */ + public interface Factory { + + /** + * Creates a new LoadBalancingPolicy instance over the provided (initial) {@code hosts}. + * + * @param hosts the initial hosts to use. + * @return the newly created {@link LoadBalancingPolicy} instance. 
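The newQueryPlan() contract is easiest to see from the caller's side: walk the iterator and stop at the first host that takes the query. A simplified sketch of that loop, where tryExecuteOn() stands in for the actual connection-and-write step (it is not part of this patch):

    import java.util.Iterator;

    import com.datastax.driver.core.Host;
    import com.datastax.driver.core.LoadBalancingPolicy;

    // Sketch: consume a query plan as the javadoc above describes.
    class QueryPlanWalker {

        boolean execute(LoadBalancingPolicy policy) {
            Iterator<Host> plan = policy.newQueryPlan();
            while (plan.hasNext()) {
                Host host = plan.next();
                if (tryExecuteOn(host))
                    return true;   // first responsive host wins
            }
            return false;          // every host in the plan failed
        }

        // Placeholder for borrowing a connection and writing the query.
        boolean tryExecuteOn(Host host) {
            return true;
        }
    }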
+ */ + public LoadBalancingPolicy create(Collection hosts); + } } diff --git a/driver-core/src/main/java/com/datastax/driver/core/Session.java b/driver-core/src/main/java/com/datastax/driver/core/Session.java index 0b7bcbb8452..ebf4b85a30e 100644 --- a/driver-core/src/main/java/com/datastax/driver/core/Session.java +++ b/driver-core/src/main/java/com/datastax/driver/core/Session.java @@ -248,11 +248,9 @@ public Manager(Cluster cluster, List hosts) { // TODO: consider the use of NonBlockingHashMap this.pools = new ConcurrentHashMap(hosts.size()); - this.loadBalancer = new RoundRobinPolicy(); + this.loadBalancer = cluster.manager.loadBalancingFactory.create(hosts); this.poolsConfiguration = new HostConnectionPool.Configuration(); - loadBalancer.initialize(hosts); - for (Host host : hosts) { logger.debug("Adding new host " + host); host.monitor().register(this); @@ -267,7 +265,7 @@ public Manager(Cluster cluster, List hosts) { } private HostConnectionPool addHost(Host host) { - return pools.put(host, new HostConnectionPool(host, cluster.connectionFactory, poolsConfiguration)); + return pools.put(host, new HostConnectionPool(host, host.monitor().signaler, cluster.manager.connectionFactory, poolsConfiguration)); } public void onUp(Host host) { @@ -288,6 +286,14 @@ public void onDown(Host host) { pool.shutdown(); } + public void onAdd(Host host) { + // TODO + } + + public void onRemove(Host host) { + // TODO + } + public void setKeyspace(String keyspace) { poolsConfiguration.setKeyspace(keyspace); } diff --git a/driver-core/src/main/java/com/datastax/driver/core/pool/HealthMonitor.java b/driver-core/src/main/java/com/datastax/driver/core/pool/HealthMonitor.java deleted file mode 100644 index 01cf54de514..00000000000 --- a/driver-core/src/main/java/com/datastax/driver/core/pool/HealthMonitor.java +++ /dev/null @@ -1,55 +0,0 @@ -package com.datastax.driver.core.pool; - -import java.util.Set; -import java.util.concurrent.CopyOnWriteArraySet; - -import com.datastax.driver.core.ConvictionPolicy; -import com.datastax.driver.core.Host; -import com.datastax.driver.core.transport.ConnectionException; - -public class HealthMonitor { - - private final Host host; - private final ConvictionPolicy policy; - - private Set listeners = new CopyOnWriteArraySet(); - private volatile boolean isUp; - - public HealthMonitor(Host host, ConvictionPolicy policy) { - this.host = host; - this.isUp = true; - this.policy = policy; - } - - public void register(Host.StateListener listener) { - listeners.add(listener); - } - - public void unregister(Host.StateListener listener) { - listeners.add(listener); - } - - public boolean isUp() { - return isUp; - } - - boolean signalConnectionFailure(ConnectionException exception) { - boolean isDown = policy.addFailure(exception); - if (isDown) - setDown(); - return isDown; - } - - // TODO: Should we bother making sure that multiple calls to this don't inform the listeners twice? 
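The Session changes above wire each HostConnectionPool to its host's monitor through the Signaler rather than through a public method, so only pool code can report connection failures. In sketch form, the failure path this patch sets up looks like:

    import com.datastax.driver.core.Host;
    import com.datastax.driver.core.transport.ConnectionException;

    // Sketch of the failure path: pool error -> signaler -> conviction
    // policy -> monitor marks the host down and informs its listeners.
    class PoolErrorHandler {

        void onConnectionError(Host host, Host.HealthMonitor.Signaler signaler,
                               ConnectionException e) {
            if (signaler.signalConnectionFailure(e)) {
                // The conviction policy convicted the host; listeners have
                // been told it is down, so stop handing out its connections.
                shutdownPoolFor(host);
            }
        }

        void shutdownPoolFor(Host host) {
            // placeholder: the real pool's shutdown() would be called here
        }
    }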
- private void setDown() { - isUp = false; - for (Host.StateListener listener : listeners) - listener.onDown(host); - } - - public void reset() { - isUp = true; - for (Host.StateListener listener : listeners) - listener.onUp(host); - } -} diff --git a/driver-core/src/main/java/com/datastax/driver/core/pool/HostConnectionPool.java b/driver-core/src/main/java/com/datastax/driver/core/pool/HostConnectionPool.java index 7247aedaa58..e55b912f891 100644 --- a/driver-core/src/main/java/com/datastax/driver/core/pool/HostConnectionPool.java +++ b/driver-core/src/main/java/com/datastax/driver/core/pool/HostConnectionPool.java @@ -15,6 +15,8 @@ public class HostConnectionPool { private static final Logger logger = LoggerFactory.getLogger(HostConnectionPool.class); private final Host host; + private final Host.HealthMonitor.Signaler failureSignaler; + private final Connection.Factory factory; private final AtomicBoolean isShutdown = new AtomicBoolean(); @@ -31,8 +33,9 @@ public class HostConnectionPool { private final ExecutorService openExecutor = Executors.newCachedThreadPool(); private final Runnable newConnectionTask; - public HostConnectionPool(Host host, Connection.Factory factory, Configuration configuration) { + public HostConnectionPool(Host host, Host.HealthMonitor.Signaler signaler, Connection.Factory factory, Configuration configuration) { this.host = host; + this.failureSignaler = signaler; this.factory = factory; this.configuration = configuration; @@ -96,7 +99,7 @@ private boolean addConnection() { } } catch (ConnectionException e) { logger.debug("Connection error to " + host + ", signaling monitor"); - if (host.monitor().signalConnectionFailure(e)) + if (failureSignaler.signalConnectionFailure(e)) shutdown(); return false; } @@ -122,7 +125,7 @@ public void returnConnection(Connection connection) { borrowed.decrementAndGet(); if (connection.isDefunct()) { - if (host.monitor().signalConnectionFailure(connection.lastException())) + if (failureSignaler.signalConnectionFailure(connection.lastException())) shutdown(); // TODO: make the close async connection.close(); diff --git a/driver-core/src/main/java/com/datastax/driver/core/RoundRobinPolicy.java b/driver-core/src/main/java/com/datastax/driver/core/utils/RoundRobinPolicy.java similarity index 81% rename from driver-core/src/main/java/com/datastax/driver/core/RoundRobinPolicy.java rename to driver-core/src/main/java/com/datastax/driver/core/utils/RoundRobinPolicy.java index d962991af41..02665f34f6d 100644 --- a/driver-core/src/main/java/com/datastax/driver/core/RoundRobinPolicy.java +++ b/driver-core/src/main/java/com/datastax/driver/core/utils/RoundRobinPolicy.java @@ -1,14 +1,17 @@ -package com.datastax.driver.core; +package com.datastax.driver.core.utils; import java.util.*; import java.util.concurrent.atomic.AtomicInteger; +import com.datastax.driver.core.Host; +import com.datastax.driver.core.LoadBalancingPolicy; + public class RoundRobinPolicy implements LoadBalancingPolicy { private volatile Host[] liveHosts; private final AtomicInteger index = new AtomicInteger(); - public void initialize(Collection hosts) { + private RoundRobinPolicy(Collection hosts) { this.liveHosts = hosts.toArray(new Host[hosts.size()]); this.index.set(new Random().nextInt(hosts.size())); } @@ -78,4 +81,19 @@ public synchronized void onDown(Host host) { } liveHosts = newHosts; } + + public void onAdd(Host host) { + onUp(host); + } + + public void onRemove(Host host) { + onDown(host); + } + + public static class Factory implements LoadBalancingPolicy.Factory 
{ + + public LoadBalancingPolicy create(Collection hosts) { + return new RoundRobinPolicy(hosts); + } + } } diff --git a/driver-core/src/main/java/com/datastax/driver/core/utils/SimpleConvictionPolicy.java b/driver-core/src/main/java/com/datastax/driver/core/utils/SimpleConvictionPolicy.java index 77b13f320ac..2c7ebcdb05c 100644 --- a/driver-core/src/main/java/com/datastax/driver/core/utils/SimpleConvictionPolicy.java +++ b/driver-core/src/main/java/com/datastax/driver/core/utils/SimpleConvictionPolicy.java @@ -21,6 +21,8 @@ public boolean addFailureFromExternalDetector() { return true; } + public void reset() {} + public static class Factory implements ConvictionPolicy.Factory { public ConvictionPolicy create(Host host) { From 0cb257edee4c875ef792791d69e712093d0ac4b1 Mon Sep 17 00:00:00 2001 From: Sylvain Lebresne Date: Mon, 6 Aug 2012 10:33:57 +0200 Subject: [PATCH 018/719] Javadoc updates --- .../datastax/driver/core/BoundStatement.java | 23 ++ .../java/com/datastax/driver/core/CQLRow.java | 324 +++++++++++++++++- .../com/datastax/driver/core/Columns.java | 13 +- .../driver/core/InvalidTypeException.java | 8 + 4 files changed, 365 insertions(+), 3 deletions(-) create mode 100644 driver-core/src/main/java/com/datastax/driver/core/InvalidTypeException.java diff --git a/driver-core/src/main/java/com/datastax/driver/core/BoundStatement.java b/driver-core/src/main/java/com/datastax/driver/core/BoundStatement.java index 89bc0e9fdc6..4b8791b832b 100644 --- a/driver-core/src/main/java/com/datastax/driver/core/BoundStatement.java +++ b/driver-core/src/main/java/com/datastax/driver/core/BoundStatement.java @@ -45,16 +45,39 @@ public boolean ready() { return remaining == 0; } + /** + * Returns the PreparedStatement on which this BoundStatement is based. + * + * @return the PreparedStatement on which this BoundStatement is based. + */ + public PreparedStatement preparedStatement() { + return statement; + } + + /** + * Returns whether the {@code i}th variable has been bound to a value. + * + * @throws IndexOutOfBoundsException if {@code i < 0 || i >= this.preparedStatement().variables().count()}. + * @return whether the {@code i}th variable has been bound to a value. + */ public boolean isSet(int i) { metadata().checkBounds(i); return values[i] != null; } + /** + * Returns whether the variable {@code name} has been bound to a value. + * + * @throws IllegalArgumentException if {@code name} is not a prepared + * variable, i.e. if {@code !this.preparedStatement().variables().names().contains(name)}. + * @return whether the {@code i}th variable has been bound to a value. + */ public boolean isSet(String name) { return isSet(metadata().getIdx(name)); } public BoundStatement bind(Object... values) { + // TODO return null; } diff --git a/driver-core/src/main/java/com/datastax/driver/core/CQLRow.java b/driver-core/src/main/java/com/datastax/driver/core/CQLRow.java index 4c3bab1d8db..097c0bb31e3 100644 --- a/driver-core/src/main/java/com/datastax/driver/core/CQLRow.java +++ b/driver-core/src/main/java/com/datastax/driver/core/CQLRow.java @@ -30,7 +30,7 @@ static CQLRow fromData(Columns metadata, List data) { } /** - * The columns contains in this CQLRow. + * The columns contained in this CQLRow. * * @return the columns contained in this CQLRow. */ @@ -38,16 +38,41 @@ public Columns columns() { return metadata; } + /** + * Returns whether the {@code i}th value of this row is NULL. + * + * @param i the index of the column to check. + * @return whether the {@code i}th value of this row is NULL. 
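The isSet()/ready() accessors documented above make the prepared-statement flow checkable step by step. A usage sketch, assuming a connected Session and the test_2 table from the tests earlier in the series:

    // Sketch: bind by index and by name, checking progress along the way.
    static void bindExample(Session session) {
        PreparedStatement insertStmt =
            session.prepare("INSERT INTO test_2 (k, i, f) VALUES (?, ?, ?)");

        BoundStatement bs = insertStmt.newBoundStatement()
                                      .setString(0, "prep")
                                      .setInt("i", 1);

        assert bs.isSet(0) && bs.isSet("i");
        assert !bs.ready();       // 'f' (index 2) is still unbound

        bs.setFloat(2, 0.1f);
        assert bs.ready();        // all three variables are now bound

        session.executePrepared(bs);
    }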
+ * + * @throws IndexOutOfBoundsException if {@code i < 0 || i >= this.columns().count()}. + */ public boolean isNull(int i) { metadata.checkBounds(i); return data.get(i) == null; } + /** + * Returns whether the value for column {@code name} in this row is NULL. + * + * @param name the name of the column to check. + * @return whether the value of column {@code name} is NULL. + * + * @throws IllegalArgumentException if {@code name} is not part of the + * ResultSet this row is part of, i.e. if {@code !this.columns().names().contains(name)}. + */ public boolean isNull(String name) { return isNull(metadata.getIdx(name)); } /** + * Returns the {@code i}th value of this row as a boolean. + * + * @param i the index of the column to retrieve. + * @return the boolean value of the {@code i}th column in this row. If the + * value is NULL, {@code false} is returned. + * + * @throws IndexOutOfBoundsException if {@code i < 0 || i >= this.columns().count()}. + * @throws InvalidTypeException if column {@code i} is not of type BOOLEAN. */ public boolean getBool(int i) { metadata.checkType(i, DataType.Native.BOOLEAN); @@ -59,10 +84,31 @@ public boolean getBool(int i) { return BooleanType.instance.compose(value); } + /** + * Returns the value of column {@code name} as a boolean. + * + * @param name the name of the column to retrieve. + * @return the boolean value of column {@code name}. If the value is NULL, + * {@code false} is returned. + * + * @throws IllegalArgumentException if {@code name} is not part of the + * ResultSet this row is part of, i.e. if {@code !this.columns().names().contains(name)}. + * @throws InvalidTypeException if column {@code name} is not of type BOOLEAN. + */ public boolean getBool(String name) { return getBool(metadata.getIdx(name)); } + /** + * Returns the {@code i}th value of this row as an integer. + * + * @param i the index of the column to retrieve. + * @return the value of the {@code i}th column in this row as an integer. If the + * value is NULL, {@code 0} is returned. + * + * @throws IndexOutOfBoundsException if {@code i < 0 || i >= this.columns().count()}. + * @throws InvalidTypeException if column {@code i} is not of type INT. + */ public int getInt(int i) { metadata.checkType(i, DataType.Native.INT); @@ -73,10 +119,32 @@ public int getInt(int i) { return Int32Type.instance.compose(value); } + /** + * Returns the value of column {@code name} as an integer. + * + * @param name the name of the column to retrieve. + * @return the value of column {@code name} as an integer. If the value is NULL, + * {@code 0} is returned. + * + * @throws IllegalArgumentException if {@code name} is not part of the + * ResultSet this row is part of, i.e. if {@code !this.columns().names().contains(name)}. + * @throws InvalidTypeException if column {@code name} is not of type INT. + */ public int getInt(String name) { return getInt(metadata.getIdx(name)); } + /** + * Returns the {@code i}th value of this row as a long. + * + * @param i the index of the column to retrieve. + * @return the value of the {@code i}th column in this row as a long. If the + * value is NULL, {@code 0L} is returned. + * + * @throws IndexOutOfBoundsException if {@code i < 0 || i >= this.columns().count()}. + * @throws InvalidTypeException if column {@code i} type is not one of: BIGINT, TIMESTAMP, + * INT or COUNTER. 
+ */ public long getLong(int i) { DataType type = metadata.checkType(i, DataType.Native.BIGINT, DataType.Native.TIMESTAMP, @@ -92,10 +160,32 @@ public long getLong(int i) { : LongType.instance.compose(value); } + /** + * Returns the value of column {@code name} as a long. + * + * @param name the name of the column to retrieve. + * @return the value of column {@code name} as a long. If the value is NULL, + * {@code 0L} is returned. + * + * @throws IllegalArgumentException if {@code name} is not part of the + * ResultSet this row is part of, i.e. if {@code !this.columns().names().contains(name)}. + * @throws InvalidTypeException if column {@code name} type is not one of: BIGINT, TIMESTAMP, + * INT or COUNTER. + */ public long getLong(String name) { return getLong(metadata.getIdx(name)); } + /** + * Returns the {@code i}th value of this row as a date. + * + * @param i the index of the column to retrieve. + * @return the value of the {@code i}th column in this row as a date. If the + * value is NULL, {@code null} is returned. + * + * @throws IndexOutOfBoundsException if {@code i < 0 || i >= this.columns().count()}. + * @throws InvalidTypeException if column {@code i} is not of type TIMESTAMP. + */ public Date getDate(int i) { metadata.checkType(i, DataType.Native.TIMESTAMP); @@ -106,10 +196,31 @@ public Date getDate(int i) { return DateType.instance.compose(value); } + /** + * Returns the value of column {@code name} as a date. + * + * @param name the name of the column to retrieve. + * @return the value of column {@code name} as a date. If the value is NULL, + * {@code null} is returned. + * + * @throws IllegalArgumentException if {@code name} is not part of the + * ResultSet this row is part of, i.e. if {@code !this.columns().names().contains(name)}. + * @throws InvalidTypeException if column {@code name} is not of type TIMESTAMP. + */ public Date getDate(String name) { return getDate(metadata.getIdx(name)); } + /** + * Returns the {@code i}th value of this row as a float. + * + * @param i the index of the column to retrieve. + * @return the value of the {@code i}th column in this row as a float. If the + * value is NULL, {@code 0.0f} is returned. + * + * @throws IndexOutOfBoundsException if {@code i < 0 || i >= this.columns().count()}. + * @throws InvalidTypeException if column {@code i} is not of type FLOAT. + */ public float getFloat(int i) { metadata.checkType(i, DataType.Native.FLOAT); @@ -120,10 +231,32 @@ public float getFloat(int i) { return FloatType.instance.compose(value); } + /** + * Returns the value of column {@code name} as a float. + * + * @param name the name of the column to retrieve. + * @return the value of column {@code name} as a float. If the value is NULL, + * {@code 0.0f} is returned. + * + * @throws IllegalArgumentException if {@code name} is not part of the + * ResultSet this row is part of, i.e. if {@code !this.columns().names().contains(name)}. + * @throws InvalidTypeException if column {@code name} is not of type FLOAT. + */ public float getFloat(String name) { return getFloat(metadata.getIdx(name)); } + /** + * Returns the {@code i}th value of this row as a double. + * + * @param i the index of the column to retrieve. + * @return the value of the {@code i}th column in this row as a double. If the + * value is NULL, {@code 0.0} is returned. + * + * @throws IndexOutOfBoundsException if {@code i < 0 || i >= this.columns().count()}. + * @throws InvalidTypeException if column {@code i} is not of type + * DOUBLE or FLOAT. 
+ */ public double getDouble(int i) { DataType type = metadata.checkType(i, DataType.Native.DOUBLE, DataType.Native.FLOAT); @@ -137,6 +270,36 @@ public double getDouble(int i) { : DoubleType.instance.compose(value); } + /** + * Returns the value of column {@code name} as a double. + * + * @param name the name of the column to retrieve. + * @return the value of column {@code name} as a double. If the value is NULL, + * {@code 0.0} is returned. + * + * @throws IllegalArgumentException if {@code name} is not part of the + * ResultSet this row is part of, i.e. if {@code !this.columns().names().contains(name)}. + * @throws InvalidTypeException if column {@code name} is not of type + * DOUBLE or FLOAT. + */ + public double getDouble(String name) { + return getDouble(metadata.getIdx(name)); + } + + /** + * Returns the {@code i}th value of this row as a ByteBuffer. + * + * Note: this method always returns the bytes composing the value, even if + * the column is not of type BLOB. That is, this method never throws an + * InvalidTypeException. However, if the type is not BLOB, it is up to the + * caller to handle the returned value correctly. + * + * @param i the index of the column to retrieve. + * @return the value of the {@code i}th column in this row as a ByteBuffer. If the + * value is NULL, {@code null} is returned. + * + * @throws IndexOutOfBoundsException if {@code i < 0 || i >= this.columns().count()}. + */ public ByteBuffer getByteBuffer(int i) { metadata.checkBounds(i); @@ -147,10 +310,39 @@ public ByteBuffer getByteBuffer(int i) { return value.duplicate(); } + /** + * Returns the value of column {@code name} as a ByteBuffer. + * + * Note: this method always returns the bytes composing the value, even if + * the column is not of type BLOB. That is, this method never throws an + * InvalidTypeException. However, if the type is not BLOB, it is up to the + * caller to handle the returned value correctly. + * + * @param name the name of the column to retrieve. + * @return the value of column {@code name} as a ByteBuffer. If the value is NULL, + * {@code null} is returned. + * + * @throws IllegalArgumentException if {@code name} is not part of the + * ResultSet this row is part of, i.e. if {@code !this.columns().names().contains(name)}. + */ public ByteBuffer getByteBuffer(String name) { return getByteBuffer(metadata.getIdx(name)); } + /** + * Returns the {@code i}th value of this row as a byte array. + * + * Note: this method always returns the bytes composing the value, even if + * the column is not of type BLOB. That is, this method never throws an + * InvalidTypeException. However, if the type is not BLOB, it is up to the + * caller to handle the returned value correctly. + * + * @param i the index of the column to retrieve. + * @return the value of the {@code i}th column in this row as a byte array. If the + * value is NULL, {@code null} is returned. + * + * @throws IndexOutOfBoundsException if {@code i < 0 || i >= this.columns().count()}. + */ public byte[] getBytes(int i) { ByteBuffer bb = getByteBuffer(i); byte[] result = new byte[bb.remaining()]; @@ -158,10 +350,36 @@ public byte[] getBytes(int i) { return result; } + /** + * Returns the value of column {@code name} as a byte array. + * + * Note: this method always returns the bytes composing the value, even if + * the column is not of type BLOB. That is, this method never throws an + * InvalidTypeException. However, if the type is not BLOB, it is up to the + * caller to handle the returned value correctly. 
+ * + * @param name the name of the column to retrieve. + * @return the value of column {@code name} as a byte array. If the value is NULL, + * {@code null} is returned. + * + * @throws IllegalArgumentException if {@code name} is not part of the + * ResultSet this row is part of, i.e. if {@code !this.columns().names().contains(name)}. + */ public byte[] getBytes(String name) { return getBytes(metadata.getIdx(name)); } + /** + * Returns the {@code i}th value of this row as a string. + * + * @param i the index of the column to retrieve. + * @return the value of the {@code i}th column in this row as a string. If the + * value is NULL, {@code null} is returned. + * + * @throws IndexOutOfBoundsException if {@code i < 0 || i >= this.columns().count()}. + * @throws InvalidTypeException if column {@code i} type is not one of: + * VARCHAR, TEXT or ASCII. + */ public String getString(int i) { DataType type = metadata.checkType(i, DataType.Native.VARCHAR, DataType.Native.TEXT, @@ -176,10 +394,32 @@ public String getString(int i) { : UTF8Type.instance.compose(value); } + /** + * Returns the value of column {@code name} as a string. + * + * @param name the name of the column to retrieve. + * @return the value of column {@code name} as a string. If the value is NULL, + * {@code null} is returned. + * + * @throws IllegalArgumentException if {@code name} is not part of the + * ResultSet this row is part of, i.e. if {@code !this.columns().names().contains(name)}. + * @throws InvalidTypeException if column {@code name} type is not one of: + * VARCHAR, TEXT or ASCII. + */ public String getString(String name) { return getString(metadata.getIdx(name)); } + /** + * Returns the {@code i}th value of this row as a variable length integer. + * + * @param i the index of the column to retrieve. + * @return the value of the {@code i}th column in this row as a variable + * length integer. If the value is NULL, {@code null} is returned. + * + * @throws IndexOutOfBoundsException if {@code i < 0 || i >= this.columns().count()}. + * @throws InvalidTypeException if column {@code i} is not of type VARINT. + */ public BigInteger getVarInt(int i) { metadata.checkType(i, DataType.Native.VARINT); @@ -190,10 +430,31 @@ public BigInteger getVarInt(int i) { return IntegerType.instance.compose(value); } + /** + * Returns the value of column {@code name} as a variable length integer. + * + * @param name the name of the column to retrieve. + * @return the value of column {@code name} as a variable length integer. + * If the value is NULL, {@code null} is returned. + * + * @throws IllegalArgumentException if {@code name} is not part of the + * ResultSet this row is part of, i.e. if {@code !this.columns().names().contains(name)}. + * @throws InvalidTypeException if column {@code name} is not of type VARINT. + */ public BigInteger getVarInt(String name) { return getVarInt(metadata.getIdx(name)); } + /** + * Returns the {@code i}th value of this row as a variable length decimal. + * + * @param i the index of the column to retrieve. + * @return the value of the {@code i}th column in this row as a variable + * length decimal. If the value is NULL, {@code null} is returned. + * + * @throws IndexOutOfBoundsException if {@code i < 0 || i >= this.columns().count()}. + * @throws InvalidTypeException if column {@code i} is not of type DECIMAL. 
+ */ public BigDecimal getDecimal(int i) { metadata.checkType(i, DataType.Native.DECIMAL); @@ -204,10 +465,32 @@ public BigDecimal getDecimal(int i) { return DecimalType.instance.compose(value); } + /** + * Returns the value of column {@code name} as a variable length decimal. + * + * @param name the name of the column to retrieve. + * @return the value of column {@code name} as a variable length decimal. + * If the value is NULL, {@code null} is returned. + * + * @throws IllegalArgumentException if {@code name} is not part of the + * ResultSet this row is part of, i.e. if {@code !this.columns().names().contains(name)}. + * @throws InvalidTypeException if column {@code name} is not of type DECIMAL. + */ public BigDecimal getDecimal(String name) { return getDecimal(metadata.getIdx(name)); } + /** + * Returns the {@code i}th value of this row as a UUID. + * + * @param i the index of the column to retrieve. + * @return the value of the {@code i}th column in this row as a UUID. + * If the value is NULL, {@code null} is returned. + * + * @throws IndexOutOfBoundsException if {@code i < 0 || i >= this.columns().count()}. + * @throws InvalidTypeException if column {@code i} is not of type UUID + * or TIMEUUID. + */ public UUID getUUID(int i) { DataType type = metadata.checkType(i, DataType.Native.UUID, DataType.Native.TIMEUUID); @@ -220,10 +503,49 @@ public UUID getUUID(int i) { : TimeUUIDType.instance.compose(value); } + /** + * Returns the value of column {@code name} as a UUID. + * + * @param name the name of the column to retrieve. + * @return the value of column {@code name} as a UUID. + * If the value is NULL, {@code null} is returned. + * + * @throws IllegalArgumentException if {@code name} is not part of the + * ResultSet this row is part of, i.e. if {@code !this.columns().names().contains(name)}. + * @throws InvalidTypeException if column {@code name} is not of type + * UUID or TIMEUUID. + */ public UUID getUUID(String name) { return getUUID(metadata.getIdx(name)); } + public List getList(int i, Class elts) { + // TODO + return null; + } + + public List getList(String name, Class elts) { + return getList(metadata.getIdx(name), klass); + } + + public Set getSet(int i, Class elts) { + // TODO + return null; + } + + public Set getSet(String name, Class elts) { + return getSet(metadata.getIdx(name), klass); + } + + public Map getMap(int i, Class keys, Class values) { + // TODO + return null; + } + + public Map getMap(String name, Class keys, Class values) { + return getMap(metadata.getIdx(name), keys, values); + } + @Override public String toString() { StringBuilder sb = new StringBuilder(); diff --git a/driver-core/src/main/java/com/datastax/driver/core/Columns.java b/driver-core/src/main/java/com/datastax/driver/core/Columns.java index 54b164eb682..d8a183913dc 100644 --- a/driver-core/src/main/java/com/datastax/driver/core/Columns.java +++ b/driver-core/src/main/java/com/datastax/driver/core/Columns.java @@ -66,6 +66,16 @@ public List asList() { return Arrays.asList(byIdx); } + public List names() { + // TODO + return null; + } + + public List types() { + // TODO + return null; + } + /** * Returns the name of the {@code i}th column in this metadata. * @@ -175,8 +185,7 @@ DataType.Native checkType(int i, DataType.Native... 
types) { if (type == defined) return type; - // TODO: change that exception - throw new IllegalArgumentException(String.format("Column %s is of type %s", name(i), defined)); + throw new InvalidTypeException(String.format("Column %s is of type %s", name(i), defined)); } public static class Definition { diff --git a/driver-core/src/main/java/com/datastax/driver/core/InvalidTypeException.java b/driver-core/src/main/java/com/datastax/driver/core/InvalidTypeException.java new file mode 100644 index 00000000000..467215b9c7a --- /dev/null +++ b/driver-core/src/main/java/com/datastax/driver/core/InvalidTypeException.java @@ -0,0 +1,8 @@ +package com.datastax.driver.core; + +// TODO: maybe we can extend something more precise like +// IllegalStateException? +public class InvalidTypeException extends RuntimeException { + + // TODO +} From 63445935aa1fb6d26cfe9be29db56a19efb89ea4 Mon Sep 17 00:00:00 2001 From: Sylvain Lebresne Date: Mon, 6 Aug 2012 10:44:09 +0200 Subject: [PATCH 019/719] Fix build --- .../java/com/datastax/driver/core/BoundStatement.java | 9 --------- .../src/main/java/com/datastax/driver/core/CQLRow.java | 4 ++-- .../com/datastax/driver/core/InvalidTypeException.java | 6 +++--- 3 files changed, 5 insertions(+), 14 deletions(-) diff --git a/driver-core/src/main/java/com/datastax/driver/core/BoundStatement.java b/driver-core/src/main/java/com/datastax/driver/core/BoundStatement.java index 4b8791b832b..9c310077af1 100644 --- a/driver-core/src/main/java/com/datastax/driver/core/BoundStatement.java +++ b/driver-core/src/main/java/com/datastax/driver/core/BoundStatement.java @@ -45,15 +45,6 @@ public boolean ready() { return remaining == 0; } - /** - * Returns the PreparedStatement on which this BoundStatement is based. - * - * @return the PreparedStatement on which this BoundStatement is based. - */ - public PreparedStatement preparedStatement() { - return statement; - } - /** * Returns whether the {@code i}th variable has been bound to a value. * diff --git a/driver-core/src/main/java/com/datastax/driver/core/CQLRow.java b/driver-core/src/main/java/com/datastax/driver/core/CQLRow.java index 097c0bb31e3..663fc57b7a8 100644 --- a/driver-core/src/main/java/com/datastax/driver/core/CQLRow.java +++ b/driver-core/src/main/java/com/datastax/driver/core/CQLRow.java @@ -525,7 +525,7 @@ public List getList(int i, Class elts) { } public List getList(String name, Class elts) { - return getList(metadata.getIdx(name), klass); + return getList(metadata.getIdx(name), elts); } public Set getSet(int i, Class elts) { @@ -534,7 +534,7 @@ public Set getSet(int i, Class elts) { } public Set getSet(String name, Class elts) { - return getSet(metadata.getIdx(name), klass); + return getSet(metadata.getIdx(name), elts); } public Map getMap(int i, Class keys, Class values) { diff --git a/driver-core/src/main/java/com/datastax/driver/core/InvalidTypeException.java b/driver-core/src/main/java/com/datastax/driver/core/InvalidTypeException.java index 467215b9c7a..8629e2f1ac5 100644 --- a/driver-core/src/main/java/com/datastax/driver/core/InvalidTypeException.java +++ b/driver-core/src/main/java/com/datastax/driver/core/InvalidTypeException.java @@ -1,8 +1,8 @@ package com.datastax.driver.core; -// TODO: maybe we can extend something more precise like -// IllegalStateException? 
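With checkType() now throwing the dedicated unchecked InvalidTypeException instead of a generic IllegalArgumentException, a caller can tell type mismatches apart from other argument problems. A small sketch against the getters documented above (the column names follow the tests earlier in the series):

    // Sketch: a type-mismatched getter now fails with InvalidTypeException.
    static void readRow(CQLRow row) {
        String k = row.getString("k"); // matches the declared text column
        int i = row.getInt("i");       // matches the declared int column

        try {
            row.getInt("k");           // 'k' is text, not INT
        } catch (InvalidTypeException e) {
            // raised by the checkType() call inside the getter
        }
    }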
public class InvalidTypeException extends RuntimeException { - // TODO + public InvalidTypeException(String msg) { + super(msg); + } } From e04c7ed7f73a20f7ce39fab6ea8f4a8367296780 Mon Sep 17 00:00:00 2001 From: Sylvain Lebresne Date: Thu, 9 Aug 2012 15:32:37 +0200 Subject: [PATCH 020/719] Handle asynchonous connections --- .../driver/core/transport/Connection.java | 94 +++++++++++++------ .../core/transport/StreamIdHandler.java | 31 ++++++ 2 files changed, 94 insertions(+), 31 deletions(-) create mode 100644 driver-core/src/main/java/com/datastax/driver/core/transport/StreamIdHandler.java diff --git a/driver-core/src/main/java/com/datastax/driver/core/transport/Connection.java b/driver-core/src/main/java/com/datastax/driver/core/transport/Connection.java index 5a019cd266e..417c9d6364b 100644 --- a/driver-core/src/main/java/com/datastax/driver/core/transport/Connection.java +++ b/driver-core/src/main/java/com/datastax/driver/core/transport/Connection.java @@ -2,6 +2,7 @@ import java.net.InetSocketAddress; import java.util.Collections; +import java.util.Iterator; import java.util.concurrent.*; import java.util.concurrent.atomic.AtomicInteger; import java.util.concurrent.atomic.AtomicReference; @@ -123,6 +124,7 @@ public ConnectionException lastException() { private ConnectionException defunct(ConnectionException e) { exception = e; isDefunct = true; + dispatcher.errorOutAllHandler(e); return e; } @@ -169,15 +171,11 @@ public Future write(Message.Request request) throws ConnectionException { request.attach(this); // We only support synchronous mode so far - if (!inFlight.compareAndSet(0, 1)) - throw new RuntimeException("Busy connection (this should not happen, please open a bug report if you see this)"); - + inFlight.incrementAndGet(); try { - Future future = new Future(this); - - // TODO: This assumes the connection is used synchronously, fix that at some point - dispatcher.futureRef.set(future); + Future future = dispatcher.newFuture(); + request.setStreamId(future.streamId); logger.trace(String.format("[%s] writting request %s", name, request)); ChannelFuture writeFuture = channel.write(request); @@ -192,7 +190,6 @@ public Future write(Message.Request request) throws ConnectionException { } else { ce = new TransportException(address, "Error writting", writeFuture.getCause()); } - dispatcher.futureRef.set(null); throw defunct(ce); } @@ -233,8 +230,6 @@ public void close() { public void applyStateTransition(Message.Type requestType, Message.Type responseType) {}; public ClientState clientState() { return null; }; - // TODO: We shouldn't need one factory per-host. We should just have one - // global factory that allow to set the connections parameters and use that everywhere public static class Factory { // TODO We could share those amongst factories @@ -279,29 +274,43 @@ private ClientBootstrap bootstrap() { } } + // TODO: Having a map of Integer -> ResponseHandler might be overkill if we + // use the connection synchronously. See if we want to support lighter + // dispatcher that assume synchronous? 
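Handing every request its own stream id (below, via Dispatcher.newFuture()) is what turns the connection from a one-request-at-a-time pipe into a multiplexed one: responses are matched back to their futures by id rather than by arrival order. A sketch of what that permits, assuming the Connection.write()/Future API from this patch (Message comes from the reused server-side transport classes):

    import org.apache.cassandra.transport.Message;

    import com.datastax.driver.core.transport.Connection;

    // Sketch only: two requests in flight on a single connection.
    class Pipelining {
        static void pipelined(Connection connection,
                              Message.Request r1, Message.Request r2) throws Exception {
            Connection.Future f1 = connection.write(r1); // gets its own stream id
            Connection.Future f2 = connection.write(r2); // distinct id, same channel

            // Completion order no longer has to follow submission order.
            f2.get();
            f1.get();
        }
    }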
private class Dispatcher extends SimpleChannelUpstreamHandler { - private final AtomicReference futureRef = new AtomicReference(); + private final StreamIdHandler streamIdHandler = new StreamIdHandler(); + private final ConcurrentMap pending = new ConcurrentHashMap(); + + public Future newFuture() throws ConnectionException { + int streamId = streamIdHandler.next(); + Future future = new Future(Connection.this, streamId); + ResponseHandler old = pending.put(streamId, future); + assert old == null; + return future; + } @Override public void messageReceived(ChannelHandlerContext ctx, MessageEvent e) { logger.trace(String.format("[%s] received ", e.getMessage())); - // As soon as we set the value to the currently set future, a new write could - // be started, so reset the futureRef *before* setting the future for this query. - Future future = futureRef.getAndSet(null); - - // TODO: we should do something better than just throwing an exception - if (future == null) - throw new RuntimeException(String.format("Received %s but no future set", e.getMessage())); - if (!(e.getMessage() instanceof Message.Response)) { logger.debug(String.format("[%s] Received unexpected message: %s", name, e.getMessage())); - ConnectionException ce = new TransportException(address, "Unexpected message received: " + e.getMessage()); - defunct(ce); - future.setException(ce); + defunct(new TransportException(address, "Unexpected message received: " + e.getMessage())); + // TODO: we should allow calling some handler for such error } else { - future.set((Message.Response)e.getMessage()); + Message.Response response = (Message.Response)e.getMessage(); + int streamId = response.getStreamId(); + if (streamId < 0) + // TODO: fix + throw new UnsupportedOperationException("Stream initiated server side are not yet supported"); + + ResponseHandler handler = pending.remove(streamId); + streamIdHandler.release(streamId); + if (handler == null) + // TODO: we should handle those with a default handler + throw new RuntimeException("No handler set"); + handler.onSet(response); } } @@ -313,21 +322,44 @@ public void exceptionCaught(ChannelHandlerContext ctx, ExceptionEvent e) { if (inFlight.get() > 0) return; - ConnectionException ce = new TransportException(address, "Unexpected exception triggered", e.getCause()); - defunct(ce); + defunct(new TransportException(address, "Unexpected exception triggered", e.getCause())); + } - Future future = futureRef.getAndSet(null); - if (future != null) - future.setException(ce); + public void errorOutAllHandler(ConnectionException ce) { + Iterator iter = pending.values().iterator(); + while (iter.hasNext()) + { + iter.next().onException(ce); + iter.remove(); + } } } - public static class Future extends SimpleFuture { - private final Connection connection; + public static class Future extends SimpleFuture implements ResponseHandler { + public final Connection connection; + public final int streamId; - public Future(Connection connection) { + public Future(Connection connection, int streamId) { this.connection = connection; + this.streamId = streamId; + } + + @Override + public void onSet(Message.Response response) + { + super.set(response); } + + @Override + public void onException(ConnectionException exception) + { + super.setException(exception); + } + } + + public interface ResponseHandler { + public void onSet(Message.Response response); + public void onException(ConnectionException exception); } private static class PipelineFactory implements ChannelPipelineFactory { diff --git 
a/driver-core/src/main/java/com/datastax/driver/core/transport/StreamIdHandler.java b/driver-core/src/main/java/com/datastax/driver/core/transport/StreamIdHandler.java new file mode 100644 index 00000000000..dd95aaa74e7 --- /dev/null +++ b/driver-core/src/main/java/com/datastax/driver/core/transport/StreamIdHandler.java @@ -0,0 +1,31 @@ +package com.datastax.driver.core.transport; + +import java.util.BitSet; +import java.util.concurrent.atomic.AtomicInteger; + +/** + * Handle assigning stream id to message. + */ +public class StreamIdHandler { + + // Stream ids are one byte long, signed and we only handle positive values ourselves. + private static final int STREAM_ID_COUNT = 128; + + // Keep one bit to know which one is in use. + private final BitSet usedIds = new BitSet(STREAM_ID_COUNT); + private final AtomicInteger idx = new AtomicInteger(0); + + public int next() { + int next = idx.getAndIncrement() % STREAM_ID_COUNT; + // Note: we could be fancier, and "search" for the next available idx, + // though that could be race prone, so doing the simplest thing for now + if (usedIds.get(next)) + // TODO: Throw a BusyConnectionException and handle it in the connection pool + throw new IllegalStateException(); + return next; + } + + public void release(int streamId) { + usedIds.clear(streamId); + } +} From cbc9de54122dcca95a5ea9e8168b3d4dbf68b1b0 Mon Sep 17 00:00:00 2001 From: Sylvain Lebresne Date: Fri, 10 Aug 2012 15:18:14 +0200 Subject: [PATCH 021/719] Add prototype of query for schema --- driver-core/pom.xml | 6 + .../com/datastax/driver/core/Cluster.java | 57 ++++- .../datastax/driver/core/ClusterMetadata.java | 78 ++++++ .../datastax/driver/core/ColumnMetadata.java | 79 +++++++ .../com/datastax/driver/core/Columns.java | 1 + .../driver/core/KeyspaceMetadata.java | 74 ++++++ .../com/datastax/driver/core/Session.java | 2 +- .../datastax/driver/core/TableMetadata.java | 222 ++++++++++++++++++ .../driver/core/transport/Connection.java | 68 ++++-- .../com/datastax/driver/core/SessionTest.java | 4 +- 10 files changed, 566 insertions(+), 25 deletions(-) create mode 100644 driver-core/src/main/java/com/datastax/driver/core/ClusterMetadata.java create mode 100644 driver-core/src/main/java/com/datastax/driver/core/ColumnMetadata.java create mode 100644 driver-core/src/main/java/com/datastax/driver/core/KeyspaceMetadata.java create mode 100644 driver-core/src/main/java/com/datastax/driver/core/TableMetadata.java diff --git a/driver-core/pom.xml b/driver-core/pom.xml index e4c4e63e79c..40cab65ac90 100644 --- a/driver-core/pom.xml +++ b/driver-core/pom.xml @@ -35,6 +35,12 @@ libthrift 0.7.0 + + + org.codehaus.jackson + jackson-mapper-asl + 1.9.8 + diff --git a/driver-core/src/main/java/com/datastax/driver/core/Cluster.java b/driver-core/src/main/java/com/datastax/driver/core/Cluster.java index 36ba79ca1b4..457c5420d44 100644 --- a/driver-core/src/main/java/com/datastax/driver/core/Cluster.java +++ b/driver-core/src/main/java/com/datastax/driver/core/Cluster.java @@ -5,6 +5,8 @@ import java.util.*; import java.util.concurrent.*; +import org.apache.cassandra.transport.messages.QueryMessage; + import com.datastax.driver.core.transport.Connection; import com.datastax.driver.core.transport.ConnectionException; import com.datastax.driver.core.utils.SimpleConvictionPolicy; @@ -43,7 +45,15 @@ private Cluster(List contactPoints) { List hosts = new ArrayList(contactPoints.size()); for (InetSocketAddress address : contactPoints) hosts.add(new Host(address, convictionPolicyFactory)); - this.manager = new 
Manager(hosts); + try + { + this.manager = new Manager(hosts); + } + catch (ConnectionException e) + { + // TODO: We should hide that somehow and only send a (non-checked) exception if there is no node to connect to + throw new RuntimeException(e); + } } /** @@ -104,6 +114,20 @@ public Session connect(String keyspace, AuthInfo authInfo) { return connect(authInfo).use(keyspace); } + /** + * Returns read-only metadata on the connected cluster. + * + * This includes the know nodes (with their status as seen by the driver) + * as well as the schema definitions. + * + * @return the cluster metadata. + */ + public ClusterMetadata getMetadata() { + // Temporary way to get update to data schema + try { manager.refreshSchema(); } catch (Exception e) { throw new RuntimeException(e); } + return manager.metadata; + } + /** * Configuration for {@link Cluster} instances. */ @@ -253,20 +277,30 @@ class Manager implements Host.StateListener { private final List contactPoints; private final Set sessions = new CopyOnWriteArraySet(); + private final ClusterMetadata metadata; final Connection.Factory connectionFactory = new Connection.Factory(); + private final ControlConnection controlConnection = new ControlConnection(); // TODO: make configurable final LoadBalancingPolicy.Factory loadBalancingFactory = new RoundRobinPolicy.Factory(); private final ScheduledExecutorService scheduledExecutor = Executors.newScheduledThreadPool(2); - private Manager(List contactPoints) { + private Manager(List contactPoints) throws ConnectionException { this.contactPoints = contactPoints; + this.metadata = new ClusterMetadata(); // TODO: this probably belong some place else for (Host host : contactPoints) host.monitor().register(this); + + refreshSchema(); + } + + private void refreshSchema() throws ConnectionException { + // TODO: something less lame + controlConnection.tryConnect(contactPoints.get(0)); } private Session newSession() { @@ -295,5 +329,24 @@ public void onAdd(Host host) { public void onRemove(Host host) { // TODO } + + public class ControlConnection { + + private Connection connection; + + private void tryConnect(Host host) throws ConnectionException { + connection = connectionFactory.open(host); + + // Make sure we're up to date on metadata + Connection.Future ksFuture = connection.write(new QueryMessage("SELECT * FROM system.schema_keyspaces")); + Connection.Future cfFuture = connection.write(new QueryMessage("SELECT * FROM system.schema_columnfamilies")); + Connection.Future colsFuture = connection.write(new QueryMessage("SELECT * FROM system.schema_columns")); + + // TODO: we should probably do something more fancy, like check if the schema changed and notify whoever wants to be notified + metadata.rebuildSchema(Session.toResultSet(ksFuture), + Session.toResultSet(cfFuture), + Session.toResultSet(colsFuture)); + } + } } } diff --git a/driver-core/src/main/java/com/datastax/driver/core/ClusterMetadata.java b/driver-core/src/main/java/com/datastax/driver/core/ClusterMetadata.java new file mode 100644 index 00000000000..a6003b5e80e --- /dev/null +++ b/driver-core/src/main/java/com/datastax/driver/core/ClusterMetadata.java @@ -0,0 +1,78 @@ +package com.datastax.driver.core; + +import java.util.*; + +/** + * Keeps metadata on the connected cluster, including known nodes and schema definitions. 
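 * <p>
 * A usage sketch (relying only on Cluster.getMetadata() as added in this
 * commit, and on the toString() below, which renders CREATE statements):
 * <pre>
 *   ClusterMetadata meta = cluster.getMetadata();
 *   System.out.println(meta);
 * </pre>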
+ */ +public class ClusterMetadata { + + private final Set hosts = new HashSet(); + private final Map keyspaces = new HashMap(); + + void rebuildSchema(ResultSet ks, ResultSet cfs, ResultSet cols) { + + // TODO: we need to switch the keyspaces map completely + + Map> cfDefs = new HashMap>(); + Map>> colsDefs = new HashMap>>(); + + for (CQLRow row : cfs) { + String ksName = row.getString(KeyspaceMetadata.KS_NAME); + List l = cfDefs.get(ksName); + if (l == null) { + l = new ArrayList(); + cfDefs.put(ksName, l); + } + l.add(row); + } + + for (CQLRow row : cols) { + String ksName = row.getString(KeyspaceMetadata.KS_NAME); + String cfName = row.getString(TableMetadata.CF_NAME); + Map> colsByCf = colsDefs.get(ksName); + if (colsByCf == null) { + colsByCf = new HashMap>(); + colsDefs.put(ksName, colsByCf); + } + List l = colsByCf.get(cfName); + if (l == null) { + l = new ArrayList(); + colsByCf.put(cfName, l); + } + l.add(row); + } + + for (CQLRow ksRow : ks) { + String ksName = ksRow.getString(KeyspaceMetadata.KS_NAME); + KeyspaceMetadata ksm = KeyspaceMetadata.build(ksRow); + + if (cfDefs.get(ksName) != null) { + for (CQLRow cfRow : cfDefs.get(ksName)) { + String cfName = cfRow.getString(TableMetadata.CF_NAME); + TableMetadata tm = TableMetadata.build(ksm, cfRow); + + if (colsDefs.get(ksName) == null || colsDefs.get(ksName).get(cfName) == null) + continue; + + for (CQLRow colRow : colsDefs.get(ksName).get(cfName)) { + ColumnMetadata cm = ColumnMetadata.build(tm, colRow); + } + } + } + + keyspaces.put(ksName, ksm); + } + } + + // TODO: Returning a multi-line string from toString might not be a good idea + @Override + public String toString() { + StringBuilder sb = new StringBuilder(); + + for (KeyspaceMetadata ksm : keyspaces.values()) + sb.append(ksm).append("\n"); + + return sb.toString(); + } +} diff --git a/driver-core/src/main/java/com/datastax/driver/core/ColumnMetadata.java b/driver-core/src/main/java/com/datastax/driver/core/ColumnMetadata.java new file mode 100644 index 00000000000..b3874eb65c8 --- /dev/null +++ b/driver-core/src/main/java/com/datastax/driver/core/ColumnMetadata.java @@ -0,0 +1,79 @@ +package com.datastax.driver.core; + +import java.util.*; + +import com.datastax.driver.core.transport.Codec; + +import org.apache.cassandra.config.ConfigurationException; +import org.apache.cassandra.db.marshal.AbstractType; +import org.apache.cassandra.db.marshal.TypeParser; + +/** + * Describes a Column. 
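 * <p>
 * A sketch of intended use (getName() is defined below; getType() and
 * getTable() are only added by a later commit in this series):
 * <pre>
 *   ColumnMetadata cm = ...;
 *   String name = cm.getName();
 *   DataType type = cm.getType();
 * </pre>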
+ */ +public class ColumnMetadata { + + private static final String COLUMN_NAME = "column_name"; + private static final String VALIDATOR = "validator"; + private static final String INDEX = "component_index"; + + private final TableMetadata table; + private final String name; + private final DataType type; + private final Index index; + + ColumnMetadata(TableMetadata table, String name, DataType type, Index index) { + this.table = table; + this.name = name; + this.type = type; + this.index = index; + } + + static ColumnMetadata build(TableMetadata tm, CQLRow row) { + try { + String name = row.getString(COLUMN_NAME); + AbstractType t = TypeParser.parse(row.getString(VALIDATOR)); + ColumnMetadata cm = new ColumnMetadata(tm, name, Codec.rawTypeToDataType(t), Index.build(row)); + tm.add(cm); + return cm; + } catch (ConfigurationException e) { + // The server will have validated the type + throw new RuntimeException(e); + } + } + + public String getName() { + return name; + } + + static class Index { + + private static final String INDEX_TYPE = "index_type"; + private static final String INDEX_OPTIONS = "index_options"; + private static final String INDEX_NAME = "index_name"; + + public final String name; + public final String type; + public final Map options = new HashMap(); + + private Index(String name, String type) { + this.name = name; + this.type = type; + } + + public static Index build(CQLRow row) { + String type = row.getString(INDEX_TYPE); + if (type == null) + return null; + + Index index = new Index(row.getString(INDEX_NAME), type); + // TODO: handle options + return index; + } + } + + @Override + public String toString() { + return name + " " + type; + } +} diff --git a/driver-core/src/main/java/com/datastax/driver/core/Columns.java b/driver-core/src/main/java/com/datastax/driver/core/Columns.java index d8a183913dc..6389cdc58f9 100644 --- a/driver-core/src/main/java/com/datastax/driver/core/Columns.java +++ b/driver-core/src/main/java/com/datastax/driver/core/Columns.java @@ -188,6 +188,7 @@ DataType.Native checkType(int i, DataType.Native... types) { throw new InvalidTypeException(String.format("Column %s is of type %s", name(i), defined)); } + // TODO: replace by ColumnMetadata public static class Definition { public final String keyspace; diff --git a/driver-core/src/main/java/com/datastax/driver/core/KeyspaceMetadata.java b/driver-core/src/main/java/com/datastax/driver/core/KeyspaceMetadata.java new file mode 100644 index 00000000000..1afbbc73489 --- /dev/null +++ b/driver-core/src/main/java/com/datastax/driver/core/KeyspaceMetadata.java @@ -0,0 +1,74 @@ +package com.datastax.driver.core; + +import java.util.*; + +/** + * Describes a keyspace defined in the cluster, i.e. part of the current schema.
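 * <p>
 * A usage sketch with the accessors defined below (raw types, as in the
 * rest of this file):
 * <pre>
 *   KeyspaceMetadata ksm = ...;
 *   boolean durable = ksm.isDurableWrites();
 *   Map replication = ksm.getReplicationStrategy();
 *   TableMetadata table = ksm.getTable("test");
 * </pre>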
+ */ +public class KeyspaceMetadata { + + public static final String KS_NAME = "keyspace_name"; + private static final String DURABLE_WRITES = "durable_writes"; + private static final String STRATEGY_CLASS = "strategy_class"; + private static final String STRATEGY_OPTIONS = "strategy_options"; + + private final String name; + private final boolean durableWrites; + private final Map replication = new HashMap(); + private final Map tables = new HashMap(); + + private KeyspaceMetadata(String name, boolean durableWrites) { + this.name = name; + this.durableWrites = durableWrites; + } + + static KeyspaceMetadata build(CQLRow row) { + + String name = row.getString(KS_NAME); + boolean durableWrites = row.getBool(DURABLE_WRITES); + KeyspaceMetadata ksm = new KeyspaceMetadata(name, durableWrites); + ksm.replication.put("class", row.getString(STRATEGY_CLASS)); + ksm.replication.put("options", row.getString(STRATEGY_OPTIONS)); + return ksm; + } + + public String getName() { + return name; + } + + public boolean isDurableWrites() { + return durableWrites; + } + + public Map getReplicationStrategy() { + return new HashMap(replication); + } + + public TableMetadata getTable(String name) { + return tables.get(name); + } + + public Collection getTables() { + return tables.values(); + } + + // TODO: Returning a multi-line string from toString might not be a good idea + @Override + public String toString() { + StringBuilder sb = new StringBuilder(); + + sb.append("CREATE KEYSPACE ").append(name).append(" WITH "); + sb.append("STRATEGY_CLASS = ").append(replication.get("class")); + // TODO: handle the options + sb.append(";\n"); + + for (TableMetadata tm : tables.values()) + sb.append("\n").append(tm); + + return sb.toString(); + } + + void add(TableMetadata tm) { + tables.put(tm.getName(), tm); + } +} diff --git a/driver-core/src/main/java/com/datastax/driver/core/Session.java b/driver-core/src/main/java/com/datastax/driver/core/Session.java index ebf4b85a30e..e0948e0b737 100644 --- a/driver-core/src/main/java/com/datastax/driver/core/Session.java +++ b/driver-core/src/main/java/com/datastax/driver/core/Session.java @@ -189,7 +189,7 @@ public ResultSet.Future executePreparedAsync(BoundStatement stmt) { return null; } - private ResultSet toResultSet(Connection.Future future) { + static ResultSet toResultSet(Connection.Future future) { try { Message.Response response = future.get(); switch (response.type) { diff --git a/driver-core/src/main/java/com/datastax/driver/core/TableMetadata.java b/driver-core/src/main/java/com/datastax/driver/core/TableMetadata.java new file mode 100644 index 00000000000..301ee975eb1 --- /dev/null +++ b/driver-core/src/main/java/com/datastax/driver/core/TableMetadata.java @@ -0,0 +1,222 @@ +package com.datastax.driver.core; + +import java.util.*; + +import com.datastax.driver.core.transport.Codec; + +import org.apache.cassandra.config.ConfigurationException; +import org.apache.cassandra.db.marshal.*; + +import org.codehaus.jackson.JsonFactory; +import org.codehaus.jackson.map.ObjectMapper; + +/** + * Describes a Table. 
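 * <p>
 * Roughly, build() below derives the CQL3 layout from a
 * schema_columnfamilies row. For a table such as
 * <pre>
 *   CREATE TABLE t (k text, c int, v int, PRIMARY KEY (k, c));
 * </pre>
 * the key_validator/key_aliases yield the partition key (k), the
 * comparator/column_aliases yield the clustering columns (c), and the
 * default_validator/value_alias yield the value column (v).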
+ */ +public class TableMetadata { + + public static final String CF_NAME = "columnfamily_name"; + + private static final String KEY_VALIDATOR = "key_validator"; + private static final String COMPARATOR = "comparator"; + private static final String VALIDATOR = "default_validator"; + + private static final String KEY_ALIASES = "key_aliases"; + private static final String COLUMN_ALIASES = "column_aliases"; + private static final String VALUE_ALIAS = "value_alias"; + + private static final String DEFAULT_KEY_ALIAS = "key"; + private static final String DEFAULT_COLUMN_ALIAS = "column"; + private static final String DEFAULT_VALUE_ALIAS = "value"; + + private final KeyspaceMetadata keyspace; + private final String name; + // We use a linked hashmap because we will keep this in the order of a 'SELECT * FROM ...'. + private final Map columns = new LinkedHashMap(); + private final List partitionKey = new ArrayList(); + private final List clusteringKey = new ArrayList(); + private final Options options; + + private TableMetadata(KeyspaceMetadata keyspace, String name, Options options) { + this.keyspace = keyspace; + this.name = name; + this.options = options; + } + + static TableMetadata build(KeyspaceMetadata ksm, CQLRow row) { + try { + String name = row.getString(CF_NAME); + TableMetadata tm = new TableMetadata(ksm, name, new Options(row)); + + // Partition key + AbstractType kt = TypeParser.parse(row.getString(KEY_VALIDATOR)); + List> keyTypes = kt instanceof CompositeType + ? ((CompositeType)kt).types + : Collections.>singletonList(kt); + List keyAliases = fromJsonList(row.getString(KEY_ALIASES)); + for (int i = 0; i < keyTypes.size(); i++) { + String cn = keyAliases.size() > i + ? keyAliases.get(i) + : (i == 0 ? DEFAULT_KEY_ALIAS : DEFAULT_KEY_ALIAS + (i + 1)); + DataType dt = Codec.rawTypeToDataType(keyTypes.get(i)); + ColumnMetadata colMeta = new ColumnMetadata(tm, cn, dt, null); + tm.columns.put(cn, colMeta); + tm.partitionKey.add(colMeta); + } + + // Clustering key + // TODO: this is actually more complicated than that ... + AbstractType ct = TypeParser.parse(row.getString(COMPARATOR)); + boolean isComposite = ct instanceof CompositeType; + List> columnTypes = isComposite + ? ((CompositeType)ct).types + : Collections.>singletonList(ct); + List columnAliases = fromJsonList(row.getString(COLUMN_ALIASES)); + int clusteringSize; + boolean hasValue; + if (isComposite) { + if (columnTypes.size() == columnAliases.size()) { + hasValue = true; + clusteringSize = columnTypes.size(); + } else { + hasValue = false; + clusteringSize = columnTypes.get(columnTypes.size() - 1) instanceof ColumnToCollectionType + ? columnTypes.size() - 2 + : columnTypes.size() - 1; + } + } else { + // TODO: this is not a good test to know if it's dynamic vs static. We should also see if there is any column_metadata + if (columnAliases.size() > 0) { + hasValue = true; + clusteringSize = columnTypes.size(); + } else { + hasValue = false; + clusteringSize = 0; + } + } + + for (int i = 0; i < clusteringSize; i++) { + String cn = columnAliases.size() > i ? columnAliases.get(i) : DEFAULT_COLUMN_ALIAS + (i + 1); + DataType dt = Codec.rawTypeToDataType(columnTypes.get(i)); + ColumnMetadata colMeta = new ColumnMetadata(tm, cn, dt, null); + tm.columns.put(cn, colMeta); + tm.clusteringKey.add(colMeta); + } + + // Value alias (if present) + if (hasValue) { + AbstractType vt = TypeParser.parse(row.getString(VALIDATOR)); + String valueAlias = row.isNull(KEY_ALIASES) ? 
DEFAULT_VALUE_ALIAS : row.getString(VALUE_ALIAS); + ColumnMetadata vm = new ColumnMetadata(tm, valueAlias, Codec.rawTypeToDataType(vt), null); + tm.columns.put(valueAlias, vm); + } + + ksm.add(tm); + return tm; + } catch (ConfigurationException e) { + // The server will have validated the type + throw new RuntimeException(e); + } + } + + public String getName() { + return name; + } + + // :_( + private static ObjectMapper jsonMapper = new ObjectMapper(new JsonFactory()); + private static List fromJsonList(String json) + { + try + { + return jsonMapper.readValue(json, List.class); + } + catch (Exception e) + { + throw new RuntimeException(e); + } + } + + void add(ColumnMetadata column) { + columns.put(column.getName(), column); + } + + // TODO: Returning a multi-line string from toString might not be a good idea + @Override + public String toString() { + StringBuilder sb = new StringBuilder(); + + sb.append("CREATE TABLE ").append(name).append(" (\n"); + for (ColumnMetadata cm : columns.values()) { + sb.append(" ").append(cm).append(",\n"); + } + + // PK + sb.append(" ").append("PRIMARY KEY ("); + if (partitionKey.size() == 1) { + sb.append(partitionKey.get(0).getName()); + } else { + sb.append("("); + boolean first = true; + for (ColumnMetadata cm : partitionKey) { + if (first) first = false; else sb.append(", "); + sb.append(cm.getName()); + } + sb.append(")"); + } + for (ColumnMetadata cm : clusteringKey) + sb.append(", ").append(cm.getName()); + sb.append(")\n"); + // end PK + + // TODO: finish + sb.append(");\n"); + return sb.toString(); + } + + // TODO: add getter for those + private static class Options { + + private static final String COMMENT = "comment"; + private static final String READ_REPAIR = "read_repair_chance"; + private static final String LOCAL_READ_REPAIR = "local_read_repair_chance"; + private static final String REPLICATE_ON_WRITE = "replicate_on_write"; + private static final String GC_GRACE = "gc_grace_seconds"; + private static final String BF_FP_CHANCE = "bloom_filter_fp_chance"; + private static final String CACHING = "caching"; + private static final String COMPACTION_CLASS = "compaction_strategy_class"; + private static final String COMPACTION_OPTIONS = "compaction_strategy_options"; + private static final String MIN_COMPACTION_THRESHOLD = "min_compaction_threshold"; + private static final String MAX_COMPACTION_THRESHOLD = "max_compaction_threshold"; + private static final String COMPRESSION_PARAMS = "compression_parameters"; + + private static final double DEFAULT_BF_FP_CHANCE = 0.01; + + public final String comment; + public final double readRepair; + public final double localReadRepair; + public final boolean replicateOnWrite; + public final int gcGrace; + public final double bfFpChance; + public final String caching; + public final Map compaction = new HashMap(); + public final Map compression = new HashMap(); + + public Options(CQLRow row) { + this.comment = row.isNull(COMMENT) ? "" : row.getString(COMMENT); + this.readRepair = row.getDouble(READ_REPAIR); + this.localReadRepair = row.getDouble(LOCAL_READ_REPAIR); + this.replicateOnWrite = row.getBool(REPLICATE_ON_WRITE); + this.gcGrace = row.getInt(GC_GRACE); + this.bfFpChance = row.isNull(BF_FP_CHANCE) ? 
DEFAULT_BF_FP_CHANCE : row.getDouble(BF_FP_CHANCE); + this.caching = row.getString(CACHING); + + // TODO: this should change (split options and handle min/max threshold in particular) + compaction.put("class", row.getString(COMPACTION_CLASS)); + compaction.put("options", row.getString(COMPACTION_OPTIONS)); + + // TODO: this should split the parameters + compression.put("params", row.getString(COMPRESSION_PARAMS)); + } + } +} diff --git a/driver-core/src/main/java/com/datastax/driver/core/transport/Connection.java b/driver-core/src/main/java/com/datastax/driver/core/transport/Connection.java index 417c9d6364b..05a00cd1cfe 100644 --- a/driver-core/src/main/java/com/datastax/driver/core/transport/Connection.java +++ b/driver-core/src/main/java/com/datastax/driver/core/transport/Connection.java @@ -45,7 +45,6 @@ public class Connection extends org.apache.cassandra.transport.Connection private volatile boolean isDefunct; private volatile ConnectionException exception; - /** * Create a new connection to a Cassandra node. * @@ -162,6 +161,26 @@ public void setKeyspace(String keyspace) throws ConnectionException { * @throws TransportException if an I/O error while sending the request */ public Future write(Message.Request request) throws ConnectionException { + Future future = new Future(this, dispatcher.streamIdHandler.next()); + internalWrite(request, future); + return future; + } + + public void write(Message.Request request, final ResponseCallback callback) throws ConnectionException { + final int streamId = dispatcher.streamIdHandler.next(); + internalWrite(request, new ResponseHandler() { + + public int getStreamId() { + return streamId; + } + + public ResponseCallback callback() { + return callback; + } + }); + } + + private void internalWrite(Message.Request request, ResponseHandler handler) throws ConnectionException { if (isDefunct) throw new ConnectionException(address, "Write attempt on defunct connection"); @@ -174,8 +193,8 @@ public Future write(Message.Request request) throws ConnectionException { inFlight.incrementAndGet(); try { - Future future = dispatcher.newFuture(); - request.setStreamId(future.streamId); + dispatcher.add(handler); + request.setStreamId(handler.getStreamId()); logger.trace(String.format("[%s] writting request %s", name, request)); ChannelFuture writeFuture = channel.write(request); @@ -194,7 +213,6 @@ public Future write(Message.Request request) throws ConnectionException { } logger.trace(String.format("[%s] request sent successfully", name)); - return future; } finally { inFlight.decrementAndGet(); @@ -279,15 +297,12 @@ private ClientBootstrap bootstrap() { // dispatcher that assume synchronous? 
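// A caller-side sketch of the callback write introduced in this commit
// (ResponseCallback as declared further down in this file); unlike the
// Future-returning write(), no thread has to block waiting for the answer:
//
//   connection.write(request, new Connection.ResponseCallback() {
//       public void onSet(Message.Response response) { /* handle response */ }
//       public void onException(ConnectionException exception) { /* handle error */ }
//   });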
private class Dispatcher extends SimpleChannelUpstreamHandler { - private final StreamIdHandler streamIdHandler = new StreamIdHandler(); + public final StreamIdHandler streamIdHandler = new StreamIdHandler(); private final ConcurrentMap pending = new ConcurrentHashMap(); - public Future newFuture() throws ConnectionException { - int streamId = streamIdHandler.next(); - Future future = new Future(Connection.this, streamId); - ResponseHandler old = pending.put(streamId, future); + public void add(ResponseHandler handler) { + ResponseHandler old = pending.put(handler.getStreamId(), handler); assert old == null; - return future; } @Override @@ -309,8 +324,8 @@ public void messageReceived(ChannelHandlerContext ctx, MessageEvent e) { streamIdHandler.release(streamId); if (handler == null) // TODO: we should handle those with a default handler - throw new RuntimeException("No handler set"); - handler.onSet(response); + throw new RuntimeException("No handler set for " + streamId + ", handlers = " + pending); + handler.callback().onSet(response); } } @@ -329,39 +344,50 @@ public void errorOutAllHandler(ConnectionException ce) { Iterator iter = pending.values().iterator(); while (iter.hasNext()) { - iter.next().onException(ce); + iter.next().callback().onException(ce); iter.remove(); } } } - public static class Future extends SimpleFuture implements ResponseHandler { - public final Connection connection; - public final int streamId; + public static class Future extends SimpleFuture implements ResponseHandler, ResponseCallback { + private final Connection connection; + private final int streamId; public Future(Connection connection, int streamId) { this.connection = connection; this.streamId = streamId; } + public int getStreamId() { + return streamId; + } + + public ResponseCallback callback() { + return this; + } + @Override - public void onSet(Message.Response response) - { + public void onSet(Message.Response response) { super.set(response); } @Override - public void onException(ConnectionException exception) - { + public void onException(ConnectionException exception) { super.setException(exception); } } - public interface ResponseHandler { + public interface ResponseCallback { public void onSet(Message.Response response); public void onException(ConnectionException exception); } + private interface ResponseHandler { + public int getStreamId(); + public ResponseCallback callback(); + } + private static class PipelineFactory implements ChannelPipelineFactory { // Stateless handlers private static final Message.ProtocolDecoder messageDecoder = new Message.ProtocolDecoder(); diff --git a/driver-core/src/test/java/com/datastax/driver/core/SessionTest.java b/driver-core/src/test/java/com/datastax/driver/core/SessionTest.java index 5e1dfc0b601..ebe0de45360 100644 --- a/driver-core/src/test/java/com/datastax/driver/core/SessionTest.java +++ b/driver-core/src/test/java/com/datastax/driver/core/SessionTest.java @@ -19,7 +19,7 @@ public class SessionTest { public static void classSetUp() { Logger rootLogger = Logger.getRootLogger(); if (!rootLogger.getAllAppenders().hasMoreElements()) { - rootLogger.setLevel(Level.DEBUG); + rootLogger.setLevel(Level.TRACE); rootLogger.addAppender(new ConsoleAppender(new PatternLayout("%-5p [%t]: %m%n"))); } } @@ -108,6 +108,8 @@ public void MultiNodeContinuousExecuteTest() throws Exception { session.use("test_ks"); session.execute("CREATE TABLE test (k text PRIMARY KEY, i int, f float)"); + //System.out.println("--- Schema ---\n" + cluster.getMetadata()); + for (int i = 
0; i < 10000; ++i) { System.out.println(">> " + i); session.execute(String.format("INSERT INTO test (k, i, f) VALUES ('k%d', %d, %d.2)", i, i, i)); From ba690e71652e23e78d619aaef074047acfe58e27 Mon Sep 17 00:00:00 2001 From: Sylvain Lebresne Date: Tue, 11 Sep 2012 20:27:29 +0200 Subject: [PATCH 022/719] Update code for last cassandra changes --- .../datastax/driver/core/ColumnMetadata.java | 4 +- .../driver/core/KeyspaceMetadata.java | 11 ++-- .../datastax/driver/core/TableMetadata.java | 52 ++++++++++++------- .../driver/core/transport/Connection.java | 19 +++++-- .../com/datastax/driver/core/SessionTest.java | 16 +++--- 5 files changed, 68 insertions(+), 34 deletions(-) diff --git a/driver-core/src/main/java/com/datastax/driver/core/ColumnMetadata.java b/driver-core/src/main/java/com/datastax/driver/core/ColumnMetadata.java index b3874eb65c8..82a4db3371c 100644 --- a/driver-core/src/main/java/com/datastax/driver/core/ColumnMetadata.java +++ b/driver-core/src/main/java/com/datastax/driver/core/ColumnMetadata.java @@ -4,7 +4,7 @@ import com.datastax.driver.core.transport.Codec; -import org.apache.cassandra.config.ConfigurationException; +import org.apache.cassandra.exceptions.RequestValidationException; import org.apache.cassandra.db.marshal.AbstractType; import org.apache.cassandra.db.marshal.TypeParser; @@ -36,7 +36,7 @@ static ColumnMetadata build(TableMetadata tm, CQLRow row) { ColumnMetadata cm = new ColumnMetadata(tm, name, Codec.rawTypeToDataType(t), Index.build(row)); tm.add(cm); return cm; - } catch (ConfigurationException e) { + } catch (RequestValidationException e) { // The server will have validated the type throw new RuntimeException(e); } diff --git a/driver-core/src/main/java/com/datastax/driver/core/KeyspaceMetadata.java b/driver-core/src/main/java/com/datastax/driver/core/KeyspaceMetadata.java index 1afbbc73489..684d2818367 100644 --- a/driver-core/src/main/java/com/datastax/driver/core/KeyspaceMetadata.java +++ b/driver-core/src/main/java/com/datastax/driver/core/KeyspaceMetadata.java @@ -28,7 +28,7 @@ static KeyspaceMetadata build(CQLRow row) { boolean durableWrites = row.getBool(DURABLE_WRITES); KeyspaceMetadata ksm = new KeyspaceMetadata(name, durableWrites); ksm.replication.put("class", row.getString(STRATEGY_CLASS)); - ksm.replication.put("options", row.getString(STRATEGY_OPTIONS)); + ksm.replication.putAll(TableMetadata.fromJsonMap(row.getString(STRATEGY_OPTIONS))); return ksm; } @@ -58,8 +58,13 @@ public String toString() { StringBuilder sb = new StringBuilder(); sb.append("CREATE KEYSPACE ").append(name).append(" WITH "); - sb.append("STRATEGY_CLASS = ").append(replication.get("class")); - // TODO: handle the options + sb.append("REPLICATION = { 'class' : '").append(replication.get("class")).append("'"); + for (Map.Entry entry : replication.entrySet()) { + if (entry.getKey().equals("class")) + continue; + sb.append(", '").append(entry.getKey()).append("': '").append(entry.getValue()).append("'"); + } + sb.append(" } AND DURABLE_WRITES = ").append(durableWrites); sb.append(";\n"); for (TableMetadata tm : tables.values()) diff --git a/driver-core/src/main/java/com/datastax/driver/core/TableMetadata.java b/driver-core/src/main/java/com/datastax/driver/core/TableMetadata.java index 301ee975eb1..95172074eab 100644 --- a/driver-core/src/main/java/com/datastax/driver/core/TableMetadata.java +++ b/driver-core/src/main/java/com/datastax/driver/core/TableMetadata.java @@ -4,7 +4,7 @@ import com.datastax.driver.core.transport.Codec; -import 
org.apache.cassandra.config.ConfigurationException; +import org.apache.cassandra.exceptions.RequestValidationException; import org.apache.cassandra.db.marshal.*; import org.codehaus.jackson.JsonFactory; @@ -15,15 +15,15 @@ */ public class TableMetadata { - public static final String CF_NAME = "columnfamily_name"; + public static final String CF_NAME = "columnfamily_name"; - private static final String KEY_VALIDATOR = "key_validator"; - private static final String COMPARATOR = "comparator"; - private static final String VALIDATOR = "default_validator"; + private static final String KEY_VALIDATOR = "key_validator"; + private static final String COMPARATOR = "comparator"; + private static final String VALIDATOR = "default_validator"; - private static final String KEY_ALIASES = "key_aliases"; - private static final String COLUMN_ALIASES = "column_aliases"; - private static final String VALUE_ALIAS = "value_alias"; + private static final String KEY_ALIASES = "key_aliases"; + private static final String COLUMN_ALIASES = "column_aliases"; + private static final String VALUE_ALIAS = "value_alias"; private static final String DEFAULT_KEY_ALIAS = "key"; private static final String DEFAULT_COLUMN_ALIAS = "column"; @@ -113,7 +113,7 @@ static TableMetadata build(KeyspaceMetadata ksm, CQLRow row) { ksm.add(tm); return tm; - } catch (ConfigurationException e) { + } catch (RequestValidationException e) { // The server will have validated the type throw new RuntimeException(e); } @@ -125,14 +125,19 @@ public String getName() { // :_( private static ObjectMapper jsonMapper = new ObjectMapper(new JsonFactory()); - private static List fromJsonList(String json) - { - try - { + + static List fromJsonList(String json) { + try { return jsonMapper.readValue(json, List.class); + } catch (Exception e) { + throw new RuntimeException(e); } - catch (Exception e) - { + } + + static Map fromJsonMap(String json) { + try { + return jsonMapper.readValue(json, Map.class); + } catch (Exception e) { throw new RuntimeException(e); } } @@ -168,9 +173,20 @@ public String toString() { sb.append(", ").append(cm.getName()); sb.append(")\n"); // end PK - - // TODO: finish - sb.append(");\n"); + sb.append(")"); + + // Options + sb.append(" WITH read_repair_chance = ").append(options.readRepair); + sb.append("\n AND local_read_repair_chance = ").append(options.localReadRepair); + sb.append("\n AND replicate_on_write = ").append(options.replicateOnWrite); + sb.append("\n AND gc_grace_seconds = ").append(options.gcGrace); + sb.append("\n AND bloom_filter_fp_chance = ").append(options.bfFpChance); + sb.append("\n AND caching = ").append(options.caching); + if (options.comment != null) + sb.append("\n AND comment = ").append(options.comment); + + // TODO: finish (compaction and compression) + sb.append(";\n"); return sb.toString(); } diff --git a/driver-core/src/main/java/com/datastax/driver/core/transport/Connection.java b/driver-core/src/main/java/com/datastax/driver/core/transport/Connection.java index 05a00cd1cfe..05701306028 100644 --- a/driver-core/src/main/java/com/datastax/driver/core/transport/Connection.java +++ b/driver-core/src/main/java/com/datastax/driver/core/transport/Connection.java @@ -1,8 +1,9 @@ package com.datastax.driver.core.transport; import java.net.InetSocketAddress; -import java.util.Collections; import java.util.Iterator; +import java.util.HashMap; +import java.util.Map; import java.util.concurrent.*; import java.util.concurrent.atomic.AtomicInteger; import java.util.concurrent.atomic.AtomicReference; @@ -30,6 
+31,11 @@ public class Connection extends org.apache.cassandra.transport.Connection // TODO: that doesn't belong here private static final String CQL_VERSION = "3.0.0"; + private static final org.apache.cassandra.transport.Connection.Tracker EMPTY_TRACKER = new org.apache.cassandra.transport.Connection.Tracker() { + public void addConnection(Channel ch, org.apache.cassandra.transport.Connection connection) {} + public void closeAll() {} + }; + public final InetSocketAddress address; private final String name; @@ -54,6 +60,8 @@ public class Connection extends org.apache.cassandra.transport.Connection * refused by the server. */ private Connection(String name, InetSocketAddress address, Factory factory) throws ConnectionException { + super(EMPTY_TRACKER); + this.address = address; this.factory = factory; this.name = name; @@ -92,14 +100,17 @@ private void initializeTransport() throws ConnectionException { // TODO: we will need to get fancy about handling protocol version at // some point, but keep it simple for now. // TODO: we need to allow setting the compression to use - StartupMessage startup = new StartupMessage(CQL_VERSION, Collections.emptyMap()); + Map options = new HashMap() {{ + put(StartupMessage.CQL_VERSION, CQL_VERSION); + }}; + StartupMessage startup = new StartupMessage(options); try { Message.Response response = write(startup).get(); switch (response.type) { case READY: break; case ERROR: - throw defunct(new TransportException(address, String.format("Error initializing connection: %s", ((ErrorMessage)response).errorMsg))); + throw defunct(new TransportException(address, String.format("Error initializing connection", ((ErrorMessage)response).error))); case AUTHENTICATE: throw new TransportException(address, "Authentication required but not yet supported"); default: @@ -411,7 +422,7 @@ public void closeAll() {} public PipelineFactory(final Connection connection) { this.connection = connection; this.cfactory = new org.apache.cassandra.transport.Connection.Factory() { - public Connection newConnection() { + public Connection newConnection(org.apache.cassandra.transport.Connection.Tracker tracker) { return connection; } }; diff --git a/driver-core/src/test/java/com/datastax/driver/core/SessionTest.java b/driver-core/src/test/java/com/datastax/driver/core/SessionTest.java index ebe0de45360..f2698946236 100644 --- a/driver-core/src/test/java/com/datastax/driver/core/SessionTest.java +++ b/driver-core/src/test/java/com/datastax/driver/core/SessionTest.java @@ -104,16 +104,18 @@ public void MultiNodeContinuousExecuteTest() throws Exception { Cluster cluster = new Cluster.Builder().addContactPoints("127.0.0.1", "127.0.0.2").build(); Session session = cluster.connect(); - session.execute("CREATE KEYSPACE test_ks WITH strategy_class = SimpleStrategy AND strategy_options:replication_factor = 2"); + session.execute("CREATE KEYSPACE test_ks WITH replication = { 'class' : 'SimpleStrategy', 'replication_factor' : 2 }"); + // We should deal with that sleep + try { Thread.sleep(1000); } catch (Exception e) {} session.use("test_ks"); session.execute("CREATE TABLE test (k text PRIMARY KEY, i int, f float)"); - //System.out.println("--- Schema ---\n" + cluster.getMetadata()); + System.out.println("--- Schema ---\n" + cluster.getMetadata()); - for (int i = 0; i < 10000; ++i) { - System.out.println(">> " + i); - session.execute(String.format("INSERT INTO test (k, i, f) VALUES ('k%d', %d, %d.2)", i, i, i)); - Thread.currentThread().sleep(1000); - } + //for (int i = 0; i < 10000; ++i) { + // 
System.out.println(">> " + i); + // session.execute(String.format("INSERT INTO test (k, i, f) VALUES ('k%d', %d, %d.2)", i, i, i)); + // Thread.currentThread().sleep(1000); + //} } } From 94eb32e371841f4b7f48bddc599954cd56f95e96 Mon Sep 17 00:00:00 2001 From: Sylvain Lebresne Date: Wed, 12 Sep 2012 22:10:50 +0200 Subject: [PATCH 023/719] Initial support of events --- .../com/datastax/driver/core/Cluster.java | 142 ++++++++++++------ .../datastax/driver/core/ClusterMetadata.java | 40 ++++- .../datastax/driver/core/ColumnMetadata.java | 8 + .../com/datastax/driver/core/Columns.java | 1 - .../driver/core/ControlConnection.java | 112 ++++++++++++++ .../java/com/datastax/driver/core/Host.java | 2 + .../com/datastax/driver/core/Session.java | 33 ++-- .../datastax/driver/core/TableMetadata.java | 8 + .../driver/core/pool/HostConnectionPool.java | 2 + .../driver/core/transport/Connection.java | 26 +++- .../driver/core/utils/RoundRobinPolicy.java | 4 + .../com/datastax/driver/core/SessionTest.java | 12 +- 12 files changed, 321 insertions(+), 69 deletions(-) create mode 100644 driver-core/src/main/java/com/datastax/driver/core/ControlConnection.java diff --git a/driver-core/src/main/java/com/datastax/driver/core/Cluster.java b/driver-core/src/main/java/com/datastax/driver/core/Cluster.java index 457c5420d44..99f1b9d5216 100644 --- a/driver-core/src/main/java/com/datastax/driver/core/Cluster.java +++ b/driver-core/src/main/java/com/datastax/driver/core/Cluster.java @@ -5,6 +5,9 @@ import java.util.*; import java.util.concurrent.*; +import org.apache.cassandra.transport.Event; +import org.apache.cassandra.transport.Message; +import org.apache.cassandra.transport.messages.EventMessage; import org.apache.cassandra.transport.messages.QueryMessage; import com.datastax.driver.core.transport.Connection; @@ -37,17 +40,12 @@ public class Cluster { private static final Logger logger = LoggerFactory.getLogger(Cluster.class); - // TODO: Make that configurable - private final ConvictionPolicy.Factory convictionPolicyFactory = new SimpleConvictionPolicy.Factory(); final Manager manager; private Cluster(List contactPoints) { - List hosts = new ArrayList(contactPoints.size()); - for (InetSocketAddress address : contactPoints) - hosts.add(new Host(address, convictionPolicyFactory)); try { - this.manager = new Manager(hosts); + this.manager = new Manager(contactPoints); } catch (ConnectionException e) { @@ -123,8 +121,6 @@ public Session connect(String keyspace, AuthInfo authInfo) { * @return the cluster metadata. */ public ClusterMetadata getMetadata() { - // Temporary way to get update to data schema - try { manager.refreshSchema(); } catch (Exception e) { throw new RuntimeException(e); } return manager.metadata; } @@ -271,82 +267,144 @@ public Cluster build() { * that Manager is not publicly visible. For instance, we wouldn't want * user to be able to call the {@link #onUp} and {@link #onDown} methods. 
*/ - class Manager implements Host.StateListener { + class Manager implements Host.StateListener, Connection.DefaultResponseHandler { // Initial contact points - private final List contactPoints; + final List contactPoints; private final Set sessions = new CopyOnWriteArraySet(); - private final ClusterMetadata metadata; + final ClusterMetadata metadata; - final Connection.Factory connectionFactory = new Connection.Factory(); - private final ControlConnection controlConnection = new ControlConnection(); + // TODO: Make that configurable + final ConvictionPolicy.Factory convictionPolicyFactory = new SimpleConvictionPolicy.Factory(); + final Connection.Factory connectionFactory; + private final ControlConnection controlConnection; // TODO: make configurable - final LoadBalancingPolicy.Factory loadBalancingFactory = new RoundRobinPolicy.Factory(); + final LoadBalancingPolicy.Factory loadBalancingFactory = RoundRobinPolicy.Factory.INSTANCE; - private final ScheduledExecutorService scheduledExecutor = Executors.newScheduledThreadPool(2); + // TODO: give a name to the threads of this executor + private final ScheduledExecutorService scheduledExecutor = Executors.newScheduledThreadPool(1); - private Manager(List contactPoints) throws ConnectionException { + private Manager(List contactPoints) throws ConnectionException { + this.metadata = new ClusterMetadata(this); this.contactPoints = contactPoints; - this.metadata = new ClusterMetadata(); + this.connectionFactory = new Connection.Factory(this); - // TODO: this probably belong some place else - for (Host host : contactPoints) - host.monitor().register(this); + for (InetSocketAddress address : contactPoints) + addHost(address, false); - refreshSchema(); - } - - private void refreshSchema() throws ConnectionException { - // TODO: something less lame - controlConnection.tryConnect(contactPoints.get(0)); + this.controlConnection = new ControlConnection(this); + controlConnection.reconnect(); } private Session newSession() { - Session session = new Session(Cluster.this, contactPoints); + Session session = new Session(Cluster.this, metadata.allHosts()); sessions.add(session); return session; } public void onUp(Host host) { - // Nothing specific + logger.trace(String.format("Host %s is UP", host)); + controlConnection.onUp(host); + for (Session s : sessions) + s.manager.onUp(host); + // TODO: We should register reconnection attempts, to avoid starting two of // them and if this method is called by other means than the // reconnection handler (like C* tells us it's up), cancel the latter } public void onDown(Host host) { + logger.trace(String.format("Host %s is DOWN", host)); + controlConnection.onDown(host); + for (Session s : sessions) + s.manager.onDown(host); + // Note: we'll basically waste the first successful reconnection that way, but it's probably not a big deal logger.debug(String.format("%s is down, scheduling connection retries", host)); new ReconnectionHandler(host, scheduledExecutor, connectionFactory).start(); } public void onAdd(Host host) { - // TODO + logger.trace(String.format("Adding new host %s", host)); + controlConnection.onAdd(host); + for (Session s : sessions) + s.manager.onAdd(host); } public void onRemove(Host host) { - // TODO + logger.trace(String.format("Removing host %s", host)); + controlConnection.onRemove(host); + for (Session s : sessions) + s.manager.onRemove(host); } - public class ControlConnection { + public void addHost(InetSocketAddress address, boolean signal) { + Host newHost = metadata.add(address); + if
(newHost != null && signal) + onAdd(newHost); + } - private Connection connection; + public void removeHost(Host host) { + if (host == null) + return; - private void tryConnect(Host host) throws ConnectionException { - connection = connectionFactory.open(host); + if (metadata.remove(host)) + onRemove(host); + } - // Make sure we're up to date on metadata - Connection.Future ksFuture = connection.write(new QueryMessage("SELECT * FROM system.schema_keyspaces")); - Connection.Future cfFuture = connection.write(new QueryMessage("SELECT * FROM system.schema_columnfamilies")); - Connection.Future colsFuture = connection.write(new QueryMessage("SELECT * FROM system.schema_columns")); + // Called when a message has been received that was initiated by the server (streamId < 0). + public void handle(Message.Response response) { - // TODO: we should probably do something more fancy, like check if the schema changed and notify whoever wants to be notified - metadata.rebuildSchema(Session.toResultSet(ksFuture), - Session.toResultSet(cfFuture), - Session.toResultSet(colsFuture)); + if (!(response instanceof EventMessage)) { + // TODO: log some error + return; } + + final Event event = ((EventMessage)response).event; + + // When handle is called, the current thread is a network I/O + // thread, and we don't want to block it (typically addHost() will + // create the connection pool to the new node, which can take time) + scheduledExecutor.execute(new Runnable() { + public void run() { + switch (event.type) { + case TOPOLOGY_CHANGE: + Event.TopologyChange tpc = (Event.TopologyChange)event; + switch (tpc.change) { + case NEW_NODE: + addHost(tpc.node, true); + break; + case REMOVED_NODE: + removeHost(metadata.getHost(tpc.node)); + break; + } + break; + case STATUS_CHANGE: + Event.StatusChange stc = (Event.StatusChange)event; + switch (stc.status) { + case UP: + Host host = metadata.getHost(stc.node); + if (host == null) { + // first time we heard about that node apparently, add it + addHost(stc.node, true); + } else { + onUp(host); + } + break; + case DOWN: + // Ignore down events. Connections will realize a node is dead quickly enough when they write to + // it, and there is no point in taking the risk of marking the node down mistakenly because we + // didn't receive the event in a timely fashion + break; + } + break; + } + } + }); + } + } } diff --git a/driver-core/src/main/java/com/datastax/driver/core/ClusterMetadata.java b/driver-core/src/main/java/com/datastax/driver/core/ClusterMetadata.java index a6003b5e80e..5581bf83150 100644 --- a/driver-core/src/main/java/com/datastax/driver/core/ClusterMetadata.java +++ b/driver-core/src/main/java/com/datastax/driver/core/ClusterMetadata.java @@ -1,15 +1,23 @@ package com.datastax.driver.core; +import java.net.InetSocketAddress; import java.util.*; +import java.util.concurrent.ConcurrentHashMap; +import java.util.concurrent.ConcurrentMap; /** * Keeps metadata on the connected cluster, including known nodes and schema definitions.
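 * <p>
 * A sketch of the host-tracking side added by this commit:
 * <pre>
 *   Host host = metadata.getHost(address); // null if the node is unknown
 *   Collection hosts = metadata.allHosts();
 * </pre>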
*/ public class ClusterMetadata { - private final Set hosts = new HashSet(); + private final Cluster.Manager cluster; + private final ConcurrentMap hosts = new ConcurrentHashMap(); private final Map keyspaces = new HashMap(); + ClusterMetadata(Cluster.Manager cluster) { + this.cluster = cluster; + } + void rebuildSchema(ResultSet ks, ResultSet cfs, ResultSet cols) { // TODO: we need to switch the keyspaces map completely @@ -65,6 +73,36 @@ void rebuildSchema(ResultSet ks, ResultSet cfs, ResultSet cols) { } } + Host add(InetSocketAddress address) { + Host newHost = new Host(address, cluster.convictionPolicyFactory); + Host previous = hosts.putIfAbsent(address, newHost); + if (previous == null) + { + newHost.monitor().register(cluster); + return newHost; + } + else + { + return null; + } + } + + boolean remove(Host host) { + return hosts.remove(host.getAddress()) != null; + } + + Host getHost(InetSocketAddress address) { + return hosts.get(address); + } + + Collection allHosts() { + return hosts.values(); + } + + public KeyspaceMetadata getKeyspace(String keyspace) { + return keyspaces.get(keyspace); + } + // TODO: Returning a multi-line string from toString might not be a good idea @Override public String toString() { diff --git a/driver-core/src/main/java/com/datastax/driver/core/ColumnMetadata.java b/driver-core/src/main/java/com/datastax/driver/core/ColumnMetadata.java index 82a4db3371c..50e7fa8750c 100644 --- a/driver-core/src/main/java/com/datastax/driver/core/ColumnMetadata.java +++ b/driver-core/src/main/java/com/datastax/driver/core/ColumnMetadata.java @@ -46,6 +46,14 @@ public String getName() { return name; } + public TableMetadata getTable() { + return table; + } + + public DataType getType() { + return type; + } + static class Index { private static final String INDEX_TYPE = "index_type"; diff --git a/driver-core/src/main/java/com/datastax/driver/core/Columns.java b/driver-core/src/main/java/com/datastax/driver/core/Columns.java index 6389cdc58f9..d8a183913dc 100644 --- a/driver-core/src/main/java/com/datastax/driver/core/Columns.java +++ b/driver-core/src/main/java/com/datastax/driver/core/Columns.java @@ -188,7 +188,6 @@ DataType.Native checkType(int i, DataType.Native... 
types) { throw new InvalidTypeException(String.format("Column %s is of type %s", name(i), defined)); } - // TODO: replace by ColumnMetadata public static class Definition { public final String keyspace; diff --git a/driver-core/src/main/java/com/datastax/driver/core/ControlConnection.java b/driver-core/src/main/java/com/datastax/driver/core/ControlConnection.java new file mode 100644 index 00000000000..7ce26800297 --- /dev/null +++ b/driver-core/src/main/java/com/datastax/driver/core/ControlConnection.java @@ -0,0 +1,112 @@ +package com.datastax.driver.core; + +import java.util.Arrays; +import java.util.Iterator; +import java.util.List; + +import com.datastax.driver.core.transport.Connection; +import com.datastax.driver.core.transport.ConnectionException; +import com.datastax.driver.core.utils.RoundRobinPolicy; + +import org.apache.cassandra.transport.Event; +import org.apache.cassandra.transport.messages.RegisterMessage; +import org.apache.cassandra.transport.messages.QueryMessage; + +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +class ControlConnection implements Host.StateListener { + + private static final Logger logger = LoggerFactory.getLogger(ControlConnection.class); + + private static final String SELECT_KEYSPACES = "SELECT * FROM system.schema_keyspaces"; + private static final String SELECT_COLUMN_FAMILIES = "SELECT * FROM system.schema_columnfamilies"; + private static final String SELECT_COLUMNS = "SELECT * FROM system.schema_columns"; + + private Connection connection; + + private final Cluster.Manager cluster; + private final LoadBalancingPolicy balancingPolicy; + + public ControlConnection(Cluster.Manager cluster) { + this.cluster = cluster; + this.balancingPolicy = RoundRobinPolicy.Factory.INSTANCE.create(cluster.metadata.allHosts()); + } + + public void reconnect() { + + if (connection != null) + connection.close(); + + Iterator iter = balancingPolicy.newQueryPlan(); + while (iter.hasNext()) { + Host host = iter.next(); + try { + tryConnect(host); + return; + } catch (ConnectionException e) { + // TODO: log something + // Just try next node + } + } + + // TODO: we should log an error but reschedule for later + throw new RuntimeException(); + } + + public void tryConnect(Host host) throws ConnectionException { + connection = cluster.connectionFactory.open(host); + logger.trace(String.format("Control connection connected to %s", host)); + + List evs = Arrays.asList(new Event.Type[]{ + Event.Type.TOPOLOGY_CHANGE, + Event.Type.STATUS_CHANGE, + }); + connection.write(new RegisterMessage(evs)); + + refreshSchema(); + // TODO: also catch up on potentially missed nodes (and node that happens to be up but not known to us) + } + + public void refreshSchema() { + // Shouldn't happen unless we have bigger problems, but no reason to NPE + if (connection == null || connection.isClosed()) { + reconnect(); + } + + // Make sure we're up to date on metadata + try { + Connection.Future ksFuture = connection.write(new QueryMessage(SELECT_KEYSPACES)); + Connection.Future cfFuture = connection.write(new QueryMessage(SELECT_COLUMN_FAMILIES)); + Connection.Future colsFuture = connection.write(new QueryMessage(SELECT_COLUMNS)); + + // TODO: we should probably do something more fancy, like check if the schema changed and notify whoever wants to be notified + cluster.metadata.rebuildSchema(Session.toResultSet(ksFuture), + Session.toResultSet(cfFuture), + Session.toResultSet(colsFuture)); + } catch (ConnectionException e) { + // TODO: log + reconnect(); + } + } + + public void 
onUp(Host host) { + balancingPolicy.onUp(host); + } + + public void onDown(Host host) { + balancingPolicy.onDown(host); + + // TODO: we should check if that's the host we're connected to and + // attempt a reconnect. However, we need to make that thread safe + // somehow. + } + + public void onAdd(Host host) { + balancingPolicy.onAdd(host); + } + + public void onRemove(Host host) { + balancingPolicy.onRemove(host); + } +} diff --git a/driver-core/src/main/java/com/datastax/driver/core/Host.java b/driver-core/src/main/java/com/datastax/driver/core/Host.java index 45e405baa8e..c4b3a9fd9b0 100644 --- a/driver-core/src/main/java/com/datastax/driver/core/Host.java +++ b/driver-core/src/main/java/com/datastax/driver/core/Host.java @@ -16,6 +16,8 @@ public class Host { private final InetSocketAddress address; private final HealthMonitor monitor; + // ClusterMetadata keeps one Host object per inet address, so don't use + // that constructor unless you know what you are doing (typically, use ClusterMetadata.getHost instead). Host(InetSocketAddress address, ConvictionPolicy.Factory policy) { if (address == null || policy == null) throw new NullPointerException(); diff --git a/driver-core/src/main/java/com/datastax/driver/core/Session.java b/driver-core/src/main/java/com/datastax/driver/core/Session.java index e0948e0b737..23553bc2c8f 100644 --- a/driver-core/src/main/java/com/datastax/driver/core/Session.java +++ b/driver-core/src/main/java/com/datastax/driver/core/Session.java @@ -30,10 +30,10 @@ public class Session { private static final Logger logger = LoggerFactory.getLogger(Session.class); - private final Manager manager; + final Manager manager; // Package protected, only Cluster should construct this. - Session(Cluster cluster, List hosts) { + Session(Cluster cluster, Collection hosts) { this.manager = new Manager(cluster, hosts); } @@ -231,7 +231,7 @@ private PreparedStatement toPreparedStatement(Connection.Future future) { } } - private static class Manager implements Host.StateListener { + static class Manager implements Host.StateListener { private final Cluster cluster; @@ -243,7 +243,7 @@ private static class Manager implements Host.StateListener { // TODO: Make that configurable private final long DEFAULT_CONNECTION_TIMEOUT = 3000; - public Manager(Cluster cluster, List hosts) { + public Manager(Cluster cluster, Collection hosts) { this.cluster = cluster; // TODO: consider the use of NonBlockingHashMap @@ -251,17 +251,8 @@ public Manager(Cluster cluster, List hosts) { this.loadBalancer = cluster.manager.loadBalancingFactory.create(hosts); this.poolsConfiguration = new HostConnectionPool.Configuration(); - for (Host host : hosts) { - logger.debug("Adding new host " + host); - host.monitor().register(this); - + for (Host host : hosts) addHost(host); - // If we fail to connect, the pool will be shutdown right away - if (pools.get(host).isShutdown()) { - logger.debug("Cannot connect to " + host); - pools.remove(host); - } - } } private HostConnectionPool addHost(Host host) { @@ -287,11 +278,21 @@ public void onDown(Host host) { } public void onAdd(Host host) { - // TODO + HostConnectionPool previous = addHost(host); + loadBalancer.onAdd(host); + + // This should not be necessary, especially since the host is + // supposed to be new, but it's safer to make that work correctly + // if the event is triggered multiple times. 
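+ // For instance, the control connection could plausibly re-deliver a node + // addition it has already signaled; shutting down the previous pool below + // keeps such a duplicate from leaking connections.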
+ if (previous != null) + previous.shutdown(); } public void onRemove(Host host) { - // TODO + loadBalancer.onRemove(host); + HostConnectionPool pool = pools.remove(host); + if (pool != null) + pool.shutdown(); } public void setKeyspace(String keyspace) { diff --git a/driver-core/src/main/java/com/datastax/driver/core/TableMetadata.java b/driver-core/src/main/java/com/datastax/driver/core/TableMetadata.java index 95172074eab..203231c860f 100644 --- a/driver-core/src/main/java/com/datastax/driver/core/TableMetadata.java +++ b/driver-core/src/main/java/com/datastax/driver/core/TableMetadata.java @@ -123,6 +123,14 @@ public String getName() { return name; } + public KeyspaceMetadata getKeyspace() { + return keyspace; + } + + public ColumnMetadata getColumn(String name) { + return columns.get(name); + } + // :_( private static ObjectMapper jsonMapper = new ObjectMapper(new JsonFactory()); diff --git a/driver-core/src/main/java/com/datastax/driver/core/pool/HostConnectionPool.java b/driver-core/src/main/java/com/datastax/driver/core/pool/HostConnectionPool.java index e55b912f891..9a89c45d628 100644 --- a/driver-core/src/main/java/com/datastax/driver/core/pool/HostConnectionPool.java +++ b/driver-core/src/main/java/com/datastax/driver/core/pool/HostConnectionPool.java @@ -51,6 +51,8 @@ public void run() { for (int i = 0; i < configuration.coreConnections; i++) if (!addConnection()) break; + + logger.trace(String.format("Created connection pool to host %s", host)); } public Connection borrowConnection(long timeout, TimeUnit unit) throws ConnectionException { diff --git a/driver-core/src/main/java/com/datastax/driver/core/transport/Connection.java b/driver-core/src/main/java/com/datastax/driver/core/transport/Connection.java index 05701306028..ef1dcc6a6b8 100644 --- a/driver-core/src/main/java/com/datastax/driver/core/transport/Connection.java +++ b/driver-core/src/main/java/com/datastax/driver/core/transport/Connection.java @@ -151,6 +151,8 @@ public void setKeyspace(String keyspace) throws ConnectionException { try { logger.trace(String.format("[%s] Setting keyspace %s", name, keyspace)); + // TODO: Handle the case where we get an error because the keyspace doesn't + // exist (and don't set the keyspace to retry later) write(new QueryMessage("USE " + keyspace)).get(); this.keyspace = keyspace; } catch (ConnectionException e) { @@ -254,6 +256,10 @@ public void close() { // Note: we must not call releaseExternalResources, because this shutdown the executors, which are shared } + public boolean isClosed() { + return isClosed; + } + // Cruft needed because we reuse server side classes, but we don't care about it public void validateNewMessage(Message.Type type) {}; public void applyStateTransition(Message.Type requestType, Message.Type responseType) {}; @@ -266,6 +272,11 @@ public static class Factory { private final ExecutorService workerExecutor = Executors.newCachedThreadPool(); private final ConcurrentMap idGenerators = new ConcurrentHashMap(); + private final DefaultResponseHandler defaultHandler; + + public Factory(DefaultResponseHandler defaultHandler) { + this.defaultHandler = defaultHandler; + } /** * Opens a new connection to the node this factory points to. 
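The defaultHandler wired into Connection.Factory above is the hook for server-initiated messages: the dispatcher now routes any response carrying a negative stream id to it instead of throwing. A minimal sketch of an implementation, assuming only the Connection.DefaultResponseHandler interface this commit introduces (the class name is hypothetical):

    import org.apache.cassandra.transport.Message;
    import com.datastax.driver.core.transport.Connection;

    // Illustrative sketch only: log every server-pushed message (streamId < 0).
    class PrintingPushHandler implements Connection.DefaultResponseHandler {
        public void handle(Message.Response response) {
            System.out.println("server event: " + response);
        }
    }

In the driver itself Cluster.Manager plays this role, implementing Connection.DefaultResponseHandler so it can react to the TOPOLOGY_CHANGE and STATUS_CHANGE events the control connection registers for.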
@@ -301,6 +312,10 @@ private ClientBootstrap bootstrap() { return b; } + + public DefaultResponseHandler defaultHandler() { + return defaultHandler; + } } // TODO: Having a map of Integer -> ResponseHandler might be overkill if we @@ -327,9 +342,10 @@ public void messageReceived(ChannelHandlerContext ctx, MessageEvent e) { } else { Message.Response response = (Message.Response)e.getMessage(); int streamId = response.getStreamId(); - if (streamId < 0) - // TODO: fix - throw new UnsupportedOperationException("Stream initiated server side are not yet supported"); + if (streamId < 0) { + factory.defaultHandler().handle(response); + return; + } ResponseHandler handler = pending.remove(streamId); streamIdHandler.release(streamId); @@ -399,6 +415,10 @@ private interface ResponseHandler { public ResponseCallback callback(); } + public interface DefaultResponseHandler { + public void handle(Message.Response response); + } + private static class PipelineFactory implements ChannelPipelineFactory { // Stateless handlers private static final Message.ProtocolDecoder messageDecoder = new Message.ProtocolDecoder(); diff --git a/driver-core/src/main/java/com/datastax/driver/core/utils/RoundRobinPolicy.java b/driver-core/src/main/java/com/datastax/driver/core/utils/RoundRobinPolicy.java index 02665f34f6d..9863d154826 100644 --- a/driver-core/src/main/java/com/datastax/driver/core/utils/RoundRobinPolicy.java +++ b/driver-core/src/main/java/com/datastax/driver/core/utils/RoundRobinPolicy.java @@ -92,6 +92,10 @@ public void onRemove(Host host) { public static class Factory implements LoadBalancingPolicy.Factory { + public static final Factory INSTANCE = new Factory(); + + private Factory() {} + public LoadBalancingPolicy create(Collection hosts) { return new RoundRobinPolicy(hosts); } diff --git a/driver-core/src/test/java/com/datastax/driver/core/SessionTest.java b/driver-core/src/test/java/com/datastax/driver/core/SessionTest.java index f2698946236..3571e8e0e85 100644 --- a/driver-core/src/test/java/com/datastax/driver/core/SessionTest.java +++ b/driver-core/src/test/java/com/datastax/driver/core/SessionTest.java @@ -110,12 +110,12 @@ public void MultiNodeContinuousExecuteTest() throws Exception { session.use("test_ks"); session.execute("CREATE TABLE test (k text PRIMARY KEY, i int, f float)"); - System.out.println("--- Schema ---\n" + cluster.getMetadata()); + //System.out.println("--- Schema ---\n" + cluster.getMetadata()); - //for (int i = 0; i < 10000; ++i) { - // System.out.println(">> " + i); - // session.execute(String.format("INSERT INTO test (k, i, f) VALUES ('k%d', %d, %d.2)", i, i, i)); - // Thread.currentThread().sleep(1000); - //} + for (int i = 0; i < 10000; ++i) { + System.out.println(">> " + i); + session.execute(String.format("INSERT INTO test (k, i, f) VALUES ('k%d', %d, %d.2)", i, i, i)); + Thread.currentThread().sleep(1000); + } } } From ac546c21bb7ea7dc8b7cdd5e008c0e3548aa3261 Mon Sep 17 00:00:00 2001 From: Sylvain Lebresne Date: Sat, 15 Sep 2012 14:18:27 +0200 Subject: [PATCH 024/719] Implement proto for a retry policy - wip --- .../com/datastax/driver/core/Cluster.java | 5 +- .../datastax/driver/core/ClusterMetadata.java | 39 +++++- .../driver/core/ConsistencyLevel.java | 27 ++++ .../driver/core/KeyspaceMetadata.java | 76 ++++++++++-- .../com/datastax/driver/core/RetryPolicy.java | 38 ++++++ .../datastax/driver/core/RetryingFuture.java | 92 ++++++++++++++ .../com/datastax/driver/core/Session.java | 38 ++++-- .../datastax/driver/core/TableMetadata.java | 115 +++++++++++++++--- 
.../driver/core/transport/Connection.java | 79 ++++++------ 9 files changed, 426 insertions(+), 83 deletions(-) create mode 100644 driver-core/src/main/java/com/datastax/driver/core/ConsistencyLevel.java create mode 100644 driver-core/src/main/java/com/datastax/driver/core/RetryPolicy.java create mode 100644 driver-core/src/main/java/com/datastax/driver/core/RetryingFuture.java diff --git a/driver-core/src/main/java/com/datastax/driver/core/Cluster.java b/driver-core/src/main/java/com/datastax/driver/core/Cluster.java index 99f1b9d5216..112077d498a 100644 --- a/driver-core/src/main/java/com/datastax/driver/core/Cluster.java +++ b/driver-core/src/main/java/com/datastax/driver/core/Cluster.java @@ -286,6 +286,9 @@ class Manager implements Host.StateListener, Connection.DefaultResponseHandler { // TODO: give a name to the threads of this executor private final ScheduledExecutorService scheduledExecutor = Executors.newScheduledThreadPool(1); + // TODO: give a name to the threads of this executor + final ExecutorService executor = Executors.newCachedThreadPool(); + private Manager(List contactPoints) throws ConnectionException { this.metadata = new ClusterMetadata(this); this.contactPoints = contactPoints; @@ -367,7 +370,7 @@ public void handle(Message.Response response) { // When handle is called, the current thread is a network I/O // thread, and we don't want to block it (typically addHost() will // create the connection pool to the new node, which can take time) - scheduledExecutor.execute(new Runnable() { + executor.execute(new Runnable() { public void run() { switch (event.type) { case TOPOLOGY_CHANGE: diff --git a/driver-core/src/main/java/com/datastax/driver/core/ClusterMetadata.java b/driver-core/src/main/java/com/datastax/driver/core/ClusterMetadata.java index 5581bf83150..03efc807cc7 100644 --- a/driver-core/src/main/java/com/datastax/driver/core/ClusterMetadata.java +++ b/driver-core/src/main/java/com/datastax/driver/core/ClusterMetadata.java @@ -99,17 +99,48 @@ Collection allHosts() { return hosts.values(); } + /** + * Returns the known hosts of this cluster. + * + * @return a set with all the known hosts of this cluster. + */ + public Set getAllHosts() { + return new HashSet(allHosts()); + } + + /** + * Returns the metadata of a keyspace given its name. + * + * @param keyspace the name of the keyspace for which metadata should be + * returned. + * @return the metadata of the requested keyspace or {@code null} if {@code + * keyspace} is not a known keyspace. + */ public KeyspaceMetadata getKeyspace(String keyspace) { return keyspaces.get(keyspace); } - // TODO: Returning a multi-line string from toString might not be a good idea - @Override - public String toString() { + /** + * Returns a {@code String} containing CQL queries representing the schema + * of this cluster. + * + * In other words, this method returns the queries that would allow one to + * recreate the schema of this cluster. + * + * Note that the returned String is formatted to be human readable (for + * some definition of human readable at least). + * + * @return the CQL queries representing this cluster schema as a {@code + * String}. + */ + // TODO: add some boolean arg to deal with thrift definitions that can't be fully + // represented by CQL queries (either throw an exception or + // do our best). Or some other way to deal with that. 
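+ // Illustrative use (hypothetical snippet, names as in the test code elsewhere + // in this series): String schema = cluster.getMetadata().exportSchemaAsString(); + // the result can then be saved to a file and replayed to recreate the schema.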
+ public String exportSchemaAsString() { StringBuilder sb = new StringBuilder(); for (KeyspaceMetadata ksm : keyspaces.values()) - sb.append(ksm).append("\n"); + sb.append(ksm.exportAsString()).append("\n"); return sb.toString(); } diff --git a/driver-core/src/main/java/com/datastax/driver/core/ConsistencyLevel.java b/driver-core/src/main/java/com/datastax/driver/core/ConsistencyLevel.java new file mode 100644 index 00000000000..b5b06fc896c --- /dev/null +++ b/driver-core/src/main/java/com/datastax/driver/core/ConsistencyLevel.java @@ -0,0 +1,27 @@ +package com.datastax.driver.core; + +public enum ConsistencyLevel +{ + ANY, + ONE, + TWO, + THREE, + QUORUM, + ALL, + LOCAL_QUORUM, + EACH_QUORUM; + + public static ConsistencyLevel from(org.apache.cassandra.db.ConsistencyLevel cl) { + switch (cl) { + case ANY: return ANY; + case ONE: return ONE; + case TWO: return TWO; + case THREE: return THREE; + case QUORUM: return QUORUM; + case ALL: return ALL; + case LOCAL_QUORUM: return LOCAL_QUORUM; + case EACH_QUORUM: return EACH_QUORUM; + } + throw new AssertionError(); + } +} diff --git a/driver-core/src/main/java/com/datastax/driver/core/KeyspaceMetadata.java b/driver-core/src/main/java/com/datastax/driver/core/KeyspaceMetadata.java index 684d2818367..6ac9220e3d4 100644 --- a/driver-core/src/main/java/com/datastax/driver/core/KeyspaceMetadata.java +++ b/driver-core/src/main/java/com/datastax/driver/core/KeyspaceMetadata.java @@ -3,7 +3,7 @@ import java.util.*; /** - * Describes the keyspace defined in the cluster, i.e. the current schema. + * Describes a keyspace defined in this cluster. */ public class KeyspaceMetadata { @@ -32,29 +32,89 @@ static KeyspaceMetadata build(CQLRow row) { return ksm; } + /** + * Returns the name of this keyspace. + * + * @return the name of this CQL keyspace. + */ public String getName() { return name; } + /** + * Returns whether durable writes are set on this keyspace. + * + * @return {@code true} if durable writes are set on this keyspace (the + * default), {@code false} otherwise. + */ public boolean isDurableWrites() { return durableWrites; } - public Map getReplicationStrategy() { + /** + * Returns the replication options for this keyspace. + * + * @return a map containing the replication options for this keyspace. + */ + public Map getReplication() { return new HashMap(replication); } + /** + * Returns the metadata for a table contained in this keyspace. + * + * @param name the name of the table to retrieve + * @return the metadata for table {@code name} in this keyspace if it + * exists, {@code null} otherwise. + */ public TableMetadata getTable(String name) { return tables.get(name); } + /** + * Returns the tables defined in this keyspace. + * + * @return a collection of the metadata for the tables defined in this + * keyspace. + */ public Collection getTables() { return tables.values(); } - // TODO: Returning a multi-line string from toString might not be a good idea - @Override - public String toString() { + /** + * Returns a {@code String} containing CQL queries representing this + * keyspace and the tables it contains. + * + * In other words, this method returns the queries that would allow one to + * recreate the schema of this keyspace, along with all its tables. + * + * Note that the returned String is formatted to be human readable (for + * some definition of human readable at least). + * + * @return the CQL queries representing this keyspace schema as a {@code + * String}. 
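+ * + * (Feeding the returned queries back through {@code Session.execute} should + * recreate this keyspace and its tables.)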
+ */ + public String exportAsString() { + StringBuilder sb = new StringBuilder(); + + sb.append(asCQLQuery()).append("\n"); + + for (TableMetadata tm : tables.values()) + sb.append("\n").append(tm.exportAsString()); + + return sb.toString(); + } + + /** + * Returns a CQL query representing this keyspace. + * + * This method returns a single 'CREATE KEYSPACE' query with the options + * corresponding to this keyspace definition. + * + * @return the 'CREATE KEYSPACE' query corresponding to this keyspace. + * @see #exportAsString + */ + public String asCQLQuery() { StringBuilder sb = new StringBuilder(); sb.append("CREATE KEYSPACE ").append(name).append(" WITH "); @@ -65,11 +125,7 @@ public String toString() { sb.append(", '").append(entry.getKey()).append("': '").append(entry.getValue()).append("'"); } sb.append(" } AND DURABLE_WRITES = ").append(durableWrites); - sb.append(";\n"); - - for (TableMetadata tm : tables.values()) - sb.append("\n").append(tm); - + sb.append(";"); return sb.toString(); } diff --git a/driver-core/src/main/java/com/datastax/driver/core/RetryPolicy.java b/driver-core/src/main/java/com/datastax/driver/core/RetryPolicy.java new file mode 100644 index 00000000000..243e93534b1 --- /dev/null +++ b/driver-core/src/main/java/com/datastax/driver/core/RetryPolicy.java @@ -0,0 +1,38 @@ +package com.datastax.driver.core; + +/** + * A policy that defines the default behavior to adopt when a request returns + * a TimeoutException or an UnavailableException. + * + * TODO: is it really useful to have such details if one cannot modify the request? + */ +public interface RetryPolicy { + + public boolean onReadTimeout(ConsistencyLevel cl, int required, int received, boolean dataPresent, int nbRetry); + + public boolean onWriteTimeout(ConsistencyLevel cl, int required, int received, int nbRetry); + + public boolean onUnavailable(ConsistencyLevel cl, int required, int alive, int nbRetry); + + public static class DefaultPolicy implements RetryPolicy { + + public static final DefaultPolicy INSTANCE = new DefaultPolicy(); + + private DefaultPolicy() {} + + public boolean onReadTimeout(ConsistencyLevel cl, int required, int received, boolean dataPresent, int nbRetry) { + if (nbRetry > 1) + return false; + + return received >= required && !dataPresent; + } + + public boolean onWriteTimeout(ConsistencyLevel cl, int required, int received, int nbRetry) { + return false; + } + + public boolean onUnavailable(ConsistencyLevel cl, int required, int alive, int nbRetry) { + return false; + } + } +} diff --git a/driver-core/src/main/java/com/datastax/driver/core/RetryingFuture.java b/driver-core/src/main/java/com/datastax/driver/core/RetryingFuture.java new file mode 100644 index 00000000000..dc4931a6ad8 --- /dev/null +++ b/driver-core/src/main/java/com/datastax/driver/core/RetryingFuture.java @@ -0,0 +1,92 @@ +package com.datastax.driver.core; + +import com.datastax.driver.core.transport.*; +import com.datastax.driver.core.utils.SimpleFuture; + +import org.apache.cassandra.transport.Message; +import org.apache.cassandra.transport.messages.ErrorMessage; +import org.apache.cassandra.exceptions.*; + +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +class RetryingFuture extends SimpleFuture implements Connection.ResponseCallback { + + private static final Logger logger = LoggerFactory.getLogger(RetryingFuture.class); + + private final Session.Manager manager; + private final Message.Request request; + + private volatile int retries; + + public RetryingFuture(Session.Manager manager, 
Message.Request request) { + this.manager = manager; + this.request = request; + } + + public Message.Request getRequest() { + return request; + } + + @Override + public void onSet(Message.Response response) { + switch (response.type) { + case RESULT: + super.set(response); + break; + case ERROR: + ErrorMessage err = (ErrorMessage)response; + boolean retry = false; + switch (err.error.code()) { + // TODO: Handle cases take into account by the retry policy + case READ_TIMEOUT: + assert err.error instanceof ReadTimeoutException; + ReadTimeoutException rte = (ReadTimeoutException)err.error; + ConsistencyLevel rcl = ConsistencyLevel.from(rte.consistency); + retry = manager.retryPolicy.onReadTimeout(rcl, rte.received, rte.blockFor, rte.dataPresent, retries); + break; + case WRITE_TIMEOUT: + assert err.error instanceof WriteTimeoutException; + WriteTimeoutException wte = (WriteTimeoutException)err.error; + ConsistencyLevel wcl = ConsistencyLevel.from(wte.consistency); + retry = manager.retryPolicy.onWriteTimeout(wcl, wte.received, wte.blockFor, retries); + break; + case UNAVAILABLE: + assert err.error instanceof UnavailableException; + UnavailableException ue = (UnavailableException)err.error; + ConsistencyLevel ucl = ConsistencyLevel.from(ue.consistency); + retry = manager.retryPolicy.onUnavailable(ucl, ue.required, ue.alive, retries); + break; + case OVERLOADED: + // TODO: maybe we could make that part of the retrying policy? + // retry once + if (retries == 0) + retry = true; + break; + case IS_BOOTSTRAPPING: + // TODO: log error as this shouldn't happen + // retry once + if (retries == 0) + retry = true; + break; + } + if (retry) { + ++retries; + manager.retry(this); + } else { + super.set(response); + } + + break; + default: + // TODO: handle errors (set the connection to defunct as this mean it is in a bad state) + logger.info("Got " + response); + break; + } + } + + @Override + public void onException(Exception exception) { + super.setException(exception); + } +} diff --git a/driver-core/src/main/java/com/datastax/driver/core/Session.java b/driver-core/src/main/java/com/datastax/driver/core/Session.java index 23553bc2c8f..6dc599aca65 100644 --- a/driver-core/src/main/java/com/datastax/driver/core/Session.java +++ b/driver-core/src/main/java/com/datastax/driver/core/Session.java @@ -68,7 +68,7 @@ public Session use(String keyspace) { public ResultSet execute(String query) { // TODO: Deal with exceptions try { - return toResultSet(manager.execute(new QueryMessage(query))); + return toResultSet(manager.executeWithRetry(new QueryMessage(query))); } catch (Exception e) { throw new RuntimeException(e); } @@ -134,7 +134,9 @@ public ResultSet.Future executeAsync(CQLQuery query) { public PreparedStatement prepare(String query) { // TODO: Deal with exceptions try { - return toPreparedStatement(manager.execute(new PrepareMessage(query))); + Connection.Future future = new Connection.Future(); + manager.execute(new PrepareMessage(query), future); + return toPreparedStatement(future); } catch (Exception e) { throw new RuntimeException(e); } @@ -167,7 +169,7 @@ public PreparedStatement prepare(CQLQuery query) { public ResultSet executePrepared(BoundStatement stmt) { // TODO: Deal with exceptions try { - return toResultSet(manager.execute(new ExecuteMessage(stmt.statement.id, Arrays.asList(stmt.values)))); + return toResultSet(manager.executeWithRetry(new ExecuteMessage(stmt.statement.id, Arrays.asList(stmt.values)))); } catch (Exception e) { throw new RuntimeException(e); } @@ -189,7 +191,7 @@ public 
ResultSet.Future executePreparedAsync(BoundStatement stmt) { return null; } - static ResultSet toResultSet(Connection.Future future) { + static ResultSet toResultSet(Future future) { try { Message.Response response = future.get(); switch (response.type) { @@ -238,6 +240,9 @@ static class Manager implements Host.StateListener { private final ConcurrentMap pools; private final LoadBalancingPolicy loadBalancer; + // TODO: make that configurable + final RetryPolicy retryPolicy = RetryPolicy.DefaultPolicy.INSTANCE; + private final HostConnectionPool.Configuration poolsConfiguration; // TODO: Make that configurable @@ -307,7 +312,7 @@ public void setKeyspace(String keyspace) { * * @return a future on the response to the request. */ - public Connection.Future execute(Message.Request msg) { + public void execute(Message.Request msg, Connection.ResponseCallback callback) { Iterator plan = loadBalancer.newQueryPlan(); while (plan.hasNext()) { @@ -319,7 +324,8 @@ public Connection.Future execute(Message.Request msg) { try { Connection connection = pool.borrowConnection(DEFAULT_CONNECTION_TIMEOUT, TimeUnit.MILLISECONDS); try { - return connection.write(msg); + connection.write(msg, callback); + return; } finally { pool.returnConnection(connection); } @@ -330,7 +336,25 @@ public Connection.Future execute(Message.Request msg) { } } // TODO: Change that to a "NoAvailableHostException" - throw new RuntimeException(); + callback.onException(new RuntimeException()); + } + + // TODO: this will need to evolve a bit for async calls + public Future executeWithRetry(Message.Request msg) { + RetryingFuture future = new RetryingFuture(this, msg); + execute(msg, future); + return future; + } + + // TODO: This doesn't work for prepared statement, fix it. + public void retry(final RetryingFuture retryFuture) { + // TODO: retry callback on executor (to avoid doing write on IO + // thread) + cluster.manager.executor.execute(new Runnable() { + public void run() { + execute(retryFuture.getRequest(), retryFuture); + } + }); + } } } diff --git a/driver-core/src/main/java/com/datastax/driver/core/TableMetadata.java b/driver-core/src/main/java/com/datastax/driver/core/TableMetadata.java index 203231c860f..378c00c253c 100644 --- a/driver-core/src/main/java/com/datastax/driver/core/TableMetadata.java +++ b/driver-core/src/main/java/com/datastax/driver/core/TableMetadata.java @@ -119,14 +119,31 @@ static TableMetadata build(KeyspaceMetadata ksm, CQLRow row) { } } + /** + * Returns the name of this table. + * + * @return the name of this CQL table. + */ public String getName() { return name; } + /** + * Returns the keyspace this table belongs to. + * + * @return the keyspace metadata of the keyspace this table belongs to. + */ public KeyspaceMetadata getKeyspace() { return keyspace; } + /** + * Returns metadata on a column of this table. + * + * @param name the name of the column to retrieve. + * @return the metadata for the {@code name} column if it exists, or + * {@code null} otherwise. + */ public ColumnMetadata getColumn(String name) { return columns.get(name); } @@ -154,18 +171,56 @@ void add(ColumnMetadata column) { columns.put(column.getName(), column); } - // TODO: Returning a multi-line string from toString might not be a good idea - @Override - public String toString() { + /** + * Returns a {@code String} containing CQL queries representing this + * table and the indexes on it. 
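+ * (Mainly useful for persisting a snapshot of a table definition, e.g. to a + * .cql file that can be replayed later.)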
+ * + * In other words, this method returns the queries that would allow one to + * recreate the schema of this table, along with the indexes defined on + * columns of this table. + * + * Note that the returned String is formatted to be human readable (for + * some definition of human readable at least). + * + * @return the CQL queries representing this table schema as a {@code + * String}. + */ + public String exportAsString() { StringBuilder sb = new StringBuilder(); - sb.append("CREATE TABLE ").append(name).append(" (\n"); - for (ColumnMetadata cm : columns.values()) { - sb.append(" ").append(cm).append(",\n"); - } + sb.append(asCQLQuery(true)).append("\n"); + + // TODO: handle indexes + + return sb.toString(); + } + + /** + * Returns a CQL query representing this table. + * + * This method returns a single 'CREATE TABLE' query with the options + * corresponding to this table definition. + * + * Note that the returned string will be a single line; the returned query + * is not formatted in any way. + * + * @return the 'CREATE TABLE' query corresponding to this table. + * @see #exportAsString + */ + public String asCQLQuery() { + return asCQLQuery(false); + } + + private String asCQLQuery(boolean formatted) { + StringBuilder sb = new StringBuilder(); + + sb.append("CREATE TABLE ").append(name); + newLine(sb, formatted); + for (ColumnMetadata cm : columns.values()) + newLine(sb.append(spaces(4, formatted)).append(cm), formatted); // PK - sb.append(" ").append("PRIMARY KEY ("); + sb.append(spaces(4, formatted)).append("PRIMARY KEY ("); if (partitionKey.size() == 1) { sb.append(partitionKey.get(0).getName()); } else { @@ -179,27 +234,51 @@ public String toString() { } for (ColumnMetadata cm : clusteringKey) sb.append(", ").append(cm.getName()); - sb.append(")\n"); - // end PK sb.append(")"); + newLine(sb, formatted); + // end PK + + newLine(sb, formatted); // Options sb.append(" WITH read_repair_chance = ").append(options.readRepair); - sb.append("\n AND local_read_repair_chance = ").append(options.localReadRepair); - sb.append("\n AND replicate_on_write = ").append(options.replicateOnWrite); - sb.append("\n AND gc_grace_seconds = ").append(options.gcGrace); - sb.append("\n AND bloom_filter_fp_chance = ").append(options.bfFpChance); - sb.append("\n AND caching = ").append(options.caching); + and(sb, formatted).append("local_read_repair_chance = ").append(options.localReadRepair); + and(sb, formatted).append("replicate_on_write = ").append(options.replicateOnWrite); + and(sb, formatted).append("gc_grace_seconds = ").append(options.gcGrace); + and(sb, formatted).append("bloom_filter_fp_chance = ").append(options.bfFpChance); + and(sb, formatted).append("caching = ").append(options.caching); if (options.comment != null) - sb.append("\n AND comment = ").append(options.comment); + and(sb, formatted).append("comment = ").append(options.comment); // TODO: finish (compaction and compression) - sb.append(";\n"); + newLine(sb, formatted); + + return sb.toString(); + } + + private StringBuilder and(StringBuilder sb, boolean formatted) { + return newLine(sb, formatted).append(spaces(3, formatted)).append("AND "); + } + + private String spaces(int n, boolean formatted) { + if (!formatted) + return ""; + + StringBuilder sb = new StringBuilder(); + for (int i = 0; i < n; i++) + sb.append(' '); + return sb.toString(); } + private StringBuilder newLine(StringBuilder sb, boolean formatted) { + if (formatted) + sb.append('\n'); + return sb; + } + // TODO: add getter for those - private static class Options { + public 
static class Options { private static final String COMMENT = "comment"; private static final String READ_REPAIR = "read_repair_chance"; diff --git a/driver-core/src/main/java/com/datastax/driver/core/transport/Connection.java b/driver-core/src/main/java/com/datastax/driver/core/transport/Connection.java index ef1dcc6a6b8..153efd66a7f 100644 --- a/driver-core/src/main/java/com/datastax/driver/core/transport/Connection.java +++ b/driver-core/src/main/java/com/datastax/driver/core/transport/Connection.java @@ -153,8 +153,22 @@ public void setKeyspace(String keyspace) throws ConnectionException { logger.trace(String.format("[%s] Setting keyspace %s", name, keyspace)); // TODO: Handle the case where we get an error because the keyspace doesn't // exist (and don't set the keyspace to retry later) - write(new QueryMessage("USE " + keyspace)).get(); - this.keyspace = keyspace; + Message.Response response = write(new QueryMessage("USE " + keyspace)).get(); + switch (response.type) { + case RESULT: + this.keyspace = keyspace; + break; + case ERROR: + // TODO: what to do when that happens? It could be that the + // node doesn't know about that keyspace even though it + // exists (new node not yet up on schemas?) + logger.debug(String.format("Cannot set keyspace %s (%s)", keyspace, response)); + break; + default: + // TODO: handle errors (set the connection to defunct as this mean it is in a bad state) + logger.info("Got " + response); + return null; + } } catch (ConnectionException e) { throw defunct(e); } catch (ExecutionException e) { @@ -174,26 +188,13 @@ public void setKeyspace(String keyspace) throws ConnectionException { * @throws TransportException if an I/O error while sending the request */ public Future write(Message.Request request) throws ConnectionException { - Future future = new Future(this, dispatcher.streamIdHandler.next()); - internalWrite(request, future); + Future future = new Future(); + write(request, future); return future; } - public void write(Message.Request request, final ResponseCallback callback) throws ConnectionException { - final int streamId = dispatcher.streamIdHandler.next(); - internalWrite(request, new ResponseHandler() { - - public int getStreamId() { - return streamId; - } - - public ResponseCallback callback() { - return callback; - } - }); - } + public void write(Message.Request request, ResponseCallback callback) throws ConnectionException { - private void internalWrite(Message.Request request, ResponseHandler handler) throws ConnectionException { if (isDefunct) throw new ConnectionException(address, "Write attempt on defunct connection"); @@ -206,8 +207,9 @@ private void internalWrite(Message.Request request, ResponseHandler handler) thr inFlight.incrementAndGet(); try { + ResponseHandler handler = new ResponseHandler(dispatcher, callback); dispatcher.add(handler); - request.setStreamId(handler.getStreamId()); + request.setStreamId(handler.streamId); logger.trace(String.format("[%s] writting request %s", name, request)); ChannelFuture writeFuture = channel.write(request); @@ -327,7 +329,7 @@ private class Dispatcher extends SimpleChannelUpstreamHandler { private final ConcurrentMap pending = new ConcurrentHashMap(); public void add(ResponseHandler handler) { - ResponseHandler old = pending.put(handler.getStreamId(), handler); + ResponseHandler old = pending.put(handler.streamId, handler); assert old == null; } @@ -352,7 +354,7 @@ public void messageReceived(ChannelHandlerContext ctx, MessageEvent e) { if (handler == null) // TODO: we should handle those with 
a default handler throw new RuntimeException("No handler set for " + streamId + ", handlers = " + pending); - handler.callback().onSet(response); + handler.callback.onSet(response); } } @@ -371,28 +373,13 @@ public void errorOutAllHandler(ConnectionException ce) { Iterator iter = pending.values().iterator(); while (iter.hasNext()) { - iter.next().callback().onException(ce); + iter.next().callback.onException(ce); iter.remove(); } } } - public static class Future extends SimpleFuture implements ResponseHandler, ResponseCallback { - private final Connection connection; - private final int streamId; - - public Future(Connection connection, int streamId) { - this.connection = connection; - this.streamId = streamId; - } - - public int getStreamId() { - return streamId; - } - - public ResponseCallback callback() { - return this; - } + public static class Future extends SimpleFuture implements ResponseCallback { @Override public void onSet(Message.Response response) { @@ -400,19 +387,25 @@ public void onSet(Message.Response response) { } @Override - public void onException(ConnectionException exception) { + public void onException(Exception exception) { super.setException(exception); } } public interface ResponseCallback { public void onSet(Message.Response response); - public void onException(ConnectionException exception); + public void onException(Exception exception); } - private interface ResponseHandler { - public int getStreamId(); - public ResponseCallback callback(); + private static class ResponseHandler { + + public final int streamId; + public final ResponseCallback callback; + + public ResponseHandler(Dispatcher dispatcher, ResponseCallback callback) { + this.streamId = dispatcher.streamIdHandler.next(); + this.callback = callback; + } } public interface DefaultResponseHandler { From a43f981934316335ceb0fa9fc03ffb32d039a05b Mon Sep 17 00:00:00 2001 From: Sylvain Lebresne Date: Mon, 17 Sep 2012 17:40:24 +0200 Subject: [PATCH 025/719] Add collections support --- .../java/com/datastax/driver/core/CQLRow.java | 53 ++++++++++++----- .../com/datastax/driver/core/DataType.java | 59 ++++++++++++++++++- .../datastax/driver/core/transport/Codec.java | 37 +++++++++++- .../driver/core/transport/Connection.java | 2 +- .../com/datastax/driver/core/SessionTest.java | 42 ++++++++++--- 5 files changed, 164 insertions(+), 29 deletions(-) diff --git a/driver-core/src/main/java/com/datastax/driver/core/CQLRow.java b/driver-core/src/main/java/com/datastax/driver/core/CQLRow.java index 663fc57b7a8..339d813b11f 100644 --- a/driver-core/src/main/java/com/datastax/driver/core/CQLRow.java +++ b/driver-core/src/main/java/com/datastax/driver/core/CQLRow.java @@ -519,31 +519,54 @@ public UUID getUUID(String name) { return getUUID(metadata.getIdx(name)); } - public List getList(int i, Class elts) { - // TODO - return null; + // TODO: I don't have a good idea on how to make that typesafe in a way that is not ugly + public List getList(int i) { + DataType type = metadata.type(i); + if (!(type instanceof DataType.Collection.List)) + throw new InvalidTypeException(String.format("Column %s is not of list type", metadata.name(i))); + + ByteBuffer value = data.get(i); + if (value == null) + return null; + + // TODO: we could avoid the getCodec call if we kept a reference to the original message. 
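+ // (Codec.getCodec resolves the Cassandra AbstractType matching this DataType; + // its compose() then deserializes the raw ByteBuffer into the Java collection.)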
+ return (List)Codec.getCodec(type).compose(value); } - public List getList(String name, Class elts) { - return getList(metadata.getIdx(name), elts); + public List getList(String name) { + return getList(metadata.getIdx(name)); } - public Set getSet(int i, Class elts) { - // TODO - return null; + public Set getSet(int i) { + DataType type = metadata.type(i); + if (!(type instanceof DataType.Collection.Set)) + throw new InvalidTypeException(String.format("Column %s is not of set type", metadata.name(i))); + + ByteBuffer value = data.get(i); + if (value == null) + return null; + + return (Set)Codec.getCodec(type).compose(value); } - public Set getSet(String name, Class elts) { - return getSet(metadata.getIdx(name), elts); + public Set getSet(String name) { + return getSet(metadata.getIdx(name)); } - public Map getMap(int i, Class keys, Class values) { - // TODO - return null; + public Map getMap(int i) { + DataType type = metadata.type(i); + if (!(type instanceof DataType.Collection.Map)) + throw new InvalidTypeException(String.format("Column %s is not of map type", metadata.name(i))); + + ByteBuffer value = data.get(i); + if (value == null) + return null; + + return (Map)Codec.getCodec(type).compose(value); } - public Map getMap(String name, Class keys, Class values) { - return getMap(metadata.getIdx(name), keys, values); + public Map getMap(String name) { + return getMap(metadata.getIdx(name)); } @Override diff --git a/driver-core/src/main/java/com/datastax/driver/core/DataType.java b/driver-core/src/main/java/com/datastax/driver/core/DataType.java index 0a1c0dda1e0..c4a179f727d 100644 --- a/driver-core/src/main/java/com/datastax/driver/core/DataType.java +++ b/driver-core/src/main/java/com/datastax/driver/core/DataType.java @@ -93,14 +93,69 @@ public enum Native implements DataType { /** * A collection type (lists, sets and maps). 
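* * Each collection type carries its element type(s); see the {@code List}, * {@code Set} and {@code Map} subclasses below.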
*/ - public static class Collection implements DataType { - // TODO + public static abstract class Collection implements DataType { + + // TODO: Type is a very ugly/confusing name + public enum Type { LIST, SET, MAP }; + + private final Type type; + + protected Collection(Type type) { + this.type = type; + } public Kind kind() { return Kind.COLLECTION; } + public Type collectionType() { return type; } + public Native asNative() { throw new IllegalStateException("Not a native type, but a collection one"); } public Collection asCollection() { return this; } public Custom asCustom() { throw new IllegalStateException("Not a custom type, but a collection one"); } + + public static class List extends Collection { + private final DataType elementsType; + + public List(DataType elementsType) { + super(Type.LIST); + this.elementsType = elementsType; + } + + public DataType getElementsType() { + return elementsType; + } + } + + public static class Set extends Collection { + private final DataType elementsType; + + public Set(DataType elementsType) { + super(Type.SET); + this.elementsType = elementsType; + } + + public DataType getElementsType() { + return elementsType; + } + } + + public static class Map extends Collection { + private final DataType keysType; + private final DataType valuesType; + + public Map(DataType keysType, DataType valuesType) { + super(Type.MAP); + this.keysType = keysType; + this.valuesType = valuesType; + } + + public DataType getKeysType() { + return keysType; + } + + public DataType getValuesType() { + return valuesType; + } + } } /** diff --git a/driver-core/src/main/java/com/datastax/driver/core/transport/Codec.java b/driver-core/src/main/java/com/datastax/driver/core/transport/Codec.java index 38950c5cf6e..7d8e19ae9ce 100644 --- a/driver-core/src/main/java/com/datastax/driver/core/transport/Codec.java +++ b/driver-core/src/main/java/com/datastax/driver/core/transport/Codec.java @@ -68,7 +68,22 @@ private static AbstractType nativeCodec(DataType.Native type) { } private static AbstractType collectionCodec(DataType.Collection type) { - return null; + + switch (type.collectionType()) { + case LIST: + AbstractType listElts = getCodec(((DataType.Collection.List)type).getElementsType()); + return ListType.getInstance(listElts); + case SET: + AbstractType setElts = getCodec(((DataType.Collection.Set)type).getElementsType()); + return SetType.getInstance(setElts); + case MAP: + DataType.Collection.Map mt = (DataType.Collection.Map)type; + AbstractType mapKeys = getCodec(mt.getKeysType()); + AbstractType mapValues = getCodec(mt.getValuesType()); + return MapType.getInstance(mapKeys, mapValues); + default: + throw new RuntimeException("Unknown collection type"); + } } private static AbstractType customCodec(DataType.Custom type) { @@ -80,7 +95,25 @@ public static DataType rawTypeToDataType(AbstractType rawType) { if (type != null) return type; - // TODO: handle collections and custom + if (rawType instanceof CollectionType) { + switch (((CollectionType)rawType).kind) { + case LIST: + DataType listElts = rawTypeToDataType(((ListType)rawType).elements); + return new DataType.Collection.List(listElts); + case SET: + DataType setElts = rawTypeToDataType(((SetType)rawType).elements); + return new DataType.Collection.Set(setElts); + case MAP: + MapType mt = (MapType)rawType; + DataType mapKeys = rawTypeToDataType(mt.keys); + DataType mapValues = rawTypeToDataType(mt.values); + return new DataType.Collection.Map(mapKeys, mapValues); + default: + throw new RuntimeException("Unknown 
collection type"); + } + } + + // TODO: handle custom return null; } } diff --git a/driver-core/src/main/java/com/datastax/driver/core/transport/Connection.java b/driver-core/src/main/java/com/datastax/driver/core/transport/Connection.java index 153efd66a7f..9c83368e485 100644 --- a/driver-core/src/main/java/com/datastax/driver/core/transport/Connection.java +++ b/driver-core/src/main/java/com/datastax/driver/core/transport/Connection.java @@ -167,7 +167,7 @@ public void setKeyspace(String keyspace) throws ConnectionException { default: // TODO: handle errors (set the connection to defunct as this mean it is in a bad state) logger.info("Got " + response); - return null; + return; } } catch (ConnectionException e) { throw defunct(e); diff --git a/driver-core/src/test/java/com/datastax/driver/core/SessionTest.java b/driver-core/src/test/java/com/datastax/driver/core/SessionTest.java index 3571e8e0e85..a2bc789d8e5 100644 --- a/driver-core/src/test/java/com/datastax/driver/core/SessionTest.java +++ b/driver-core/src/test/java/com/datastax/driver/core/SessionTest.java @@ -99,23 +99,47 @@ public static void classSetUp() { //} @Test - public void MultiNodeContinuousExecuteTest() throws Exception { + public void CollectionsTest() throws Exception { - Cluster cluster = new Cluster.Builder().addContactPoints("127.0.0.1", "127.0.0.2").build(); + Cluster cluster = new Cluster.Builder().addContactPoints("127.0.0.1").build(); Session session = cluster.connect(); - session.execute("CREATE KEYSPACE test_ks WITH replication = { 'class' : 'SimpleStrategy', 'replication_factor' : 2 }"); + session.execute("CREATE KEYSPACE test_ks WITH replication = { 'class' : 'SimpleStrategy', 'replication_factor' : 1 }"); // We should deal with that sleep try { Thread.sleep(1000); } catch (Exception e) {} session.use("test_ks"); - session.execute("CREATE TABLE test (k text PRIMARY KEY, i int, f float)"); + session.execute("CREATE TABLE test (k text PRIMARY KEY, l list, s set, m map)"); - //System.out.println("--- Schema ---\n" + cluster.getMetadata()); + session.execute("INSERT INTO test (k, l, s, m) VALUES ('k', [3, 2, 1], { 3, 2, 1}, { 0 : 0, 1 : 1 })"); + for (CQLRow row : session.execute("SELECT * FROM test")) { + List l = (List)row.getList("l"); + Set s = (Set)row.getSet("s"); + Map m = (Map)row.getMap("m"); - for (int i = 0; i < 10000; ++i) { - System.out.println(">> " + i); - session.execute(String.format("INSERT INTO test (k, i, f) VALUES ('k%d', %d, %d.2)", i, i, i)); - Thread.currentThread().sleep(1000); + System.out.println("l = " + l); + System.out.println("s = " + s); + System.out.println("m = " + m); } } + + //@Test + //public void MultiNodeContinuousExecuteTest() throws Exception { + + // Cluster cluster = new Cluster.Builder().addContactPoints("127.0.0.1", "127.0.0.2").build(); + // Session session = cluster.connect(); + + // session.execute("CREATE KEYSPACE test_ks WITH replication = { 'class' : 'SimpleStrategy', 'replication_factor' : 2 }"); + // // We should deal with that sleep + // try { Thread.sleep(1000); } catch (Exception e) {} + // session.use("test_ks"); + // session.execute("CREATE TABLE test (k text PRIMARY KEY, i int, f float)"); + + // //System.out.println("--- Schema ---\n" + cluster.getMetadata()); + + // for (int i = 0; i < 10000; ++i) { + // System.out.println(">> " + i); + // session.execute(String.format("INSERT INTO test (k, i, f) VALUES ('k%d', %d, %d.2)", i, i, i)); + // Thread.currentThread().sleep(1000); + // } + //} } From 83998915ba04ffa8f63f2423f07d983e70b7adf8 Mon Sep 17 
00:00:00 2001 From: Sylvain Lebresne Date: Mon, 17 Sep 2012 20:05:00 +0200 Subject: [PATCH 026/719] Add async execution support --- .../driver/core/ControlConnection.java | 20 ++-- .../java/com/datastax/driver/core/Host.java | 4 +- .../com/datastax/driver/core/ResultSet.java | 54 +++++++++- ...ryingFuture.java => RetryingCallback.java} | 17 +-- .../com/datastax/driver/core/Session.java | 41 ++----- .../datastax/driver/core/TableMetadata.java | 102 ++++++++++++++++-- .../driver/core/transport/Connection.java | 1 + .../com/datastax/driver/core/SessionTest.java | 15 ++- 8 files changed, 190 insertions(+), 64 deletions(-) rename driver-core/src/main/java/com/datastax/driver/core/{RetryingFuture.java => RetryingCallback.java} (87%) diff --git a/driver-core/src/main/java/com/datastax/driver/core/ControlConnection.java b/driver-core/src/main/java/com/datastax/driver/core/ControlConnection.java index 7ce26800297..e8b1b29339b 100644 --- a/driver-core/src/main/java/com/datastax/driver/core/ControlConnection.java +++ b/driver-core/src/main/java/com/datastax/driver/core/ControlConnection.java @@ -3,6 +3,7 @@ import java.util.Arrays; import java.util.Iterator; import java.util.List; +import java.util.concurrent.ExecutionException; import com.datastax.driver.core.transport.Connection; import com.datastax.driver.core.transport.ConnectionException; @@ -76,17 +77,24 @@ public void refreshSchema() { // Make sure we're up to date on metadata try { - Connection.Future ksFuture = connection.write(new QueryMessage(SELECT_KEYSPACES)); - Connection.Future cfFuture = connection.write(new QueryMessage(SELECT_COLUMN_FAMILIES)); - Connection.Future colsFuture = connection.write(new QueryMessage(SELECT_COLUMNS)); + ResultSet.Future ksFuture = new ResultSet.Future(); + ResultSet.Future cfFuture = new ResultSet.Future(); + ResultSet.Future colsFuture = new ResultSet.Future(); + connection.write(new QueryMessage(SELECT_KEYSPACES), ksFuture); + connection.write(new QueryMessage(SELECT_COLUMN_FAMILIES), cfFuture); + connection.write(new QueryMessage(SELECT_COLUMNS), colsFuture); // TODO: we should probably do something more fancy, like check if the schema changed and notify whoever wants to be notified - cluster.metadata.rebuildSchema(Session.toResultSet(ksFuture), - Session.toResultSet(cfFuture), - Session.toResultSet(colsFuture)); + cluster.metadata.rebuildSchema(ksFuture.get(), cfFuture.get(), colsFuture.get()); } catch (ConnectionException e) { // TODO: log reconnect(); + } catch (ExecutionException e) { + // TODO: log and decide what to do since in theory that shouldn't be a cassandra exception + reconnect(); + } catch (InterruptedException e) { + // TODO: it's bad to do that but at the same time it's annoying to be interrupted + throw new RuntimeException(e); } } diff --git a/driver-core/src/main/java/com/datastax/driver/core/Host.java b/driver-core/src/main/java/com/datastax/driver/core/Host.java index c4b3a9fd9b0..ac4aa9fb168 100644 --- a/driver-core/src/main/java/com/datastax/driver/core/Host.java +++ b/driver-core/src/main/java/com/datastax/driver/core/Host.java @@ -92,7 +92,7 @@ public class HealthMonitor { * * Registering the same listener multiple times is a no-op. * - * @param listener the new {@linke Host.StateListener} to register. + * @param listener the new {@link Host.StateListener} to register. 
*/ public void register(StateListener listener) { listeners.add(listener); } @@ -104,7 +104,7 @@ public void register(StateListener listener) { * This method is a no-op if {@code listener} hadn't previously been * registered against this monitor. * - * @param listener the {@linke Host.StateListener} to unregister. + * @param listener the {@link Host.StateListener} to unregister. */ public void unregister(StateListener listener) { listeners.remove(listener); } diff --git a/driver-core/src/main/java/com/datastax/driver/core/ResultSet.java b/driver-core/src/main/java/com/datastax/driver/core/ResultSet.java index 10580a4c8c9..0ece986e6f6 100644 --- a/driver-core/src/main/java/com/datastax/driver/core/ResultSet.java +++ b/driver-core/src/main/java/com/datastax/driver/core/ResultSet.java @@ -4,8 +4,18 @@ import java.util.*; import org.apache.cassandra.cql3.ColumnSpecification; +import org.apache.cassandra.transport.Message; +import org.apache.cassandra.transport.ProtocolException; +import org.apache.cassandra.transport.ServerError; +import org.apache.cassandra.transport.messages.ErrorMessage; import org.apache.cassandra.transport.messages.ResultMessage; +import com.datastax.driver.core.transport.Connection; +import com.datastax.driver.core.utils.SimpleFuture; + +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + /** * The result of a query. * @@ -13,6 +23,8 @@ */ public class ResultSet implements Iterable { + private static final Logger logger = LoggerFactory.getLogger(ResultSet.class); + private static final ResultSet EMPTY = new ResultSet(Columns.EMPTY, new ArrayDeque(0)); private final Columns metadata; @@ -127,8 +139,46 @@ public String toString() { return sb.toString(); } - public static class Future // implements java.util.concurrent.Future + public static class Future extends SimpleFuture implements Connection.ResponseCallback { - // TODO + Future() {} + + @Override + public void onSet(Message.Response response) { + try { + switch (response.type) { + case RESULT: + super.set(ResultSet.fromMessage((ResultMessage)response)); + break; + case ERROR: + super.setException(convertException(((ErrorMessage)response).error)); + break; + default: + // TODO: handle errors (set the connection to defunct as this means it is in a bad state) + logger.info("Got " + response); + throw new RuntimeException(); + } + } catch (Exception e) { + // TODO: do better + throw new RuntimeException(e); + } + } + + @Override + public void onException(Exception exception) { + super.setException(exception); + } + + // TODO: Convert to some internal exception + private Exception convertException(org.apache.cassandra.exceptions.TransportException te) { + + if (te instanceof ServerError) { + return new RuntimeException("An unexpected error occurred server side: " + te.getMessage()); + } else if (te instanceof ProtocolException) { + return new RuntimeException("An unexpected protocol error occurred. 
This is a bug in this library, please report: 
This is a bug in this library, please report: " + te.getMessage()); + } else { + return new RuntimeException(te.getMessage()); + } + } } } diff --git a/driver-core/src/main/java/com/datastax/driver/core/RetryingFuture.java b/driver-core/src/main/java/com/datastax/driver/core/RetryingCallback.java similarity index 87% rename from driver-core/src/main/java/com/datastax/driver/core/RetryingFuture.java rename to driver-core/src/main/java/com/datastax/driver/core/RetryingCallback.java index dc4931a6ad8..20251b297e8 100644 --- a/driver-core/src/main/java/com/datastax/driver/core/RetryingFuture.java +++ b/driver-core/src/main/java/com/datastax/driver/core/RetryingCallback.java @@ -10,18 +10,20 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -class RetryingFuture extends SimpleFuture implements Connection.ResponseCallback { +class RetryingCallback implements Connection.ResponseCallback { - private static final Logger logger = LoggerFactory.getLogger(RetryingFuture.class); + private static final Logger logger = LoggerFactory.getLogger(RetryingCallback.class); private final Session.Manager manager; private final Message.Request request; + private final Connection.ResponseCallback callback; private volatile int retries; - public RetryingFuture(Session.Manager manager, Message.Request request) { + public RetryingCallback(Session.Manager manager, Message.Request request, Connection.ResponseCallback callback) { this.manager = manager; this.request = request; + this.callback = callback; } public Message.Request getRequest() { @@ -32,7 +34,7 @@ public Message.Request getRequest() { public void onSet(Message.Response response) { switch (response.type) { case RESULT: - super.set(response); + callback.onSet(response); break; case ERROR: ErrorMessage err = (ErrorMessage)response; @@ -74,19 +76,18 @@ public void onSet(Message.Response response) { ++retries; manager.retry(this); } else { - super.set(response); + callback.onSet(response); } break; default: - // TODO: handle errors (set the connection to defunct as this mean it is in a bad state) - logger.info("Got " + response); + callback.onSet(response); break; } } @Override public void onException(Exception exception) { - super.setException(exception); + callback.onException(exception); } } diff --git a/driver-core/src/main/java/com/datastax/driver/core/Session.java b/driver-core/src/main/java/com/datastax/driver/core/Session.java index 6dc599aca65..953a4e56129 100644 --- a/driver-core/src/main/java/com/datastax/driver/core/Session.java +++ b/driver-core/src/main/java/com/datastax/driver/core/Session.java @@ -68,7 +68,7 @@ public Session use(String keyspace) { public ResultSet execute(String query) { // TODO: Deal with exceptions try { - return toResultSet(manager.executeWithRetry(new QueryMessage(query))); + return executeAsync(query).get(); } catch (Exception e) { throw new RuntimeException(e); } @@ -107,7 +107,7 @@ public ResultSet execute(CQLQuery query) { * be empty and will be for any non SELECT query. 
*/ public ResultSet.Future executeAsync(String query) { - return null; + return manager.executeWithRetry(new QueryMessage(query)); } /** @@ -122,7 +122,7 @@ public ResultSet.Future executeAsync(String query) { * @see #executeAsync(String) */ public ResultSet.Future executeAsync(CQLQuery query) { - return null; + return executeAsync(query.toString()); } /** @@ -169,7 +169,7 @@ public PreparedStatement prepare(CQLQuery query) { public ResultSet executePrepared(BoundStatement stmt) { // TODO: Deal with exceptions try { - return toResultSet(manager.executeWithRetry(new ExecuteMessage(stmt.statement.id, Arrays.asList(stmt.values)))); + return executePreparedAsync(stmt).get(); } catch (Exception e) { throw new RuntimeException(e); } @@ -188,28 +188,7 @@ public ResultSet executePrepared(BoundStatement stmt) { * be empty and will be for any non SELECT query. */ public ResultSet.Future executePreparedAsync(BoundStatement stmt) { - return null; - } - - static ResultSet toResultSet(Future future) { - try { - Message.Response response = future.get(); - switch (response.type) { - case RESULT: - return ResultSet.fromMessage((ResultMessage)response); - case ERROR: - // TODO: handle errors - logger.info("Got " + response); - return null; - default: - // TODO: handle errors (set the connection to defunct as this mean it is in a bad state) - logger.info("Got " + response); - return null; - } - } catch (Exception e) { - // TODO: do better - throw new RuntimeException(e); - } + return manager.executeWithRetry(new ExecuteMessage(stmt.statement.id, Arrays.asList(stmt.values))); } private PreparedStatement toPreparedStatement(Connection.Future future) { @@ -340,19 +319,19 @@ public void execute(Message.Request msg, Connection.ResponseCallback callback) { } // TODO: this will need to evolve a bit for async calls - public Future executeWithRetry(Message.Request msg) { - RetryingFuture future = new RetryingFuture(this, msg); - execute(msg, future); + public ResultSet.Future executeWithRetry(Message.Request msg) { + ResultSet.Future future = new ResultSet.Future(); + execute(msg, new RetryingCallback(this, msg, future)); return future; } // TODO: This doesn't work for prepared statement, fix it. 
- public void retry(final RetryingFuture retryFuture) { + public void retry(final RetryingCallback retryCallback) { // TODO: retry callback on executor (to avoid doing write on IO // thread) cluster.manager.executor.execute(new Runnable() { public void run() { - execute(retryFuture.getRequest(), retryFuture); + execute(retryCallback.getRequest(), retryCallback); } }); } diff --git a/driver-core/src/main/java/com/datastax/driver/core/TableMetadata.java b/driver-core/src/main/java/com/datastax/driver/core/TableMetadata.java index 378c00c253c..cc61e31c63c 100644 --- a/driver-core/src/main/java/com/datastax/driver/core/TableMetadata.java +++ b/driver-core/src/main/java/com/datastax/driver/core/TableMetadata.java @@ -277,7 +277,6 @@ private StringBuilder newLine(StringBuilder sb, boolean formatted) { return sb; } - // TODO: add getter for those public static class Options { private static final String COMMENT = "comment"; @@ -295,15 +294,15 @@ public static class Options { private static final double DEFAULT_BF_FP_CHANCE = 0.01; - public final String comment; - public final double readRepair; - public final double localReadRepair; - public final boolean replicateOnWrite; - public final int gcGrace; - public final double bfFpChance; - public final String caching; - public final Map compaction = new HashMap(); - public final Map compression = new HashMap(); + private final String comment; + private final double readRepair; + private final double localReadRepair; + private final boolean replicateOnWrite; + private final int gcGrace; + private final double bfFpChance; + private final String caching; + private final Map compaction = new HashMap(); + private final Map compression = new HashMap(); public Options(CQLRow row) { this.comment = row.isNull(COMMENT) ? "" : row.getString(COMMENT); @@ -321,5 +320,88 @@ public Options(CQLRow row) { // TODO: this should split the parameters compression.put("params", row.getString(COMPRESSION_PARAMS)); } + + /** + * The commentary set for this table. + * + * @return the commentary set for this table, or {@code null} if noe has been set. + */ + public String comment() { + return comment; + } + + /** + * The chance with which a read repair is triggered for this table. + * + * @return the read repair change set for table (in [0.0, 1.0]). + */ + public double readRepairChance() { + return readRepair; + } + + /** + * The (cluster) local read repair chance set for this table. + * + * @return the local read repair change set for table (in [0.0, 1.0]). + */ + public double localReadRepairChance() { + return localReadRepair; + } + + /** + * Whether replicateOnWrite is set for this table. + * + * This is only meaningful for tables holding counters. + * + * @return whether replicateOnWrite is set for this table. + */ + public boolean replicateOnWrite() { + return replicateOnWrite; + } + + /** + * The tombstone garbage collection grace time in seconds for this table. + * + * @return the tombstone garbage collection grace time in seconds for this table. + */ + public int gcGraceInSeconds() { + return gcGrace; + } + + /** + * The false positive chance for the bloom filter of this table. + * + * @return the bloom filter false positive chance for this table (in [0.0, 1.0]). + */ + public double bloomFilterFalsePositiveChance() { + return bfFpChance; + } + + /** + * The caching option for this table. + * + * @return the caching option for this table. + */ + public String caching() { + return caching; + } + + /** + * The compaction options for this table. 
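+         * The returned map is a copy, so callers may modify it without affecting
+         * this metadata (the getter below returns {@code new HashMap(compaction)}).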
+ * + * @return a map containing the compaction options for this table. + */ + public Map compaction() { + return new HashMap(compaction); + } + + /** + * The compression options for this table. + * + * @return a map containing the compression options for this table. + */ + public Map compression() { + return new HashMap(compression); + } } } diff --git a/driver-core/src/main/java/com/datastax/driver/core/transport/Connection.java b/driver-core/src/main/java/com/datastax/driver/core/transport/Connection.java index 9c83368e485..f3f4e47444f 100644 --- a/driver-core/src/main/java/com/datastax/driver/core/transport/Connection.java +++ b/driver-core/src/main/java/com/datastax/driver/core/transport/Connection.java @@ -379,6 +379,7 @@ public void errorOutAllHandler(ConnectionException ce) { } } + // TODO: Do we really need that after all? public static class Future extends SimpleFuture implements ResponseCallback { @Override diff --git a/driver-core/src/test/java/com/datastax/driver/core/SessionTest.java b/driver-core/src/test/java/com/datastax/driver/core/SessionTest.java index a2bc789d8e5..b545590a80b 100644 --- a/driver-core/src/test/java/com/datastax/driver/core/SessionTest.java +++ b/driver-core/src/test/java/com/datastax/driver/core/SessionTest.java @@ -104,11 +104,16 @@ public void CollectionsTest() throws Exception { Cluster cluster = new Cluster.Builder().addContactPoints("127.0.0.1").build(); Session session = cluster.connect(); - session.execute("CREATE KEYSPACE test_ks WITH replication = { 'class' : 'SimpleStrategy', 'replication_factor' : 1 }"); - // We should deal with that sleep - try { Thread.sleep(1000); } catch (Exception e) {} - session.use("test_ks"); - session.execute("CREATE TABLE test (k text PRIMARY KEY, l list, s set, m map)"); + try { + session.execute("CREATE KEYSPACE test_ks WITH replication = { 'class' : 'SimpleStrategy', 'replication_factor' : 1 }"); + // We should deal with that sleep + try { Thread.sleep(1000); } catch (Exception e) {} + session.use("test_ks"); + session.execute("CREATE TABLE test (k text PRIMARY KEY, l list, s set, m map)"); + } catch (Exception e) { + // Skip if already created + session.use("test_ks"); + } session.execute("INSERT INTO test (k, l, s, m) VALUES ('k', [3, 2, 1], { 3, 2, 1}, { 0 : 0, 1 : 1 })"); for (CQLRow row : session.execute("SELECT * FROM test")) { From 348dfa9e26dff2d2e6a550021d1831f672e1c2b5 Mon Sep 17 00:00:00 2001 From: Sylvain Lebresne Date: Tue, 18 Sep 2012 18:48:36 +0200 Subject: [PATCH 027/719] Refactor and better support for node joining/leaving --- .../driver/core/ControlConnection.java | 12 +- .../driver/core/ReconnectionHandler.java | 5 +- .../com/datastax/driver/core/ResultSet.java | 14 ++- .../driver/core/RetryingCallback.java | 104 +++++++++++++++--- .../com/datastax/driver/core/Session.java | 67 +++-------- .../driver/core/transport/Connection.java | 28 ++++- .../com/datastax/driver/core/SessionTest.java | 71 ++++++------ 7 files changed, 183 insertions(+), 118 deletions(-) diff --git a/driver-core/src/main/java/com/datastax/driver/core/ControlConnection.java b/driver-core/src/main/java/com/datastax/driver/core/ControlConnection.java index e8b1b29339b..862fb24a05e 100644 --- a/driver-core/src/main/java/com/datastax/driver/core/ControlConnection.java +++ b/driver-core/src/main/java/com/datastax/driver/core/ControlConnection.java @@ -77,12 +77,12 @@ public void refreshSchema() { // Make sure we're up to date on metadata try { - ResultSet.Future ksFuture = new ResultSet.Future(); - ResultSet.Future cfFuture = new 
ResultSet.Future(); - ResultSet.Future colsFuture = new ResultSet.Future(); - connection.write(new QueryMessage(SELECT_KEYSPACES), ksFuture); - connection.write(new QueryMessage(SELECT_COLUMN_FAMILIES), cfFuture); - connection.write(new QueryMessage(SELECT_COLUMNS), colsFuture); + ResultSet.Future ksFuture = new ResultSet.Future(new QueryMessage(SELECT_KEYSPACES)); + ResultSet.Future cfFuture = new ResultSet.Future(new QueryMessage(SELECT_COLUMN_FAMILIES)); + ResultSet.Future colsFuture = new ResultSet.Future(new QueryMessage(SELECT_COLUMNS)); + connection.write(ksFuture); + connection.write(cfFuture); + connection.write(colsFuture); // TODO: we should probably do something more fancy, like check if the schema changed and notify whoever wants to be notified cluster.metadata.rebuildSchema(ksFuture.get(), cfFuture.get(), colsFuture.get()); diff --git a/driver-core/src/main/java/com/datastax/driver/core/ReconnectionHandler.java b/driver-core/src/main/java/com/datastax/driver/core/ReconnectionHandler.java index c048e576f00..18775fc4e59 100644 --- a/driver-core/src/main/java/com/datastax/driver/core/ReconnectionHandler.java +++ b/driver-core/src/main/java/com/datastax/driver/core/ReconnectionHandler.java @@ -9,7 +9,7 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -public class ReconnectionHandler implements Runnable { +class ReconnectionHandler implements Runnable { private static final Logger logger = LoggerFactory.getLogger(ReconnectionHandler.class); @@ -18,7 +18,8 @@ public class ReconnectionHandler implements Runnable { private final Connection.Factory factory; // The next delay in milliseconds - // TODO: implements something better than "every 3 seconds" + // TODO: Implements something better than "every 3 seconds" + // TODO: And also evict the node after some (long) time private int nextDelay = 3000; public ReconnectionHandler(Host host, ScheduledExecutorService executor, Connection.Factory factory) { diff --git a/driver-core/src/main/java/com/datastax/driver/core/ResultSet.java b/driver-core/src/main/java/com/datastax/driver/core/ResultSet.java index 0ece986e6f6..4dc4a11c6dd 100644 --- a/driver-core/src/main/java/com/datastax/driver/core/ResultSet.java +++ b/driver-core/src/main/java/com/datastax/driver/core/ResultSet.java @@ -141,9 +141,18 @@ public String toString() { public static class Future extends SimpleFuture implements Connection.ResponseCallback { - Future() {} + private final Message.Request request; + + Future(Message.Request request) { + this.request = request; + } + + // TODO: We don't really want to expose that :( + // (Nor onSet/onException if we can avoid it) + public Message.Request request() { + return request; + } - @Override public void onSet(Message.Response response) { try { switch (response.type) { @@ -164,7 +173,6 @@ public void onSet(Message.Response response) { } } - @Override public void onException(Exception exception) { super.setException(exception); } diff --git a/driver-core/src/main/java/com/datastax/driver/core/RetryingCallback.java b/driver-core/src/main/java/com/datastax/driver/core/RetryingCallback.java index 20251b297e8..885e6732bed 100644 --- a/driver-core/src/main/java/com/datastax/driver/core/RetryingCallback.java +++ b/driver-core/src/main/java/com/datastax/driver/core/RetryingCallback.java @@ -1,36 +1,100 @@ package com.datastax.driver.core; +import java.util.Iterator; +import java.util.concurrent.TimeUnit; + import com.datastax.driver.core.transport.*; +import com.datastax.driver.core.pool.HostConnectionPool; import 
com.datastax.driver.core.utils.SimpleFuture; import org.apache.cassandra.transport.Message; import org.apache.cassandra.transport.messages.ErrorMessage; -import org.apache.cassandra.exceptions.*; +import org.apache.cassandra.transport.messages.ExecuteMessage; +import org.apache.cassandra.transport.messages.QueryMessage; +import org.apache.cassandra.exceptions.UnavailableException; +import org.apache.cassandra.exceptions.ReadTimeoutException; +import org.apache.cassandra.exceptions.WriteTimeoutException; import org.slf4j.Logger; import org.slf4j.LoggerFactory; +/** + * Connection callback that handle retrying another node if the connection fails. + * + * For queries, this also handle retrying the query if the RetryPolicy say so. + */ class RetryingCallback implements Connection.ResponseCallback { private static final Logger logger = LoggerFactory.getLogger(RetryingCallback.class); private final Session.Manager manager; - private final Message.Request request; private final Connection.ResponseCallback callback; - private volatile int retries; + private final Iterator queryPlan; + private volatile Host current; + + private final boolean isQuery; + private volatile int queryRetries; - public RetryingCallback(Session.Manager manager, Message.Request request, Connection.ResponseCallback callback) { + public RetryingCallback(Session.Manager manager, Connection.ResponseCallback callback) { this.manager = manager; - this.request = request; this.callback = callback; + + this.queryPlan = manager.loadBalancer.newQueryPlan(); + this.isQuery = request() instanceof QueryMessage || request() instanceof ExecuteMessage; + } + + public void sendRequest() { + + while (queryPlan.hasNext()) { + Host host = queryPlan.next(); + if (query(host)) + return; + } + // TODO: Change that to a "NoAvailableHostException" + callback.onException(new RuntimeException()); + } + + private boolean query(Host host) { + HostConnectionPool pool = manager.pools.get(host); + if (pool == null || pool.isShutdown()) + return false; + + try { + Connection connection = pool.borrowConnection(manager.DEFAULT_CONNECTION_TIMEOUT, TimeUnit.MILLISECONDS); + current = host; + try { + connection.write(this); + return true; + } finally { + pool.returnConnection(connection); + } + } catch (ConnectionException e) { + logger.trace("Error: " + e.getMessage()); + // If we have any problem with the connection, just move to the next node. + return false; + } + } + + private void retry(final boolean retryCurrent) { + final Host h = current; + + // We should not retry on the current thread as this will be an IO thread. 
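+        // (Blocking or writing from a Netty IO worker risks stalling the event
+        // loop, hence the hand-off to the cluster executor here.)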
+ manager.cluster.manager.executor.execute(new Runnable() { + public void run() { + if (retryCurrent) { + if (query(h)) + return; + } + sendRequest(); + } + }); } - public Message.Request getRequest() { - return request; + public Message.Request request() { + return callback.request(); } - @Override public void onSet(Message.Response response) { switch (response.type) { case RESULT: @@ -45,36 +109,35 @@ public void onSet(Message.Response response) { assert err.error instanceof ReadTimeoutException; ReadTimeoutException rte = (ReadTimeoutException)err.error; ConsistencyLevel rcl = ConsistencyLevel.from(rte.consistency); - retry = manager.retryPolicy.onReadTimeout(rcl, rte.received, rte.blockFor, rte.dataPresent, retries); + retry = manager.retryPolicy.onReadTimeout(rcl, rte.received, rte.blockFor, rte.dataPresent, queryRetries); break; case WRITE_TIMEOUT: assert err.error instanceof WriteTimeoutException; WriteTimeoutException wte = (WriteTimeoutException)err.error; ConsistencyLevel wcl = ConsistencyLevel.from(wte.consistency); - retry = manager.retryPolicy.onWriteTimeout(wcl, wte.received, wte.blockFor, retries); + retry = manager.retryPolicy.onWriteTimeout(wcl, wte.received, wte.blockFor, queryRetries); break; case UNAVAILABLE: assert err.error instanceof UnavailableException; UnavailableException ue = (UnavailableException)err.error; ConsistencyLevel ucl = ConsistencyLevel.from(ue.consistency); - retry = manager.retryPolicy.onUnavailable(ucl, ue.required, ue.alive, retries); + retry = manager.retryPolicy.onUnavailable(ucl, ue.required, ue.alive, queryRetries); break; case OVERLOADED: // TODO: maybe we could make that part of the retrying policy? - // retry once - if (retries == 0) + if (queryRetries == 0) retry = true; break; case IS_BOOTSTRAPPING: // TODO: log error as this shouldn't happen // retry once - if (retries == 0) + if (queryRetries == 0) retry = true; break; } if (retry) { - ++retries; - manager.retry(this); + ++queryRetries; + retry(true); } else { callback.onSet(response); } @@ -86,8 +149,15 @@ public void onSet(Message.Response response) { } } - @Override public void onException(Exception exception) { + + if (exception instanceof ConnectionException) { + logger.debug(String.format("Error sending request to %s, retrying with next host", ((ConnectionException)exception).address)); + retry(false); + return; + } + callback.onException(exception); } + } diff --git a/driver-core/src/main/java/com/datastax/driver/core/Session.java b/driver-core/src/main/java/com/datastax/driver/core/Session.java index 953a4e56129..43e64380b24 100644 --- a/driver-core/src/main/java/com/datastax/driver/core/Session.java +++ b/driver-core/src/main/java/com/datastax/driver/core/Session.java @@ -107,7 +107,7 @@ public ResultSet execute(CQLQuery query) { * be empty and will be for any non SELECT query. 
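     *
     * (With this patch the returned future is completed through a RetryingCallback,
     * so failover to another host from the query plan happens before the future
     * resolves rather than surfacing as an error.)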
*/ public ResultSet.Future executeAsync(String query) { - return manager.executeWithRetry(new QueryMessage(query)); + return manager.executeQuery(new QueryMessage(query)); } /** @@ -134,8 +134,8 @@ public ResultSet.Future executeAsync(CQLQuery query) { public PreparedStatement prepare(String query) { // TODO: Deal with exceptions try { - Connection.Future future = new Connection.Future(); - manager.execute(new PrepareMessage(query), future); + Connection.Future future = new Connection.Future(new PrepareMessage(query)); + manager.execute(future); return toPreparedStatement(future); } catch (Exception e) { throw new RuntimeException(e); @@ -188,7 +188,7 @@ public ResultSet executePrepared(BoundStatement stmt) { * be empty and will be for any non SELECT query. */ public ResultSet.Future executePreparedAsync(BoundStatement stmt) { - return manager.executeWithRetry(new ExecuteMessage(stmt.statement.id, Arrays.asList(stmt.values))); + return manager.executeQuery(new ExecuteMessage(stmt.statement.id, Arrays.asList(stmt.values))); } private PreparedStatement toPreparedStatement(Connection.Future future) { @@ -214,10 +214,10 @@ private PreparedStatement toPreparedStatement(Connection.Future future) { static class Manager implements Host.StateListener { - private final Cluster cluster; + final Cluster cluster; - private final ConcurrentMap pools; - private final LoadBalancingPolicy loadBalancer; + final ConcurrentMap pools; + final LoadBalancingPolicy loadBalancer; // TODO: make that configurable final RetryPolicy retryPolicy = RetryPolicy.DefaultPolicy.INSTANCE; @@ -225,7 +225,7 @@ static class Manager implements Host.StateListener { private final HostConnectionPool.Configuration poolsConfiguration; // TODO: Make that configurable - private final long DEFAULT_CONNECTION_TIMEOUT = 3000; + final long DEFAULT_CONNECTION_TIMEOUT = 3000; public Manager(Cluster cluster, Collection hosts) { this.cluster = cluster; @@ -286,54 +286,17 @@ public void setKeyspace(String keyspace) { /** * Execute the provided request. * - * This method will find a suitable node to connect to using the {@link LoadBalancingPolicy} - * and handle host failover. - * - * @return a future on the response to the request. + * This method will find a suitable node to connect to using the + * {@link LoadBalancingPolicy} and handle host failover. */ - public void execute(Message.Request msg, Connection.ResponseCallback callback) { - - Iterator plan = loadBalancer.newQueryPlan(); - while (plan.hasNext()) { - Host host = plan.next(); - HostConnectionPool pool = pools.get(host); - if (pool == null || pool.isShutdown()) - continue; - - try { - Connection connection = pool.borrowConnection(DEFAULT_CONNECTION_TIMEOUT, TimeUnit.MILLISECONDS); - try { - connection.write(msg, callback); - return; - } finally { - pool.returnConnection(connection); - } - } catch (ConnectionException e) { - logger.trace("Error: " + e.getMessage()); - // If we have any problem with the connection, just move to the next node. - // If that happens during the write of the request, the pool act on the error during returnConnection. 
- } - } - // TODO: Change that to a "NoAvailableHostException" - callback.onException(new RuntimeException()); + public void execute(Connection.ResponseCallback callback) { + new RetryingCallback(this, callback).sendRequest(); } - // TODO: this will need to evolve a bit for async calls - public ResultSet.Future executeWithRetry(Message.Request msg) { - ResultSet.Future future = new ResultSet.Future(); - execute(msg, new RetryingCallback(this, msg, future)); + public ResultSet.Future executeQuery(Message.Request msg) { + ResultSet.Future future = new ResultSet.Future(msg); + execute(future); return future; } - - // TODO: This doesn't work for prepared statement, fix it. - public void retry(final RetryingCallback retryCallback) { - // TODO: retry callback on executor (to avoid doing write on IO - // thread) - cluster.manager.executor.execute(new Runnable() { - public void run() { - execute(retryCallback.getRequest(), retryCallback); - } - }); - } } } diff --git a/driver-core/src/main/java/com/datastax/driver/core/transport/Connection.java b/driver-core/src/main/java/com/datastax/driver/core/transport/Connection.java index f3f4e47444f..8d0ec136c99 100644 --- a/driver-core/src/main/java/com/datastax/driver/core/transport/Connection.java +++ b/driver-core/src/main/java/com/datastax/driver/core/transport/Connection.java @@ -188,12 +188,14 @@ public void setKeyspace(String keyspace) throws ConnectionException { * @throws TransportException if an I/O error while sending the request */ public Future write(Message.Request request) throws ConnectionException { - Future future = new Future(); - write(request, future); + Future future = new Future(request); + write(future); return future; } - public void write(Message.Request request, ResponseCallback callback) throws ConnectionException { + public void write(ResponseCallback callback) throws ConnectionException { + + Message.Request request = callback.request(); if (isDefunct) throw new ConnectionException(address, "Write attempt on defunct connection"); @@ -217,6 +219,9 @@ public void write(Message.Request request, ResponseCallback callback) throws Con if (!writeFuture.isSuccess()) { logger.debug(String.format("[%s] Error writting request %s", name, request)); + // Remove this handler from the dispatcher so it don't get notified of the error + // twice (we will fail that method already) + dispatcher.removeHandler(handler.streamId); ConnectionException ce; if (writeFuture.getCause() instanceof java.nio.channels.ClosedChannelException) { @@ -333,6 +338,10 @@ public void add(ResponseHandler handler) { assert old == null; } + public void removeHandler(int streamId) { + pending.remove(streamId); + } + @Override public void messageReceived(ChannelHandlerContext ctx, MessageEvent e) { logger.trace(String.format("[%s] received ", e.getMessage())); @@ -382,18 +391,27 @@ public void errorOutAllHandler(ConnectionException ce) { // TODO: Do we really need that after all? 
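+    // (It is still what the synchronous write(Message.Request) overload above
+    // returns: a ResponseCallback that simply completes itself with the response.)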
public static class Future extends SimpleFuture implements ResponseCallback { - @Override + private final Message.Request request; + + public Future(Message.Request request) { + this.request = request; + } + + public Message.Request request() { + return request; + } + public void onSet(Message.Response response) { super.set(response); } - @Override public void onException(Exception exception) { super.setException(exception); } } public interface ResponseCallback { + public Message.Request request(); public void onSet(Message.Response response); public void onException(Exception exception); } diff --git a/driver-core/src/test/java/com/datastax/driver/core/SessionTest.java b/driver-core/src/test/java/com/datastax/driver/core/SessionTest.java index b545590a80b..d59def5df9e 100644 --- a/driver-core/src/test/java/com/datastax/driver/core/SessionTest.java +++ b/driver-core/src/test/java/com/datastax/driver/core/SessionTest.java @@ -98,53 +98,58 @@ public static void classSetUp() { // assertEquals(0.2, r.getFloat("f"), 0.01); //} + //@Test + //public void CollectionsTest() throws Exception { + + // Cluster cluster = new Cluster.Builder().addContactPoints("127.0.0.1").build(); + // Session session = cluster.connect(); + + // try { + // session.execute("CREATE KEYSPACE test_ks WITH replication = { 'class' : 'SimpleStrategy', 'replication_factor' : 1 }"); + // // We should deal with that sleep + // try { Thread.sleep(1000); } catch (Exception e) {} + // session.use("test_ks"); + // session.execute("CREATE TABLE test (k text PRIMARY KEY, l list, s set, m map)"); + // } catch (Exception e) { + // // Skip if already created + // session.use("test_ks"); + // } + + // session.execute("INSERT INTO test (k, l, s, m) VALUES ('k', [3, 2, 1], { 3, 2, 1}, { 0 : 0, 1 : 1 })"); + // for (CQLRow row : session.execute("SELECT * FROM test")) { + // List l = (List)row.getList("l"); + // Set s = (Set)row.getSet("s"); + // Map m = (Map)row.getMap("m"); + + // System.out.println("l = " + l); + // System.out.println("s = " + s); + // System.out.println("m = " + m); + // } + //} + @Test - public void CollectionsTest() throws Exception { + public void MultiNodeContinuousExecuteTest() throws Exception { Cluster cluster = new Cluster.Builder().addContactPoints("127.0.0.1").build(); Session session = cluster.connect(); try { - session.execute("CREATE KEYSPACE test_ks WITH replication = { 'class' : 'SimpleStrategy', 'replication_factor' : 1 }"); + session.execute("CREATE KEYSPACE test_ks WITH replication = { 'class' : 'SimpleStrategy', 'replication_factor' : 2 }"); // We should deal with that sleep try { Thread.sleep(1000); } catch (Exception e) {} session.use("test_ks"); - session.execute("CREATE TABLE test (k text PRIMARY KEY, l list, s set, m map)"); + session.execute("CREATE TABLE test (k text PRIMARY KEY, i int, f float)"); } catch (Exception e) { - // Skip if already created + // Skip if already exists session.use("test_ks"); } - session.execute("INSERT INTO test (k, l, s, m) VALUES ('k', [3, 2, 1], { 3, 2, 1}, { 0 : 0, 1 : 1 })"); - for (CQLRow row : session.execute("SELECT * FROM test")) { - List l = (List)row.getList("l"); - Set s = (Set)row.getSet("s"); - Map m = (Map)row.getMap("m"); + //System.out.println("--- Schema ---\n" + cluster.getMetadata()); - System.out.println("l = " + l); - System.out.println("s = " + s); - System.out.println("m = " + m); + for (int i = 0; i < 10000; ++i) { + System.out.println(">> " + i); + session.execute(String.format("INSERT INTO test (k, i, f) VALUES ('k%d', %d, %d.2)", i, i, i)); 
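+            // throttles to roughly one insert per second so nodes can be joined or
+            // removed while the loop keeps executing queries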
+ Thread.currentThread().sleep(1000); } } - - //@Test - //public void MultiNodeContinuousExecuteTest() throws Exception { - - // Cluster cluster = new Cluster.Builder().addContactPoints("127.0.0.1", "127.0.0.2").build(); - // Session session = cluster.connect(); - - // session.execute("CREATE KEYSPACE test_ks WITH replication = { 'class' : 'SimpleStrategy', 'replication_factor' : 2 }"); - // // We should deal with that sleep - // try { Thread.sleep(1000); } catch (Exception e) {} - // session.use("test_ks"); - // session.execute("CREATE TABLE test (k text PRIMARY KEY, i int, f float)"); - - // //System.out.println("--- Schema ---\n" + cluster.getMetadata()); - - // for (int i = 0; i < 10000; ++i) { - // System.out.println(">> " + i); - // session.execute(String.format("INSERT INTO test (k, i, f) VALUES ('k%d', %d, %d.2)", i, i, i)); - // Thread.currentThread().sleep(1000); - // } - //} } From 422d557d3f34745caf2411b98d57957893ed9db7 Mon Sep 17 00:00:00 2001 From: Sylvain Lebresne Date: Fri, 21 Sep 2012 13:19:52 +0200 Subject: [PATCH 028/719] Improve error handling --- .../com/datastax/driver/core/Cluster.java | 2 - .../driver/core/ControlConnection.java | 1 + .../com/datastax/driver/core/ResultSet.java | 46 ++++++++++++--- .../driver/core/RetryingCallback.java | 22 +++++-- .../exceptions/AlreadyExistsException.java | 59 +++++++++++++++++++ .../core/exceptions/DriverException.java | 23 ++++++++ .../core/exceptions/DriverInternalError.java | 27 +++++++++ .../exceptions/DriverUncheckedException.java | 23 ++++++++ .../exceptions/InvalidQueryException.java | 11 ++++ .../exceptions/NoHostAvailableException.java | 43 ++++++++++++++ .../exceptions/QueryExecutionException.java | 14 +++++ .../exceptions/QueryTimeoutException.java | 36 +++++++++++ .../exceptions/QueryValidationException.java | 12 ++++ .../core/exceptions/ReadTimeoutException.java | 28 +++++++++ .../driver/core/exceptions/SyntaxError.java | 11 ++++ .../core/exceptions/TruncateException.java | 14 +++++ .../exceptions/UnauthorizedException.java | 12 ++++ .../core/exceptions/UnavailableException.java | 33 +++++++++++ .../exceptions/WriteTimeoutException.java | 16 +++++ 19 files changed, 419 insertions(+), 14 deletions(-) create mode 100644 driver-core/src/main/java/com/datastax/driver/core/exceptions/AlreadyExistsException.java create mode 100644 driver-core/src/main/java/com/datastax/driver/core/exceptions/DriverException.java create mode 100644 driver-core/src/main/java/com/datastax/driver/core/exceptions/DriverInternalError.java create mode 100644 driver-core/src/main/java/com/datastax/driver/core/exceptions/DriverUncheckedException.java create mode 100644 driver-core/src/main/java/com/datastax/driver/core/exceptions/InvalidQueryException.java create mode 100644 driver-core/src/main/java/com/datastax/driver/core/exceptions/NoHostAvailableException.java create mode 100644 driver-core/src/main/java/com/datastax/driver/core/exceptions/QueryExecutionException.java create mode 100644 driver-core/src/main/java/com/datastax/driver/core/exceptions/QueryTimeoutException.java create mode 100644 driver-core/src/main/java/com/datastax/driver/core/exceptions/QueryValidationException.java create mode 100644 driver-core/src/main/java/com/datastax/driver/core/exceptions/ReadTimeoutException.java create mode 100644 driver-core/src/main/java/com/datastax/driver/core/exceptions/SyntaxError.java create mode 100644 driver-core/src/main/java/com/datastax/driver/core/exceptions/TruncateException.java create mode 100644 
driver-core/src/main/java/com/datastax/driver/core/exceptions/UnauthorizedException.java create mode 100644 driver-core/src/main/java/com/datastax/driver/core/exceptions/UnavailableException.java create mode 100644 driver-core/src/main/java/com/datastax/driver/core/exceptions/WriteTimeoutException.java diff --git a/driver-core/src/main/java/com/datastax/driver/core/Cluster.java b/driver-core/src/main/java/com/datastax/driver/core/Cluster.java index 112077d498a..6870e7d02b1 100644 --- a/driver-core/src/main/java/com/datastax/driver/core/Cluster.java +++ b/driver-core/src/main/java/com/datastax/driver/core/Cluster.java @@ -406,8 +406,6 @@ public void run() { } } }); - } - } } diff --git a/driver-core/src/main/java/com/datastax/driver/core/ControlConnection.java b/driver-core/src/main/java/com/datastax/driver/core/ControlConnection.java index 862fb24a05e..0fc4e968665 100644 --- a/driver-core/src/main/java/com/datastax/driver/core/ControlConnection.java +++ b/driver-core/src/main/java/com/datastax/driver/core/ControlConnection.java @@ -62,6 +62,7 @@ public void tryConnect(Host host) throws ConnectionException { List evs = Arrays.asList(new Event.Type[]{ Event.Type.TOPOLOGY_CHANGE, Event.Type.STATUS_CHANGE, + Event.Type.SCHEMA_CHANGE, }); connection.write(new RegisterMessage(evs)); diff --git a/driver-core/src/main/java/com/datastax/driver/core/ResultSet.java b/driver-core/src/main/java/com/datastax/driver/core/ResultSet.java index 4dc4a11c6dd..12d26a538b1 100644 --- a/driver-core/src/main/java/com/datastax/driver/core/ResultSet.java +++ b/driver-core/src/main/java/com/datastax/driver/core/ResultSet.java @@ -10,6 +10,7 @@ import org.apache.cassandra.transport.messages.ErrorMessage; import org.apache.cassandra.transport.messages.ResultMessage; +import com.datastax.driver.core.exceptions.*; import com.datastax.driver.core.transport.Connection; import com.datastax.driver.core.utils.SimpleFuture; @@ -53,7 +54,8 @@ static ResultSet fromMessage(ResultMessage msg) { case PREPARED: throw new RuntimeException("Prepared statement received when a ResultSet was expected"); default: - throw new AssertionError(); + logger.error(String.format("Received unknow result type '%s'; returning empty result set", msg.kind)); + return EMPTY; } } @@ -180,12 +182,42 @@ public void onException(Exception exception) { // TODO: Convert to some internal exception private Exception convertException(org.apache.cassandra.exceptions.TransportException te) { - if (te instanceof ServerError) { - return new RuntimeException("An unexpected error occured server side: " + te.getMessage()); - } else if (te instanceof ProtocolException) { - return new RuntimeException("An unexpected protocol error occured. This is a bug in this library, please report: " + te.getMessage()); - } else { - return new RuntimeException(te.getMessage()); + switch (te.code()) { + case SERVER_ERROR: + return new DriverInternalError("An unexpected error occured server side: " + te.getMessage()); + case PROTOCOL_ERROR: + return new DriverInternalError("An unexpected protocol error occured. 
This is a bug in this library, please report: " + te.getMessage()); + case UNAVAILABLE: + org.apache.cassandra.exceptions.UnavailableException ue = (org.apache.cassandra.exceptions.UnavailableException)te; + return new UnavailableException(ConsistencyLevel.from(ue.consistency), ue.required, ue.alive); + case OVERLOADED: + // TODO: Catch that so that we retry another node + return new DriverInternalError("Queried host was overloaded; this shouldn't happen, another node should have been tried"); + case IS_BOOTSTRAPPING: + // TODO: Catch that so that we retry another node + return new DriverInternalError("Queried host was boostrapping; this shouldn't happen, another node should have been tried"); + case TRUNCATE_ERROR: + return new TruncateException(te.getMessage()); + case WRITE_TIMEOUT: + org.apache.cassandra.exceptions.WriteTimeoutException wte = (org.apache.cassandra.exceptions.WriteTimeoutException)te; + return new WriteTimeoutException(ConsistencyLevel.from(wte.consistency), wte.received, wte.blockFor); + case READ_TIMEOUT: + org.apache.cassandra.exceptions.ReadTimeoutException rte = (org.apache.cassandra.exceptions.ReadTimeoutException)te; + return new ReadTimeoutException(ConsistencyLevel.from(rte.consistency), rte.received, rte.blockFor, rte.dataPresent); + case SYNTAX_ERROR: + return new SyntaxError(te.getMessage()); + case UNAUTHORIZED: + return new UnauthorizedException(te.getMessage()); + case INVALID: + return new InvalidQueryException(te.getMessage()); + case CONFIG_ERROR: + // TODO: I don't know if it's worth having a specific exception for that + return new InvalidQueryException(te.getMessage()); + case ALREADY_EXISTS: + org.apache.cassandra.exceptions.AlreadyExistsException aee = (org.apache.cassandra.exceptions.AlreadyExistsException)te; + return new AlreadyExistsException(aee.ksName, aee.cfName); + default: + return new DriverInternalError("Unknown error return code: " + te.code()); } } } diff --git a/driver-core/src/main/java/com/datastax/driver/core/RetryingCallback.java b/driver-core/src/main/java/com/datastax/driver/core/RetryingCallback.java index 885e6732bed..fcb9f877f1b 100644 --- a/driver-core/src/main/java/com/datastax/driver/core/RetryingCallback.java +++ b/driver-core/src/main/java/com/datastax/driver/core/RetryingCallback.java @@ -1,8 +1,12 @@ package com.datastax.driver.core; +import java.net.InetSocketAddress; import java.util.Iterator; +import java.util.HashMap; +import java.util.Map; import java.util.concurrent.TimeUnit; +import com.datastax.driver.core.exceptions.*; import com.datastax.driver.core.transport.*; import com.datastax.driver.core.pool.HostConnectionPool; import com.datastax.driver.core.utils.SimpleFuture; @@ -36,6 +40,8 @@ class RetryingCallback implements Connection.ResponseCallback { private final boolean isQuery; private volatile int queryRetries; + private volatile Map errors; + public RetryingCallback(Session.Manager manager, Connection.ResponseCallback callback) { this.manager = manager; this.callback = callback; @@ -51,8 +57,7 @@ public void sendRequest() { if (query(host)) return; } - // TODO: Change that to a "NoAvailableHostException" - callback.onException(new RuntimeException()); + callback.onException(new NoHostAvailableException(errors)); } private boolean query(Host host) { @@ -70,12 +75,19 @@ private boolean query(Host host) { pool.returnConnection(connection); } } catch (ConnectionException e) { - logger.trace("Error: " + e.getMessage()); - // If we have any problem with the connection, just move to the next node. 
+ // If we have any problem with the connection, move to the next node. + logError(e); return false; } } + private void logError(ConnectionException e) { + logger.debug(String.format("Error querying %s, trying next host (error is: %s)", e.address, e.getMessage())); + if (errors == null) + errors = new HashMap(); + errors.put(e.address, e.getMessage()); + } + private void retry(final boolean retryCurrent) { final Host h = current; @@ -152,7 +164,7 @@ public void onSet(Message.Response response) { public void onException(Exception exception) { if (exception instanceof ConnectionException) { - logger.debug(String.format("Error sending request to %s, retrying with next host", ((ConnectionException)exception).address)); + logError((ConnectionException)exception); retry(false); return; } diff --git a/driver-core/src/main/java/com/datastax/driver/core/exceptions/AlreadyExistsException.java b/driver-core/src/main/java/com/datastax/driver/core/exceptions/AlreadyExistsException.java new file mode 100644 index 00000000000..ad4df45a35e --- /dev/null +++ b/driver-core/src/main/java/com/datastax/driver/core/exceptions/AlreadyExistsException.java @@ -0,0 +1,59 @@ +package com.datastax.driver.core.exceptions; + +/** + * Exception thrown when a query attemps to create a keyspace or table that already exists. + */ +public class AlreadyExistsException extends QueryValidationException { + + private final String keyspace; + private final String table; + + public AlreadyExistsException(String keyspace, String table) { + super(makeMsg(keyspace, table)); + this.keyspace = keyspace; + this.table = table; + } + + private static String makeMsg(String keyspace, String table) { + if (table.isEmpty()) + return String.format("Keyspace %s already exists", keyspace); + else + return String.format("Table %s.%s already exists", keyspace, table); + } + + /** + * Returns whether the query yielding this exception was a table creation + * attempt. + * + * @return {@code true} if this exception is raised following a table + * creation attempt, {@code false} if it was a keyspace creation attempt. + */ + public boolean wasTableCreation() { + return !table.isEmpty(); + } + + /** + * The name of keyspace that either already exists or is home to the table + * that already exists. + * + * @return a keyspace name that is either the keyspace whose creation + * attempt failed because a keyspace of the same name already exists (in + * that case, {@link #table} will return {@code null}), or the keyspace of + * the table creation attempt (in which case {@link #table} will return the + * name of said table). + */ + public String keyspace() { + return keyspace; + } + + /** + * If the failed creation was a table creation, the name of the table that already exists. + * + * @return the name of table whose creation attempt failed because a table + * of this name already exists, or {@code null} if the query was a keyspace + * creation query. + */ + public String table() { + return table.isEmpty() ? null : table; + } +} diff --git a/driver-core/src/main/java/com/datastax/driver/core/exceptions/DriverException.java b/driver-core/src/main/java/com/datastax/driver/core/exceptions/DriverException.java new file mode 100644 index 00000000000..1be49775ea6 --- /dev/null +++ b/driver-core/src/main/java/com/datastax/driver/core/exceptions/DriverException.java @@ -0,0 +1,23 @@ +package com.datastax.driver.core.exceptions; + +/** + * Top level class for (checked) exceptions thrown by the driver. 
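+ *
+ * Checked exceptions such as {@link NoHostAvailableException} extend this
+ * class; unchecked ones extend {@code DriverUncheckedException} instead.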
+ */ +public class DriverException extends Exception { + + public DriverException() { + super(); + } + + public DriverException(String message) { + super(message); + } + + public DriverException(Throwable cause) { + super(cause); + } + + public DriverException(String message, Throwable cause) { + super(message, cause); + } +} diff --git a/driver-core/src/main/java/com/datastax/driver/core/exceptions/DriverInternalError.java b/driver-core/src/main/java/com/datastax/driver/core/exceptions/DriverInternalError.java new file mode 100644 index 00000000000..1a3855a8335 --- /dev/null +++ b/driver-core/src/main/java/com/datastax/driver/core/exceptions/DriverInternalError.java @@ -0,0 +1,27 @@ +package com.datastax.driver.core.exceptions; + +/** + * An unexpected error happened internally. + * + * This should never be raise and indicates a bug (either in the driver or in + * Cassandra). + */ +public class DriverInternalError extends RuntimeException { + + public DriverInternalError() { + super(); + } + + public DriverInternalError(String message) { + super(message); + } + + public DriverInternalError(Throwable cause) { + super(cause); + } + + public DriverInternalError(String message, Throwable cause) { + super(message, cause); + } + +} diff --git a/driver-core/src/main/java/com/datastax/driver/core/exceptions/DriverUncheckedException.java b/driver-core/src/main/java/com/datastax/driver/core/exceptions/DriverUncheckedException.java new file mode 100644 index 00000000000..ba2a01a2a9f --- /dev/null +++ b/driver-core/src/main/java/com/datastax/driver/core/exceptions/DriverUncheckedException.java @@ -0,0 +1,23 @@ +package com.datastax.driver.core.exceptions; + +/** + * Top level class for unchecked exceptions thrown by the driver. + */ +public class DriverUncheckedException extends Exception { + + public DriverUncheckedException() { + super(); + } + + public DriverUncheckedException(String message) { + super(message); + } + + public DriverUncheckedException(Throwable cause) { + super(cause); + } + + public DriverUncheckedException(String message, Throwable cause) { + super(message, cause); + } +} diff --git a/driver-core/src/main/java/com/datastax/driver/core/exceptions/InvalidQueryException.java b/driver-core/src/main/java/com/datastax/driver/core/exceptions/InvalidQueryException.java new file mode 100644 index 00000000000..366cae04265 --- /dev/null +++ b/driver-core/src/main/java/com/datastax/driver/core/exceptions/InvalidQueryException.java @@ -0,0 +1,11 @@ +package com.datastax.driver.core.exceptions; + +/** + * Indicates a syntaxcally correct but invalid query. + */ +public class InvalidQueryException extends QueryValidationException { + + public InvalidQueryException(String msg) { + super(msg); + } +} diff --git a/driver-core/src/main/java/com/datastax/driver/core/exceptions/NoHostAvailableException.java b/driver-core/src/main/java/com/datastax/driver/core/exceptions/NoHostAvailableException.java new file mode 100644 index 00000000000..51732b4f45b --- /dev/null +++ b/driver-core/src/main/java/com/datastax/driver/core/exceptions/NoHostAvailableException.java @@ -0,0 +1,43 @@ +package com.datastax.driver.core.exceptions; + +import java.net.InetSocketAddress; +import java.util.HashMap; +import java.util.Map; + +/** + * Exception thrown when a query cannot be performed because no host are + * available. + * + * This exception is thrown if + *

+ * <ul>
+ *   <li>either no host in the cluster was live at the moment of the query, or</li>
+ *   <li>all hosts that were tried failed due to a connection problem.</li>
+ * </ul>
+ * + * For debugging purpose, the list of hosts that have been tried along with the + * failure cause can be retrieved using the {@link #errors} method. + */ +public class NoHostAvailableException extends DriverException { + + private final Map errors; + + public NoHostAvailableException(Map errors) { + this.errors = errors; + } + + /** + * Return the hosts tried along with descriptions of the error encountered + * while trying them. + * + * @return a map containing for each tried host a description of the error + * triggered when trying it. + */ + public Map errors() { + return new HashMap(errors); + } + + @Override + public String getMessage() { + return String.format("All host tried for query are in error (tried: %s)", errors.keySet()); + } +} diff --git a/driver-core/src/main/java/com/datastax/driver/core/exceptions/QueryExecutionException.java b/driver-core/src/main/java/com/datastax/driver/core/exceptions/QueryExecutionException.java new file mode 100644 index 00000000000..fb4c7c6dd7e --- /dev/null +++ b/driver-core/src/main/java/com/datastax/driver/core/exceptions/QueryExecutionException.java @@ -0,0 +1,14 @@ +package com.datastax.driver.core.exceptions; + +/** + * Exception related to the execution of a query. + * + * This correspond to the exception that Cassandra throw when a (valid) query + * cannot be executed (TimeoutException, UnavailableException, ...). + */ +public class QueryExecutionException extends DriverException { + + protected QueryExecutionException(String msg) { + super(msg); + } +} diff --git a/driver-core/src/main/java/com/datastax/driver/core/exceptions/QueryTimeoutException.java b/driver-core/src/main/java/com/datastax/driver/core/exceptions/QueryTimeoutException.java new file mode 100644 index 00000000000..4c5d5ecfaa9 --- /dev/null +++ b/driver-core/src/main/java/com/datastax/driver/core/exceptions/QueryTimeoutException.java @@ -0,0 +1,36 @@ +package com.datastax.driver.core.exceptions; + +import com.datastax.driver.core.ConsistencyLevel; + +/** + * A Cassandra timeout during a query. + * + * Such an exception is returned when the query has been tried by Cassandra but + * cannot be achieved with the requested consistency level within the rpc + * timeout set for Cassandra. + */ +public class QueryTimeoutException extends QueryExecutionException { + + private final ConsistencyLevel consistency; + private final int received; + private final int required; + + protected QueryTimeoutException(String msg, ConsistencyLevel consistency, int received, int required) { + super(msg); + this.consistency = consistency; + this.received = received; + this.required = required; + } + + public ConsistencyLevel consistencyLevel() { + return consistency; + } + + public int receivedAcknowledgements() { + return received; + } + + public int requiredAcknowledgements() { + return required; + } +} diff --git a/driver-core/src/main/java/com/datastax/driver/core/exceptions/QueryValidationException.java b/driver-core/src/main/java/com/datastax/driver/core/exceptions/QueryValidationException.java new file mode 100644 index 00000000000..94aa8335a19 --- /dev/null +++ b/driver-core/src/main/java/com/datastax/driver/core/exceptions/QueryValidationException.java @@ -0,0 +1,12 @@ +package com.datastax.driver.core.exceptions; + +/** + * An exception indicating that a query cannot be executed because it is + * incorrect syntaxically, invalid, unauthorized or any other reason. 
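+ *
+ * Catching this type covers {@link SyntaxError}, {@link InvalidQueryException},
+ * {@link UnauthorizedException} and {@link AlreadyExistsException} at once.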
+ */ +public class QueryValidationException extends DriverUncheckedException { + + protected QueryValidationException(String msg) { + super(msg); + } +} diff --git a/driver-core/src/main/java/com/datastax/driver/core/exceptions/ReadTimeoutException.java b/driver-core/src/main/java/com/datastax/driver/core/exceptions/ReadTimeoutException.java new file mode 100644 index 00000000000..8c9815ccc26 --- /dev/null +++ b/driver-core/src/main/java/com/datastax/driver/core/exceptions/ReadTimeoutException.java @@ -0,0 +1,28 @@ +package com.datastax.driver.core.exceptions; + +import com.datastax.driver.core.ConsistencyLevel; + +/** + * A Cassandra timeout during a read query. + */ +public class ReadTimeoutException extends QueryTimeoutException { + + private final boolean dataPresent; + + public ReadTimeoutException(ConsistencyLevel consistency, int received, int required, boolean dataPresent) { + super(String.format("Cassandra timeout during read query at consistency %s (%s)", consistency, formatDetails(received, required, dataPresent)), + consistency, + received, + required); + this.dataPresent = dataPresent; + } + + private static String formatDetails(int received, int required, boolean dataPresent) { + if (received < required) + return String.format("%d replica responded over %d required", received, required); + else if (!dataPresent) + return String.format("the replica queried for data didn't responded"); + else + return String.format("timeout while waiting for repair of inconsistent replica"); + } +} diff --git a/driver-core/src/main/java/com/datastax/driver/core/exceptions/SyntaxError.java b/driver-core/src/main/java/com/datastax/driver/core/exceptions/SyntaxError.java new file mode 100644 index 00000000000..dd5335cdcf1 --- /dev/null +++ b/driver-core/src/main/java/com/datastax/driver/core/exceptions/SyntaxError.java @@ -0,0 +1,11 @@ +package com.datastax.driver.core.exceptions; + +/** + * Indicates a syntax error in a query. + */ +public class SyntaxError extends QueryValidationException { + + public SyntaxError(String msg) { + super(msg); + } +} diff --git a/driver-core/src/main/java/com/datastax/driver/core/exceptions/TruncateException.java b/driver-core/src/main/java/com/datastax/driver/core/exceptions/TruncateException.java new file mode 100644 index 00000000000..282e4c6add3 --- /dev/null +++ b/driver-core/src/main/java/com/datastax/driver/core/exceptions/TruncateException.java @@ -0,0 +1,14 @@ +package com.datastax.driver.core.exceptions; + +/** + * Error during a truncation operation. + */ +// TODO: should that extend QueryExecutionException. In theory yes, but that's +// probably not part of what you want to deal with when you catch +// QueryExecutionException? +public class TruncateException extends DriverException { + + public TruncateException(String msg) { + super(msg); + } +} diff --git a/driver-core/src/main/java/com/datastax/driver/core/exceptions/UnauthorizedException.java b/driver-core/src/main/java/com/datastax/driver/core/exceptions/UnauthorizedException.java new file mode 100644 index 00000000000..d1b5e710c5a --- /dev/null +++ b/driver-core/src/main/java/com/datastax/driver/core/exceptions/UnauthorizedException.java @@ -0,0 +1,12 @@ +package com.datastax.driver.core.exceptions; + +/** + * Indicates that a query cannot be performed due to the authorisation + * restrictions of the logged user. 
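+ * (That is, the query itself is valid but the authenticated user lacks the
+ * permission it requires.)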
+ */ +public class UnauthorizedException extends QueryValidationException { + + public UnauthorizedException(String msg) { + super(msg); + } +} diff --git a/driver-core/src/main/java/com/datastax/driver/core/exceptions/UnavailableException.java b/driver-core/src/main/java/com/datastax/driver/core/exceptions/UnavailableException.java new file mode 100644 index 00000000000..603580ee2a2 --- /dev/null +++ b/driver-core/src/main/java/com/datastax/driver/core/exceptions/UnavailableException.java @@ -0,0 +1,33 @@ +package com.datastax.driver.core.exceptions; + +import com.datastax.driver.core.ConsistencyLevel; + +/** + * Exception thrown when the coordinator knows there is not enough replica + * alive to perform a query with the requested consistency level. + */ +public class UnavailableException extends QueryExecutionException { + + private final ConsistencyLevel consistency; + private final int required; + private final int alive; + + public UnavailableException(ConsistencyLevel consistency, int required, int alive) { + super(String.format("Not enough replica available for query at consistency %s (%d required but only %d alive)", consistency, required, alive)); + this.consistency = consistency; + this.required = required; + this.alive = alive; + } + + public ConsistencyLevel consistency() { + return consistency; + } + + public int requiredReplicas() { + return required; + } + + public int aliveReplicas() { + return alive; + } +} diff --git a/driver-core/src/main/java/com/datastax/driver/core/exceptions/WriteTimeoutException.java b/driver-core/src/main/java/com/datastax/driver/core/exceptions/WriteTimeoutException.java new file mode 100644 index 00000000000..66d16a8803a --- /dev/null +++ b/driver-core/src/main/java/com/datastax/driver/core/exceptions/WriteTimeoutException.java @@ -0,0 +1,16 @@ +package com.datastax.driver.core.exceptions; + +import com.datastax.driver.core.ConsistencyLevel; + +/** + * A Cassandra timeout during a write query. 
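+ *
+ * It is raised when the coordinator received fewer acknowledgements than the
+ * consistency level requires within the write timeout.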
+ */ +public class WriteTimeoutException extends QueryTimeoutException { + + public WriteTimeoutException(ConsistencyLevel consistency, int received, int required) { + super(String.format("Cassandra timeout during write query at consitency %s (%d replica acknowledged the write over %d required)", consistency, received, required), + consistency, + received, + required); + } +} From 38b76830b0c031fc890b7c087b48e1be8eee967e Mon Sep 17 00:00:00 2001 From: Sylvain Lebresne Date: Wed, 26 Sep 2012 17:56:26 +0200 Subject: [PATCH 029/719] Better handling of setting the current keyspace --- .../com/datastax/driver/core/Cluster.java | 19 ++++++++-- .../datastax/driver/core/ClusterMetadata.java | 5 +-- .../driver/core/ControlConnection.java | 8 ++-- .../driver/core/PreparedStatement.java | 6 +-- .../com/datastax/driver/core/ResultSet.java | 18 +++++++-- .../com/datastax/driver/core/Session.java | 37 ++++++++++--------- .../exceptions/DriverUncheckedException.java | 2 +- .../driver/core/transport/Connection.java | 18 ++++----- 8 files changed, 64 insertions(+), 49 deletions(-) diff --git a/driver-core/src/main/java/com/datastax/driver/core/Cluster.java b/driver-core/src/main/java/com/datastax/driver/core/Cluster.java index 6870e7d02b1..620332eee09 100644 --- a/driver-core/src/main/java/com/datastax/driver/core/Cluster.java +++ b/driver-core/src/main/java/com/datastax/driver/core/Cluster.java @@ -10,6 +10,7 @@ import org.apache.cassandra.transport.messages.EventMessage; import org.apache.cassandra.transport.messages.QueryMessage; +import com.datastax.driver.core.exceptions.*; import com.datastax.driver.core.transport.Connection; import com.datastax.driver.core.transport.ConnectionException; import com.datastax.driver.core.utils.SimpleConvictionPolicy; @@ -95,9 +96,14 @@ public Session connect(AuthInfo authInfo) { * {@code Session}. This can be later changed using {@link Session#use}. * @return a new session on this cluster sets to keyspace * {@code keyspaceName}. + * + * @throws NoHostAvailableException if no host can be contacted to set the + * {@code keyspace}. */ - public Session connect(String keyspace) { - return connect().use(keyspace); + public Session connect(String keyspace) throws NoHostAvailableException { + Session session = connect(); + session.manager.setKeyspace(keyspace); + return session; } /** @@ -107,9 +113,14 @@ public Session connect(String keyspace) { * Cassandra nodes. * @return a new session on this cluster sets to keyspace * {@code keyspaceName}. + * + * @throws NoHostAvailableException if no host can be contacted to set the + * {@code keyspace}. 
*/ - public Session connect(String keyspace, AuthInfo authInfo) { - return connect(authInfo).use(keyspace); + public Session connect(String keyspace, AuthInfo authInfo) throws NoHostAvailableException { + Session session = connect(authInfo); + session.manager.setKeyspace(keyspace); + return session; } /** diff --git a/driver-core/src/main/java/com/datastax/driver/core/ClusterMetadata.java b/driver-core/src/main/java/com/datastax/driver/core/ClusterMetadata.java index 03efc807cc7..23e32d2a18f 100644 --- a/driver-core/src/main/java/com/datastax/driver/core/ClusterMetadata.java +++ b/driver-core/src/main/java/com/datastax/driver/core/ClusterMetadata.java @@ -12,7 +12,7 @@ public class ClusterMetadata { private final Cluster.Manager cluster; private final ConcurrentMap hosts = new ConcurrentHashMap(); - private final Map keyspaces = new HashMap(); + private final ConcurrentMap keyspaces = new ConcurrentHashMap(); ClusterMetadata(Cluster.Manager cluster) { this.cluster = cluster; @@ -20,8 +20,6 @@ public class ClusterMetadata { void rebuildSchema(ResultSet ks, ResultSet cfs, ResultSet cols) { - // TODO: we need to switch the keyspaces map completely - Map> cfDefs = new HashMap>(); Map>> colsDefs = new HashMap>>(); @@ -68,7 +66,6 @@ void rebuildSchema(ResultSet ks, ResultSet cfs, ResultSet cols) { } } } - keyspaces.put(ksName, ksm); } } diff --git a/driver-core/src/main/java/com/datastax/driver/core/ControlConnection.java b/driver-core/src/main/java/com/datastax/driver/core/ControlConnection.java index 0fc4e968665..e966a9e4baf 100644 --- a/driver-core/src/main/java/com/datastax/driver/core/ControlConnection.java +++ b/driver-core/src/main/java/com/datastax/driver/core/ControlConnection.java @@ -62,7 +62,7 @@ public void tryConnect(Host host) throws ConnectionException { List evs = Arrays.asList(new Event.Type[]{ Event.Type.TOPOLOGY_CHANGE, Event.Type.STATUS_CHANGE, - Event.Type.SCHEMA_CHANGE, + //Event.Type.SCHEMA_CHANGE, }); connection.write(new RegisterMessage(evs)); @@ -78,9 +78,9 @@ public void refreshSchema() { // Make sure we're up to date on metadata try { - ResultSet.Future ksFuture = new ResultSet.Future(new QueryMessage(SELECT_KEYSPACES)); - ResultSet.Future cfFuture = new ResultSet.Future(new QueryMessage(SELECT_COLUMN_FAMILIES)); - ResultSet.Future colsFuture = new ResultSet.Future(new QueryMessage(SELECT_COLUMNS)); + ResultSet.Future ksFuture = new ResultSet.Future(null, new QueryMessage(SELECT_KEYSPACES)); + ResultSet.Future cfFuture = new ResultSet.Future(null, new QueryMessage(SELECT_COLUMN_FAMILIES)); + ResultSet.Future colsFuture = new ResultSet.Future(null, new QueryMessage(SELECT_COLUMNS)); connection.write(ksFuture); connection.write(cfFuture); connection.write(colsFuture); diff --git a/driver-core/src/main/java/com/datastax/driver/core/PreparedStatement.java b/driver-core/src/main/java/com/datastax/driver/core/PreparedStatement.java index d1b89ca7b3b..6674f0c6eb6 100644 --- a/driver-core/src/main/java/com/datastax/driver/core/PreparedStatement.java +++ b/driver-core/src/main/java/com/datastax/driver/core/PreparedStatement.java @@ -14,9 +14,9 @@ public class PreparedStatement { final Columns metadata; - final int id; + final byte[] id; - private PreparedStatement(Columns metadata, int id) { + private PreparedStatement(Columns metadata, byte[] id) { this.metadata = metadata; this.id = id; } @@ -29,7 +29,7 @@ static PreparedStatement fromMessage(ResultMessage msg) { for (int i = 0; i < defs.length; i++) defs[i] = 
Columns.Definition.fromTransportSpecification(pmsg.metadata.names.get(i)); - return new PreparedStatement(new Columns(defs), pmsg.statementId); + return new PreparedStatement(new Columns(defs), pmsg.statementId.bytes); case VOID: case ROWS: case SET_KEYSPACE: diff --git a/driver-core/src/main/java/com/datastax/driver/core/ResultSet.java b/driver-core/src/main/java/com/datastax/driver/core/ResultSet.java index 12d26a538b1..0382ec4e512 100644 --- a/driver-core/src/main/java/com/datastax/driver/core/ResultSet.java +++ b/driver-core/src/main/java/com/datastax/driver/core/ResultSet.java @@ -37,7 +37,7 @@ private ResultSet(Columns metadata, Queue> rows) { this.rows = rows; } - static ResultSet fromMessage(ResultMessage msg) { + private static ResultSet fromMessage(ResultMessage msg) { switch (msg.kind) { case VOID: return EMPTY; @@ -49,7 +49,6 @@ static ResultSet fromMessage(ResultMessage msg) { return new ResultSet(new Columns(defs), new ArrayDeque(r.result.rows)); case SET_KEYSPACE: - // TODO: we might want to do more with such result return EMPTY; case PREPARED: throw new RuntimeException("Prepared statement received when a ResultSet was expected"); @@ -143,9 +142,11 @@ public String toString() { public static class Future extends SimpleFuture implements Connection.ResponseCallback { + private final Session.Manager session; private final Message.Request request; - Future(Message.Request request) { + Future(Session.Manager session, Message.Request request) { + this.session = session; this.request = request; } @@ -159,7 +160,16 @@ public void onSet(Message.Response response) { try { switch (response.type) { case RESULT: - super.set(ResultSet.fromMessage((ResultMessage)response)); + ResultMessage rm = (ResultMessage)response; + if (rm.kind == ResultMessage.Kind.SET_KEYSPACE) { + // TODO: I think there is a problem if someone set + // a keyspace, then drop it. But that basically + // means we should reset the keyspace to null in that case. + + // propagate the keyspace change to other connections + session.poolsConfiguration.setKeyspace(((ResultMessage.SetKeyspace)rm).keyspace); + } + super.set(ResultSet.fromMessage(rm)); break; case ERROR: super.setException(convertException(((ErrorMessage)response).error)); diff --git a/driver-core/src/main/java/com/datastax/driver/core/Session.java b/driver-core/src/main/java/com/datastax/driver/core/Session.java index 43e64380b24..66eba1c1e3c 100644 --- a/driver-core/src/main/java/com/datastax/driver/core/Session.java +++ b/driver-core/src/main/java/com/datastax/driver/core/Session.java @@ -4,6 +4,7 @@ import java.util.*; import java.util.concurrent.*; +import com.datastax.driver.core.exceptions.*; import com.datastax.driver.core.pool.HostConnectionPool; import com.datastax.driver.core.transport.Connection; import com.datastax.driver.core.transport.ConnectionException; @@ -37,20 +38,6 @@ public class Session { this.manager = new Manager(cluster, hosts); } - /** - * Sets the current keyspace to use for this session. - * - * Note that it is up to the application to synchronize calls to this - * method with queries executed against this session. - * - * @param keyspace the name of the keyspace to set - * @return this session. - */ - public Session use(String keyspace) { - manager.setKeyspace(keyspace); - return this; - } - /** * Execute the provided query. 
* @@ -222,7 +209,7 @@ static class Manager implements Host.StateListener { // TODO: make that configurable final RetryPolicy retryPolicy = RetryPolicy.DefaultPolicy.INSTANCE; - private final HostConnectionPool.Configuration poolsConfiguration; + final HostConnectionPool.Configuration poolsConfiguration; // TODO: Make that configurable final long DEFAULT_CONNECTION_TIMEOUT = 3000; @@ -279,8 +266,22 @@ public void onRemove(Host host) { pool.shutdown(); } - public void setKeyspace(String keyspace) { - poolsConfiguration.setKeyspace(keyspace); + public void setKeyspace(String keyspace) throws NoHostAvailableException { + try { + executeQuery(new QueryMessage("use " + keyspace)).get(); + } catch (InterruptedException e) { + // TODO: do we want to handle interrupted exception in a better way? + throw new DriverInternalError("Hey! I was waiting!", e); + } catch (ExecutionException e) { + Throwable cause = e.getCause(); + // A USE query should never fail unless we cannot contact a node + if (cause instanceof NoHostAvailableException) + throw (NoHostAvailableException)cause; + else if (cause instanceof DriverUncheckedException) + throw (DriverUncheckedException)cause; + else + throw new DriverInternalError("Unexpected exception thrown", cause); + } } /** @@ -294,7 +295,7 @@ public void execute(Connection.ResponseCallback callback) { } public ResultSet.Future executeQuery(Message.Request msg) { - ResultSet.Future future = new ResultSet.Future(msg); + ResultSet.Future future = new ResultSet.Future(this, msg); execute(future); return future; } diff --git a/driver-core/src/main/java/com/datastax/driver/core/exceptions/DriverUncheckedException.java b/driver-core/src/main/java/com/datastax/driver/core/exceptions/DriverUncheckedException.java index ba2a01a2a9f..293cbf364c7 100644 --- a/driver-core/src/main/java/com/datastax/driver/core/exceptions/DriverUncheckedException.java +++ b/driver-core/src/main/java/com/datastax/driver/core/exceptions/DriverUncheckedException.java @@ -3,7 +3,7 @@ /** * Top level class for unchecked exceptions thrown by the driver. */ -public class DriverUncheckedException extends Exception { +public class DriverUncheckedException extends RuntimeException { public DriverUncheckedException() { super(); diff --git a/driver-core/src/main/java/com/datastax/driver/core/transport/Connection.java b/driver-core/src/main/java/com/datastax/driver/core/transport/Connection.java index 8d0ec136c99..811f570e43c 100644 --- a/driver-core/src/main/java/com/datastax/driver/core/transport/Connection.java +++ b/driver-core/src/main/java/com/datastax/driver/core/transport/Connection.java @@ -151,23 +151,19 @@ public void setKeyspace(String keyspace) throws ConnectionException { try { logger.trace(String.format("[%s] Setting keyspace %s", name, keyspace)); - // TODO: Handle the case where we get an error because the keyspace doesn't - // exist (and don't set the keyspace to retry later) Message.Response response = write(new QueryMessage("USE " + keyspace)).get(); switch (response.type) { case RESULT: this.keyspace = keyspace; break; - case ERROR: - // TODO: what to do when that happens? It could be that the - // node doesn't know about that keyspace even though it - // exists (new node not yet up on schemas?) 
- logger.debug(String.format("Cannot set keyspace %s (%s)", keyspace, response)); - break; default: - // TODO: handle errors (set the connection to defunct as this mean it is in a bad state) - logger.info("Got " + response); - return; + // The code set the keyspace only when a successful 'use' + // has been perform, so there shouldn't be any error here. + // It can happen however that the node we're connecting to + // is not up on the schema yet. In that case, defuncting + // the connection is not a bad choice. + defunct(new ConnectionException(address, String.format("Problem while setting keyspace, got %s as response", response))); + break; } } catch (ConnectionException e) { throw defunct(e); From f496173ed367230da170777436f38e0ae9629920 Mon Sep 17 00:00:00 2001 From: Sylvain Lebresne Date: Wed, 26 Sep 2012 19:19:20 +0200 Subject: [PATCH 030/719] Handle exception during queries --- .../com/datastax/driver/core/AuthInfo.java | 9 -- .../com/datastax/driver/core/Cluster.java | 17 ++-- .../datastax/driver/core/ClusterMetadata.java | 1 + .../com/datastax/driver/core/ResultSet.java | 91 +++++++++++++++++++ .../com/datastax/driver/core/Session.java | 23 +++-- .../core/exceptions/TruncateException.java | 2 +- 6 files changed, 116 insertions(+), 27 deletions(-) delete mode 100644 driver-core/src/main/java/com/datastax/driver/core/AuthInfo.java diff --git a/driver-core/src/main/java/com/datastax/driver/core/AuthInfo.java b/driver-core/src/main/java/com/datastax/driver/core/AuthInfo.java deleted file mode 100644 index 6675747c470..00000000000 --- a/driver-core/src/main/java/com/datastax/driver/core/AuthInfo.java +++ /dev/null @@ -1,9 +0,0 @@ -package com.datastax.driver.core; - -/** - * Authentication information to connect to a Cassandra node. - * - * TODO (and define what this is in particular) - */ -public class AuthInfo { -} diff --git a/driver-core/src/main/java/com/datastax/driver/core/Cluster.java b/driver-core/src/main/java/com/datastax/driver/core/Cluster.java index 620332eee09..2818f5c9875 100644 --- a/driver-core/src/main/java/com/datastax/driver/core/Cluster.java +++ b/driver-core/src/main/java/com/datastax/driver/core/Cluster.java @@ -84,10 +84,10 @@ public Session connect() { * Cassandra nodes. * @return a new session on this cluster sets to no keyspace. */ - public Session connect(AuthInfo authInfo) { - // TODO - return null; - } + // TODO + //public Session connect(AuthInfo authInfo) { + // return null; + //} /** * Creates a new session on this cluster and sets a keyspace to use. @@ -117,11 +117,10 @@ public Session connect(String keyspace) throws NoHostAvailableException { * @throws NoHostAvailableException if no host can be contacted to set the * {@code keyspace}. */ - public Session connect(String keyspace, AuthInfo authInfo) throws NoHostAvailableException { - Session session = connect(authInfo); - session.manager.setKeyspace(keyspace); - return session; - } + // Session session = connect(authInfo); + // session.manager.setKeyspace(keyspace); + // return session; + //} /** * Returns read-only metadata on the connected cluster. 
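These two commits together make the basic connect-and-query flow explicit about failures. A rough usage sketch, not taken from the patches themselves (the contact point, keyspace and table names mirror the test code elsewhere in this series; the catch clauses follow the @throws contracts added above and below):

    Cluster cluster = new Cluster.Builder().addContactPoints("127.0.0.1").build();
    try {
        // connect(keyspace) issues a "USE test_ks" query and propagates it to the connection pools
        Session session = cluster.connect("test_ks");
        for (CQLRow row : session.execute("SELECT * FROM test"))
            System.out.println(row.getString("k"));
    } catch (NoHostAvailableException e) {
        // no Cassandra node could be contacted
    } catch (QueryExecutionException e) {
        // Cassandra could not execute the query at the requested consistency level
    }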
diff --git a/driver-core/src/main/java/com/datastax/driver/core/ClusterMetadata.java b/driver-core/src/main/java/com/datastax/driver/core/ClusterMetadata.java index 23e32d2a18f..ddb7b6ea922 100644 --- a/driver-core/src/main/java/com/datastax/driver/core/ClusterMetadata.java +++ b/driver-core/src/main/java/com/datastax/driver/core/ClusterMetadata.java @@ -92,6 +92,7 @@ Host getHost(InetSocketAddress address) { return hosts.get(address); } + // For internal use only Collection allHosts() { return hosts.values(); } diff --git a/driver-core/src/main/java/com/datastax/driver/core/ResultSet.java b/driver-core/src/main/java/com/datastax/driver/core/ResultSet.java index 0382ec4e512..d8f79e98d28 100644 --- a/driver-core/src/main/java/com/datastax/driver/core/ResultSet.java +++ b/driver-core/src/main/java/com/datastax/driver/core/ResultSet.java @@ -2,6 +2,9 @@ import java.nio.ByteBuffer; import java.util.*; +import java.util.concurrent.ExecutionException; +import java.util.concurrent.TimeoutException; +import java.util.concurrent.TimeUnit; import org.apache.cassandra.cql3.ColumnSpecification; import org.apache.cassandra.transport.Message; @@ -189,6 +192,94 @@ public void onException(Exception exception) { super.setException(exception); } + /** + * Waits for the query to return and return its result. + * + * This method is usually more convenient than {@link #get} as it: + *
    + *
+     * <ul>
+     *   <li>It waits for the result uninterruptibly, and so doesn't throw
+     *   {@link InterruptedException}.</li>
+     *   <li>It returns meaningful exceptions, instead of having to deal
+     *   with ExecutionException.</li>
+     * </ul>
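+     * <p>
+     * Illustrative use, assuming a connected {@code Session session}:
+     * <pre>
+     *   ResultSet.Future future = session.executeAsync("SELECT * FROM test");
+     *   // ... do something else while the query runs ...
+     *   ResultSet rs = future.getUninterruptibly();
+     * </pre>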
+ * As such, it is the preferred way to get the future result. + * + * @throws NoHostAvailableException if no host in the cluster can be + * contacted successfully to execute this query. + * @throws QueryExecutionException if the query triggered an execution + * exception, i.e. an exception thrown by Cassandra when it cannot execute + * the query with the requested consistency level successfully. + */ + public ResultSet getUninterruptibly() throws NoHostAvailableException, QueryExecutionException { + try { + while (true) { + try { + return super.get(); + } catch (InterruptedException e) { + // We said 'uninterruptibly' + } + } + } catch (ExecutionException e) { + extractCause(e); + throw new AssertionError(); + } + } + + /** + * Waits for the given time for the query to return and return its + * result if available. + * + * This method is usually more convenient than {@link #get} as it: + *
    + *
+     * <ul>
+     *   <li>It waits for the result uninterruptibly, and so doesn't throw
+     *   {@link InterruptedException}.</li>
+     *   <li>It returns meaningful exceptions, instead of having to deal
+     *   with ExecutionException.</li>
+     * </ul>
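+     * <p>
+     * Illustrative use, waiting at most one second for the result:
+     * <pre>
+     *   ResultSet rs = session.executeAsync("SELECT * FROM test")
+     *                         .getUninterruptibly(1, TimeUnit.SECONDS);
+     * </pre>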
+ * As such, it is the preferred way to get the future result. + * + * @throws NoHostAvailableException if no host in the cluster can be + * contacted successfully to execute this query. + * @throws QueryExecutionException if the query triggered an execution + * exception, i.e. an exception thrown by Cassandra when it cannot execute + * the query with the requested consistency level successfully. + * @throws TimeoutException if the wait timed out (Note that this is + * different from a Cassandra timeout, which is a {@code + * QueryExecutionException}). + */ + public ResultSet getUninterruptibly(long timeout, TimeUnit unit) throws NoHostAvailableException, QueryExecutionException, TimeoutException { + long start = System.nanoTime(); + long timeoutNanos = unit.toNanos(timeout); + try { + while (true) { + try { + return super.get(timeoutNanos, TimeUnit.NANOSECONDS); + } catch (InterruptedException e) { + // We said 'uninterruptibly' + long now = System.nanoTime(); + long elapsedNanos = now - start; + timeout = timeoutNanos - elapsedNanos; + start = now; + } + } + } catch (ExecutionException e) { + extractCause(e); + throw new AssertionError(); + } + } + + private static void extractCause(ExecutionException e) throws NoHostAvailableException, QueryExecutionException { + Throwable cause = e.getCause(); + if (cause instanceof NoHostAvailableException) + throw (NoHostAvailableException)cause; + else if (cause instanceof QueryExecutionException) + throw (QueryExecutionException)cause; + else if (cause instanceof DriverUncheckedException) + throw (DriverUncheckedException)cause; + else + throw new DriverInternalError("Unexpected exception thrown", cause); + } + // TODO: Convert to some internal exception private Exception convertException(org.apache.cassandra.exceptions.TransportException te) { diff --git a/driver-core/src/main/java/com/datastax/driver/core/Session.java b/driver-core/src/main/java/com/datastax/driver/core/Session.java index 66eba1c1e3c..3d1938d17d1 100644 --- a/driver-core/src/main/java/com/datastax/driver/core/Session.java +++ b/driver-core/src/main/java/com/datastax/driver/core/Session.java @@ -51,14 +51,15 @@ public class Session { * @param query the CQL query to execute * @return the result of the query. That result will never be null be can * be empty and will be for any non SELECT query. + * + * @throws NoHostAvailableException if no host in the cluster can be + * contacted successfully to execute this query. + * @throws QueryExecutionException if the query triggered an execution + * exception, i.e. an exception thrown by Cassandra when it cannot execute + * the query with the requested consistency level successfully. */ - public ResultSet execute(String query) { - // TODO: Deal with exceptions - try { - return executeAsync(query).get(); - } catch (Exception e) { - throw new RuntimeException(e); - } + public ResultSet execute(String query) throws NoHostAvailableException, QueryExecutionException { + return executeAsync(query).getUninterruptibly(); } /** @@ -70,9 +71,15 @@ public ResultSet execute(String query) { * @return the result of the query. That result will never be null be can * be empty and will be for any non SELECT query. * + * @throws NoHostAvailableException if no host in the cluster can be + * contacted successfully to execute this query. + * @throws QueryExecutionException if the query triggered an execution + * exception, i.e. an exception thrown by Cassandra when it cannot execute + * the query with the requested consistency level successfully. 
+ * * @see #execute(String) */ - public ResultSet execute(CQLQuery query) { + public ResultSet execute(CQLQuery query) throws NoHostAvailableException, QueryExecutionException { return execute(query.toString()); } diff --git a/driver-core/src/main/java/com/datastax/driver/core/exceptions/TruncateException.java b/driver-core/src/main/java/com/datastax/driver/core/exceptions/TruncateException.java index 282e4c6add3..9c153865989 100644 --- a/driver-core/src/main/java/com/datastax/driver/core/exceptions/TruncateException.java +++ b/driver-core/src/main/java/com/datastax/driver/core/exceptions/TruncateException.java @@ -6,7 +6,7 @@ // TODO: should that extend QueryExecutionException. In theory yes, but that's // probably not part of what you want to deal with when you catch // QueryExecutionException? -public class TruncateException extends DriverException { +public class TruncateException extends DriverUncheckedException { public TruncateException(String msg) { super(msg); From f0410f974a6f4f133d37f0a72ab664b72dafb62e Mon Sep 17 00:00:00 2001 From: Sylvain Lebresne Date: Fri, 28 Sep 2012 17:41:44 +0200 Subject: [PATCH 031/719] Improve reconnection handling --- .../core/AbstractReconnectionHandler.java | 91 +++++++++++++++++++ .../com/datastax/driver/core/Cluster.java | 42 +++++++-- .../driver/core/ControlConnection.java | 87 ++++++++++++------ .../java/com/datastax/driver/core/Host.java | 5 + .../driver/core/ReconnectionHandler.java | 54 ----------- .../driver/core/ReconnectionPolicy.java | 58 ++++++++++++ .../com/datastax/driver/core/SessionTest.java | 10 +- 7 files changed, 254 insertions(+), 93 deletions(-) create mode 100644 driver-core/src/main/java/com/datastax/driver/core/AbstractReconnectionHandler.java delete mode 100644 driver-core/src/main/java/com/datastax/driver/core/ReconnectionHandler.java create mode 100644 driver-core/src/main/java/com/datastax/driver/core/ReconnectionPolicy.java diff --git a/driver-core/src/main/java/com/datastax/driver/core/AbstractReconnectionHandler.java b/driver-core/src/main/java/com/datastax/driver/core/AbstractReconnectionHandler.java new file mode 100644 index 00000000000..9dda5fbc023 --- /dev/null +++ b/driver-core/src/main/java/com/datastax/driver/core/AbstractReconnectionHandler.java @@ -0,0 +1,91 @@ +package com.datastax.driver.core; + +import java.util.concurrent.atomic.AtomicReference; +import java.util.concurrent.*; + +import com.datastax.driver.core.transport.Connection; +import com.datastax.driver.core.transport.ConnectionException; + +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +abstract class AbstractReconnectionHandler implements Runnable { + + private static final Logger logger = LoggerFactory.getLogger(AbstractReconnectionHandler.class); + + private final ScheduledExecutorService executor; + private final ReconnectionPolicy policy; + private final AtomicReference currentAttempt; + + private volatile boolean readyForNext; + private volatile ScheduledFuture localFuture; + + public AbstractReconnectionHandler(ScheduledExecutorService executor, ReconnectionPolicy policy, AtomicReference currentAttempt) { + this.executor = executor; + this.policy = policy; + this.currentAttempt = currentAttempt; + } + + protected abstract Connection tryReconnect() throws ConnectionException; + protected abstract void onReconnection(Connection connection); + + protected boolean onConnectionException(ConnectionException e, long nextDelayMs) { return true; } + protected boolean onUnknownException(Exception e, long nextDelayMs) { return true; } + 
+    public void start() {
+        localFuture = executor.schedule(this, policy.nextDelayMs(), TimeUnit.MILLISECONDS);
+
+        // If there is a previous task, cancel it, so only one reconnection handler runs.
+        while (true) {
+            ScheduledFuture previous = currentAttempt.get();
+            if (currentAttempt.compareAndSet(previous, localFuture)) {
+                if (previous != null)
+                    previous.cancel(false);
+                break;
+            }
+        }
+        readyForNext = true;
+    }
+
+    public void run() {
+        // We shouldn't arrive here if the future is cancelled but better safe than sorry
+        if (localFuture.isCancelled())
+            return;
+
+        // Don't run before ready, otherwise our cancel business might end up removing all connection attempts.
+        while (!readyForNext) {
+            try { Thread.sleep(5); } catch (InterruptedException e) {}
+        }
+
+        try {
+            onReconnection(tryReconnect());
+            currentAttempt.compareAndSet(localFuture, null);
+        } catch (ConnectionException e) {
+            long nextDelay = policy.nextDelayMs();
+            if (onConnectionException(e, nextDelay))
+                reschedule(nextDelay);
+            else
+                currentAttempt.compareAndSet(localFuture, null);
+        } catch (Exception e) {
+            long nextDelay = policy.nextDelayMs();
+            if (onUnknownException(e, nextDelay))
+                reschedule(nextDelay);
+            else
+                currentAttempt.compareAndSet(localFuture, null);
+        }
+    }
+
+    private void reschedule(long nextDelay) {
+        readyForNext = false;
+        ScheduledFuture newFuture = executor.schedule(this, nextDelay, TimeUnit.MILLISECONDS);
+        assert localFuture != null;
+        // If the current future is not ours, then we have been cancelled.
+        if (!currentAttempt.compareAndSet(localFuture, newFuture)) {
+            newFuture.cancel(false);
+        }
+        localFuture = newFuture;
+        readyForNext = true;
+    }
+}
diff --git a/driver-core/src/main/java/com/datastax/driver/core/Cluster.java b/driver-core/src/main/java/com/datastax/driver/core/Cluster.java
index 2818f5c9875..fb1e1cb6578 100644
--- a/driver-core/src/main/java/com/datastax/driver/core/Cluster.java
+++ b/driver-core/src/main/java/com/datastax/driver/core/Cluster.java
@@ -287,6 +287,7 @@ class Manager implements Host.StateListener, Connection.DefaultResponseHandler {

     // TODO: Make that configurable
     final ConvictionPolicy.Factory convictionPolicyFactory = new SimpleConvictionPolicy.Factory();
+    final ReconnectionPolicy.Factory reconnectionPolicyFactory = ReconnectionPolicy.Exponential.makeFactory(2 * 1000, 5 * 60 * 1000);

     final Connection.Factory connectionFactory;
     private final ControlConnection controlConnection;
@@ -294,7 +295,7 @@ class Manager implements Host.StateListener, Connection.DefaultResponseHandler {
     // TODO: make configurable
     final LoadBalancingPolicy.Factory loadBalancingFactory = RoundRobinPolicy.Factory.INSTANCE;

     // TODO: give a name to the threads of this executor
-    private final ScheduledExecutorService scheduledExecutor = Executors.newScheduledThreadPool(1);
+    final ScheduledExecutorService scheduledExecutor = Executors.newScheduledThreadPool(2);

     // TODO: give a name to the threads of this executor
     final ExecutorService executor = Executors.newCachedThreadPool();
@@ -319,24 +320,47 @@ private Session newSession() {

         public void onUp(Host host) {
             logger.trace(String.format("Host %s is UP", host));
+
+            // If there is a reconnection attempt scheduled for that node, cancel it
+            ScheduledFuture scheduledAttempt = host.reconnectionAttempt.getAndSet(null);
+            if (scheduledAttempt != null)
+                scheduledAttempt.cancel(false);
+
             controlConnection.onUp(host);
             for (Session s : sessions)
                 s.manager.onUp(host);
-
-            // TODO: We should register
reconnection attempts, to avoid starting two of - // them and if this method is called by other means that the - // reconnection handler (like C* tells us it's up), cancel the latter } - public void onDown(Host host) { + public void onDown(final Host host) { logger.trace(String.format("Host %s is DOWN", host)); - controlConnection.onUp(host); + controlConnection.onDown(host); for (Session s : sessions) s.manager.onDown(host); - // Note: we'll basically waste the first successful reconnection that way, but it's probably not a big deal + // Note: we basically waste the first successful reconnection, but it's probably not a big deal logger.debug(String.format("%s is down, scheduling connection retries", host)); - new ReconnectionHandler(host, scheduledExecutor, connectionFactory).start(); + new AbstractReconnectionHandler(scheduledExecutor, reconnectionPolicyFactory.create(), host.reconnectionAttempt) { + + protected Connection tryReconnect() throws ConnectionException { + return connectionFactory.open(host); + } + + protected void onReconnection(Connection connection) { + logger.debug(String.format("Successful reconnection to %s, setting host UP", host)); + host.monitor().reset(); + } + + protected boolean onConnectionException(ConnectionException e, long nextDelayMs) { + logger.debug(String.format("Failed reconnection to %s (%s), scheduling retry in %d milliseconds", host, e.getMessage(), nextDelayMs)); + return true; + } + + protected boolean onUnknownException(Exception e, long nextDelayMs) { + logger.error(String.format("Unknown error during control connection reconnection, scheduling retry in %d milliseconds", nextDelayMs), e); + return true; + } + + }.start(); } public void onAdd(Host host) { diff --git a/driver-core/src/main/java/com/datastax/driver/core/ControlConnection.java b/driver-core/src/main/java/com/datastax/driver/core/ControlConnection.java index e966a9e4baf..b2214aaccb3 100644 --- a/driver-core/src/main/java/com/datastax/driver/core/ControlConnection.java +++ b/driver-core/src/main/java/com/datastax/driver/core/ControlConnection.java @@ -3,7 +3,8 @@ import java.util.Arrays; import java.util.Iterator; import java.util.List; -import java.util.concurrent.ExecutionException; +import java.util.concurrent.atomic.AtomicReference; +import java.util.concurrent.*; import com.datastax.driver.core.transport.Connection; import com.datastax.driver.core.transport.ConnectionException; @@ -24,41 +25,76 @@ class ControlConnection implements Host.StateListener { private static final String SELECT_COLUMN_FAMILIES = "SELECT * FROM system.schema_columnfamilies"; private static final String SELECT_COLUMNS = "SELECT * FROM system.schema_columns"; - private Connection connection; + private final AtomicReference connectionRef = new AtomicReference(); private final Cluster.Manager cluster; private final LoadBalancingPolicy balancingPolicy; + private final ReconnectionPolicy.Factory reconnectionPolicyFactory = ReconnectionPolicy.Exponential.makeFactory(2 * 1000, 5 * 60 * 1000); + private final AtomicReference reconnectionAttempt = new AtomicReference(); + public ControlConnection(Cluster.Manager cluster) { this.cluster = cluster; this.balancingPolicy = RoundRobinPolicy.Factory.INSTANCE.create(cluster.metadata.allHosts()); } public void reconnect() { + try { + setNewConnection(reconnectInternal()); + } catch (ConnectionException e) { + logger.error("[Control connection] Cannot connect to any host, scheduling retry"); + new AbstractReconnectionHandler(cluster.scheduledExecutor, 
reconnectionPolicyFactory.create(), reconnectionAttempt) { + protected Connection tryReconnect() throws ConnectionException { + return reconnectInternal(); + } + + protected void onReconnection(Connection connection) { + setNewConnection(connection); + } + + protected boolean onConnectionException(ConnectionException e, long nextDelayMs) { + logger.error(String.format("[Control connection] Cannot connect to any host, scheduling retry in %d milliseconds", nextDelayMs)); + return true; + } + + protected boolean onUnknownException(Exception e, long nextDelayMs) { + logger.error(String.format("[Control connection ]Unknown error during reconnection, scheduling retry in %d milliseconds", nextDelayMs), e); + return true; + } + }.start(); + } + } + + private void setNewConnection(Connection newConnection) { + logger.debug(String.format("[Control connection] Successfully connected to %s", newConnection.address)); + Connection old = connectionRef.getAndSet(newConnection); + if (old != null && !old.isClosed()) + old.close(); + } - if (connection != null) - connection.close(); + private Connection reconnectInternal() throws ConnectionException { Iterator iter = balancingPolicy.newQueryPlan(); while (iter.hasNext()) { Host host = iter.next(); try { - tryConnect(host); - return; + return tryConnect(host); } catch (ConnectionException e) { - // TODO: log something - // Just try next node + if (iter.hasNext()) { + logger.debug(String.format("[Control connection] Failed connecting to %s, trying next host", host)); + } else { + logger.debug(String.format("[Control connection] Failed connecting to %s, no more host to try", host)); + throw e; + } } } - - // TODO: we should log an error but reschedule for later - throw new RuntimeException(); + throw new ConnectionException(null, "Cannot connect to any host"); } - public void tryConnect(Host host) throws ConnectionException { - connection = cluster.connectionFactory.open(host); - logger.trace(String.format("Control connection connected to %s", host)); + private Connection tryConnect(Host host) throws ConnectionException { + Connection connection = cluster.connectionFactory.open(host); + logger.trace("[Control connection] Registering for events"); List evs = Arrays.asList(new Event.Type[]{ Event.Type.TOPOLOGY_CHANGE, Event.Type.STATUS_CHANGE, @@ -66,16 +102,13 @@ public void tryConnect(Host host) throws ConnectionException { }); connection.write(new RegisterMessage(evs)); - refreshSchema(); + logger.trace("[Control connection] Refreshing schema"); + refreshSchema(connection); // TODO: also catch up on potentially missed nodes (and node that happens to be up but not known to us) + return connection; } - public void refreshSchema() { - // Shouldn't happen unless we have bigger problems, but no reason to NPE - if (connection == null || connection.isClosed()) { - reconnect(); - } - + private void refreshSchema(Connection connection) { // Make sure we're up to date on metadata try { ResultSet.Future ksFuture = new ResultSet.Future(null, new QueryMessage(SELECT_KEYSPACES)); @@ -88,10 +121,10 @@ public void refreshSchema() { // TODO: we should probably do something more fancy, like check if the schema changed and notify whoever wants to be notified cluster.metadata.rebuildSchema(ksFuture.get(), cfFuture.get(), colsFuture.get()); } catch (ConnectionException e) { - // TODO: log + logger.debug(String.format("[Control connection] Connection error when refeshing schema (%s)", e.getMessage())); reconnect(); } catch (ExecutionException e) { - // TODO: log and decide what to 
do since in theory that shouldn't be a cassandra exception + logger.error("[Control connection] Unexpected error while refeshing schema", e); reconnect(); } catch (InterruptedException e) { // TODO: it's bad to do that but at the same time it's annoying to be interrupted @@ -106,9 +139,11 @@ public void onUp(Host host) { public void onDown(Host host) { balancingPolicy.onDown(host); - // TODO: we should look if that's the host we're connected with and - // attempt a reconnect. However we need to make that thread safe - // somehow. + // If that's the host we're connected to, and we haven't yet schedul a reconnection, pre-emptively start one + Connection current = connectionRef.get(); + logger.trace(String.format("[Control connection] %s is down, currently connected to %s", host, current == null ? "nobody" : current.address)); + if (current != null && current.address.equals(host.getAddress()) && reconnectionAttempt.get() == null) + reconnect(); } public void onAdd(Host host) { diff --git a/driver-core/src/main/java/com/datastax/driver/core/Host.java b/driver-core/src/main/java/com/datastax/driver/core/Host.java index ac4aa9fb168..0f8493a9767 100644 --- a/driver-core/src/main/java/com/datastax/driver/core/Host.java +++ b/driver-core/src/main/java/com/datastax/driver/core/Host.java @@ -2,7 +2,9 @@ import java.net.InetSocketAddress; import java.util.Set; +import java.util.concurrent.atomic.AtomicReference; import java.util.concurrent.CopyOnWriteArraySet; +import java.util.concurrent.ScheduledFuture; import com.datastax.driver.core.transport.ConnectionException; @@ -16,6 +18,9 @@ public class Host { private final InetSocketAddress address; private final HealthMonitor monitor; + // Tracks reconnection attempts to that host so we avoid adding multiple tasks + final AtomicReference reconnectionAttempt = new AtomicReference(); + // ClusterMetadata keeps one Host object per inet address, so don't use // that constructor unless you know what you do (use ClusterMetadata.getHost typically). 
Host(InetSocketAddress address, ConvictionPolicy.Factory policy) { diff --git a/driver-core/src/main/java/com/datastax/driver/core/ReconnectionHandler.java b/driver-core/src/main/java/com/datastax/driver/core/ReconnectionHandler.java deleted file mode 100644 index 18775fc4e59..00000000000 --- a/driver-core/src/main/java/com/datastax/driver/core/ReconnectionHandler.java +++ /dev/null @@ -1,54 +0,0 @@ -package com.datastax.driver.core; - -import java.util.concurrent.ScheduledExecutorService; -import java.util.concurrent.TimeUnit; - -import com.datastax.driver.core.transport.Connection; -import com.datastax.driver.core.transport.ConnectionException; - -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -class ReconnectionHandler implements Runnable { - - private static final Logger logger = LoggerFactory.getLogger(ReconnectionHandler.class); - - private final Host host; - private final ScheduledExecutorService executor; - private final Connection.Factory factory; - - // The next delay in milliseconds - // TODO: Implements something better than "every 3 seconds" - // TODO: And also evict the node after some (long) time - private int nextDelay = 3000; - - public ReconnectionHandler(Host host, ScheduledExecutorService executor, Connection.Factory factory) { - this.host = host; - this.executor = executor; - this.factory = factory; - } - - public void start() { - executor.schedule(this, nextDelay, TimeUnit.MILLISECONDS); - } - - public void run() { - try { - factory.open(host); - // If we're successful, the node is up and ready - logger.debug(String.format("Successful connection to %s, setting host UP", host)); - host.monitor().reset(); - } catch (ConnectionException e) { - // TODO: log the failure and implement some better policy of retry - scheduleRetry(); - } catch (Exception e) { - // TODO: log that something is wrong - scheduleRetry(); - } - } - - private void scheduleRetry() { - logger.debug(String.format("Failed connection to %s, scheduling retry in %d milliseconds", host, nextDelay)); - executor.schedule(this, nextDelay, TimeUnit.MILLISECONDS); - } -} diff --git a/driver-core/src/main/java/com/datastax/driver/core/ReconnectionPolicy.java b/driver-core/src/main/java/com/datastax/driver/core/ReconnectionPolicy.java new file mode 100644 index 00000000000..789caafa3b9 --- /dev/null +++ b/driver-core/src/main/java/com/datastax/driver/core/ReconnectionPolicy.java @@ -0,0 +1,58 @@ +package com.datastax.driver.core; + +public interface ReconnectionPolicy { + + public long nextDelayMs(); + + public interface Factory { + public ReconnectionPolicy create(); + } + + public static class Constant implements ReconnectionPolicy { + + private final long delayMs; + + // TODO: validate arguments + private Constant(long delayMs) { + this.delayMs = delayMs; + } + + public long nextDelayMs() { + return delayMs; + } + + public static ReconnectionPolicy.Factory makeFactory(final long constantDelayMs) { + return new ReconnectionPolicy.Factory() { + public ReconnectionPolicy create() { + return new Constant(constantDelayMs); + } + }; + } + } + + public static class Exponential implements ReconnectionPolicy { + + private final long baseDelayMs; + private final long maxDelayMs; + private int attempts; + + // TODO: validate arguments + private Exponential(long baseDelayMs, long maxDelayMs) { + this.baseDelayMs = baseDelayMs; + this.maxDelayMs = maxDelayMs; + } + + public long nextDelayMs() { + ++attempts; + return baseDelayMs * (1 << attempts); + } + + public static ReconnectionPolicy.Factory 
makeFactory(final long baseDelayMs, final long maxDelayMs) { + return new ReconnectionPolicy.Factory() { + public ReconnectionPolicy create() { + return new Exponential(baseDelayMs, maxDelayMs); + } + }; + } + } +} diff --git a/driver-core/src/test/java/com/datastax/driver/core/SessionTest.java b/driver-core/src/test/java/com/datastax/driver/core/SessionTest.java index d59def5df9e..158d7d72d02 100644 --- a/driver-core/src/test/java/com/datastax/driver/core/SessionTest.java +++ b/driver-core/src/test/java/com/datastax/driver/core/SessionTest.java @@ -11,6 +11,8 @@ import org.apache.log4j.Logger; import org.apache.log4j.PatternLayout; +import com.datastax.driver.core.exceptions.*; + public class SessionTest { // I really think we should make sure the library doesn't complain about @@ -136,12 +138,12 @@ public void MultiNodeContinuousExecuteTest() throws Exception { try { session.execute("CREATE KEYSPACE test_ks WITH replication = { 'class' : 'SimpleStrategy', 'replication_factor' : 2 }"); // We should deal with that sleep - try { Thread.sleep(1000); } catch (Exception e) {} - session.use("test_ks"); + try { Thread.sleep(2000); } catch (Exception e) {} + session.execute("USE test_ks"); session.execute("CREATE TABLE test (k text PRIMARY KEY, i int, f float)"); - } catch (Exception e) { + } catch (AlreadyExistsException e) { // Skip if already exists - session.use("test_ks"); + session.execute("USE test_ks"); } //System.out.println("--- Schema ---\n" + cluster.getMetadata()); From d9398b6dd8b11fb8aaa0742282f721f8a46a800d Mon Sep 17 00:00:00 2001 From: Sylvain Lebresne Date: Fri, 28 Sep 2012 17:54:00 +0200 Subject: [PATCH 032/719] Give better names to threads --- .../src/main/java/com/datastax/driver/core/Cluster.java | 7 +++---- .../java/com/datastax/driver/core/ControlConnection.java | 2 +- .../com/datastax/driver/core/transport/Connection.java | 4 ++-- 3 files changed, 6 insertions(+), 7 deletions(-) diff --git a/driver-core/src/main/java/com/datastax/driver/core/Cluster.java b/driver-core/src/main/java/com/datastax/driver/core/Cluster.java index fb1e1cb6578..2493c0d433f 100644 --- a/driver-core/src/main/java/com/datastax/driver/core/Cluster.java +++ b/driver-core/src/main/java/com/datastax/driver/core/Cluster.java @@ -294,11 +294,10 @@ class Manager implements Host.StateListener, Connection.DefaultResponseHandler { // TODO: make configurable final LoadBalancingPolicy.Factory loadBalancingFactory = RoundRobinPolicy.Factory.INSTANCE; - // TODO: give a name to the threads of this executor - final ScheduledExecutorService scheduledExecutor = Executors.newScheduledThreadPool(2); + final ScheduledExecutorService reconnectionExecutor = Executors.newScheduledThreadPool(2, new NamedThreadFactory("Reconnection")); // TODO: give a name to the threads of this executor - final ExecutorService executor = Executors.newCachedThreadPool(); + final ExecutorService executor = Executors.newCachedThreadPool(new NamedThreadFactory("Cassandra Java Driver worker")); private Manager(List contactPoints) throws ConnectionException { this.metadata = new ClusterMetadata(this); @@ -339,7 +338,7 @@ public void onDown(final Host host) { // Note: we basically waste the first successful reconnection, but it's probably not a big deal logger.debug(String.format("%s is down, scheduling connection retries", host)); - new AbstractReconnectionHandler(scheduledExecutor, reconnectionPolicyFactory.create(), host.reconnectionAttempt) { + new AbstractReconnectionHandler(reconnectionExecutor, reconnectionPolicyFactory.create(), 
host.reconnectionAttempt) { protected Connection tryReconnect() throws ConnectionException { return connectionFactory.open(host); diff --git a/driver-core/src/main/java/com/datastax/driver/core/ControlConnection.java b/driver-core/src/main/java/com/datastax/driver/core/ControlConnection.java index b2214aaccb3..9eff25ebdbe 100644 --- a/driver-core/src/main/java/com/datastax/driver/core/ControlConnection.java +++ b/driver-core/src/main/java/com/datastax/driver/core/ControlConnection.java @@ -43,7 +43,7 @@ public void reconnect() { setNewConnection(reconnectInternal()); } catch (ConnectionException e) { logger.error("[Control connection] Cannot connect to any host, scheduling retry"); - new AbstractReconnectionHandler(cluster.scheduledExecutor, reconnectionPolicyFactory.create(), reconnectionAttempt) { + new AbstractReconnectionHandler(cluster.reconnectionExecutor, reconnectionPolicyFactory.create(), reconnectionAttempt) { protected Connection tryReconnect() throws ConnectionException { return reconnectInternal(); } diff --git a/driver-core/src/main/java/com/datastax/driver/core/transport/Connection.java b/driver-core/src/main/java/com/datastax/driver/core/transport/Connection.java index 811f570e43c..94fa1826787 100644 --- a/driver-core/src/main/java/com/datastax/driver/core/transport/Connection.java +++ b/driver-core/src/main/java/com/datastax/driver/core/transport/Connection.java @@ -290,14 +290,14 @@ public Factory(DefaultResponseHandler defaultHandler) { */ public Connection open(Host host) throws ConnectionException { InetSocketAddress address = host.getAddress(); - String name = address.toString() + "-" + getIdGenerator(host).getAndIncrement(); + String name =address.toString() + "-" + getIdGenerator(host).getAndIncrement(); return new Connection(name, address, this); } private AtomicInteger getIdGenerator(Host host) { AtomicInteger g = idGenerators.get(host); if (g == null) { - g = new AtomicInteger(0); + g = new AtomicInteger(1); AtomicInteger old = idGenerators.putIfAbsent(host, g); if (old != null) g = old; From f5458669aab4265b86800a720fabc7a7195f70a9 Mon Sep 17 00:00:00 2001 From: Sylvain Lebresne Date: Tue, 2 Oct 2012 17:52:34 +0200 Subject: [PATCH 033/719] Extend collection support --- .../datastax/driver/core/BoundStatement.java | 44 ++++++++- .../com/datastax/driver/core/Cluster.java | 13 ++- .../driver/core/NamedThreadFactory.java | 19 ++++ .../datastax/driver/core/transport/Codec.java | 8 +- .../com/datastax/driver/core/SessionTest.java | 98 +++++++++++-------- 5 files changed, 131 insertions(+), 51 deletions(-) create mode 100644 driver-core/src/main/java/com/datastax/driver/core/NamedThreadFactory.java diff --git a/driver-core/src/main/java/com/datastax/driver/core/BoundStatement.java b/driver-core/src/main/java/com/datastax/driver/core/BoundStatement.java index 9c310077af1..807d0c32d83 100644 --- a/driver-core/src/main/java/com/datastax/driver/core/BoundStatement.java +++ b/driver-core/src/main/java/com/datastax/driver/core/BoundStatement.java @@ -3,8 +3,9 @@ import java.math.BigDecimal; import java.math.BigInteger; import java.nio.ByteBuffer; -import java.util.Date; -import java.util.UUID; +import java.util.*; + +import com.datastax.driver.core.transport.Codec; import org.apache.cassandra.db.marshal.*; @@ -241,6 +242,45 @@ public BoundStatement setUUID(String name, UUID v) { return setUUID(metadata().getIdx(name), v); } + public BoundStatement setList(int i, List v) { + DataType type = metadata().type(i); + if (type.kind() != DataType.Kind.COLLECTION || 
type.asCollection().collectionType() != DataType.Collection.Type.LIST) + throw new InvalidTypeException(String.format("Column %s is of type %s, cannot set to a list", metadata().name(i), type)); + + // TODO: I should validate that it's the right parameter type + return setValue(i, Codec.>getCodec(type).decompose(v)); + } + + public BoundStatement setList(String name, List v) { + return setList(metadata().getIdx(name), v); + } + + public BoundStatement setMap(int i, Map v) { + DataType type = metadata().type(i); + if (type.kind() != DataType.Kind.COLLECTION || type.asCollection().collectionType() != DataType.Collection.Type.MAP) + throw new InvalidTypeException(String.format("Column %s is of type %s, cannot set to a map", metadata().name(i), type)); + + // TODO: I should validate that it's the right parameter type + return setValue(i, Codec.>getCodec(type).decompose(v)); + } + + public BoundStatement setMap(String name, Map v) { + return setMap(metadata().getIdx(name), v); + } + + public BoundStatement setSet(int i, Set v) { + DataType type = metadata().type(i); + if (type.kind() != DataType.Kind.COLLECTION || type.asCollection().collectionType() != DataType.Collection.Type.SET) + throw new InvalidTypeException(String.format("Column %s is of type %s, cannot set to a set", metadata().name(i), type)); + + // TODO: I should validate that it's the right parameter type + return setValue(i, Codec.>getCodec(type).decompose(v)); + } + + public BoundStatement setSet(String name, Set v) { + return setSet(metadata().getIdx(name), v); + } + private Columns metadata() { return statement.metadata; } diff --git a/driver-core/src/main/java/com/datastax/driver/core/Cluster.java b/driver-core/src/main/java/com/datastax/driver/core/Cluster.java index 2493c0d433f..1daf47d24e7 100644 --- a/driver-core/src/main/java/com/datastax/driver/core/Cluster.java +++ b/driver-core/src/main/java/com/datastax/driver/core/Cluster.java @@ -295,6 +295,7 @@ class Manager implements Host.StateListener, Connection.DefaultResponseHandler { final LoadBalancingPolicy.Factory loadBalancingFactory = RoundRobinPolicy.Factory.INSTANCE; final ScheduledExecutorService reconnectionExecutor = Executors.newScheduledThreadPool(2, new NamedThreadFactory("Reconnection")); + final ScheduledExecutorService scheduledTasksExecutor = Executors.newScheduledThreadPool(1, new NamedThreadFactory("Scheduled Tasks")); // TODO: give a name to the threads of this executor final ExecutorService executor = Executors.newCachedThreadPool(new NamedThreadFactory("Cassandra Java Driver worker")); @@ -400,10 +401,12 @@ public void handle(Message.Response response) { final Event event = ((EventMessage)response).event; - // When handle is called, the current thread is a network I/O - // thread, and we don't want to block it (typically addHost() will - // create the connection pool to the new node, which can take time) - executor.execute(new Runnable() { + // When handle is called, the current thread is a network I/O thread, and we don't want to block + // it (typically addHost() will create the connection pool to the new node, which can take time) + // Besides, events are usually sent a bit too early (since they're + // triggered once gossip is up, but that before the client-side + // server is up) so adds a second delay. 
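+        // ("a second delay" above is literal: event processing is pushed back by one
+        // second, see the schedule(..., 1, TimeUnit.SECONDS) call closing this Runnable)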
+ scheduledTasksExecutor.schedule(new Runnable() { public void run() { switch (event.type) { case TOPOLOGY_CHANGE: @@ -438,7 +441,7 @@ public void run() { break; } } - }); + }, 1, TimeUnit.SECONDS); } } } diff --git a/driver-core/src/main/java/com/datastax/driver/core/NamedThreadFactory.java b/driver-core/src/main/java/com/datastax/driver/core/NamedThreadFactory.java new file mode 100644 index 00000000000..49f66f3e1a2 --- /dev/null +++ b/driver-core/src/main/java/com/datastax/driver/core/NamedThreadFactory.java @@ -0,0 +1,19 @@ +package com.datastax.driver.core; + +import java.util.concurrent.ThreadFactory; +import java.util.concurrent.atomic.AtomicInteger; + +class NamedThreadFactory implements ThreadFactory { + + protected final String name; + protected final AtomicInteger n = new AtomicInteger(1); + + public NamedThreadFactory(String name) { + this.name = name; + } + + public Thread newThread(Runnable runnable) { + return new Thread(runnable, name + "-" + n.getAndIncrement()); + } +} + diff --git a/driver-core/src/main/java/com/datastax/driver/core/transport/Codec.java b/driver-core/src/main/java/com/datastax/driver/core/transport/Codec.java index 7d8e19ae9ce..f414fd07426 100644 --- a/driver-core/src/main/java/com/datastax/driver/core/transport/Codec.java +++ b/driver-core/src/main/java/com/datastax/driver/core/transport/Codec.java @@ -35,11 +35,11 @@ public class Codec { private Codec() {} - public static AbstractType getCodec(DataType type) { + public static AbstractType getCodec(DataType type) { switch (type.kind()) { - case NATIVE: return nativeCodec(type.asNative()); - case COLLECTION: return collectionCodec(type.asCollection()); - case CUSTOM: return customCodec(type.asCustom()); + case NATIVE: return (AbstractType)nativeCodec(type.asNative()); + case COLLECTION: return (AbstractType)collectionCodec(type.asCollection()); + case CUSTOM: return (AbstractType)customCodec(type.asCustom()); default: throw new RuntimeException("Unknow data type kind"); } } diff --git a/driver-core/src/test/java/com/datastax/driver/core/SessionTest.java b/driver-core/src/test/java/com/datastax/driver/core/SessionTest.java index 158d7d72d02..0576e1dcf19 100644 --- a/driver-core/src/test/java/com/datastax/driver/core/SessionTest.java +++ b/driver-core/src/test/java/com/datastax/driver/core/SessionTest.java @@ -100,58 +100,76 @@ public static void classSetUp() { // assertEquals(0.2, r.getFloat("f"), 0.01); //} - //@Test - //public void CollectionsTest() throws Exception { - - // Cluster cluster = new Cluster.Builder().addContactPoints("127.0.0.1").build(); - // Session session = cluster.connect(); - - // try { - // session.execute("CREATE KEYSPACE test_ks WITH replication = { 'class' : 'SimpleStrategy', 'replication_factor' : 1 }"); - // // We should deal with that sleep - // try { Thread.sleep(1000); } catch (Exception e) {} - // session.use("test_ks"); - // session.execute("CREATE TABLE test (k text PRIMARY KEY, l list, s set, m map)"); - // } catch (Exception e) { - // // Skip if already created - // session.use("test_ks"); - // } - - // session.execute("INSERT INTO test (k, l, s, m) VALUES ('k', [3, 2, 1], { 3, 2, 1}, { 0 : 0, 1 : 1 })"); - // for (CQLRow row : session.execute("SELECT * FROM test")) { - // List l = (List)row.getList("l"); - // Set s = (Set)row.getSet("s"); - // Map m = (Map)row.getMap("m"); - - // System.out.println("l = " + l); - // System.out.println("s = " + s); - // System.out.println("m = " + m); - // } - //} - @Test - public void MultiNodeContinuousExecuteTest() throws Exception { 
+ public void CollectionsTest() throws Exception { Cluster cluster = new Cluster.Builder().addContactPoints("127.0.0.1").build(); Session session = cluster.connect(); try { - session.execute("CREATE KEYSPACE test_ks WITH replication = { 'class' : 'SimpleStrategy', 'replication_factor' : 2 }"); + session.execute("CREATE KEYSPACE test_ks WITH replication = { 'class' : 'SimpleStrategy', 'replication_factor' : 1 }"); // We should deal with that sleep - try { Thread.sleep(2000); } catch (Exception e) {} + try { Thread.sleep(1000); } catch (Exception e) {} session.execute("USE test_ks"); - session.execute("CREATE TABLE test (k text PRIMARY KEY, i int, f float)"); - } catch (AlreadyExistsException e) { - // Skip if already exists + session.execute("CREATE TABLE test (k text PRIMARY KEY, l list, s set, m map)"); + } catch (Exception e) { + // Skip if already created session.execute("USE test_ks"); } - //System.out.println("--- Schema ---\n" + cluster.getMetadata()); + session.execute("INSERT INTO test (k, l, s, m) VALUES ('k', [3, 2, 1], { 3, 2, 1}, { 0 : 0, 1 : 1 })"); + for (CQLRow row : session.execute("SELECT * FROM test")) { + List l = (List)row.getList("l"); + Set s = (Set)row.getSet("s"); + Map m = (Map)row.getMap("m"); - for (int i = 0; i < 10000; ++i) { - System.out.println(">> " + i); - session.execute(String.format("INSERT INTO test (k, i, f) VALUES ('k%d', %d, %d.2)", i, i, i)); - Thread.currentThread().sleep(1000); + System.out.println("l = " + l); + System.out.println("s = " + s); + System.out.println("m = " + m); + } + + System.out.println("-------"); + + BoundStatement stmt = session.prepare("INSERT INTO test (k, l, s, m) VALUES ('k2', ?, ?, ?)").newBoundStatement(); + stmt.setList(0, Arrays.asList(new Integer[]{ 5, 4, 3, 2, 1 })); + stmt.setSet(1, new HashSet(Arrays.asList(new String[]{ "5", "4", "3", "2", "1" }))); + stmt.setMap(2, new HashMap(){{ put(3, 4); put(1, 42); }}); + session.executePrepared(stmt); + + for (CQLRow row : session.execute("SELECT * FROM test WHERE k = 'k2'")) { + List l = (List)row.getList("l"); + Set s = (Set)row.getSet("s"); + Map m = (Map)row.getMap("m"); + + System.out.println("l = " + l); + System.out.println("s = " + s); + System.out.println("m = " + m); } } + + //@Test + //public void MultiNodeContinuousExecuteTest() throws Exception { + + // Cluster cluster = new Cluster.Builder().addContactPoints("127.0.0.1").build(); + // Session session = cluster.connect(); + + // try { + // session.execute("CREATE KEYSPACE test_ks WITH replication = { 'class' : 'SimpleStrategy', 'replication_factor' : 2 }"); + // // We should deal with that sleep + // try { Thread.sleep(2000); } catch (Exception e) {} + // session.execute("USE test_ks"); + // session.execute("CREATE TABLE test (k text PRIMARY KEY, i int, f float)"); + // } catch (AlreadyExistsException e) { + // // Skip if already exists + // session.execute("USE test_ks"); + // } + + // //System.out.println("--- Schema ---\n" + cluster.getMetadata()); + + // for (int i = 0; i < 10000; ++i) { + // System.out.println(">> " + i); + // session.execute(String.format("INSERT INTO test (k, i, f) VALUES ('k%d', %d, %d.2)", i, i, i)); + // Thread.currentThread().sleep(1000); + // } + //} } From 7accf28bac0194fdded1c7ffb6a7b79538cf7424 Mon Sep 17 00:00:00 2001 From: Sylvain Lebresne Date: Wed, 3 Oct 2012 17:07:12 +0200 Subject: [PATCH 034/719] Improve collection support --- .../datastax/driver/core/BoundStatement.java | 41 ++++- .../java/com/datastax/driver/core/CQLRow.java | 169 ++++++++++++++---- 
.../com/datastax/driver/core/DataType.java | 20 +++ .../com/datastax/driver/core/ResultSet.java | 1 - .../datastax/driver/core/transport/Codec.java | 66 ++++--- .../com/datastax/driver/core/SessionTest.java | 12 +- 6 files changed, 245 insertions(+), 64 deletions(-) diff --git a/driver-core/src/main/java/com/datastax/driver/core/BoundStatement.java b/driver-core/src/main/java/com/datastax/driver/core/BoundStatement.java index 807d0c32d83..2c7834a52f6 100644 --- a/driver-core/src/main/java/com/datastax/driver/core/BoundStatement.java +++ b/driver-core/src/main/java/com/datastax/driver/core/BoundStatement.java @@ -247,11 +247,20 @@ public BoundStatement setList(int i, List v) { if (type.kind() != DataType.Kind.COLLECTION || type.asCollection().collectionType() != DataType.Collection.Type.LIST) throw new InvalidTypeException(String.format("Column %s is of type %s, cannot set to a list", metadata().name(i), type)); - // TODO: I should validate that it's the right parameter type + // If the list is empty, it will never fail validation, but otherwise we should check the list given if of the right type + if (!v.isEmpty()) { + // Ugly? Yes + Class klass = v.get(0).getClass(); + + DataType.Native eltType = (DataType.Native)((DataType.Collection.List)type).getElementsType(); + if (!Codec.isCompatible(eltType, klass)) + throw new InvalidTypeException(String.format("Column %s is a %s, cannot set to a list of %s", metadata().name(i), type, klass)); + } + return setValue(i, Codec.>getCodec(type).decompose(v)); } - public BoundStatement setList(String name, List v) { + public BoundStatement setList(String name, List v) { return setList(metadata().getIdx(name), v); } @@ -260,11 +269,23 @@ public BoundStatement setMap(int i, Map v) { if (type.kind() != DataType.Kind.COLLECTION || type.asCollection().collectionType() != DataType.Collection.Type.MAP) throw new InvalidTypeException(String.format("Column %s is of type %s, cannot set to a map", metadata().name(i), type)); - // TODO: I should validate that it's the right parameter type + if (!v.isEmpty()) { + // Ugly? Yes + Map.Entry entry = v.entrySet().iterator().next(); + Class keysClass = entry.getKey().getClass(); + Class valuesClass = entry.getValue().getClass(); + + DataType.Collection.Map mapType = (DataType.Collection.Map)type; + DataType.Native keysType = (DataType.Native)mapType.getKeysType(); + DataType.Native valuesType = (DataType.Native)mapType.getValuesType(); + if (!Codec.isCompatible(keysType, keysClass) || !Codec.isCompatible(valuesType, valuesClass)) + throw new InvalidTypeException(String.format("Column %s is a %s, cannot set to a map of %s -> %s", metadata().name(i), type, keysType, valuesType)); + } + return setValue(i, Codec.>getCodec(type).decompose(v)); } - public BoundStatement setMap(String name, Map v) { + public BoundStatement setMap(String name, Map v) { return setMap(metadata().getIdx(name), v); } @@ -273,11 +294,19 @@ public BoundStatement setSet(int i, Set v) { if (type.kind() != DataType.Kind.COLLECTION || type.asCollection().collectionType() != DataType.Collection.Type.SET) throw new InvalidTypeException(String.format("Column %s is of type %s, cannot set to a set", metadata().name(i), type)); - // TODO: I should validate that it's the right parameter type + if (!v.isEmpty()) { + // Ugly? 
Yes + Class klass = v.iterator().next().getClass(); + + DataType.Native eltType = (DataType.Native)((DataType.Collection.Set)type).getElementsType(); + if (!Codec.isCompatible(eltType, klass)) + throw new InvalidTypeException(String.format("Column %s is a %s, cannot set to a set of %s", metadata().name(i), type, klass)); + } + return setValue(i, Codec.>getCodec(type).decompose(v)); } - public BoundStatement setSet(String name, Set v) { + public BoundStatement setSet(String name, Set v) { return setSet(metadata().getIdx(name), v); } diff --git a/driver-core/src/main/java/com/datastax/driver/core/CQLRow.java b/driver-core/src/main/java/com/datastax/driver/core/CQLRow.java index 339d813b11f..8beeb7361ef 100644 --- a/driver-core/src/main/java/com/datastax/driver/core/CQLRow.java +++ b/driver-core/src/main/java/com/datastax/driver/core/CQLRow.java @@ -184,7 +184,7 @@ public long getLong(String name) { * value is NULL, {@code null} is returned. * * @throws IndexOutOfBoundsException if {@code i < 0 || i >= this.columns().count()}. - * @throws InvalidTypeException if columns {@code i} is not of type TIMESTAMP. + * @throws InvalidTypeException if column {@code i} is not of type TIMESTAMP. */ public Date getDate(int i) { metadata.checkType(i, DataType.Native.TIMESTAMP); @@ -205,7 +205,7 @@ public Date getDate(int i) { * * @throws IllegalArgumentException if {@code name} is not part of the * ResultSet this row is part of, i.e. if {@code !this.columns().names().contains(name)}. - * @throws InvalidTypeException if columns {@code name} is not of type TIMESTAMP. + * @throws InvalidTypeException if column {@code name} is not of type TIMESTAMP. */ public Date getDate(String name) { return getDate(metadata.getIdx(name)); @@ -219,7 +219,7 @@ public Date getDate(String name) { * value is NULL, {@code 0.0f} is returned. * * @throws IndexOutOfBoundsException if {@code i < 0 || i >= this.columns().count()}. - * @throws InvalidTypeException if columns {@code i} is not of type FLOAT. + * @throws InvalidTypeException if column {@code i} is not of type FLOAT. */ public float getFloat(int i) { metadata.checkType(i, DataType.Native.FLOAT); @@ -240,7 +240,7 @@ public float getFloat(int i) { * * @throws IllegalArgumentException if {@code name} is not part of the * ResultSet this row is part of, i.e. if {@code !this.columns().names().contains(name)}. - * @throws InvalidTypeException if columns {@code name} is not of type FLOAT. + * @throws InvalidTypeException if column {@code name} is not of type FLOAT. */ public float getFloat(String name) { return getFloat(metadata.getIdx(name)); @@ -254,7 +254,7 @@ public float getFloat(String name) { * value is NULL, {@code 0.0} is returned. * * @throws IndexOutOfBoundsException if {@code i < 0 || i >= this.columns().count()}. - * @throws InvalidTypeException if columns {@code i} is not of type + * @throws InvalidTypeException if column {@code i} is not of type * DOUBLE or FLOAT. */ public double getDouble(int i) { @@ -279,7 +279,7 @@ public double getDouble(int i) { * * @throws IllegalArgumentException if {@code name} is not part of the * ResultSet this row is part of, i.e. if {@code !this.columns().names().contains(name)}. - * @throws InvalidTypeException if columns {@code name} is not of type + * @throws InvalidTypeException if column {@code name} is not of type * DOUBLE or FLOAT. */ public double getDouble(String name) { @@ -377,7 +377,7 @@ public byte[] getBytes(String name) { * value is NULL, {@code null} is returned. 
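     * (For example: {@code String v = row.getString(0);}.)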
* * @throws IndexOutOfBoundsException if {@code i < 0 || i >= this.columns().count()}. - * @throws InvalidTypeException if columns {@code i} type is none of: + * @throws InvalidTypeException if column {@code i} type is none of: * VARCHAR, TEXT or ASCII. */ public String getString(int i) { @@ -403,7 +403,7 @@ public String getString(int i) { * * @throws IllegalArgumentException if {@code name} is not part of the * ResultSet this row is part of, i.e. if {@code !this.columns().names().contains(name)}. - * @throws InvalidTypeException if columns {@code name} type is none of: + * @throws InvalidTypeException if column {@code name} type is none of: * VARCHAR, TEXT or ASCII. */ public String getString(String name) { @@ -418,7 +418,7 @@ public String getString(String name) { * length integer. If the value is NULL, {@code null} is returned. * * @throws IndexOutOfBoundsException if {@code i < 0 || i >= this.columns().count()}. - * @throws InvalidTypeException if columns {@code i} is not of type VARINT. + * @throws InvalidTypeException if column {@code i} is not of type VARINT. */ public BigInteger getVarInt(int i) { metadata.checkType(i, DataType.Native.VARINT); @@ -439,7 +439,7 @@ public BigInteger getVarInt(int i) { * * @throws IllegalArgumentException if {@code name} is not part of the * ResultSet this row is part of, i.e. if {@code !this.columns().names().contains(name)}. - * @throws InvalidTypeException if columns {@code name} is not of type VARINT. + * @throws InvalidTypeException if column {@code name} is not of type VARINT. */ public BigInteger getVarInt(String name) { return getVarInt(metadata.getIdx(name)); @@ -453,7 +453,7 @@ public BigInteger getVarInt(String name) { * length decimal. If the value is NULL, {@code null} is returned. * * @throws IndexOutOfBoundsException if {@code i < 0 || i >= this.columns().count()}. - * @throws InvalidTypeException if columns {@code i} is not of type DECIMAL. + * @throws InvalidTypeException if column {@code i} is not of type DECIMAL. */ public BigDecimal getDecimal(int i) { metadata.checkType(i, DataType.Native.DECIMAL); @@ -474,7 +474,7 @@ public BigDecimal getDecimal(int i) { * * @throws IllegalArgumentException if {@code name} is not part of the * ResultSet this row is part of, i.e. if {@code !this.columns().names().contains(name)}. - * @throws InvalidTypeException if columns {@code name} is not of type DECIMAL. + * @throws InvalidTypeException if column {@code name} is not of type DECIMAL. */ public BigDecimal getDecimal(String name) { return getDecimal(metadata.getIdx(name)); @@ -488,7 +488,7 @@ public BigDecimal getDecimal(String name) { * If the value is NULL, {@code null} is returned. * * @throws IndexOutOfBoundsException if {@code i < 0 || i >= this.columns().count()}. - * @throws InvalidTypeException if columns {@code i} is not of type UUID + * @throws InvalidTypeException if column {@code i} is not of type UUID * or TIMEUUID. */ public UUID getUUID(int i) { @@ -512,61 +512,170 @@ public UUID getUUID(int i) { * * @throws IllegalArgumentException if {@code name} is not part of the * ResultSet this row is part of, i.e. if {@code !this.columns().names().contains(name)}. - * @throws InvalidTypeException if columns {@code name} is not of type + * @throws InvalidTypeException if column {@code name} is not of type * UUID or TIMEUUID. 
*/ public UUID getUUID(String name) { return getUUID(metadata.getIdx(name)); } - // TODO: I don't have a good idea on how to make that typesafe in a way that is not ugly - public List getList(int i) { + /** + * Returns the {@code i}th value of this row has a list. + * + * @param i the index of the column to retrieve. + * @param elementsClass the class for the elements of the list to retrieve. + * @return the value of the {@code i}th column in this row as a list of + * {@code elementsClass} objects. If the value is NULL, an empty list is + * returned (note that Cassandra makes no difference between an empty list + * and column of type list that is not set). + * + * @throws IndexOutOfBoundsException if {@code i < 0 || i >= this.columns().count()}. + * @throws InvalidTypeException if column {@code i} is not a list or if its + * elements are not of class {@code elementsClass}. + */ + public List getList(int i, Class elementsClass) { + // TODO: this is not as flexible as the methods above. For instance, + // with a list, one cannot ask for getList(i, Long.class). We + // might want to improve that, though that reach into the + // ListType.compose() method. + DataType type = metadata.type(i); if (!(type instanceof DataType.Collection.List)) throw new InvalidTypeException(String.format("Column %s is not of list type", metadata.name(i))); + DataType.Native eltType = (DataType.Native)((DataType.Collection.List)type).getElementsType(); + if (!Codec.isCompatible(eltType, elementsClass)) + throw new InvalidTypeException(String.format("Column %s is a %s, cannot be retrieve as a list of %s", metadata.name(i), type, elementsClass)); + ByteBuffer value = data.get(i); if (value == null) - return null; + return Collections.emptyList(); // TODO: we could avoid the getCodec call if we kept a reference to the original message. - return (List)Codec.getCodec(type).compose(value); + return (List)Codec.getCodec(type).compose(value); } - public List getList(String name) { - return getList(metadata.getIdx(name)); + /** + * Returns the value of column {@code name} has a list. + * + * @param name the name of the column to retrieve. + * @param elementsClass the class for the elements of the list to retrieve. + * @return the value of the {@code i}th column in this row as a list of + * {@code elementsClass} objects. If the value is NULL, an empty list is + * returned (note that Cassandra makes no difference between an empty list + * and column of type list that is not set). + * + * @throws IllegalArgumentException if {@code name} is not part of the + * ResultSet this row is part of, i.e. if {@code !this.columns().names().contains(name)}. + * @throws InvalidTypeException if column {@code i} is not a list or if its + * elements are not of class {@code elementsClass}. + */ + public List getList(String name, Class elementsClass) { + return getList(metadata.getIdx(name), elementsClass); } - public Set getSet(int i) { + /** + * Returns the {@code i}th value of this row has a set. + * + * @param i the index of the column to retrieve. + * @param elementsClass the class for the elements of the set to retrieve. + * @return the value of the {@code i}th column in this row as a set of + * {@code elementsClass} objects. If the value is NULL, an empty set is + * returned (note that Cassandra makes no difference between an empty set + * and column of type set that is not set). + * + * @throws IndexOutOfBoundsException if {@code i < 0 || i >= this.columns().count()}. 
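// --- Editorial sketch -------------------------------------------------------
// Calling the typed list getter defined above. The Class argument replaces
// the old unchecked cast, and a NULL column now yields an empty list rather
// than null. The "test" table with a list-of-int column "l" mirrors the
// SessionTest changes later in this series.
for (CQLRow row : session.execute("SELECT k, l FROM test")) {
    List l = row.getList("l", Integer.class); // InvalidTypeException if "l" is not a list of int
    System.out.println("l = " + l);           // prints [] when the column is unset
}
// ----------------------------------------------------------------------------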
+ * @throws InvalidTypeException if column {@code i} is not a set or if its + * elements are not of class {@code elementsClass}. + */ + public Set getSet(int i, Class elementsClass) { DataType type = metadata.type(i); if (!(type instanceof DataType.Collection.Set)) throw new InvalidTypeException(String.format("Column %s is not of set type", metadata.name(i))); + DataType.Native eltType = (DataType.Native)((DataType.Collection.Set)type).getElementsType(); + if (!Codec.isCompatible(eltType, elementsClass)) + throw new InvalidTypeException(String.format("Column %s is a %s, cannot be retrieve as a set of %s", metadata.name(i), type, elementsClass)); + ByteBuffer value = data.get(i); if (value == null) - return null; + return Collections.emptySet(); - return (Set)Codec.getCodec(type).compose(value); + return (Set)Codec.getCodec(type).compose(value); } - public Set getSet(String name) { - return getSet(metadata.getIdx(name)); + /** + * Returns the value of column {@code name} has a set. + * + * @param name the name of the column to retrieve. + * @param elementsClass the class for the elements of the set to retrieve. + * @return the value of the {@code i}th column in this row as a set of + * {@code elementsClass} objects. If the value is NULL, an empty set is + * returned (note that Cassandra makes no difference between an empty set + * and column of type set that is not set). + * + * @throws IllegalArgumentException if {@code name} is not part of the + * ResultSet this row is part of, i.e. if {@code !this.columns().names().contains(name)}. + * @throws InvalidTypeException if column {@code i} is not a set or if its + * elements are not of class {@code elementsClass}. + */ + public Set getSet(String name, Class elementsClass) { + return getSet(metadata.getIdx(name), elementsClass); } - public Map getMap(int i) { + /** + * Returns the {@code i}th value of this row has a map. + * + * @param i the index of the column to retrieve. + * @param keysClass the class for the keys of the map to retrieve. + * @param valuesClass the class for the values of the map to retrieve. + * @return the value of the {@code i}th column in this row as a map of + * {@code keysClass} to {@code valuesClass} objects. If the value is NULL, + * an empty map is returned (note that Cassandra makes no difference + * between an empty map and column of type map that is not set). + * + * @throws IndexOutOfBoundsException if {@code i < 0 || i >= this.columns().count()}. + * @throws InvalidTypeException if column {@code i} is not a map, if its + * keys are not of class {@code keysClass} or if its values are not of + * class {@code valuesClass}. 
+ */ + public Map getMap(int i, Class keysClass, Class valuesClass) { DataType type = metadata.type(i); if (!(type instanceof DataType.Collection.Map)) throw new InvalidTypeException(String.format("Column %s is not of map type", metadata.name(i))); + DataType.Collection.Map mapType = (DataType.Collection.Map)type; + DataType.Native keysType = (DataType.Native)mapType.getKeysType(); + DataType.Native valuesType = (DataType.Native)mapType.getValuesType(); + if (!Codec.isCompatible(keysType, keysClass) || !Codec.isCompatible(valuesType, valuesClass)) + throw new InvalidTypeException(String.format("Column %s is a %s, cannot be retrieve as a map of %s -> %s", metadata.name(i), type, keysType, valuesType)); + ByteBuffer value = data.get(i); if (value == null) - return null; + return Collections.emptyMap(); - return (Map)Codec.getCodec(type).compose(value); + return (Map)Codec.getCodec(type).compose(value); } - public Map getMap(String name) { - return getMap(metadata.getIdx(name)); + /** + * Returns the value of column {@code name} has a map. + * + * @param name the name of the column to retrieve. + * @param keysClass the class for the keys of the map to retrieve. + * @param valuesClass the class for the values of the map to retrieve. + * @return the value of the {@code i}th column in this row as a map of + * {@code keysClass} to {@code valuesClass} objects. If the value is NULL, + * an empty map is returned (note that Cassandra makes no difference + * between an empty map and column of type map that is not set). + * + * @throws IllegalArgumentException if {@code name} is not part of the + * ResultSet this row is part of, i.e. if {@code !this.columns().names().contains(name)}. + * @throws InvalidTypeException if column {@code i} is not a map, if its + * keys are not of class {@code keysClass} or if its values are not of + * class {@code valuesClass}. 
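// --- Editorial sketch -------------------------------------------------------
// The map getter above validates both sides of the entry type before
// composing. A map column "m" of int keys to int values follows the
// SessionTest usage further down.
for (CQLRow row : session.execute("SELECT m FROM test")) {
    Map m = row.getMap("m", Integer.class, Integer.class);
    // row.getMap("m", String.class, Integer.class) would throw
    // InvalidTypeException: the declared key type is not compatible with String.
    System.out.println("m = " + m);           // prints {} when the column is unset
}
// ----------------------------------------------------------------------------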
+ */ + public Map getMap(String name, Class keysClass, Class valuesClass) { + return getMap(metadata.getIdx(name), keysClass, valuesClass); } @Override diff --git a/driver-core/src/main/java/com/datastax/driver/core/DataType.java b/driver-core/src/main/java/com/datastax/driver/core/DataType.java index c4a179f727d..d21de2be71a 100644 --- a/driver-core/src/main/java/com/datastax/driver/core/DataType.java +++ b/driver-core/src/main/java/com/datastax/driver/core/DataType.java @@ -88,6 +88,11 @@ public enum Native implements DataType { public Native asNative() { return this; } public Collection asCollection() { throw new IllegalStateException("Not a collection type, but a native one"); } public Custom asCustom() { throw new IllegalStateException("Not a custom type, but a native one"); } + + @Override + public String toString() { + return super.toString().toLowerCase(); + } } /** @@ -123,6 +128,11 @@ public List(DataType elementsType) { public DataType getElementsType() { return elementsType; } + + @Override + public String toString() { + return "list<" + elementsType + ">"; + } } public static class Set extends Collection { @@ -136,6 +146,11 @@ public Set(DataType elementsType) { public DataType getElementsType() { return elementsType; } + + @Override + public String toString() { + return "list<" + elementsType + ">"; + } } public static class Map extends Collection { @@ -155,6 +170,11 @@ public DataType getKeysType() { public DataType getValuesType() { return keysType; } + + @Override + public String toString() { + return "map<" + keysType + ", " + valuesType + ">"; + } } } diff --git a/driver-core/src/main/java/com/datastax/driver/core/ResultSet.java b/driver-core/src/main/java/com/datastax/driver/core/ResultSet.java index d8f79e98d28..1237ebd493c 100644 --- a/driver-core/src/main/java/com/datastax/driver/core/ResultSet.java +++ b/driver-core/src/main/java/com/datastax/driver/core/ResultSet.java @@ -118,7 +118,6 @@ public List fetchAll() { * this ResultSet. 
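// --- Editorial note ---------------------------------------------------------
// The Set.toString added in the DataType hunk above reuses the "list<"
// prefix, so a set of text would render as "list<text>" in error messages.
// Presumably it was meant to read:
@Override
public String toString() {
    return "set<" + elementsType + ">";
}
// ----------------------------------------------------------------------------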
*/ public Iterator iterator() { - return new Iterator() { public boolean hasNext() { diff --git a/driver-core/src/main/java/com/datastax/driver/core/transport/Codec.java b/driver-core/src/main/java/com/datastax/driver/core/transport/Codec.java index f414fd07426..22712805e62 100644 --- a/driver-core/src/main/java/com/datastax/driver/core/transport/Codec.java +++ b/driver-core/src/main/java/com/datastax/driver/core/transport/Codec.java @@ -1,7 +1,10 @@ package com.datastax.driver.core.transport; -import java.util.HashMap; -import java.util.Map; +import java.math.BigDecimal; +import java.math.BigInteger; +import java.net.InetAddress; +import java.nio.ByteBuffer; +import java.util.*; import com.datastax.driver.core.DataType; @@ -13,25 +16,24 @@ */ public class Codec { - private static Map, DataType.Native> rawNativeMap = new HashMap, DataType.Native>(); - static { - rawNativeMap.put(AsciiType.instance, DataType.Native.ASCII); - rawNativeMap.put(LongType.instance, DataType.Native.BIGINT); - rawNativeMap.put(BytesType.instance, DataType.Native.BLOB); - rawNativeMap.put(BooleanType.instance, DataType.Native.BOOLEAN); - rawNativeMap.put(CounterColumnType.instance, DataType.Native.COUNTER); - rawNativeMap.put(DecimalType.instance, DataType.Native.DECIMAL); - rawNativeMap.put(DoubleType.instance, DataType.Native.DOUBLE); - rawNativeMap.put(FloatType.instance, DataType.Native.FLOAT); - rawNativeMap.put(InetAddressType.instance, DataType.Native.INET); - rawNativeMap.put(Int32Type.instance, DataType.Native.INT); - rawNativeMap.put(UTF8Type.instance, DataType.Native.TEXT); - rawNativeMap.put(DateType.instance, DataType.Native.TIMESTAMP); - rawNativeMap.put(UUIDType.instance, DataType.Native.UUID); - rawNativeMap.put(UTF8Type.instance, DataType.Native.VARCHAR); - rawNativeMap.put(IntegerType.instance, DataType.Native.VARINT); - rawNativeMap.put(TimeUUIDType.instance, DataType.Native.TIMEUUID); - } + private static Map, DataType.Native> rawNativeMap = new HashMap, DataType.Native>() {{ + put(AsciiType.instance, DataType.Native.ASCII); + put(LongType.instance, DataType.Native.BIGINT); + put(BytesType.instance, DataType.Native.BLOB); + put(BooleanType.instance, DataType.Native.BOOLEAN); + put(CounterColumnType.instance, DataType.Native.COUNTER); + put(DecimalType.instance, DataType.Native.DECIMAL); + put(DoubleType.instance, DataType.Native.DOUBLE); + put(FloatType.instance, DataType.Native.FLOAT); + put(InetAddressType.instance, DataType.Native.INET); + put(Int32Type.instance, DataType.Native.INT); + put(UTF8Type.instance, DataType.Native.TEXT); + put(DateType.instance, DataType.Native.TIMESTAMP); + put(UUIDType.instance, DataType.Native.UUID); + put(UTF8Type.instance, DataType.Native.VARCHAR); + put(IntegerType.instance, DataType.Native.VARINT); + put(TimeUUIDType.instance, DataType.Native.TIMEUUID); + }}; private Codec() {} @@ -116,4 +118,26 @@ public static DataType rawTypeToDataType(AbstractType rawType) { // TODO: handle custom return null; } + + public static boolean isCompatible(DataType.Native type, Class klass) { + switch (type) { + case ASCII: return klass.isAssignableFrom(String.class); + case BIGINT: return klass.isAssignableFrom(Long.class); + case BLOB: return klass.isAssignableFrom(ByteBuffer.class); + case BOOLEAN: return klass.isAssignableFrom(Boolean.class); + case COUNTER: return klass.isAssignableFrom(Long.class); + case DECIMAL: return klass.isAssignableFrom(BigDecimal.class); + case DOUBLE: return klass.isAssignableFrom(Double.class); + case FLOAT: return 
klass.isAssignableFrom(Float.class); + case INET: return klass.isAssignableFrom(InetAddress.class); + case INT: return klass.isAssignableFrom(Integer.class); + case TEXT: return klass.isAssignableFrom(String.class); + case TIMESTAMP: return klass.isAssignableFrom(Date.class); + case UUID: return klass.isAssignableFrom(UUID.class); + case VARCHAR: return klass.isAssignableFrom(String.class); + case VARINT: return klass.isAssignableFrom(BigInteger.class); + case TIMEUUID: return klass.isAssignableFrom(UUID.class); + default: throw new RuntimeException("Unknown native type"); + } + } } diff --git a/driver-core/src/test/java/com/datastax/driver/core/SessionTest.java b/driver-core/src/test/java/com/datastax/driver/core/SessionTest.java index 0576e1dcf19..14b14e10019 100644 --- a/driver-core/src/test/java/com/datastax/driver/core/SessionTest.java +++ b/driver-core/src/test/java/com/datastax/driver/core/SessionTest.java @@ -119,9 +119,9 @@ public void CollectionsTest() throws Exception { session.execute("INSERT INTO test (k, l, s, m) VALUES ('k', [3, 2, 1], { 3, 2, 1}, { 0 : 0, 1 : 1 })"); for (CQLRow row : session.execute("SELECT * FROM test")) { - List l = (List)row.getList("l"); - Set s = (Set)row.getSet("s"); - Map m = (Map)row.getMap("m"); + List l = row.getList("l", Integer.class); + Set s = row.getSet("s", String.class); + Map m = row.getMap("m", Integer.class, Integer.class); System.out.println("l = " + l); System.out.println("s = " + s); @@ -137,9 +137,9 @@ public void CollectionsTest() throws Exception { session.executePrepared(stmt); for (CQLRow row : session.execute("SELECT * FROM test WHERE k = 'k2'")) { - List l = (List)row.getList("l"); - Set s = (Set)row.getSet("s"); - Map m = (Map)row.getMap("m"); + List l = row.getList("l", Integer.class); + Set s = row.getSet("s", String.class); + Map m = row.getMap("m", Integer.class, Integer.class); System.out.println("l = " + l); System.out.println("s = " + s); From e803bb87d7cf66e873d00dc1e9191fe2b26c9854 Mon Sep 17 00:00:00 2001 From: Sylvain Lebresne Date: Thu, 4 Oct 2012 12:33:19 +0200 Subject: [PATCH 035/719] Fix typos --- .../src/main/java/com/datastax/driver/core/CQLRow.java | 2 +- .../main/java/com/datastax/driver/core/DataType.java | 2 +- .../java/com/datastax/driver/core/transport/Codec.java | 2 +- .../java/com/datastax/driver/core/SessionTest.java | 10 +++++----- 4 files changed, 8 insertions(+), 8 deletions(-) diff --git a/driver-core/src/main/java/com/datastax/driver/core/CQLRow.java b/driver-core/src/main/java/com/datastax/driver/core/CQLRow.java index 8beeb7361ef..88b0f7ef333 100644 --- a/driver-core/src/main/java/com/datastax/driver/core/CQLRow.java +++ b/driver-core/src/main/java/com/datastax/driver/core/CQLRow.java @@ -648,7 +648,7 @@ public Map getMap(int i, Class keysClass, Class valuesClass) DataType.Native keysType = (DataType.Native)mapType.getKeysType(); DataType.Native valuesType = (DataType.Native)mapType.getValuesType(); if (!Codec.isCompatible(keysType, keysClass) || !Codec.isCompatible(valuesType, valuesClass)) - throw new InvalidTypeException(String.format("Column %s is a %s, cannot be retrieve as a map of %s -> %s", metadata.name(i), type, keysType, valuesType)); + throw new InvalidTypeException(String.format("Column %s is a %s, cannot be retrieve as a map of %s -> %s", metadata.name(i), type, keysClass, valuesClass)); ByteBuffer value = data.get(i); if (value == null) diff --git a/driver-core/src/main/java/com/datastax/driver/core/DataType.java b/driver-core/src/main/java/com/datastax/driver/core/DataType.java 
index d21de2be71a..0e6f04a9a52 100644 --- a/driver-core/src/main/java/com/datastax/driver/core/DataType.java +++ b/driver-core/src/main/java/com/datastax/driver/core/DataType.java @@ -168,7 +168,7 @@ public DataType getKeysType() { } public DataType getValuesType() { - return keysType; + return valuesType; } @Override diff --git a/driver-core/src/main/java/com/datastax/driver/core/transport/Codec.java b/driver-core/src/main/java/com/datastax/driver/core/transport/Codec.java index 22712805e62..f9f2ed63cdc 100644 --- a/driver-core/src/main/java/com/datastax/driver/core/transport/Codec.java +++ b/driver-core/src/main/java/com/datastax/driver/core/transport/Codec.java @@ -81,7 +81,7 @@ private static AbstractType collectionCodec(DataType.Collection type) { case MAP: DataType.Collection.Map mt = (DataType.Collection.Map)type; AbstractType mapKeys = getCodec(mt.getKeysType()); - AbstractType mapValues = getCodec(mt.getKeysType()); + AbstractType mapValues = getCodec(mt.getValuesType()); return MapType.getInstance(mapKeys, mapValues); default: throw new RuntimeException("Unknown collection type"); diff --git a/driver-core/src/test/java/com/datastax/driver/core/SessionTest.java b/driver-core/src/test/java/com/datastax/driver/core/SessionTest.java index 14b14e10019..c00f3995ef8 100644 --- a/driver-core/src/test/java/com/datastax/driver/core/SessionTest.java +++ b/driver-core/src/test/java/com/datastax/driver/core/SessionTest.java @@ -111,17 +111,17 @@ public void CollectionsTest() throws Exception { // We should deal with that sleep try { Thread.sleep(1000); } catch (Exception e) {} session.execute("USE test_ks"); - session.execute("CREATE TABLE test (k text PRIMARY KEY, l list, s set, m map)"); + session.execute("CREATE TABLE test (k text PRIMARY KEY, l list, s set, m map)"); } catch (Exception e) { // Skip if already created session.execute("USE test_ks"); } - session.execute("INSERT INTO test (k, l, s, m) VALUES ('k', [3, 2, 1], { 3, 2, 1}, { 0 : 0, 1 : 1 })"); + session.execute("INSERT INTO test (k, l, s, m) VALUES ('k', [3, 2, 1], { 3, 2, 1}, { 1349286846012 : 2 })"); for (CQLRow row : session.execute("SELECT * FROM test")) { List l = row.getList("l", Integer.class); Set s = row.getSet("s", String.class); - Map m = row.getMap("m", Integer.class, Integer.class); + Map m = row.getMap("m", Date.class, Integer.class); System.out.println("l = " + l); System.out.println("s = " + s); @@ -133,13 +133,13 @@ public void CollectionsTest() throws Exception { BoundStatement stmt = session.prepare("INSERT INTO test (k, l, s, m) VALUES ('k2', ?, ?, ?)").newBoundStatement(); stmt.setList(0, Arrays.asList(new Integer[]{ 5, 4, 3, 2, 1 })); stmt.setSet(1, new HashSet(Arrays.asList(new String[]{ "5", "4", "3", "2", "1" }))); - stmt.setMap(2, new HashMap(){{ put(3, 4); put(1, 42); }}); + stmt.setMap(2, new HashMap(){{ put(new Date(1349286846012L), 4); }}); session.executePrepared(stmt); for (CQLRow row : session.execute("SELECT * FROM test WHERE k = 'k2'")) { List l = row.getList("l", Integer.class); Set s = row.getSet("s", String.class); - Map m = row.getMap("m", Integer.class, Integer.class); + Map m = row.getMap("m", Date.class, Integer.class); System.out.println("l = " + l); System.out.println("s = " + s); From 91e9bc2b599b40e8a06fbbadee549fb190f59e38 Mon Sep 17 00:00:00 2001 From: Sylvain Lebresne Date: Fri, 5 Oct 2012 11:15:23 +0200 Subject: [PATCH 036/719] Make sure we don't miss new nodes when the control connection is down --- .../java/com/datastax/driver/core/CQLRow.java | 37 ++++++++++++++ 
.../com/datastax/driver/core/Cluster.java | 8 +-- .../driver/core/ControlConnection.java | 50 +++++++++++++++++-- 3 files changed, 87 insertions(+), 8 deletions(-) diff --git a/driver-core/src/main/java/com/datastax/driver/core/CQLRow.java b/driver-core/src/main/java/com/datastax/driver/core/CQLRow.java index 88b0f7ef333..94bd7608dea 100644 --- a/driver-core/src/main/java/com/datastax/driver/core/CQLRow.java +++ b/driver-core/src/main/java/com/datastax/driver/core/CQLRow.java @@ -2,6 +2,7 @@ import java.math.BigDecimal; import java.math.BigInteger; +import java.net.InetAddress; import java.nio.ByteBuffer; import java.util.*; @@ -519,6 +520,42 @@ public UUID getUUID(String name) { return getUUID(metadata.getIdx(name)); } + /** + * Returns the {@code i}th value of this row has an InetAddress. + * + * @param i the index of the column to retrieve. + * @return the value of the {@code i}th column in this row as an InetAddress. + * If the value is NULL, {@code null} is returned. + * + * @throws IndexOutOfBoundsException if {@code i < 0 || i >= this.columns().count()}. + * @throws InvalidTypeException if column {@code i} is not of type INET. + */ + public InetAddress getInet(int i) { + DataType type = metadata.checkType(i, DataType.Native.INET); + + ByteBuffer value = data.get(i); + if (value == null || value.remaining() == 0) + return null; + + return InetAddressType.instance.compose(value); + } + + /** + * Returns the value of column {@code name} has an InetAddress. + * + * @param name the name of the column to retrieve. + * @return the value of column {@code name} as an InetAddress. + * If the value is NULL, {@code null} is returned. + * + * @throws IllegalArgumentException if {@code name} is not part of the + * ResultSet this row is part of, i.e. if {@code !this.columns().names().contains(name)}. + * @throws InvalidTypeException if column {@code name} is not of type + * INET. + */ + public InetAddress getInet(String name) { + return getInet(metadata.getIdx(name)); + } + /** * Returns the {@code i}th value of this row has a list. * diff --git a/driver-core/src/main/java/com/datastax/driver/core/Cluster.java b/driver-core/src/main/java/com/datastax/driver/core/Cluster.java index 1daf47d24e7..76e92f33056 100644 --- a/driver-core/src/main/java/com/datastax/driver/core/Cluster.java +++ b/driver-core/src/main/java/com/datastax/driver/core/Cluster.java @@ -41,6 +41,11 @@ public class Cluster { private static final Logger logger = LoggerFactory.getLogger(Cluster.class); + /** + * The default cassandra port for the native client protocol. 
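// --- Editorial sketch -------------------------------------------------------
// The new INET accessor added to CQLRow above, as the control connection
// below uses it to read system.peers. isNull guards rows whose peer column
// is empty; the query text matches SELECT_PEERS in ControlConnection.
for (CQLRow row : session.execute("SELECT peer FROM system.peers")) {
    if (!row.isNull("peer"))
        System.out.println("peer = " + row.getInet("peer"));
}
// ----------------------------------------------------------------------------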
+ */ + public static final int DEFAULT_PORT = 9042; + final Manager manager; private Cluster(List contactPoints) { @@ -153,9 +158,6 @@ public interface Configuration { */ public static class Builder implements Configuration { - // TODO: might not be the best default port, look at changing in C* - private static final int DEFAULT_PORT = 8000; - private List addresses = new ArrayList(); public List contactPoints() { diff --git a/driver-core/src/main/java/com/datastax/driver/core/ControlConnection.java b/driver-core/src/main/java/com/datastax/driver/core/ControlConnection.java index 9eff25ebdbe..7e977268560 100644 --- a/driver-core/src/main/java/com/datastax/driver/core/ControlConnection.java +++ b/driver-core/src/main/java/com/datastax/driver/core/ControlConnection.java @@ -1,8 +1,7 @@ package com.datastax.driver.core; -import java.util.Arrays; -import java.util.Iterator; -import java.util.List; +import java.net.InetSocketAddress; +import java.util.*; import java.util.concurrent.atomic.AtomicReference; import java.util.concurrent.*; @@ -14,6 +13,8 @@ import org.apache.cassandra.transport.messages.RegisterMessage; import org.apache.cassandra.transport.messages.QueryMessage; +import com.google.common.collect.Sets; + import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -25,6 +26,8 @@ class ControlConnection implements Host.StateListener { private static final String SELECT_COLUMN_FAMILIES = "SELECT * FROM system.schema_columnfamilies"; private static final String SELECT_COLUMNS = "SELECT * FROM system.schema_columns"; + private static final String SELECT_PEERS = "SELECT peer FROM system.peers"; + private final AtomicReference connectionRef = new AtomicReference(); private final Cluster.Manager cluster; @@ -104,12 +107,12 @@ private Connection tryConnect(Host host) throws ConnectionException { logger.trace("[Control connection] Refreshing schema"); refreshSchema(connection); - // TODO: also catch up on potentially missed nodes (and node that happens to be up but not known to us) + refreshNodeList(connection); return connection; } private void refreshSchema(Connection connection) { - // Make sure we're up to date on metadata + // Make sure we're up to date on schema try { ResultSet.Future ksFuture = new ResultSet.Future(null, new QueryMessage(SELECT_KEYSPACES)); ResultSet.Future cfFuture = new ResultSet.Future(null, new QueryMessage(SELECT_COLUMN_FAMILIES)); @@ -132,6 +135,43 @@ private void refreshSchema(Connection connection) { } } + private void refreshNodeList(Connection connection) { + // Make sure we're up to date on node list + try { + ResultSet.Future peersFuture = new ResultSet.Future(null, new QueryMessage(SELECT_PEERS)); + connection.write(peersFuture); + + Set knownHosts = new HashSet(); + for (Host host : cluster.metadata.allHosts()) + knownHosts.add(host.getAddress()); + + Set foundHosts = new HashSet(); + for (CQLRow row : peersFuture.get()) { + if (!row.isNull("peer")) + // TODO: find what port people are using + foundHosts.add(new InetSocketAddress(row.getInet("peer"), Cluster.DEFAULT_PORT)); + } + + // Adds all those we don't know about + for (InetSocketAddress address : Sets.difference(foundHosts, knownHosts)) + cluster.addHost(address, true); + + // Removes all those that seems to have been removed (since we lost the control connection) + for (InetSocketAddress address : Sets.difference(knownHosts, foundHosts)) + cluster.removeHost(cluster.metadata.getHost(address)); + + } catch (ConnectionException e) { + logger.debug(String.format("[Control connection] Connection 
error when refeshing hosts list (%s)", e.getMessage())); + reconnect(); + } catch (ExecutionException e) { + logger.error("[Control connection] Unexpected error while refeshing hosts list", e); + reconnect(); + } catch (InterruptedException e) { + // TODO: it's bad to do that but at the same time it's annoying to be interrupted + throw new RuntimeException(e); + } + } + public void onUp(Host host) { balancingPolicy.onUp(host); } From 333f6e80c968d539a099d4f3225a99badfd1b0d2 Mon Sep 17 00:00:00 2001 From: Sylvain Lebresne Date: Fri, 5 Oct 2012 16:41:29 +0200 Subject: [PATCH 037/719] Improve javadoc and implements some missing methods --- .../datastax/driver/core/BoundStatement.java | 458 +++++++++++++++++- .../java/com/datastax/driver/core/CQLRow.java | 6 +- .../com/datastax/driver/core/Cluster.java | 2 +- .../datastax/driver/core/ColumnMetadata.java | 15 + .../com/datastax/driver/core/Columns.java | 71 ++- .../driver/core/ControlConnection.java | 2 + .../driver/core/ConvictionPolicy.java | 2 + .../com/datastax/driver/core/DataType.java | 50 ++ .../driver/core/LoadBalancingPolicy.java | 4 +- .../driver/core/PreparedStatement.java | 18 + .../driver/core/ReconnectionPolicy.java | 74 +++ .../com/datastax/driver/core/RetryPolicy.java | 1 + .../driver/core/RetryingCallback.java | 3 +- .../com/datastax/driver/core/Session.java | 16 +- .../exceptions/InvalidQueryException.java | 2 +- .../exceptions/NoHostAvailableException.java | 4 +- .../exceptions/QueryTimeoutException.java | 19 + .../core/exceptions/ReadTimeoutException.java | 16 + .../core/exceptions/UnavailableException.java | 19 + .../driver/core/utils/RoundRobinPolicy.java | 2 +- .../com/datastax/driver/core/SessionTest.java | 148 +++--- 21 files changed, 836 insertions(+), 96 deletions(-) diff --git a/driver-core/src/main/java/com/datastax/driver/core/BoundStatement.java b/driver-core/src/main/java/com/datastax/driver/core/BoundStatement.java index 2c7834a52f6..134fc229e30 100644 --- a/driver-core/src/main/java/com/datastax/driver/core/BoundStatement.java +++ b/driver-core/src/main/java/com/datastax/driver/core/BoundStatement.java @@ -2,6 +2,7 @@ import java.math.BigDecimal; import java.math.BigInteger; +import java.net.InetAddress; import java.nio.ByteBuffer; import java.util.*; @@ -68,20 +69,137 @@ public boolean isSet(String name) { return isSet(metadata().getIdx(name)); } + /** + * Bound values to the variables of this statement. + * + * This method provides a convenience to bound all the variables of the + * {@code BoundStatement} in one call. + * + * @param values the values to bind to the variables of the newly created + * BoundStatement. The first element of {@code values} will be bound to the + * first bind variable, etc.. It is legal to provide less values than the + * statement has bound variables. In that case, the remaining variable need + * to be bound before execution. If more values than variables are provided + * however, an IllegalArgumentException wil be raised. + * @return this bound statement. + * + * @throws IllegalArgumentException if more {@code values} are provided + * than there is of bound variables in this statement. + * @throws InvalidTypeException if any of the provided value is not of + * correct type to be bound to the corresponding bind variable. + */ public BoundStatement bind(Object... 
values) { - // TODO - return null; + + if (values.length > statement.variables().count()) + throw new IllegalArgumentException(String.format("Prepared statement has only %d variables, %d values provided", statement.variables().count(), values.length)); + + for (int i = 0; i < values.length; i++) + { + Object toSet = values[i]; + DataType columnType = statement.variables().type(i); + switch (columnType.kind()) + { + case NATIVE: + if (!Codec.isCompatible(columnType.asNative(), toSet.getClass())) + throw new InvalidTypeException(String.format("Invalid type for value %d, column type is %s but %s provided", i, columnType, toSet.getClass())); + break; + case COLLECTION: + switch (columnType.asCollection().collectionType()) + { + case LIST: + if (!(toSet instanceof List)) + throw new InvalidTypeException(String.format("Invalid type for value %d, column is a list but %s provided", i, toSet.getClass())); + + List l = (List)toSet; + // If the list is empty, it will never fail validation, but otherwise we should check the list given if of the right type + if (!l.isEmpty()) { + // Ugly? Yes + Class klass = l.get(0).getClass(); + DataType.Native eltType = (DataType.Native)((DataType.Collection.List)columnType).getElementsType(); + if (!Codec.isCompatible(eltType, klass)) + throw new InvalidTypeException(String.format("Invalid type for value %d, column type is %s but provided list value are %s", i, columnType, klass)); + } + break; + case SET: + if (!(toSet instanceof Set)) + throw new InvalidTypeException(String.format("Invalid type for value %d, column is a set but %s provided", i, toSet.getClass())); + + Set s = (Set)toSet; + // If the list is empty, it will never fail validation, but otherwise we should check the list given if of the right type + if (!s.isEmpty()) { + // Ugly? Yes + Class klass = s.iterator().next().getClass(); + DataType.Native eltType = (DataType.Native)((DataType.Collection.List)columnType).getElementsType(); + if (!Codec.isCompatible(eltType, klass)) + throw new InvalidTypeException(String.format("Invalid type for value %d, column type is %s but provided set value are %s", i, columnType, klass)); + } + break; + case MAP: + if (!(toSet instanceof Map)) + throw new InvalidTypeException(String.format("Invalid type for value %d, column is a map but %s provided", i, toSet.getClass())); + + Map m = (Map)toSet; + // If the list is empty, it will never fail validation, but otherwise we should check the list given if of the right type + if (!m.isEmpty()) { + // Ugly? Yes + Map.Entry entry = (Map.Entry)m.entrySet().iterator().next(); + Class keysClass = entry.getKey().getClass(); + Class valuesClass = entry.getValue().getClass(); + + DataType.Collection.Map mapType = (DataType.Collection.Map)columnType; + DataType.Native keysType = (DataType.Native)mapType.getKeysType(); + DataType.Native valuesType = (DataType.Native)mapType.getValuesType(); + if (!Codec.isCompatible(keysType, keysClass) || !Codec.isCompatible(valuesType, valuesClass)) + throw new InvalidTypeException(String.format("Invalid type for value %d, column type %s conflicts with provided type %s", i, mapType, toSet.getClass())); + } + break; + + } + break; + case CUSTOM: + // TODO: Not sure how to handle that though + throw new UnsupportedOperationException(); + } + setValue(i, Codec.getCodec(columnType).decompose(toSet)); + } + return this; } + /** + * Set the {@code i}th value to the provided boolean. + * + * @return this BoundStatement. 
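// --- Editorial sketch -------------------------------------------------------
// Driving the bind(Object...) implementation above. The statement text and
// column types are illustrative; prepare(...).newBoundStatement() and
// executePrepared(...) are the entry points SessionTest uses.
BoundStatement bs = session.prepare("INSERT INTO test (k, l) VALUES (?, ?)").newBoundStatement();
bs.bind("k3", Arrays.asList(1, 2, 3));   // one value per variable, each checked against the column type
session.executePrepared(bs);
// bs.bind("k3", Arrays.asList(1), "extra") would throw IllegalArgumentException:
// more values than the statement has variables.
// ----------------------------------------------------------------------------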
+ * + * @throws IndexOutOfBoundsException if {@code i < 0 || i >= this.preparedStatement().variables().count()}. + * @throws InvalidTypeException if column {@code i} is not of type BOOLEAN. + */ public BoundStatement setBool(int i, boolean v) { metadata().checkType(i, DataType.Native.BOOLEAN); return setValue(i, BooleanType.instance.decompose(v)); } + /** + * Set the value for column {@code name} to the provided boolean. + * + * @return this BoundStatement. + * + * @throws IllegalArgumentException if {@code name} is not a prepared + * variable, i.e. if {@code !this.preparedStatement().variables().names().contains(name)}. + * @throws InvalidTypeException if column {@code name} is not of type BOOLEAN. + */ public BoundStatement setBool(String name, boolean v) { return setBool(metadata().getIdx(name), v); } + /** + * Set the {@code i}th value to the provided integer. + * + * @return this BoundStatement. + * + * @throws IndexOutOfBoundsException if {@code i < 0 || i >= this.preparedStatement().variables().count()}. + * @throws InvalidTypeException if column {@code i} is of neither of the + * following types: INT, TIMESTAMP, BIGINT, COUNTER or VARINT. + */ public BoundStatement setInt(int i, int v) { DataType.Native type = metadata().checkType(i, DataType.Native.INT, DataType.Native.TIMESTAMP, @@ -103,10 +221,29 @@ public BoundStatement setInt(int i, int v) { } } + /** + * Set the value for column {@code name} to the provided integer. + * + * @return this BoundStatement. + * + * @throws IllegalArgumentException if {@code name} is not a prepared + * variable, i.e. if {@code !this.preparedStatement().variables().names().contains(name)}. + * @throws InvalidTypeException if column {@code name} is of neither of the + * following types: INT, TIMESTAMP, BIGINT, COUNTER or VARINT. + */ public BoundStatement setInt(String name, int v) { return setInt(metadata().getIdx(name), v); } + /** + * Set the {@code i}th value to the provided long. + * + * @return this BoundStatement. + * + * @throws IndexOutOfBoundsException if {@code i < 0 || i >= this.preparedStatement().variables().count()}. + * @throws InvalidTypeException if column {@code i} is of neither of the + * following types: BIGINT, TIMESTAMP, COUNTER or VARINT. + */ public BoundStatement setLong(int i, long v) { DataType.Native type = metadata().checkType(i, DataType.Native.BIGINT, DataType.Native.TIMESTAMP, @@ -125,19 +262,55 @@ public BoundStatement setLong(int i, long v) { } } + /** + * Set the value for column {@code name} to the provided long. + * + * @return this BoundStatement. + * + * @throws IllegalArgumentException if {@code name} is not a prepared + * variable, i.e. if {@code !this.preparedStatement().variables().names().contains(name)}. + * @throws InvalidTypeException if column {@code name} is of neither of the + * following types: BIGINT, TIMESTAMP, COUNTER or VARINT. + */ public BoundStatement setLong(String name, long v) { return setLong(metadata().getIdx(name), v); } + /** + * Set the {@code i}th value to the provided date. + * + * @return this BoundStatement. + * + * @throws IndexOutOfBoundsException if {@code i < 0 || i >= this.preparedStatement().variables().count()}. + * @throws InvalidTypeException if column {@code i} is not of type TIMESTAMP. + */ public BoundStatement setDate(int i, Date v) { metadata().checkType(i, DataType.Native.TIMESTAMP); return setValue(i, DateType.instance.decompose(v)); } + /** + * Set the value for column {@code name} to the provided date. + * + * @return this BoundStatement. 
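// --- Editorial sketch -------------------------------------------------------
// The widening that the setInt/setLong javadoc above spells out: the
// serialization is picked from the column's declared type, not from the
// Java value's type. Variable indices here are illustrative.
bs.setInt(0, 42);      // accepted on INT, TIMESTAMP, BIGINT, COUNTER or VARINT columns
bs.setLong(1, 42L);    // accepted on BIGINT, TIMESTAMP, COUNTER or VARINT columns
// bs.setLong(i, 42L) on an INT column throws InvalidTypeException:
// longs are never narrowed implicitly.
// ----------------------------------------------------------------------------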
+ * + * @throws IllegalArgumentException if {@code name} is not a prepared + * variable, i.e. if {@code !this.preparedStatement().variables().names().contains(name)}. + * @throws InvalidTypeException if column {@code name} is not of type TIMESTAMP. + */ public BoundStatement setDate(String name, Date v) { return setDate(metadata().getIdx(name), v); } + /** + * Set the {@code i}th value to the provided float. + * + * @return this BoundStatement. + * + * @throws IndexOutOfBoundsException if {@code i < 0 || i >= this.preparedStatement().variables().count()}. + * @throws InvalidTypeException if column {@code i} is of neither of the + * following types: FLOAT, DOUBLE or DECIMAL. + */ public BoundStatement setFloat(int i, float v) { DataType.Native type = metadata().checkType(i, DataType.Native.FLOAT, DataType.Native.DOUBLE, @@ -155,10 +328,29 @@ public BoundStatement setFloat(int i, float v) { } } + /** + * Set the value for column {@code name} to the provided float. + * + * @return this BoundStatement. + * + * @throws IllegalArgumentException if {@code name} is not a prepared + * variable, i.e. if {@code !this.preparedStatement().variables().names().contains(name)}. + * @throws InvalidTypeException if column {@code name} is of neither of the + * following types: FLOAT, DOUBLE or DECIMAL. + */ public BoundStatement setFloat(String name, float v) { return setFloat(metadata().getIdx(name), v); } + /** + * Set the {@code i}th value to the provided double. + * + * @return this BoundStatement. + * + * @throws IndexOutOfBoundsException if {@code i < 0 || i >= this.preparedStatement().variables().count()}. + * @throws InvalidTypeException if column {@code i} is of neither of the + * following types: DOUBLE or DECIMAL. + */ public BoundStatement setDouble(int i, double v) { DataType.Native type = metadata().checkType(i, DataType.Native.DOUBLE, DataType.Native.DECIMAL); @@ -172,10 +364,29 @@ public BoundStatement setDouble(int i, double v) { } } + /** + * Set the value for column {@code name} to the provided double. + * + * @return this BoundStatement. + * + * @throws IllegalArgumentException if {@code name} is not a prepared + * variable, i.e. if {@code !this.preparedStatement().variables().names().contains(name)}. + * @throws InvalidTypeException if column {@code name} is of neither of the + * following types: DOUBLE or DECIMAL. + */ public BoundStatement setDouble(String name, double v) { return setDouble(metadata().getIdx(name), v); } + /** + * Set the {@code i}th value to the provided string. + * + * @return this BoundStatement. + * + * @throws IndexOutOfBoundsException if {@code i < 0 || i >= this.preparedStatement().variables().count()}. + * @throws InvalidTypeException if column {@code i} is of neither of the + * following types: VARCHAR, TEXT or ASCII. + */ public BoundStatement setString(int i, String v) { DataType.Native type = metadata().checkType(i, DataType.Native.VARCHAR, DataType.Native.TEXT, @@ -191,57 +402,245 @@ public BoundStatement setString(int i, String v) { } } + /** + * Set the value for column {@code name} to the provided string. + * + * @return this BoundStatement. + * + * @throws IllegalArgumentException if {@code name} is not a prepared + * variable, i.e. if {@code !this.preparedStatement().variables().names().contains(name)}. + * @throws InvalidTypeException if column {@code name} is of neither of the + * following types: VARCHAR, TEXT or ASCII. 
+ */ public BoundStatement setString(String name, String v) { return setString(metadata().getIdx(name), v); } + /** + * Set the {@code i}th value to the provided byte buffer. + * + * This method validate that the type of the column set is BLOB. If you + * want to insert manually serialized data into columns of another type, + * use {@link #setByteBufferUnsafe} instead. + * + * @return this BoundStatement. + * + * @throws IndexOutOfBoundsException if {@code i < 0 || i >= this.preparedStatement().variables().count()}. + * @throws InvalidTypeException if column {@code i} is not of type BLOB. + */ public BoundStatement setByteBuffer(int i, ByteBuffer v) { - return setValue(i, v.duplicate()); + DataType.Native type = metadata().checkType(i, DataType.Native.BLOB); + return setByteBufferUnsafe(i, v); } + /** + * Set the value for column {@code name} to the provided byte buffer. + * + * This method validate that the type of the column set is BLOB. If you + * want to insert manually serialized data into columns of another type, + * use {@link #setByteBufferUnsafe} instead. + * + * @return this BoundStatement. + * + * @throws IllegalArgumentException if {@code name} is not a prepared + * variable, i.e. if {@code !this.preparedStatement().variables().names().contains(name)}. + * @throws InvalidTypeException if column {@code name} is not of type BLOB. + */ public BoundStatement setByteBuffer(String name, ByteBuffer v) { return setByteBuffer(metadata().getIdx(name), v); } + /** + * Set the {@code i}th value to the provided byte buffer. + * + * Contrarily to {@link #setByteBuffer}, this method does not check the + * type of the column set. If you insert data that is not compatible with + * the type of the column, you will get an {@code InvalidQueryException} at + * execute time. + * + * @return this BoundStatement. + * + * @throws IndexOutOfBoundsException if {@code i < 0 || i >= this.preparedStatement().variables().count()}. + */ + public BoundStatement setByteBufferUnsafe(int i, ByteBuffer v) { + return setValue(i, v.duplicate()); + } + + /** + * Set the value for column {@code name} to the provided byte buffer. + * + * Contrarily to {@link #setByteBuffer}, this method does not check the + * type of the column set. If you insert data that is not compatible with + * the type of the column, you will get an {@code InvalidQueryException} at + * execute time. + * + * @return this BoundStatement. + * + * @throws IllegalArgumentException if {@code name} is not a prepared + * variable, i.e. if {@code !this.preparedStatement().variables().names().contains(name)}. + */ + public BoundStatement setByteBufferUnsafe(String name, ByteBuffer v) { + return setByteBufferUnsafe(metadata().getIdx(name), v); + } + + /** + * Set the {@code i}th value to the provided byte array. + * + * This method validate that the type of the column set is BLOB. If you + * want to insert manually serialized data into columns of another type, + * use {@link #setByteBufferUnsafe} instead. + * + * @return this BoundStatement. + * + * @throws IndexOutOfBoundsException if {@code i < 0 || i >= this.preparedStatement().variables().count()}. + * @throws InvalidTypeException if column {@code i} is not of type BLOB. + */ public BoundStatement setBytes(int i, byte[] v) { return setValue(i, ByteBuffer.wrap(v)); } + /** + * Set the value for column {@code name} to the provided byte array. + * + * This method validate that the type of the column set is BLOB. 
If you + * want to insert manually serialized data into columns of another type, + * use {@link #setByteBufferUnsafe} instead. + * + * @return this BoundStatement. + * + * @throws IllegalArgumentException if {@code name} is not a prepared + * variable, i.e. if {@code !this.preparedStatement().variables().names().contains(name)}. + * @throws InvalidTypeException if column {@code name} is not of type BLOB. + */ public BoundStatement setBytes(String name, byte[] v) { return setBytes(metadata().getIdx(name), v); } + /** + * Set the {@code i}th value to the provided big integer. + * + * @return this BoundStatement. + * + * @throws IndexOutOfBoundsException if {@code i < 0 || i >= this.preparedStatement().variables().count()}. + * @throws InvalidTypeException if column {@code i} is not of type VARINT. + */ public BoundStatement setVarInt(int i, BigInteger v) { metadata().checkType(i, DataType.Native.VARINT); return setValue(i, IntegerType.instance.decompose(v)); } + /** + * Set the value for column {@code name} to the provided big integer. + * + * @return this BoundStatement. + * + * @throws IllegalArgumentException if {@code name} is not a prepared + * variable, i.e. if {@code !this.preparedStatement().variables().names().contains(name)}. + * @throws InvalidTypeException if column {@code name} is not of type VARINT. + */ public BoundStatement setVarInt(String name, BigInteger v) { return setVarInt(metadata().getIdx(name), v); } + /** + * Set the {@code i}th value to the provided big decimal. + * + * @return this BoundStatement. + * + * @throws IndexOutOfBoundsException if {@code i < 0 || i >= this.preparedStatement().variables().count()}. + * @throws InvalidTypeException if column {@code i} is not of type DECIMAL. + */ public BoundStatement setDecimal(int i, BigDecimal v) { metadata().checkType(i, DataType.Native.DECIMAL); return setValue(i, DecimalType.instance.decompose(v)); } + /** + * Set the value for column {@code name} to the provided big decimal. + * + * @return this BoundStatement. + * + * @throws IllegalArgumentException if {@code name} is not a prepared + * variable, i.e. if {@code !this.preparedStatement().variables().names().contains(name)}. + * @throws InvalidTypeException if column {@code name} is not of type DECIMAL. + */ public BoundStatement setDecimal(String name, BigDecimal v) { return setDecimal(metadata().getIdx(name), v); } + /** + * Set the {@code i}th value to the provided UUID. + * + * @return this BoundStatement. + * + * @throws IndexOutOfBoundsException if {@code i < 0 || i >= this.preparedStatement().variables().count()}. + * @throws InvalidTypeException if column {@code i} is not of type UUID or + * TIMEUUID, or if columm {@code i} is of type TIMEUUID but {@code v} is + * not a type 1 UUID. + */ public BoundStatement setUUID(int i, UUID v) { DataType.Native type = metadata().checkType(i, DataType.Native.UUID, DataType.Native.TIMEUUID); + if (type == DataType.Native.TIMEUUID && v.version() != 1) + throw new InvalidTypeException(String.format("%s is not a Type 1 (time-based) UUID", v)); + return type == DataType.Native.UUID ? setValue(i, UUIDType.instance.decompose(v)) : setValue(i, TimeUUIDType.instance.decompose(v)); } + /** + * Set the value for column {@code name} to the provided UUID. + * + * @return this BoundStatement. + * + * @throws IllegalArgumentException if {@code name} is not a prepared + * variable, i.e. if {@code !this.preparedStatement().variables().names().contains(name)}. 
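// --- Editorial sketch -------------------------------------------------------
// The version guard added in setUUID above. java.util.UUID.randomUUID()
// yields a version 4 UUID, so it can no longer be bound to a timeuuid
// column by mistake; the error now surfaces client-side. Column names are
// illustrative.
UUID u = UUID.randomUUID();   // u.version() == 4
bs.setUUID("id", u);          // fine on a uuid column
// bs.setUUID("when", u) on a timeuuid column throws InvalidTypeException
// ("... is not a Type 1 (time-based) UUID").
// ----------------------------------------------------------------------------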
+ * @throws InvalidTypeException if column {@code name} is not of type UUID or + * TIMEUUID, or if columm {@code name} is of type TIMEUUID but {@code v} is + * not a type 1 UUID. + */ public BoundStatement setUUID(String name, UUID v) { return setUUID(metadata().getIdx(name), v); } + /** + * Set the {@code i}th value to the provided inet address. + * + * @return this BoundStatement. + * + * @throws IndexOutOfBoundsException if {@code i < 0 || i >= this.preparedStatement().variables().count()}. + * @throws InvalidTypeException if column {@code i} is not of type INET. + */ + public BoundStatement setInet(int i, InetAddress v) { + metadata().checkType(i, DataType.Native.INET); + return setValue(i, InetAddressType.instance.decompose(v)); + } + + /** + * Set the value for column {@code name} to the provided inet address. + * + * @return this BoundStatement. + * + * @throws IllegalArgumentException if {@code name} is not a prepared + * variable, i.e. if {@code !this.preparedStatement().variables().names().contains(name)}. + * @throws InvalidTypeException if column {@code name} is not of type INET. + */ + public BoundStatement setInet(String name, InetAddress v) { + return setInet(metadata().getIdx(name), v); + } + + /** + * Set the {@code i}th value to the provided list. + * + * @return this BoundStatement. + * + * @throws IndexOutOfBoundsException if {@code i < 0 || i >= this.preparedStatement().variables().count()}. + * @throws InvalidTypeException if column {@code i} is not a list type or + * if the elements of {@code v} are not of the type of the elements of + * column {@code i}. + */ public BoundStatement setList(int i, List v) { DataType type = metadata().type(i); if (type.kind() != DataType.Kind.COLLECTION || type.asCollection().collectionType() != DataType.Collection.Type.LIST) @@ -260,10 +659,31 @@ public BoundStatement setList(int i, List v) { return setValue(i, Codec.>getCodec(type).decompose(v)); } + /** + * Set the value for column {@code name} to the provided list. + * + * @return this BoundStatement. + * + * @throws IllegalArgumentException if {@code name} is not a prepared + * variable, i.e. if {@code !this.preparedStatement().variables().names().contains(name)}. + * @throws InvalidTypeException if column {@code name} is not a list type or + * if the elements of {@code v} are not of the type of the elements of + * column {@code name}. + */ public BoundStatement setList(String name, List v) { return setList(metadata().getIdx(name), v); } + /** + * Set the {@code i}th value to the provided map. + * + * @return this BoundStatement. + * + * @throws IndexOutOfBoundsException if {@code i < 0 || i >= this.preparedStatement().variables().count()}. + * @throws InvalidTypeException if column {@code i} is not a map type or + * if the elements (keys or values) of {@code v} are not of the type of the + * elements of column {@code i}. + */ public BoundStatement setMap(int i, Map v) { DataType type = metadata().type(i); if (type.kind() != DataType.Kind.COLLECTION || type.asCollection().collectionType() != DataType.Collection.Type.MAP) @@ -285,10 +705,31 @@ public BoundStatement setMap(int i, Map v) { return setValue(i, Codec.>getCodec(type).decompose(v)); } + /** + * Set the value for column {@code name} to the provided map. + * + * @return this BoundStatement. + * + * @throws IllegalArgumentException if {@code name} is not a prepared + * variable, i.e. if {@code !this.preparedStatement().variables().names().contains(name)}. 
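// --- Editorial sketch -------------------------------------------------------
// The INET setter above, the write-side counterpart of the getInet added in
// the previous commit. getByName on an address literal avoids a DNS lookup
// but still declares the checked UnknownHostException. The "addr" column is
// illustrative.
bs.setInet("addr", InetAddress.getByName("192.168.1.1")); // InvalidTypeException unless "addr" is INET
// ----------------------------------------------------------------------------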
+ * @throws InvalidTypeException if column {@code name} is not a map type or + * if the elements (keys or values) of {@code v} are not of the type of the + * elements of column {@code name}. + */ public BoundStatement setMap(String name, Map v) { return setMap(metadata().getIdx(name), v); } + /** + * Set the {@code i}th value to the provided set. + * + * @return this BoundStatement. + * + * @throws IndexOutOfBoundsException if {@code i < 0 || i >= this.preparedStatement().variables().count()}. + * @throws InvalidTypeException if column {@code i} is not a set type or + * if the elements of {@code v} are not of the type of the elements of + * column {@code i}. + */ public BoundStatement setSet(int i, Set v) { DataType type = metadata().type(i); if (type.kind() != DataType.Kind.COLLECTION || type.asCollection().collectionType() != DataType.Collection.Type.SET) @@ -306,6 +747,17 @@ public BoundStatement setSet(int i, Set v) { return setValue(i, Codec.>getCodec(type).decompose(v)); } + /** + * Set the value for column {@code name} to the provided set. + * + * @return this BoundStatement. + * + * @throws IllegalArgumentException if {@code name} is not a prepared + * variable, i.e. if {@code !this.preparedStatement().variables().names().contains(name)}. + * @throws InvalidTypeException if column {@code name} is not a set type or + * if the elements of {@code v} are not of the type of the elements of + * column {@code name}. + */ public BoundStatement setSet(String name, Set v) { return setSet(metadata().getIdx(name), v); } diff --git a/driver-core/src/main/java/com/datastax/driver/core/CQLRow.java b/driver-core/src/main/java/com/datastax/driver/core/CQLRow.java index 94bd7608dea..7ec857b174e 100644 --- a/driver-core/src/main/java/com/datastax/driver/core/CQLRow.java +++ b/driver-core/src/main/java/com/datastax/driver/core/CQLRow.java @@ -604,7 +604,7 @@ public List getList(int i, Class elementsClass) { * * @throws IllegalArgumentException if {@code name} is not part of the * ResultSet this row is part of, i.e. if {@code !this.columns().names().contains(name)}. - * @throws InvalidTypeException if column {@code i} is not a list or if its + * @throws InvalidTypeException if column {@code name} is not a list or if its * elements are not of class {@code elementsClass}. */ public List getList(String name, Class elementsClass) { @@ -653,7 +653,7 @@ public Set getSet(int i, Class elementsClass) { * * @throws IllegalArgumentException if {@code name} is not part of the * ResultSet this row is part of, i.e. if {@code !this.columns().names().contains(name)}. - * @throws InvalidTypeException if column {@code i} is not a set or if its + * @throws InvalidTypeException if column {@code name} is not a set or if its * elements are not of class {@code elementsClass}. */ public Set getSet(String name, Class elementsClass) { @@ -707,7 +707,7 @@ public Map getMap(int i, Class keysClass, Class valuesClass) * * @throws IllegalArgumentException if {@code name} is not part of the * ResultSet this row is part of, i.e. if {@code !this.columns().names().contains(name)}. - * @throws InvalidTypeException if column {@code i} is not a map, if its + * @throws InvalidTypeException if column {@code name} is not a map, if its * keys are not of class {@code keysClass} or if its values are not of * class {@code valuesClass}. 
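// --- Editorial note ---------------------------------------------------------
// Limits of the collection checks above, which the patch comments
// acknowledge: an empty collection has no element to sample, and only the
// first element of a non-empty one is inspected. Both calls below pass the
// client-side check against a list-of-int column; the second can still fail
// later, at serialization or on the server.
bs.setList("l", new ArrayList());                   // empty: nothing to validate
bs.setList("l", Arrays.asList((Object)1, "two"));   // only the leading Integer is checked
// ----------------------------------------------------------------------------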
diff --git a/driver-core/src/main/java/com/datastax/driver/core/Cluster.java b/driver-core/src/main/java/com/datastax/driver/core/Cluster.java
index 76e92f33056..34a01cec6c5 100644
--- a/driver-core/src/main/java/com/datastax/driver/core/Cluster.java
+++ b/driver-core/src/main/java/com/datastax/driver/core/Cluster.java
@@ -98,7 +98,7 @@ public Session connect() {
      * Creates a new session on this cluster and sets a keyspace to use.
      *
      * @param keyspace The name of the keyspace to use for the created
-     * {@code Session}. This can be later changed using {@link Session#use}.
+     * {@code Session}.
      * @return a new session on this cluster set to the keyspace
      * {@code keyspaceName}.
      *
diff --git a/driver-core/src/main/java/com/datastax/driver/core/ColumnMetadata.java b/driver-core/src/main/java/com/datastax/driver/core/ColumnMetadata.java
index 50e7fa8750c..fd9a2fb4002 100644
--- a/driver-core/src/main/java/com/datastax/driver/core/ColumnMetadata.java
+++ b/driver-core/src/main/java/com/datastax/driver/core/ColumnMetadata.java
@@ -42,14 +42,29 @@ static ColumnMetadata build(TableMetadata tm, CQLRow row) {
         }
     }
 
+    /**
+     * The name of the column.
+     *
+     * @return the name of the column.
+     */
     public String getName() {
         return name;
     }
 
+    /**
+     * The metadata of the table this column is part of.
+     *
+     * @return the {@code TableMetadata} for the table this column is part of.
+     */
     public TableMetadata getTable() {
         return table;
     }
 
+    /**
+     * The type of the column.
+     *
+     * @return the type of the column.
+     */
     public DataType getType() {
         return type;
     }
diff --git a/driver-core/src/main/java/com/datastax/driver/core/Columns.java b/driver-core/src/main/java/com/datastax/driver/core/Columns.java
index d8a183913dc..13032612108 100644
--- a/driver-core/src/main/java/com/datastax/driver/core/Columns.java
+++ b/driver-core/src/main/java/com/datastax/driver/core/Columns.java
@@ -66,14 +66,30 @@ public List asList() {
         return Arrays.asList(byIdx);
     }
 
+    /**
+     * Returns the list of the names for the columns defined in this metadata.
+     *
+     * @return the list of the names for the columns defined in this metadata.
+     * The names in the returned list will be in the order of this metadata.
+     */
     public List names() {
-        // TODO
-        return null;
+        List names = new ArrayList(byIdx.length);
+        for (Definition def : byIdx)
+            names.add(def.name);
+        return names;
     }
 
+    /**
+     * Returns the list of the types for the columns defined in this metadata.
+     *
+     * @return the list of the types for the columns defined in this metadata.
+     * The types in the returned list will be in the order of this metadata.
+     */
     public List types() {
-        // TODO
-        return null;
+        List types = new ArrayList(byIdx.length);
+        for (Definition def : byIdx)
+            types.add(def.type);
+        return types;
     }
 
     /**
@@ -188,12 +204,15 @@ DataType.Native checkType(int i, DataType.Native... types) {
         throw new InvalidTypeException(String.format("Column %s is of type %s", name(i), defined));
     }
 
+    /**
+     * A column definition.
+     */
     public static class Definition {
 
-        public final String keyspace;
-        public final String table;
-        public final String name;
-        public final DataType type;
+        private final String keyspace;
+        private final String table;
+        private final String name;
+        private final DataType type;
 
         private Definition(String keyspace, String table, String name, DataType type) {
@@ -206,5 +225,41 @@ private Definition(String keyspace, String table, String name, DataType type) {
         static Definition fromTransportSpecification(ColumnSpecification spec) {
             return new Definition(spec.ksName, spec.cfName, spec.name.toString(), Codec.rawTypeToDataType(spec.type));
         }
+
+        /**
+         * The name of the keyspace this column is part of.
+         *
+         * @return the name of the keyspace this column is part of.
+         */
+        public String getKeyspace() {
+            return keyspace;
+        }
+
+        /**
+         * The name of the table this column is part of.
+         *
+         * @return the name of the table this column is part of.
+         */
+        public String getTable() {
+            return table;
+        }
+
+        /**
+         * The name of the column.
+         *
+         * @return the name of the column.
+         */
+        public String getName() {
+            return name;
+        }
+
+        /**
+         * The type of the column.
+         *
+         * @return the type of the column.
+         */
+        public DataType getType() {
+            return type;
+        }
     }
 }
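An illustrative sketch of the accessors implemented above (not part of the patch; 'stmt' is an assumed PreparedStatement, and the generic parameters — lost in this text — are assumed to be List<Definition>):

    // names() and types() follow the metadata order, as documented above.
    for (Columns.Definition def : stmt.variables().asList())
        System.out.println(def.getKeyspace() + "." + def.getTable() + "."
                           + def.getName() + " : " + def.getType());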
diff --git a/driver-core/src/main/java/com/datastax/driver/core/ControlConnection.java b/driver-core/src/main/java/com/datastax/driver/core/ControlConnection.java
index 7e977268560..1d5516adc24 100644
--- a/driver-core/src/main/java/com/datastax/driver/core/ControlConnection.java
+++ b/driver-core/src/main/java/com/datastax/driver/core/ControlConnection.java
@@ -146,6 +146,8 @@ private void refreshNodeList(Connection connection) {
                 knownHosts.add(host.getAddress());
 
             Set foundHosts = new HashSet();
+            // The node we're connected to won't be in the peers table, so just add it manually
+            foundHosts.add(connection.address);
             for (CQLRow row : peersFuture.get()) {
                 if (!row.isNull("peer"))
                     // TODO: find what port people are using
diff --git a/driver-core/src/main/java/com/datastax/driver/core/ConvictionPolicy.java b/driver-core/src/main/java/com/datastax/driver/core/ConvictionPolicy.java
index 38bbe88a074..0558e7af642 100644
--- a/driver-core/src/main/java/com/datastax/driver/core/ConvictionPolicy.java
+++ b/driver-core/src/main/java/com/datastax/driver/core/ConvictionPolicy.java
@@ -10,6 +10,8 @@ public interface ConvictionPolicy {
     /**
      * Called when a connection error occurs on a connection to the host this policy applies to.
      *
+     * @param exception the connection error that occurred.
+     *
      * @return {@code true} if the host should be considered down.
      */
     public boolean addFailure(ConnectionException exception);
diff --git a/driver-core/src/main/java/com/datastax/driver/core/DataType.java b/driver-core/src/main/java/com/datastax/driver/core/DataType.java
index 0e6f04a9a52..e8a6fb8e44b 100644
--- a/driver-core/src/main/java/com/datastax/driver/core/DataType.java
+++ b/driver-core/src/main/java/com/datastax/driver/core/DataType.java
@@ -111,20 +111,38 @@ protected Collection(Type type) {
         public Kind kind() { return Kind.COLLECTION; }
 
+        /**
+         * The type of collection.
+         *
+         * @return the type of collection.
+         */
         public Type collectionType() { return type; }
 
         public Native asNative() { throw new IllegalStateException("Not a native type, but a collection one"); }
         public Collection asCollection() { return this; }
         public Custom asCustom() { throw new IllegalStateException("Not a custom type, but a collection one"); }
 
+    /**
+     * The type of lists.
+     */
     public static class List extends Collection {
 
         private final DataType elementsType;
 
+        /**
+         * Creates a list type with the provided element type.
+         *
+         * @param elementsType the type of the elements of the list.
+         */
        public List(DataType elementsType) {
            super(Type.LIST);
            this.elementsType = elementsType;
        }
 
+        /**
+         * The data type of the elements for this list type.
+         *
+         * @return the data type of the elements for this list type.
+         */
        public DataType getElementsType() {
            return elementsType;
        }
@@ -135,14 +153,27 @@ public String toString() {
         }
     }
 
+    /**
+     * The type of sets.
+     */
     public static class Set extends Collection {
 
         private final DataType elementsType;
 
+        /**
+         * Creates a set type with the provided element type.
+         *
+         * @param elementsType the type of the elements of the set.
+         */
        public Set(DataType elementsType) {
            super(Type.SET);
            this.elementsType = elementsType;
        }
 
+        /**
+         * The data type of the elements for this set type.
+         *
+         * @return the data type of the elements for this set type.
+         */
        public DataType getElementsType() {
            return elementsType;
        }
@@ -153,20 +184,39 @@ public String toString() {
         }
     }
 
+    /**
+     * The type of maps.
+     */
     public static class Map extends Collection {
 
         private final DataType keysType;
         private final DataType valuesType;
 
+        /**
+         * Creates a map type with the provided key and value type.
+         *
+         * @param keysType the type of the keys of the map.
+         * @param valuesType the type of the values of the map.
+         */
        public Map(DataType keysType, DataType valuesType) {
            super(Type.MAP);
            this.keysType = keysType;
            this.valuesType = valuesType;
        }
 
+        /**
+         * The data type of the keys for this map type.
+         *
+         * @return the data type of the keys for this map type.
+         */
        public DataType getKeysType() {
            return keysType;
        }
 
+        /**
+         * The data type of the values for this map type.
+         *
+         * @return the data type of the values for this map type.
+         */
        public DataType getValuesType() {
            return valuesType;
        }
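A quick sketch of how these classes compose (illustrative only, not part of the patch; ASCII is a Native constant seen elsewhere in this series):

    DataType t = new DataType.Map(DataType.Native.ASCII, DataType.Native.ASCII);
    assert t.kind() == DataType.Kind.COLLECTION;
    assert t.asCollection().collectionType() == DataType.Collection.Type.MAP;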
diff --git a/driver-core/src/main/java/com/datastax/driver/core/LoadBalancingPolicy.java b/driver-core/src/main/java/com/datastax/driver/core/LoadBalancingPolicy.java
index 94e6babb122..87e550705b0 100644
--- a/driver-core/src/main/java/com/datastax/driver/core/LoadBalancingPolicy.java
+++ b/driver-core/src/main/java/com/datastax/driver/core/LoadBalancingPolicy.java
@@ -7,8 +7,8 @@
  * The policy that decides which Cassandra hosts to contact for each new query.
  *
  * The main method to implement is {@link LoadBalancingPolicy#newQueryPlan} and
- * is used for each query to find which host to query, and which hosts use as
- * failover.
+ * is used for each query to find which host to query first, and which hosts to
+ * use as failover.
  *
  * The {@code LoadBalancingPolicy} is a {@link Host.StateListener} and is thus
  * informed of hosts up/down events. For efficiency purposes, the policy is
diff --git a/driver-core/src/main/java/com/datastax/driver/core/PreparedStatement.java b/driver-core/src/main/java/com/datastax/driver/core/PreparedStatement.java
index 6674f0c6eb6..832baa2f47e 100644
--- a/driver-core/src/main/java/com/datastax/driver/core/PreparedStatement.java
+++ b/driver-core/src/main/java/com/datastax/driver/core/PreparedStatement.java
@@ -48,11 +48,29 @@ public Columns variables() {
         return metadata;
     }
 
+    /**
+     * Creates a new BoundStatement object and binds its variables to the
+     * provided values.
+     *
+     * This method is a shortcut for {@code this.newBoundStatement().bind(...)}.
+     *
+     * @param values the values to bind to the variables of the newly created
+     * BoundStatement.
+     * @return the newly created {@code BoundStatement} with its variables
+     * bound to {@code values}.
+     *
+     * @see {@link BoundStatement#bind}
+     */
     public BoundStatement bind(Object... values) {
         BoundStatement bs = new BoundStatement(this);
         return bs.bind(values);
     }
 
+    /**
+     * Creates a new {@code BoundStatement} from this prepared statement.
+     *
+     * @return the newly created {@code BoundStatement}.
+     */
     public BoundStatement newBoundStatement() {
         return new BoundStatement(this);
     }
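Putting bind() and newBoundStatement() side by side (an illustrative sketch, not part of the patch; the query and table are assumptions):

    PreparedStatement ps = session.prepare("INSERT INTO users (k, v) VALUES (?, ?)");
    // Shortcut form:
    session.executePrepared(ps.bind("key", 42));
    // Equivalent explicit form:
    BoundStatement bs = ps.newBoundStatement().setString(0, "key").setInt(1, 42);
    session.executePrepared(bs);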
diff --git a/driver-core/src/main/java/com/datastax/driver/core/ReconnectionPolicy.java b/driver-core/src/main/java/com/datastax/driver/core/ReconnectionPolicy.java
index 789caafa3b9..58af142de13 100644
--- a/driver-core/src/main/java/com/datastax/driver/core/ReconnectionPolicy.java
+++ b/driver-core/src/main/java/com/datastax/driver/core/ReconnectionPolicy.java
@@ -1,13 +1,54 @@
 package com.datastax.driver.core;
 
+/**
+ * Policy that decides how often the reconnection to a dead node is attempted.
+ *
+ * Each time a node is detected dead (because a connection error occurs), a new
+ * {@code ReconnectionPolicy} instance is created (based on which {@link
+ * ReconnectionPolicy.Factory} has been configured). Then each call to the
+ * {@link #nextDelayMs} method of this instance will decide when the next
+ * reconnection attempt to this node will be tried.
+ *
+ * Note that independently of the reconnection policy, the driver will attempt
+ * a reconnection if it receives a push notification from the Cassandra cluster
+ * that the node is UP again. So this reconnection policy is mainly useful in
+ * the case where the client loses its connection to a node without that node
+ * actually being down.
+ *
+ * The default {@link ReconnectionPolicy.Exponential} policy is usually
+ * adequate.
+ */
 public interface ReconnectionPolicy {
 
+    /**
+     * When to attempt the next reconnection.
+     *
+     * This method will be called once when the host is detected down to
+     * schedule the first reconnection attempt, and then once after each failed
+     * reconnection attempt to schedule the next one. Hence each call to this
+     * method is free to return a different value.
+     *
+     * @return a time in milliseconds to wait before attempting the next
+     * reconnection.
+     */
     public long nextDelayMs();
 
+    /**
+     * Simple factory interface to create {@link ReconnectionPolicy} instances.
+     */
     public interface Factory {
+
+        /**
+         * Creates a new reconnection policy instance.
+         *
+         * @return a new {@code ReconnectionPolicy} instance.
+         */
        public ReconnectionPolicy create();
     }
 
+    /**
+     * A reconnection policy that waits a constant time between each reconnection attempt.
+     */
     public static class Constant implements ReconnectionPolicy {
 
         private final long delayMs;
@@ -21,6 +62,18 @@ public long nextDelayMs() {
             return delayMs;
         }
 
+        /**
+         * Creates a reconnection policy factory that creates {@link
+         * ReconnectionPolicy.Constant} policies with the provided constant wait
+         * time.
+         *
+         * @param constantDelayMs the constant delay in milliseconds to use for
+         * the reconnection policies created by the factory returned by this
+         * method.
+         * @return a reconnection policy factory that creates {@code
+         * ReconnectionPolicy.Constant} policies with a {@code constantDelayMs}
+         * milliseconds delay between reconnection attempts.
+         */
        public static ReconnectionPolicy.Factory makeFactory(final long constantDelayMs) {
            return new ReconnectionPolicy.Factory() {
                public ReconnectionPolicy create() {
@@ -30,6 +83,11 @@ public ReconnectionPolicy create() {
         }
     }
 
+    /**
+     * A reconnection policy that waits exponentially longer between each
+     * reconnection attempt (but keeps a constant delay once a maximum delay is
+     * reached).
+     */
     public static class Exponential implements ReconnectionPolicy {
 
         private final long baseDelayMs;
@@ -47,6 +105,22 @@ public long nextDelayMs() {
             return baseDelayMs * (1 << attempts);
         }
 
+        /**
+         * Creates a reconnection policy factory that creates {@link
+         * ReconnectionPolicy.Exponential} policies with the provided base and
+         * max delays.
+         *
+         * @param baseDelayMs the base delay in milliseconds to use for
+         * the reconnection policies created by the factory returned by this
+         * method. Reconnection attempt {@code i} will be tried
+         * {@code 2^i * baseDelayMs} milliseconds after the previous one
+         * (unless {@code maxDelayMs} has been reached, in which case all
+         * following attempts will be done with a delay of {@code maxDelayMs}).
+         * @param maxDelayMs the maximum delay to wait between two attempts.
+         * @return a reconnection policy factory that creates {@code
+         * ReconnectionPolicy.Exponential} policies with the provided base and
+         * maximum delays between reconnection attempts.
+         */
        public static ReconnectionPolicy.Factory makeFactory(final long baseDelayMs, final long maxDelayMs) {
            return new ReconnectionPolicy.Factory() {
                public ReconnectionPolicy create() {
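For illustration (not part of the patch), wiring the factories above by hand; the delay values are arbitrary:

    // Exponential backoff starting at 1s; attempt i is delayed by baseDelayMs * 2^i,
    // capped at maxDelayMs (here 10 minutes).
    ReconnectionPolicy.Factory factory = ReconnectionPolicy.Exponential.makeFactory(1000, 600000);
    ReconnectionPolicy policy = factory.create();
    long delay = policy.nextDelayMs();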
diff --git a/driver-core/src/main/java/com/datastax/driver/core/RetryPolicy.java b/driver-core/src/main/java/com/datastax/driver/core/RetryPolicy.java
index 243e93534b1..3ffd4d53364 100644
--- a/driver-core/src/main/java/com/datastax/driver/core/RetryPolicy.java
+++ b/driver-core/src/main/java/com/datastax/driver/core/RetryPolicy.java
@@ -5,6 +5,7 @@
  * a TimeoutException or an UnavailableException.
  *
  * TODO: is it really useful to have such details if one cannot modify the request?
+ * TODO: Fix javadoc
  */
 public interface RetryPolicy {
 
diff --git a/driver-core/src/main/java/com/datastax/driver/core/RetryingCallback.java b/driver-core/src/main/java/com/datastax/driver/core/RetryingCallback.java
index fcb9f877f1b..35c9a0e218e 100644
--- a/driver-core/src/main/java/com/datastax/driver/core/RetryingCallback.java
+++ b/driver-core/src/main/java/com/datastax/driver/core/RetryingCallback.java
@@ -1,6 +1,7 @@
 package com.datastax.driver.core;
 
 import java.net.InetSocketAddress;
+import java.util.Collections;
 import java.util.Iterator;
 import java.util.HashMap;
 import java.util.Map;
@@ -57,7 +58,7 @@ public void sendRequest() {
             if (query(host))
                 return;
         }
-        callback.onException(new NoHostAvailableException(errors));
+        callback.onException(new NoHostAvailableException(errors == null ? Collections.emptyMap() : errors));
     }
 
     private boolean query(Host host) {
diff --git a/driver-core/src/main/java/com/datastax/driver/core/Session.java b/driver-core/src/main/java/com/datastax/driver/core/Session.java
index 3d1938d17d1..c0686a44d0b 100644
--- a/driver-core/src/main/java/com/datastax/driver/core/Session.java
+++ b/driver-core/src/main/java/com/datastax/driver/core/Session.java
@@ -159,14 +159,11 @@ public PreparedStatement prepare(CQLQuery query) {
      * @param stmt the prepared statement with values for its bound variables.
      * @return the result of the query. That result will never be null but can
      * be empty, and will be for any non-SELECT query.
+     *
+     * @throws IllegalStateException if {@code !stmt.ready()}.
      */
-    public ResultSet executePrepared(BoundStatement stmt) {
-        // TODO: Deal with exceptions
-        try {
-            return executePreparedAsync(stmt).get();
-        } catch (Exception e) {
-            throw new RuntimeException(e);
-        }
+    public ResultSet executePrepared(BoundStatement stmt) throws NoHostAvailableException, QueryExecutionException {
+        return executePreparedAsync(stmt).getUninterruptibly();
     }
 
     /**
@@ -180,8 +177,13 @@ public ResultSet executePrepared(BoundStatement stmt) {
      * @param stmt the prepared statement with values for its bound variables.
      * @return the result of the query. That result will never be null but can
      * be empty, and will be for any non-SELECT query.
+     *
+     * @throws IllegalStateException if {@code !stmt.ready()}.
      */
     public ResultSet.Future executePreparedAsync(BoundStatement stmt) {
+        if (!stmt.ready())
+            throw new IllegalStateException("Some bind variables haven't been bound in the provided statement");
+
         return manager.executeQuery(new ExecuteMessage(stmt.statement.id, Arrays.asList(stmt.values)));
     }
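The synchronous executePrepared() above is now a thin wrapper over its asynchronous variant; callers can use the same pattern directly (illustrative sketch, not part of the patch):

    ResultSet.Future future = session.executePreparedAsync(bs);
    // ... other work ...
    ResultSet rs = future.getUninterruptibly();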
diff --git a/driver-core/src/main/java/com/datastax/driver/core/exceptions/InvalidQueryException.java b/driver-core/src/main/java/com/datastax/driver/core/exceptions/InvalidQueryException.java
index 366cae04265..c1a81568049 100644
--- a/driver-core/src/main/java/com/datastax/driver/core/exceptions/InvalidQueryException.java
+++ b/driver-core/src/main/java/com/datastax/driver/core/exceptions/InvalidQueryException.java
@@ -1,7 +1,7 @@
 package com.datastax.driver.core.exceptions;
 
 /**
- * Indicates a syntaxcally correct but invalid query.
+ * Indicates a syntactically correct but invalid query.
  */
 public class InvalidQueryException extends QueryValidationException {
 
diff --git a/driver-core/src/main/java/com/datastax/driver/core/exceptions/NoHostAvailableException.java b/driver-core/src/main/java/com/datastax/driver/core/exceptions/NoHostAvailableException.java
index 51732b4f45b..3398a22063d 100644
--- a/driver-core/src/main/java/com/datastax/driver/core/exceptions/NoHostAvailableException.java
+++ b/driver-core/src/main/java/com/datastax/driver/core/exceptions/NoHostAvailableException.java
@@ -22,6 +22,7 @@ public class NoHostAvailableException extends DriverException {
     private final Map errors;
 
     public NoHostAvailableException(Map errors) {
+        super(makeMessage(errors));
         this.errors = errors;
     }
 
@@ -36,8 +37,7 @@ public Map errors() {
         return new HashMap(errors);
     }
 
-    @Override
-    public String getMessage() {
+    private static String makeMessage(Map errors) {
         return String.format("All host tried for query are in error (tried: %s)", errors.keySet());
     }
 }
diff --git a/driver-core/src/main/java/com/datastax/driver/core/exceptions/QueryTimeoutException.java b/driver-core/src/main/java/com/datastax/driver/core/exceptions/QueryTimeoutException.java
index 4c5d5ecfaa9..77c15d02cb4 100644
--- a/driver-core/src/main/java/com/datastax/driver/core/exceptions/QueryTimeoutException.java
+++ b/driver-core/src/main/java/com/datastax/driver/core/exceptions/QueryTimeoutException.java
@@ -22,14 +22,33 @@ protected QueryTimeoutException(String msg, ConsistencyLevel consistency, int re
         this.required = required;
     }
 
+    /**
+     * The consistency level of the operation that timed out.
+     *
+     * @return the consistency level of the operation that timed out.
+     */
     public ConsistencyLevel consistencyLevel() {
         return consistency;
     }
 
+    /**
+     * The number of replicas that had acknowledged/responded to the operation
+     * before it timed out.
+     *
+     * @return the number of replicas that had acknowledged/responded to the
+     * operation before it timed out.
+     */
     public int receivedAcknowledgements() {
         return received;
     }
 
+    /**
+     * The minimum number of replica acknowledgements/responses that were
+     * required to fulfill the operation.
+     *
+     * @return the minimum number of replica acknowledgements/responses that
+     * were required to fulfill the operation.
+     */
     public int requiredAcknowledgements() {
         return required;
     }
diff --git a/driver-core/src/main/java/com/datastax/driver/core/exceptions/ReadTimeoutException.java b/driver-core/src/main/java/com/datastax/driver/core/exceptions/ReadTimeoutException.java
index 8c9815ccc26..c611df4d302 100644
--- a/driver-core/src/main/java/com/datastax/driver/core/exceptions/ReadTimeoutException.java
+++ b/driver-core/src/main/java/com/datastax/driver/core/exceptions/ReadTimeoutException.java
@@ -25,4 +25,20 @@ else if (!dataPresent)
         else
             return String.format("timeout while waiting for repair of inconsistent replica");
     }
+
+    /**
+     * Whether the actual data was amongst the received replica responses.
+     *
+     * During reads, Cassandra doesn't request data from every replica, to
+     * minimize internal network traffic. Instead, some replicas are only asked
+     * for a checksum of the data. A read timeout may occur even if enough
+     * replicas have responded to fulfill the consistency level, if only checksum
+     * responses have been received. This method allows detecting that case.
+     *
+     * @return {@code true} if the data was amongst the received replica
+     * responses, {@code false} otherwise.
+     */
+    public boolean wasDataRetrieved() {
+        return dataPresent;
+    }
 }
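An illustrative way a caller might use wasDataRetrieved() (not part of the patch; the query is an assumption):

    try {
        session.execute("SELECT * FROM test WHERE k = 'k'");
    } catch (ReadTimeoutException e) {
        if (!e.wasDataRetrieved()) {
            // Only checksum responses arrived before the timeout;
            // a retry may well succeed.
        }
    }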
diff --git a/driver-core/src/main/java/com/datastax/driver/core/exceptions/UnavailableException.java b/driver-core/src/main/java/com/datastax/driver/core/exceptions/UnavailableException.java
index 603580ee2a2..83173498b74 100644
--- a/driver-core/src/main/java/com/datastax/driver/core/exceptions/UnavailableException.java
+++ b/driver-core/src/main/java/com/datastax/driver/core/exceptions/UnavailableException.java
@@ -19,14 +19,33 @@ public UnavailableException(ConsistencyLevel consistency, int required, int aliv
         this.alive = alive;
     }
 
+    /**
+     * The consistency level of the operation triggering this unavailable exception.
+     *
+     * @return the consistency level of the operation triggering this unavailable exception.
+     */
     public ConsistencyLevel consistency() {
         return consistency;
     }
 
+    /**
+     * The number of replica acknowledgements/responses required to perform the
+     * operation (with its required consistency level).
+     *
+     * @return the number of replica acknowledgements/responses required to perform the
+     * operation.
+     */
     public int requiredReplicas() {
         return required;
     }
 
+    /**
+     * The number of replicas that were known to be alive by the Cassandra
+     * coordinator node when it tried to execute the operation.
+     *
+     * @return the number of replicas that were known to be alive by the Cassandra
+     * coordinator node when it tried to execute the operation.
+     */
     public int aliveReplicas() {
         return alive;
     }
diff --git a/driver-core/src/main/java/com/datastax/driver/core/utils/RoundRobinPolicy.java b/driver-core/src/main/java/com/datastax/driver/core/utils/RoundRobinPolicy.java
index 9863d154826..6a5b67295fa 100644
--- a/driver-core/src/main/java/com/datastax/driver/core/utils/RoundRobinPolicy.java
+++ b/driver-core/src/main/java/com/datastax/driver/core/utils/RoundRobinPolicy.java
@@ -13,7 +13,7 @@ public class RoundRobinPolicy implements LoadBalancingPolicy {
 
     private RoundRobinPolicy(Collection hosts) {
         this.liveHosts = hosts.toArray(new Host[hosts.size()]);
-        this.index.set(new Random().nextInt(hosts.size()));
+        this.index.set(new Random().nextInt(Math.max(hosts.size(), 1)));
     }
 
     public Iterator newQueryPlan() {
diff --git a/driver-core/src/test/java/com/datastax/driver/core/SessionTest.java b/driver-core/src/test/java/com/datastax/driver/core/SessionTest.java
index c00f3995ef8..3c23005f0e3 100644
--- a/driver-core/src/test/java/com/datastax/driver/core/SessionTest.java
+++ b/driver-core/src/test/java/com/datastax/driver/core/SessionTest.java
@@ -61,91 +61,105 @@ public static void classSetUp() {
     //    assertEquals(0.2, r.getFloat("f"), 0.01);
     //}
 
-    //@Test
-    //public void PreparedStatementTest() throws Exception {
+    @Test
+    public void PreparedStatementTest() throws Exception {
 
-    //    Cluster cluster = new Cluster.Builder().addContactPoint("localhost").build();
-    //    Session session = cluster.connect();
+        Cluster cluster = new Cluster.Builder().addContactPoint("localhost").build();
+        Session session = cluster.connect();
 
-    //    session.execute("CREATE KEYSPACE test_ks WITH strategy_class = SimpleStrategy AND strategy_options:replication_factor = 1");
-    //    session.use("test_ks");
-    //    session.execute("CREATE TABLE test_2 (k text, i int, f float, PRIMARY KEY(k, i))");
+        try
+        {
+            session.execute("CREATE KEYSPACE test_ks WITH replication = { 'class' : 'SimpleStrategy', 'replication_factor' : 1 }");
+            session.execute("USE test_ks");
+            session.execute("CREATE TABLE test_2 (k text, i int, f float, PRIMARY KEY(k, i))");
+        } catch (Exception e) {
+            // Skip if already created
+            session.execute("USE test_ks");
+        }
 
-    //    PreparedStatement insertStmt = session.prepare("INSERT INTO test_2 (k, i, f) VALUES (?, ?, ?)");
-    //    PreparedStatement selectStmt = session.prepare("SELECT * FROM test_2 WHERE k = ?");
+        PreparedStatement insertStmt = session.prepare("INSERT INTO test_2 (k, i, f) VALUES (?, ?, ?)");
+        PreparedStatement selectStmt = session.prepare("SELECT * FROM test_2 WHERE k = ?");
 
-    //    ResultSet rs;
-    //    BoundStatement bs;
+        ResultSet rs;
+        BoundStatement bs;
 
-    //    bs = insertStmt.newBoundStatement().setString(0, "prep").setInt("i", 1).setFloat(2, 0.1f);
-    //    rs = session.executePrepared(bs);
+        bs = insertStmt.newBoundStatement().setString(0, "prep").setInt("i", 1).setFloat(2, 0.1f);
+        rs = session.executePrepared(bs);
 
-    //    bs = insertStmt.newBoundStatement().setString(0, "prep").setFloat("f", 0.2f).setInt(1, 2);
-    //    rs = session.executePrepared(bs);
+        bs = insertStmt.newBoundStatement().setString(0, "prep").setFloat("f", 0.2f).setInt(1, 2);
+        rs = session.executePrepared(bs);
 
-    //    bs = selectStmt.newBoundStatement().setString("k", "prep");
-    //    rs = session.executePrepared(bs);
-    //    List l = rs.fetchAll();
-    //    assertEquals(2, l.size());
+        session.executePrepared(insertStmt.bind("prep", 3, 42.0f));
 
-    //    CQLRow r;
-    //    r = l.get(0);
-    //    assertEquals("prep", r.getString(0));
-    //    assertEquals(1, r.getInt("i"));
-    //    assertEquals(0.1, r.getFloat("f"), 0.01);
+        bs = selectStmt.newBoundStatement().setString("k", "prep");
+        rs = session.executePrepared(bs);
 
-    //    r = l.get(1);
-    //    assertEquals("prep", r.getString("k"));
-    //    assertEquals(2, r.getInt("i"));
-    //    assertEquals(0.2, r.getFloat("f"), 0.01);
-    //}
+        List l = rs.fetchAll();
+        assertEquals(3, l.size());
 
-    @Test
-    public void CollectionsTest() throws Exception {
+        CQLRow r;
+        r = l.get(0);
+        assertEquals("prep", r.getString(0));
+        assertEquals(1, r.getInt("i"));
+        assertEquals(0.1, r.getFloat("f"), 0.01);
 
-        Cluster cluster = new Cluster.Builder().addContactPoints("127.0.0.1").build();
-        Session session = cluster.connect();
+        r = l.get(1);
+        assertEquals("prep", r.getString("k"));
+        assertEquals(2, r.getInt("i"));
+        assertEquals(0.2, r.getFloat("f"), 0.01);
 
-        try {
-            session.execute("CREATE KEYSPACE test_ks WITH replication = { 'class' : 'SimpleStrategy', 'replication_factor' : 1 }");
-            // We should deal with that sleep
-            try { Thread.sleep(1000); } catch (Exception e) {}
-            session.execute("USE test_ks");
-            session.execute("CREATE TABLE test (k text PRIMARY KEY, l list, s set, m map)");
-        } catch (Exception e) {
-            // Skip if already created
-            session.execute("USE test_ks");
-        }
+        r = l.get(2);
+        assertEquals("prep", r.getString("k"));
+        assertEquals(3, r.getInt("i"));
+        assertEquals(42.0f, r.getFloat("f"), 0.01);
+    }
 
-        session.execute("INSERT INTO test (k, l, s, m) VALUES ('k', [3, 2, 1], { 3, 2, 1}, { 1349286846012 : 2 })");
-        for (CQLRow row : session.execute("SELECT * FROM test")) {
-            List l = row.getList("l", Integer.class);
-            Set s = row.getSet("s", String.class);
-            Map m = row.getMap("m", Date.class, Integer.class);
+    //@Test
+    //public void CollectionsTest() throws Exception {
 
-            System.out.println("l = " + l);
-            System.out.println("s = " + s);
-            System.out.println("m = " + m);
-        }
+    //    Cluster cluster = new Cluster.Builder().addContactPoints("127.0.0.1").build();
+    //    Session session = cluster.connect();
 
-        System.out.println("-------");
+    //    try {
+    //        session.execute("CREATE KEYSPACE test_ks WITH replication = { 'class' : 'SimpleStrategy', 'replication_factor' : 1 }");
+    //        // We should deal with that sleep
+    //        try { Thread.sleep(1000); } catch (Exception e) {}
+    //        session.execute("USE test_ks");
+    //        session.execute("CREATE TABLE test (k text PRIMARY KEY, l list, s set, m map)");
+    //    } catch (Exception e) {
+    //        // Skip if already created
+    //        session.execute("USE test_ks");
+    //    }
 
-        BoundStatement stmt = session.prepare("INSERT INTO test (k, l, s, m) VALUES ('k2', ?, ?, ?)").newBoundStatement();
-        stmt.setList(0, Arrays.asList(new Integer[]{ 5, 4, 3, 2, 1 }));
-        stmt.setSet(1, new HashSet(Arrays.asList(new String[]{ "5", "4", "3", "2", "1" })));
-        stmt.setMap(2, new HashMap(){{ put(new Date(1349286846012L), 4); }});
-        session.executePrepared(stmt);
+    //    session.execute("INSERT INTO test (k, l, s, m) VALUES ('k', [3, 2, 1], { 3, 2, 1}, { 1349286846012 : 2 })");
+    //    for (CQLRow row : session.execute("SELECT * FROM test")) {
+    //        List l = row.getList("l", Integer.class);
+    //        Set s = row.getSet("s", String.class);
+    //        Map m = row.getMap("m", Date.class, Integer.class);
 
-        for (CQLRow row : session.execute("SELECT * FROM test WHERE k = 'k2'")) {
-            List l = row.getList("l", Integer.class);
-            Set s = row.getSet("s", String.class);
-            Map m = row.getMap("m", Date.class, Integer.class);
+    //        System.out.println("l = " + l);
+    //        System.out.println("s = " + s);
+    //        System.out.println("m = " + m);
+    //    }
 
-            System.out.println("l = " + l);
-            System.out.println("s = " + s);
-            System.out.println("m = " + m);
-        }
-    }
+    //    System.out.println("-------");
+
+    //    BoundStatement stmt = session.prepare("INSERT INTO test (k, l, s, m) VALUES ('k2', ?, ?, ?)").newBoundStatement();
+    //    stmt.setList(0, Arrays.asList(new Integer[]{ 5, 4, 3, 2, 1 }));
+    //    stmt.setSet(1, new HashSet(Arrays.asList(new String[]{ "5", "4", "3", "2", "1" })));
+    //    stmt.setMap(2, new HashMap(){{ put(new Date(1349286846012L), 4); }});
+    //    session.executePrepared(stmt);
+
+    //    for (CQLRow row : session.execute("SELECT * FROM test WHERE k = 'k2'")) {
+    //        List l = row.getList("l", Integer.class);
+    //        Set s = row.getSet("s", String.class);
+    //        Map m = row.getMap("m", Date.class, Integer.class);
+
+    //        System.out.println("l = " + l);
+    //        System.out.println("s = " + s);
+    //        System.out.println("m = " + m);
+    //    }
+    //}
 
     //@Test
     //public void MultiNodeContinuousExecuteTest() throws Exception {

From f233e66b4fbcd99fd252c4a7b7caac3330321356 Mon Sep 17 00:00:00 2001
From: Sylvain Lebresne
Date: Fri, 5 Oct 2012 18:29:12 +0200
Subject: [PATCH 038/719] Add missing methods to schema objects

---
 .../datastax/driver/core/ColumnMetadata.java  | 71 +++++++++++++++---
 .../driver/core/PreparedStatement.java        |  7 +-
 .../datastax/driver/core/TableMetadata.java   | 73 ++++++++++++++++++-
 3 files changed, 136 insertions(+), 15 deletions(-)
diff --git a/driver-core/src/main/java/com/datastax/driver/core/ColumnMetadata.java b/driver-core/src/main/java/com/datastax/driver/core/ColumnMetadata.java
index fd9a2fb4002..d06eaa7add4 100644
--- a/driver-core/src/main/java/com/datastax/driver/core/ColumnMetadata.java
+++ b/driver-core/src/main/java/com/datastax/driver/core/ColumnMetadata.java
@@ -20,20 +20,20 @@ public class ColumnMetadata {
     private final TableMetadata table;
     private final String name;
     private final DataType type;
-    private final Index index;
+    private final IndexMetadata index;
 
-    ColumnMetadata(TableMetadata table, String name, DataType type, Index index) {
+    ColumnMetadata(TableMetadata table, String name, DataType type, CQLRow row) {
         this.table = table;
         this.name = name;
         this.type = type;
-        this.index = index;
+        this.index = IndexMetadata.build(this, row);
     }
 
     static ColumnMetadata build(TableMetadata tm, CQLRow row) {
         try {
             String name = row.getString(COLUMN_NAME);
             AbstractType t = TypeParser.parse(row.getString(VALIDATOR));
-            ColumnMetadata cm = new ColumnMetadata(tm, name, Codec.rawTypeToDataType(t), Index.build(row));
+            ColumnMetadata cm = new ColumnMetadata(tm, name, Codec.rawTypeToDataType(t), row);
             tm.add(cm);
             return cm;
         } catch (RequestValidationException e) {
@@ -69,27 +69,76 @@ public DataType getType() {
         return type;
     }
 
-    static class Index {
+    /**
+     * The indexing metadata on this column if the column is indexed.
+     *
+     * @return the metadata on the column index if the column is indexed,
+     * {@code null} otherwise.
+     */
+    public IndexMetadata getIndex() {
+        return index;
+    }
+
+    /**
+     * Metadata on a column index.
+     */
+    public static class IndexMetadata {
 
         private static final String INDEX_TYPE = "index_type";
         private static final String INDEX_OPTIONS = "index_options";
         private static final String INDEX_NAME = "index_name";
 
-        public final String name;
-        public final String type;
-        public final Map options = new HashMap();
+        private final ColumnMetadata column;
+        private final String name;
+        // It doesn't make sense to expose the index type for CQL3 at this
+        // point (the notion doesn't exist yet in CQL), but keeping it internally
+        // so we don't forget it exists
+        private final String type;
+        private final Map options = new HashMap();
 
-        private Index(String name, String type) {
+        private IndexMetadata(ColumnMetadata column, String name, String type) {
+            this.column = column;
             this.name = name;
             this.type = type;
         }
 
+        /**
+         * The column this index metadata refers to.
+         *
+         * @return the column this index metadata refers to.
+         */
+        public ColumnMetadata getIndexedColumn() {
+            return column;
+        }
+
+        /**
+         * The index name.
+         *
+         * @return the index name.
+         */
+        public String getName() {
+            return name;
+        }
+
+        /**
+         * Returns a CQL query representing this index.
+         *
+         * This method returns a single 'CREATE INDEX' query with the options
+         * corresponding to this index definition.
+         *
+         * @return the 'CREATE INDEX' query corresponding to this index.
+         */
+        public String asCQLQuery() {
+            TableMetadata table = column.getTable();
+            return String.format("CREATE INDEX %s ON %s.%s (%s)", name, table.getKeyspace().getName(), table.getName(), column.getName());
+        }
+
-        public static Index build(CQLRow row) {
+        private static IndexMetadata build(ColumnMetadata column, CQLRow row) {
             String type = row.getString(INDEX_TYPE);
             if (type == null)
                 return null;
-            Index index = new Index(type, row.getString(INDEX_NAME));
+            IndexMetadata index = new IndexMetadata(column, row.getString(INDEX_NAME), type);
             // TODO: handle options
             return index;
         }
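A sketch of the new index accessors in use (illustrative only, not part of the patch; 'table' is an assumed TableMetadata and the type-erased List return types are assumed to carry ColumnMetadata):

    // Re-create the secondary indexes of a table from its metadata.
    for (ColumnMetadata column : table.getColumns()) {
        ColumnMetadata.IndexMetadata index = column.getIndex();
        if (index != null)
            System.out.println(index.asCQLQuery());
    }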
diff --git a/driver-core/src/main/java/com/datastax/driver/core/PreparedStatement.java b/driver-core/src/main/java/com/datastax/driver/core/PreparedStatement.java
index 832baa2f47e..d106ab050b5 100644
--- a/driver-core/src/main/java/com/datastax/driver/core/PreparedStatement.java
+++ b/driver-core/src/main/java/com/datastax/driver/core/PreparedStatement.java
@@ -59,7 +59,12 @@ public Columns variables() {
      * @return the newly created {@code BoundStatement} with its variables
      * bound to {@code values}.
      *
-     * @see {@link BoundStatement#bind}
+     * @throws IllegalArgumentException if more {@code values} are provided
+     * than there are bound variables in this statement.
+     * @throws InvalidTypeException if any of the provided values is not of the
+     * correct type to be bound to the corresponding bind variable.
+     *
+     * @see BoundStatement#bind
      */
     public BoundStatement bind(Object... values) {
         BoundStatement bs = new BoundStatement(this);
diff --git a/driver-core/src/main/java/com/datastax/driver/core/TableMetadata.java b/driver-core/src/main/java/com/datastax/driver/core/TableMetadata.java
index cc61e31c63c..453b6d55c9b 100644
--- a/driver-core/src/main/java/com/datastax/driver/core/TableMetadata.java
+++ b/driver-core/src/main/java/com/datastax/driver/core/TableMetadata.java
@@ -15,7 +15,7 @@
  */
 public class TableMetadata {
 
-    public static final String CF_NAME = "columnfamily_name";
+    private static final String CF_NAME = "columnfamily_name";
 
     private static final String KEY_VALIDATOR = "key_validator";
     private static final String COMPARATOR = "comparator";
@@ -148,6 +148,68 @@ public ColumnMetadata getColumn(String name) {
         return columns.get(name);
     }
 
+    /**
+     * Returns a list containing all the columns of this table.
+     *
+     * The order of the columns in the returned list will be consistent with
+     * the order of the columns returned by a {@code SELECT * FROM thisTable}:
+     * the first column will be the partition key, next will be the clustering
+     * keys in their defined order, and then will follow the rest of the
+     * columns in alphabetic order.
+     *
+     * @return a list containing the metadata for the columns of this table.
+     */
+    public List getColumns() {
+        return new ArrayList(columns.values());
+    }
+
+    /**
+     * Returns the list of columns composing the primary key for this table.
+     *
+     * Note that a table will always have at least a partition key (that
+     * may itself be one or more columns), so the returned list will at least
+     * have one element.
+     *
+     * @return the list of columns composing the primary key for this table.
+     */
+    public List getPrimaryKey() {
+        List pk = new ArrayList(partitionKey.size() + clusteringKey.size());
+        pk.addAll(partitionKey);
+        pk.addAll(clusteringKey);
+        return pk;
+    }
+
+    /**
+     * Returns the list of columns composing the partition key for this table.
+     *
+     * Note that a table will always have a partition key, so the returned list
+     * will at least have one element.
+     *
+     * @return the list of columns composing the partition key for this table.
+     */
+    public List getPartitionKey() {
+        return Collections.unmodifiableList(partitionKey);
+    }
+
+    /**
+     * Returns the list of columns composing the clustering key for this table.
+     *
+     * @return the list of columns composing the clustering key for this table.
+     * If the clustering key is empty, an empty list is returned.
+     */
+    public List getClusteringKey() {
+        return Collections.unmodifiableList(clusteringKey);
+    }
+
+    /**
+     * The options of this table.
+     *
+     * @return the options of this table.
+     */
+    public Options getOptions() {
+        return options;
+    }
+
     // :_(
     private static ObjectMapper jsonMapper = new ObjectMapper(new JsonFactory());
 
@@ -190,8 +252,13 @@ public String exportAsString() {
 
         sb.append(asCQLQuery(true)).append("\n");
 
-        // TODO: handle indexes
+        for (ColumnMetadata column : columns.values()) {
+            ColumnMetadata.IndexMetadata index = column.getIndex();
+            if (index == null)
+                continue;
 
+            sb.append(index.asCQLQuery()).append("\n");
+        }
         return sb.toString();
     }
 
@@ -304,7 +371,7 @@ public static class Options {
         private final Map compaction = new HashMap();
         private final Map compression = new HashMap();
 
-        public Options(CQLRow row) {
+        Options(CQLRow row) {
             this.comment = row.isNull(COMMENT) ? "" : row.getString(COMMENT);
             this.readRepair = row.getDouble(READ_REPAIR);
             this.localReadRepair = row.getDouble(LOCAL_READ_REPAIR);
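Illustrative use of the new key accessors (not part of the patch; 'table' is an assumed TableMetadata, with the lost generic parameters assumed to be List<ColumnMetadata>):

    for (ColumnMetadata c : table.getPartitionKey())
        System.out.println("partition key: " + c.getName());
    for (ColumnMetadata c : table.getClusteringKey())
        System.out.println("clustering key: " + c.getName());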
"" : row.getString(COMMENT); this.readRepair = row.getDouble(READ_REPAIR); this.localReadRepair = row.getDouble(LOCAL_READ_REPAIR); From f15e7c2711ea2a6e40f8ed10a3e042ba71f6513d Mon Sep 17 00:00:00 2001 From: Sylvain Lebresne Date: Fri, 5 Oct 2012 19:10:56 +0200 Subject: [PATCH 039/719] Deal with visibility issues the hard way --- .../core/AbstractReconnectionHandler.java | 3 - .../datastax/driver/core/BoundStatement.java | 4 +- .../java/com/datastax/driver/core/CQLRow.java | 4 +- .../com/datastax/driver/core/Cluster.java | 4 - .../driver/core/{transport => }/Codec.java | 4 +- .../datastax/driver/core/ColumnMetadata.java | 2 - .../com/datastax/driver/core/Columns.java | 4 +- .../core/{transport => }/Connection.java | 8 +- .../{transport => }/ConnectionException.java | 4 +- .../driver/core/ConsistencyLevel.java | 2 +- .../driver/core/ControlConnection.java | 12 +-- .../driver/core/ConvictionPolicy.java | 2 - .../java/com/datastax/driver/core/Host.java | 2 - .../core/{pool => }/HostConnectionPool.java | 7 +- .../driver/core/InvalidTypeException.java | 8 -- .../com/datastax/driver/core/ResultSet.java | 74 ++++++++++--------- .../driver/core/RetryingCallback.java | 2 - .../core/{utils => }/RoundRobinPolicy.java | 2 +- .../com/datastax/driver/core/Session.java | 5 +- .../{utils => }/SimpleConvictionPolicy.java | 6 +- .../core/{transport => }/StreamIdHandler.java | 4 +- .../datastax/driver/core/TableMetadata.java | 4 +- .../{transport => }/TransportException.java | 4 +- .../core/exceptions/InvalidTypeException.java | 8 ++ 24 files changed, 76 insertions(+), 103 deletions(-) rename driver-core/src/main/java/com/datastax/driver/core/{transport => }/Codec.java (99%) rename driver-core/src/main/java/com/datastax/driver/core/{transport => }/Connection.java (98%) rename driver-core/src/main/java/com/datastax/driver/core/{transport => }/ConnectionException.java (84%) rename driver-core/src/main/java/com/datastax/driver/core/{pool => }/HostConnectionPool.java (97%) delete mode 100644 driver-core/src/main/java/com/datastax/driver/core/InvalidTypeException.java rename driver-core/src/main/java/com/datastax/driver/core/{utils => }/RoundRobinPolicy.java (98%) rename driver-core/src/main/java/com/datastax/driver/core/{utils => }/SimpleConvictionPolicy.java (75%) rename driver-core/src/main/java/com/datastax/driver/core/{transport => }/StreamIdHandler.java (92%) rename driver-core/src/main/java/com/datastax/driver/core/{transport => }/TransportException.java (79%) create mode 100644 driver-core/src/main/java/com/datastax/driver/core/exceptions/InvalidTypeException.java diff --git a/driver-core/src/main/java/com/datastax/driver/core/AbstractReconnectionHandler.java b/driver-core/src/main/java/com/datastax/driver/core/AbstractReconnectionHandler.java index 9dda5fbc023..db0e1a3034a 100644 --- a/driver-core/src/main/java/com/datastax/driver/core/AbstractReconnectionHandler.java +++ b/driver-core/src/main/java/com/datastax/driver/core/AbstractReconnectionHandler.java @@ -3,9 +3,6 @@ import java.util.concurrent.atomic.AtomicReference; import java.util.concurrent.*; -import com.datastax.driver.core.transport.Connection; -import com.datastax.driver.core.transport.ConnectionException; - import org.slf4j.Logger; import org.slf4j.LoggerFactory; diff --git a/driver-core/src/main/java/com/datastax/driver/core/BoundStatement.java b/driver-core/src/main/java/com/datastax/driver/core/BoundStatement.java index 134fc229e30..8984c36e7f3 100644 --- a/driver-core/src/main/java/com/datastax/driver/core/BoundStatement.java +++ 
+++ b/driver-core/src/main/java/com/datastax/driver/core/BoundStatement.java
@@ -6,10 +6,10 @@
 import java.nio.ByteBuffer;
 import java.util.*;
 
-import com.datastax.driver.core.transport.Codec;
-
 import org.apache.cassandra.db.marshal.*;
 
+import com.datastax.driver.core.exceptions.InvalidTypeException;
+
 /**
  * A prepared statement with values bound to the bind variables.
  *
diff --git a/driver-core/src/main/java/com/datastax/driver/core/CQLRow.java b/driver-core/src/main/java/com/datastax/driver/core/CQLRow.java
index 7ec857b174e..b98439e666c 100644
--- a/driver-core/src/main/java/com/datastax/driver/core/CQLRow.java
+++ b/driver-core/src/main/java/com/datastax/driver/core/CQLRow.java
@@ -6,10 +6,10 @@
 import java.nio.ByteBuffer;
 import java.util.*;
 
-import com.datastax.driver.core.transport.Codec;
-
 import org.apache.cassandra.db.marshal.*;
 
+import com.datastax.driver.core.exceptions.InvalidTypeException;
+
 /**
  * A CQL Row returned in a {@link ResultSet}.
  */
diff --git a/driver-core/src/main/java/com/datastax/driver/core/Cluster.java b/driver-core/src/main/java/com/datastax/driver/core/Cluster.java
index 34a01cec6c5..049e66d4923 100644
--- a/driver-core/src/main/java/com/datastax/driver/core/Cluster.java
+++ b/driver-core/src/main/java/com/datastax/driver/core/Cluster.java
@@ -11,10 +11,6 @@
 import org.apache.cassandra.transport.messages.QueryMessage;
 
 import com.datastax.driver.core.exceptions.*;
-import com.datastax.driver.core.transport.Connection;
-import com.datastax.driver.core.transport.ConnectionException;
-import com.datastax.driver.core.utils.SimpleConvictionPolicy;
-import com.datastax.driver.core.utils.RoundRobinPolicy;
 
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
diff --git a/driver-core/src/main/java/com/datastax/driver/core/transport/Codec.java b/driver-core/src/main/java/com/datastax/driver/core/Codec.java
similarity index 99%
rename from driver-core/src/main/java/com/datastax/driver/core/transport/Codec.java
rename to driver-core/src/main/java/com/datastax/driver/core/Codec.java
index f9f2ed63cdc..58a7c37bddc 100644
--- a/driver-core/src/main/java/com/datastax/driver/core/transport/Codec.java
+++ b/driver-core/src/main/java/com/datastax/driver/core/Codec.java
@@ -1,4 +1,4 @@
-package com.datastax.driver.core.transport;
+package com.datastax.driver.core;
 
 import java.math.BigDecimal;
 import java.math.BigInteger;
@@ -14,7 +14,7 @@
 /**
  * Static method to code/decode serialized data given their types.
  */
-public class Codec {
+class Codec {
 
     private static Map<AbstractType, DataType.Native> rawNativeMap = new HashMap<AbstractType, DataType.Native>() {{
         put(AsciiType.instance, DataType.Native.ASCII);
diff --git a/driver-core/src/main/java/com/datastax/driver/core/ColumnMetadata.java b/driver-core/src/main/java/com/datastax/driver/core/ColumnMetadata.java
index d06eaa7add4..5af5c859ff6 100644
--- a/driver-core/src/main/java/com/datastax/driver/core/ColumnMetadata.java
+++ b/driver-core/src/main/java/com/datastax/driver/core/ColumnMetadata.java
@@ -2,8 +2,6 @@
 
 import java.util.*;
 
-import com.datastax.driver.core.transport.Codec;
-
 import org.apache.cassandra.exceptions.RequestValidationException;
 import org.apache.cassandra.db.marshal.AbstractType;
 import org.apache.cassandra.db.marshal.TypeParser;
diff --git a/driver-core/src/main/java/com/datastax/driver/core/Columns.java b/driver-core/src/main/java/com/datastax/driver/core/Columns.java
index 13032612108..2610293ee1d 100644
--- a/driver-core/src/main/java/com/datastax/driver/core/Columns.java
+++ b/driver-core/src/main/java/com/datastax/driver/core/Columns.java
@@ -2,10 +2,10 @@
 
 import java.util.*;
 
-import com.datastax.driver.core.transport.Codec;
-
 import org.apache.cassandra.cql3.ColumnSpecification;
 
+import com.datastax.driver.core.exceptions.InvalidTypeException;
+
 /**
  * Metadata describing the columns returned in a {@link ResultSet} or a
  * {@link PreparedStatement}.
diff --git a/driver-core/src/main/java/com/datastax/driver/core/transport/Connection.java b/driver-core/src/main/java/com/datastax/driver/core/Connection.java
similarity index 98%
rename from driver-core/src/main/java/com/datastax/driver/core/transport/Connection.java
rename to driver-core/src/main/java/com/datastax/driver/core/Connection.java
index 94fa1826787..cdbc048f63e 100644
--- a/driver-core/src/main/java/com/datastax/driver/core/transport/Connection.java
+++ b/driver-core/src/main/java/com/datastax/driver/core/Connection.java
@@ -1,4 +1,4 @@
-package com.datastax.driver.core.transport;
+package com.datastax.driver.core;
 
 import java.net.InetSocketAddress;
 import java.util.Iterator;
@@ -24,7 +24,7 @@
 /**
  * A connection to a Cassandra Node.
  */
-public class Connection extends org.apache.cassandra.transport.Connection
+class Connection extends org.apache.cassandra.transport.Connection
 {
     private static final Logger logger = LoggerFactory.getLogger(Connection.class);
 
@@ -385,7 +385,7 @@ public void errorOutAllHandler(ConnectionException ce) {
     }
 
     // TODO: Do we really need that after all?
-    public static class Future extends SimpleFuture implements ResponseCallback {
+    static class Future extends SimpleFuture implements ResponseCallback {
 
         private final Message.Request request;
 
@@ -406,7 +406,7 @@ public void onException(Exception exception) {
         }
     }
 
-    public interface ResponseCallback {
+    interface ResponseCallback {
         public Message.Request request();
         public void onSet(Message.Response response);
         public void onException(Exception exception);
diff --git a/driver-core/src/main/java/com/datastax/driver/core/transport/ConnectionException.java b/driver-core/src/main/java/com/datastax/driver/core/ConnectionException.java
similarity index 84%
rename from driver-core/src/main/java/com/datastax/driver/core/transport/ConnectionException.java
rename to driver-core/src/main/java/com/datastax/driver/core/ConnectionException.java
index 4797408ede1..5c02388fbd2 100644
--- a/driver-core/src/main/java/com/datastax/driver/core/transport/ConnectionException.java
+++ b/driver-core/src/main/java/com/datastax/driver/core/ConnectionException.java
@@ -1,8 +1,8 @@
-package com.datastax.driver.core.transport;
+package com.datastax.driver.core;
 
 import java.net.InetSocketAddress;
 
-public class ConnectionException extends Exception
+class ConnectionException extends Exception
 {
     public final InetSocketAddress address;
 
diff --git a/driver-core/src/main/java/com/datastax/driver/core/ConsistencyLevel.java b/driver-core/src/main/java/com/datastax/driver/core/ConsistencyLevel.java
index b5b06fc896c..7e947a191c0 100644
--- a/driver-core/src/main/java/com/datastax/driver/core/ConsistencyLevel.java
+++ b/driver-core/src/main/java/com/datastax/driver/core/ConsistencyLevel.java
@@ -11,7 +11,7 @@ public enum ConsistencyLevel
     LOCAL_QUORUM,
     EACH_QUORUM;
 
-    public static ConsistencyLevel from(org.apache.cassandra.db.ConsistencyLevel cl) {
+    static ConsistencyLevel from(org.apache.cassandra.db.ConsistencyLevel cl) {
         switch (cl) {
             case ANY: return ANY;
             case ONE: return ONE;
diff --git a/driver-core/src/main/java/com/datastax/driver/core/ControlConnection.java b/driver-core/src/main/java/com/datastax/driver/core/ControlConnection.java
index 1d5516adc24..abec4de2c7b 100644
--- a/driver-core/src/main/java/com/datastax/driver/core/ControlConnection.java
+++ b/driver-core/src/main/java/com/datastax/driver/core/ControlConnection.java
@@ -5,10 +5,6 @@
 import java.util.concurrent.atomic.AtomicReference;
 import java.util.concurrent.*;
 
-import com.datastax.driver.core.transport.Connection;
-import com.datastax.driver.core.transport.ConnectionException;
-import com.datastax.driver.core.utils.RoundRobinPolicy;
-
 import org.apache.cassandra.transport.Event;
 import org.apache.cassandra.transport.messages.RegisterMessage;
 import org.apache.cassandra.transport.messages.QueryMessage;
@@ -117,9 +113,9 @@ private void refreshSchema(Connection connection) {
         ResultSet.Future ksFuture = new ResultSet.Future(null, new QueryMessage(SELECT_KEYSPACES));
         ResultSet.Future cfFuture = new ResultSet.Future(null, new QueryMessage(SELECT_COLUMN_FAMILIES));
         ResultSet.Future colsFuture = new ResultSet.Future(null, new QueryMessage(SELECT_COLUMNS));
-        connection.write(ksFuture);
-        connection.write(cfFuture);
-        connection.write(colsFuture);
+        connection.write(ksFuture.callback);
+        connection.write(cfFuture.callback);
+        connection.write(colsFuture.callback);
 
         // TODO: we should probably do something more fancy, like check if the schema changed and notify whoever wants to be notified
         cluster.metadata.rebuildSchema(ksFuture.get(), cfFuture.get(), colsFuture.get());
@@ -139,7 +135,7 @@ private void refreshNodeList(Connection connection) {
         // Make sure we're up to date on node list
         try {
             ResultSet.Future peersFuture = new ResultSet.Future(null, new QueryMessage(SELECT_PEERS));
-            connection.write(peersFuture);
+            connection.write(peersFuture.callback);
 
             Set knownHosts = new HashSet();
             for (Host host : cluster.metadata.allHosts())
diff --git a/driver-core/src/main/java/com/datastax/driver/core/ConvictionPolicy.java b/driver-core/src/main/java/com/datastax/driver/core/ConvictionPolicy.java
index 0558e7af642..cbb3099eee7 100644
--- a/driver-core/src/main/java/com/datastax/driver/core/ConvictionPolicy.java
+++ b/driver-core/src/main/java/com/datastax/driver/core/ConvictionPolicy.java
@@ -1,7 +1,5 @@
 package com.datastax.driver.core;
 
-import com.datastax.driver.core.transport.ConnectionException;
-
 /**
  * The policy with which to decide whether a host should be considered down.
  */
diff --git a/driver-core/src/main/java/com/datastax/driver/core/Host.java b/driver-core/src/main/java/com/datastax/driver/core/Host.java
index 0f8493a9767..031f9b0aed4 100644
--- a/driver-core/src/main/java/com/datastax/driver/core/Host.java
+++ b/driver-core/src/main/java/com/datastax/driver/core/Host.java
@@ -6,8 +6,6 @@
 import java.util.concurrent.CopyOnWriteArraySet;
 import java.util.concurrent.ScheduledFuture;
 
-import com.datastax.driver.core.transport.ConnectionException;
-
 /**
  * A Cassandra node.
  *
diff --git a/driver-core/src/main/java/com/datastax/driver/core/pool/HostConnectionPool.java b/driver-core/src/main/java/com/datastax/driver/core/HostConnectionPool.java
similarity index 97%
rename from driver-core/src/main/java/com/datastax/driver/core/pool/HostConnectionPool.java
rename to driver-core/src/main/java/com/datastax/driver/core/HostConnectionPool.java
index 9a89c45d628..68635250318 100644
--- a/driver-core/src/main/java/com/datastax/driver/core/pool/HostConnectionPool.java
+++ b/driver-core/src/main/java/com/datastax/driver/core/HostConnectionPool.java
@@ -1,16 +1,13 @@
-package com.datastax.driver.core.pool;
+package com.datastax.driver.core;
 
 import java.util.concurrent.*;
 import java.util.concurrent.atomic.AtomicBoolean;
 import java.util.concurrent.atomic.AtomicInteger;
 
-import com.datastax.driver.core.Host;
-import com.datastax.driver.core.transport.*;
-
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
-public class HostConnectionPool {
+class HostConnectionPool {
 
     private static final Logger logger = LoggerFactory.getLogger(HostConnectionPool.class);
 
diff --git a/driver-core/src/main/java/com/datastax/driver/core/InvalidTypeException.java b/driver-core/src/main/java/com/datastax/driver/core/InvalidTypeException.java
deleted file mode 100644
index 8629e2f1ac5..00000000000
--- a/driver-core/src/main/java/com/datastax/driver/core/InvalidTypeException.java
+++ /dev/null
@@ -1,8 +0,0 @@
-package com.datastax.driver.core;
-
-public class InvalidTypeException extends RuntimeException {
-
-    public InvalidTypeException(String msg) {
-        super(msg);
-    }
-}
diff --git a/driver-core/src/main/java/com/datastax/driver/core/ResultSet.java b/driver-core/src/main/java/com/datastax/driver/core/ResultSet.java
index 1237ebd493c..e6e4bc5ef85 100644
--- a/driver-core/src/main/java/com/datastax/driver/core/ResultSet.java
+++ b/driver-core/src/main/java/com/datastax/driver/core/ResultSet.java
@@ -14,7 +14,6 @@
 import org.apache.cassandra.transport.messages.ResultMessage;
 
 import com.datastax.driver.core.exceptions.*;
-import com.datastax.driver.core.transport.Connection;
 import com.datastax.driver.core.utils.SimpleFuture;
 
 import org.slf4j.Logger;
@@ -142,53 +141,58 @@ public String toString() {
         return sb.toString();
     }
 
-    public static class Future extends SimpleFuture implements Connection.ResponseCallback
+    public static class Future extends SimpleFuture
     {
         private final Session.Manager session;
         private final Message.Request request;
+        final ResponseCallback callback = new ResponseCallback();
 
         Future(Session.Manager session, Message.Request request) {
             this.session = session;
             this.request = request;
         }
 
-        // TODO: We don't really want to expose that :(
-        // (Nor onSet/onException if we can avoid it)
-        public Message.Request request() {
-            return request;
-        }
+        // The only reason this exists is because we don't want to expose its
+        // method publicly (otherwise Future could have implemented
+        // Connection.ResponseCallback directly)
+        class ResponseCallback implements Connection.ResponseCallback {
 
-        public void onSet(Message.Response response) {
-            try {
-                switch (response.type) {
-                    case RESULT:
-                        ResultMessage rm = (ResultMessage)response;
-                        if (rm.kind == ResultMessage.Kind.SET_KEYSPACE) {
-                            // TODO: I think there is a problem if someone set
-                            // a keyspace, then drop it. But that basically
-                            // means we should reset the keyspace to null in that case.
-
-                            // propagate the keyspace change to other connections
-                            session.poolsConfiguration.setKeyspace(((ResultMessage.SetKeyspace)rm).keyspace);
-                        }
-                        super.set(ResultSet.fromMessage(rm));
-                        break;
-                    case ERROR:
-                        super.setException(convertException(((ErrorMessage)response).error));
-                        break;
-                    default:
-                        // TODO: handle errors (set the connection to defunct as this mean it is in a bad state)
-                        logger.info("Got " + response);
-                        throw new RuntimeException();
+            public Message.Request request() {
+                return request;
+            }
+
+            public void onSet(Message.Response response) {
+                try {
+                    switch (response.type) {
+                        case RESULT:
+                            ResultMessage rm = (ResultMessage)response;
+                            if (rm.kind == ResultMessage.Kind.SET_KEYSPACE) {
+                                // TODO: I think there is a problem if someone set
+                                // a keyspace, then drop it. But that basically
+                                // means we should reset the keyspace to null in that case.
+
+                                // propagate the keyspace change to other connections
+                                session.poolsConfiguration.setKeyspace(((ResultMessage.SetKeyspace)rm).keyspace);
+                            }
+                            set(ResultSet.fromMessage(rm));
+                            break;
+                        case ERROR:
+                            setException(convertException(((ErrorMessage)response).error));
+                            break;
+                        default:
+                            // TODO: handle errors (set the connection to defunct as this mean it is in a bad state)
+                            logger.info("Got " + response);
+                            throw new RuntimeException();
+                    }
+                } catch (Exception e) {
+                    // TODO: do better
+                    throw new RuntimeException(e);
                 }
-            } catch (Exception e) {
-                // TODO: do better
-                throw new RuntimeException(e);
             }
-        }
 
-        public void onException(Exception exception) {
-            super.setException(exception);
+            public void onException(Exception exception) {
+                setException(exception);
+            }
         }
 
         /**
diff --git a/driver-core/src/main/java/com/datastax/driver/core/RetryingCallback.java b/driver-core/src/main/java/com/datastax/driver/core/RetryingCallback.java
index 35c9a0e218e..6fb314bb863 100644
--- a/driver-core/src/main/java/com/datastax/driver/core/RetryingCallback.java
+++ b/driver-core/src/main/java/com/datastax/driver/core/RetryingCallback.java
@@ -8,8 +8,6 @@
 import java.util.concurrent.TimeUnit;
 
 import com.datastax.driver.core.exceptions.*;
-import com.datastax.driver.core.transport.*;
-import com.datastax.driver.core.pool.HostConnectionPool;
 import com.datastax.driver.core.utils.SimpleFuture;
 
 import org.apache.cassandra.transport.Message;
diff --git a/driver-core/src/main/java/com/datastax/driver/core/utils/RoundRobinPolicy.java b/driver-core/src/main/java/com/datastax/driver/core/RoundRobinPolicy.java
similarity index 98%
rename from driver-core/src/main/java/com/datastax/driver/core/utils/RoundRobinPolicy.java
rename to driver-core/src/main/java/com/datastax/driver/core/RoundRobinPolicy.java
index 6a5b67295fa..d0bfe5aa607 100644
--- a/driver-core/src/main/java/com/datastax/driver/core/utils/RoundRobinPolicy.java
+++ b/driver-core/src/main/java/com/datastax/driver/core/RoundRobinPolicy.java
@@ -1,4 +1,4 @@
-package com.datastax.driver.core.utils;
+package com.datastax.driver.core;
 
 import java.util.*;
 import java.util.concurrent.atomic.AtomicInteger;
diff --git a/driver-core/src/main/java/com/datastax/driver/core/Session.java b/driver-core/src/main/java/com/datastax/driver/core/Session.java
index c0686a44d0b..96eb1722180 100644
--- a/driver-core/src/main/java/com/datastax/driver/core/Session.java
+++ b/driver-core/src/main/java/com/datastax/driver/core/Session.java
@@ -5,9 +5,6 @@
 import java.util.concurrent.*;
 
 import com.datastax.driver.core.exceptions.*;
-import com.datastax.driver.core.pool.HostConnectionPool;
com.datastax.driver.core.transport.Connection; -import com.datastax.driver.core.transport.ConnectionException; import org.apache.cassandra.transport.Message; import org.apache.cassandra.transport.messages.*; @@ -305,7 +302,7 @@ public void execute(Connection.ResponseCallback callback) { public ResultSet.Future executeQuery(Message.Request msg) { ResultSet.Future future = new ResultSet.Future(this, msg); - execute(future); + execute(future.callback); return future; } } diff --git a/driver-core/src/main/java/com/datastax/driver/core/utils/SimpleConvictionPolicy.java b/driver-core/src/main/java/com/datastax/driver/core/SimpleConvictionPolicy.java similarity index 75% rename from driver-core/src/main/java/com/datastax/driver/core/utils/SimpleConvictionPolicy.java rename to driver-core/src/main/java/com/datastax/driver/core/SimpleConvictionPolicy.java index 2c7ebcdb05c..d193cd8ebc8 100644 --- a/driver-core/src/main/java/com/datastax/driver/core/utils/SimpleConvictionPolicy.java +++ b/driver-core/src/main/java/com/datastax/driver/core/SimpleConvictionPolicy.java @@ -1,8 +1,4 @@ -package com.datastax.driver.core.utils; - -import com.datastax.driver.core.ConvictionPolicy; -import com.datastax.driver.core.Host; -import com.datastax.driver.core.transport.ConnectionException; +package com.datastax.driver.core; public class SimpleConvictionPolicy implements ConvictionPolicy { diff --git a/driver-core/src/main/java/com/datastax/driver/core/transport/StreamIdHandler.java b/driver-core/src/main/java/com/datastax/driver/core/StreamIdHandler.java similarity index 92% rename from driver-core/src/main/java/com/datastax/driver/core/transport/StreamIdHandler.java rename to driver-core/src/main/java/com/datastax/driver/core/StreamIdHandler.java index dd95aaa74e7..c1cf830573d 100644 --- a/driver-core/src/main/java/com/datastax/driver/core/transport/StreamIdHandler.java +++ b/driver-core/src/main/java/com/datastax/driver/core/StreamIdHandler.java @@ -1,4 +1,4 @@ -package com.datastax.driver.core.transport; +package com.datastax.driver.core; import java.util.BitSet; import java.util.concurrent.atomic.AtomicInteger; @@ -6,7 +6,7 @@ /** * Handle assigning stream id to message. */ -public class StreamIdHandler { +class StreamIdHandler { // Stream ids are one byte long, signed and we only handle positive values ourselves. 
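For context on the class renamed here: each connection multiplexes its in-flight requests by tagging frames with a one-byte, signed stream id, and negative ids are reserved for server-initiated messages (see the "streamId < 0" comment in Cluster above), leaving the 128 non-negative values that STREAM_ID_COUNT captures just below. A rough allocator sketch under those constraints, simplified to a synchronized BitSet where the actual class combines a BitSet with atomics:

    import java.util.BitSet;

    class StreamIds {
        // One signed byte; negatives belong to server pushes, so only
        // 0..127 are ever assigned to client requests.
        private static final int STREAM_ID_COUNT = 128;
        private final BitSet inUse = new BitSet(STREAM_ID_COUNT);

        synchronized int borrow() {
            int id = inUse.nextClearBit(0);
            if (id >= STREAM_ID_COUNT)
                throw new IllegalStateException("all stream ids in use");
            inUse.set(id);
            return id;
        }

        synchronized void release(int id) {
            inUse.clear(id);
        }
    }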
private static final int STREAM_ID_COUNT = 128; diff --git a/driver-core/src/main/java/com/datastax/driver/core/TableMetadata.java b/driver-core/src/main/java/com/datastax/driver/core/TableMetadata.java index 453b6d55c9b..211374ca0a8 100644 --- a/driver-core/src/main/java/com/datastax/driver/core/TableMetadata.java +++ b/driver-core/src/main/java/com/datastax/driver/core/TableMetadata.java @@ -2,8 +2,6 @@ import java.util.*; -import com.datastax.driver.core.transport.Codec; - import org.apache.cassandra.exceptions.RequestValidationException; import org.apache.cassandra.db.marshal.*; @@ -15,7 +13,7 @@ */ public class TableMetadata { - private static final String CF_NAME = "columnfamily_name"; + static final String CF_NAME = "columnfamily_name"; private static final String KEY_VALIDATOR = "key_validator"; private static final String COMPARATOR = "comparator"; diff --git a/driver-core/src/main/java/com/datastax/driver/core/transport/TransportException.java b/driver-core/src/main/java/com/datastax/driver/core/TransportException.java similarity index 79% rename from driver-core/src/main/java/com/datastax/driver/core/transport/TransportException.java rename to driver-core/src/main/java/com/datastax/driver/core/TransportException.java index 1f7b123966b..39747ce486e 100644 --- a/driver-core/src/main/java/com/datastax/driver/core/transport/TransportException.java +++ b/driver-core/src/main/java/com/datastax/driver/core/TransportException.java @@ -1,4 +1,4 @@ -package com.datastax.driver.core.transport; +package com.datastax.driver.core; import java.net.InetSocketAddress; @@ -6,7 +6,7 @@ * A connection exception that has to do with the transport itself, i.e. that * suggests the node is down. */ -public class TransportException extends ConnectionException +class TransportException extends ConnectionException { public TransportException(InetSocketAddress address, String msg, Throwable cause) { diff --git a/driver-core/src/main/java/com/datastax/driver/core/exceptions/InvalidTypeException.java b/driver-core/src/main/java/com/datastax/driver/core/exceptions/InvalidTypeException.java new file mode 100644 index 00000000000..54f9d427058 --- /dev/null +++ b/driver-core/src/main/java/com/datastax/driver/core/exceptions/InvalidTypeException.java @@ -0,0 +1,8 @@ +package com.datastax.driver.core.exceptions; + +public class InvalidTypeException extends DriverUncheckedException { + + public InvalidTypeException(String msg) { + super(msg); + } +} From 31cdb9e4e0717084f9af82df7a88616c04dd3621 Mon Sep 17 00:00:00 2001 From: Sylvain Lebresne Date: Fri, 5 Oct 2012 19:15:41 +0200 Subject: [PATCH 040/719] Exposes list of all keyspaces --- .../java/com/datastax/driver/core/ClusterMetadata.java | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/driver-core/src/main/java/com/datastax/driver/core/ClusterMetadata.java b/driver-core/src/main/java/com/datastax/driver/core/ClusterMetadata.java index ddb7b6ea922..d04df1849c1 100644 --- a/driver-core/src/main/java/com/datastax/driver/core/ClusterMetadata.java +++ b/driver-core/src/main/java/com/datastax/driver/core/ClusterMetadata.java @@ -118,6 +118,15 @@ public KeyspaceMetadata getKeyspace(String keyspace) { return keyspaces.get(keyspace); } + /** + * Returns a list of all the defined keyspaces. + * + * @return a list of all the defined keyspaces. + */ + public List getKeyspaces() { + return new ArrayList(keyspaces.values()); + } + /** * Return a {@code String} containing CQL queries representing the schema * of this cluster. 
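Worth noting about the hunk above: getKeyspaces() hands back a fresh ArrayList rather than a view of the internal ConcurrentHashMap, so callers can iterate it while the control connection rebuilds the schema concurrently. A short usage sketch against the API as of this patch; the raw List in the hunk is presumably List<KeyspaceMetadata> (generic parameters appear stripped throughout these patches), Cluster.getMetadata() is taken from the tests later in the series, and getName() on KeyspaceMetadata is assumed, since only getTable()/getTables() appear in the diffs:

    Cluster cluster = new Cluster.Builder().addContactPoint("localhost").build();
    // Safe even if a schema refresh lands mid-iteration: the list is a copy.
    for (KeyspaceMetadata ksm : cluster.getMetadata().getKeyspaces()) {
        System.out.println(ksm.getName());   // assumed accessor
    }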
From 0fae633ccbc7ebe4505f4960799da492aeec312a Mon Sep 17 00:00:00 2001 From: Sylvain Lebresne Date: Mon, 8 Oct 2012 19:19:18 +0200 Subject: [PATCH 041/719] Handle schema updates --- .../com/datastax/driver/core/Cluster.java | 67 ++++++++--- .../datastax/driver/core/ClusterMetadata.java | 71 +++++++++--- .../datastax/driver/core/ColumnMetadata.java | 3 + .../driver/core/ControlConnection.java | 60 +++++++--- .../driver/core/KeyspaceMetadata.java | 4 +- .../driver/core/PreparedStatement.java | 8 +- .../com/datastax/driver/core/ResultSet.java | 40 +++++-- .../datastax/driver/core/TableMetadata.java | 8 +- .../com/datastax/driver/core/SessionTest.java | 104 +++++++++++------- 9 files changed, 264 insertions(+), 101 deletions(-) diff --git a/driver-core/src/main/java/com/datastax/driver/core/Cluster.java b/driver-core/src/main/java/com/datastax/driver/core/Cluster.java index 049e66d4923..4e0df2aa3f0 100644 --- a/driver-core/src/main/java/com/datastax/driver/core/Cluster.java +++ b/driver-core/src/main/java/com/datastax/driver/core/Cluster.java @@ -44,16 +44,8 @@ public class Cluster { final Manager manager; - private Cluster(List contactPoints) { - try - { - this.manager = new Manager(contactPoints); - } - catch (ConnectionException e) - { - // TODO: We should hide that somehow and only send a (non-checked) exception if there is no node to connect to - throw new RuntimeException(e); - } + private Cluster(List contactPoints) throws NoHostAvailableException { + this.manager = new Manager(contactPoints); } /** @@ -64,8 +56,11 @@ private Cluster(List contactPoints) { * * @param config the Cluster.Configuration to use * @return the newly created Cluster instance + * + * @throws NoHostAvailableException if no host amongst the contact points + * can be reached. */ - public static Cluster buildFrom(Configuration config) { + public static Cluster buildFrom(Configuration config) throws NoHostAvailableException { return new Cluster(config.contactPoints()); } @@ -263,7 +258,17 @@ public Builder addContactPoints(InetSocketAddress... addresses) { return this; } - public Cluster build() { + /** + * Build the cluster with the configured set of initial contact points. + * + * This is a shorthand for {@code Cluster.buildFrom(this)}. + * + * @return the newly build Cluster instance. + * + * @throws NoHostAvailableException if none of the contact points + * provided can be reached. + */ + public Cluster build() throws NoHostAvailableException { return Cluster.buildFrom(this); } } @@ -298,7 +303,7 @@ class Manager implements Host.StateListener, Connection.DefaultResponseHandler { // TODO: give a name to the threads of this executor final ExecutorService executor = Executors.newCachedThreadPool(new NamedThreadFactory("Cassandra Java Driver worker")); - private Manager(List contactPoints) throws ConnectionException { + private Manager(List contactPoints) throws NoHostAvailableException { this.metadata = new ClusterMetadata(this); this.contactPoints = contactPoints; this.connectionFactory = new Connection.Factory(this); @@ -307,7 +312,7 @@ private Manager(List contactPoints) throws ConnectionExceptio addHost(address, false); this.controlConnection = new ControlConnection(this); - controlConnection.reconnect(); + controlConnection.connect(); } private Session newSession() { @@ -389,6 +394,17 @@ public void removeHost(Host host) { onRemove(host); } + // TODO: take a lot or something so that if a a getSchema() is called, + // we wait for that to be finished. And maybe avoid multiple refresh at + // the same time. 
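The submitSchemaRefresh() method added just below is the counterpart of the event handling that follows: the actual system-table queries are pushed onto the cluster's worker executor rather than run on the thread that observed the change, and the TODO concedes that concurrent or redundant refreshes are not yet coalesced or synchronized with readers. The shape of the pattern, reduced to JDK primitives with hypothetical names:

    import java.util.concurrent.ExecutorService;
    import java.util.concurrent.Executors;

    class SchemaRefresher {
        private final ExecutorService worker = Executors.newCachedThreadPool();

        // Returns immediately; the slow part runs on a worker thread.
        void submitRefresh(final String keyspace, final String table) {
            worker.submit(new Runnable() {
                public void run() {
                    doRefresh(keyspace, table);   // queries the system tables
                }
            });
        }

        void doRefresh(String keyspace, String table) { /* ... */ }
    }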
+ public void submitSchemaRefresh(final String keyspace, final String table) { + executor.submit(new Runnable() { + public void run() { + controlConnection.refreshSchema(keyspace, table); + } + }); + } + // Called when some message has been received but has been initiated from the server (streamId < 0). public void handle(Message.Response response) { @@ -437,6 +453,29 @@ public void run() { break; } break; + case SCHEMA_CHANGE: + Event.SchemaChange scc = (Event.SchemaChange)event; + switch (scc.change) { + case CREATED: + if (scc.table.isEmpty()) + submitSchemaRefresh(null, null); + else + submitSchemaRefresh(scc.keyspace, null); + break; + case DROPPED: + if (scc.table.isEmpty()) + submitSchemaRefresh(null, null); + else + submitSchemaRefresh(scc.keyspace, null); + break; + case UPDATED: + if (scc.table.isEmpty()) + submitSchemaRefresh(scc.keyspace, null); + else + submitSchemaRefresh(scc.keyspace, scc.table); + break; + } + break; } } }, 1, TimeUnit.SECONDS); diff --git a/driver-core/src/main/java/com/datastax/driver/core/ClusterMetadata.java b/driver-core/src/main/java/com/datastax/driver/core/ClusterMetadata.java index d04df1849c1..3b6746317e5 100644 --- a/driver-core/src/main/java/com/datastax/driver/core/ClusterMetadata.java +++ b/driver-core/src/main/java/com/datastax/driver/core/ClusterMetadata.java @@ -5,11 +5,16 @@ import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.ConcurrentMap; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + /** * Keeps metadata on the connected cluster, including known nodes and schema definitions. */ public class ClusterMetadata { + private static final Logger logger = LoggerFactory.getLogger(ClusterMetadata.class); + private final Cluster.Manager cluster; private final ConcurrentMap hosts = new ConcurrentHashMap(); private final ConcurrentMap keyspaces = new ConcurrentHashMap(); @@ -18,11 +23,13 @@ public class ClusterMetadata { this.cluster = cluster; } - void rebuildSchema(ResultSet ks, ResultSet cfs, ResultSet cols) { + // Synchronized to make it easy to detect dropped keyspaces + synchronized void rebuildSchema(String keyspace, String table, ResultSet ks, ResultSet cfs, ResultSet cols) { Map> cfDefs = new HashMap>(); Map>> colsDefs = new HashMap>>(); + // Gather cf defs for (CQLRow row : cfs) { String ksName = row.getString(KeyspaceMetadata.KS_NAME); List l = cfDefs.get(ksName); @@ -33,6 +40,7 @@ void rebuildSchema(ResultSet ks, ResultSet cfs, ResultSet cols) { l.add(row); } + // Gather columns per Cf for (CQLRow row : cols) { String ksName = row.getString(KeyspaceMetadata.KS_NAME); String cfName = row.getString(TableMetadata.CF_NAME); @@ -49,24 +57,57 @@ void rebuildSchema(ResultSet ks, ResultSet cfs, ResultSet cols) { l.add(row); } - for (CQLRow ksRow : ks) { - String ksName = ksRow.getString(KeyspaceMetadata.KS_NAME); - KeyspaceMetadata ksm = KeyspaceMetadata.build(ksRow); + if (table == null) { + assert ks != null; + Set addedKs = new HashSet(); + for (CQLRow ksRow : ks) { + String ksName = ksRow.getString(KeyspaceMetadata.KS_NAME); + KeyspaceMetadata ksm = KeyspaceMetadata.build(ksRow); - if (cfDefs.get(ksName) != null) { - for (CQLRow cfRow : cfDefs.get(ksName)) { - String cfName = cfRow.getString(TableMetadata.CF_NAME); - TableMetadata tm = TableMetadata.build(ksm, cfRow); - - if (colsDefs.get(ksName) == null || colsDefs.get(ksName).get(cfName) == null) - continue; + if (cfDefs.containsKey(ksName)) { + buildTableMetadata(ksm, cfDefs.get(ksName), colsDefs.get(ksName)); + } + addedKs.add(ksName); + 
keyspaces.put(ksName, ksm); + } - for (CQLRow colRow : colsDefs.get(ksName).get(cfName)) { - ColumnMetadata cm = ColumnMetadata.build(tm, colRow); - } + // If keyspace is null, it means we're rebuilding from scratch, so + // remove anything that was not just added as it means it's a dropped keyspace + if (keyspace == null) { + Iterator iter = keyspaces.keySet().iterator(); + while (iter.hasNext()) { + if (!addedKs.contains(iter.next())) + iter.remove(); } } - keyspaces.put(ksName, ksm); + } else { + assert keyspace != null; + KeyspaceMetadata ksm = keyspaces.get(keyspace); + + // If we update a keyspace we don't know about, something went + // wrong. Log an error an schedule a full schema rebuilt. + if (ksm == null) { + logger.error(String.format("Asked to rebuild table %s.%s but I don't know keyspace %s", keyspace, table, keyspace)); + cluster.submitSchemaRefresh(null, null); + return; + } + + if (cfDefs.containsKey(keyspace)) + buildTableMetadata(ksm, cfDefs.get(keyspace), colsDefs.get(keyspace)); + } + } + + private static void buildTableMetadata(KeyspaceMetadata ksm, List cfRows, Map> colsDefs) { + for (CQLRow cfRow : cfRows) { + String cfName = cfRow.getString(TableMetadata.CF_NAME); + TableMetadata tm = TableMetadata.build(ksm, cfRow); + + if (colsDefs == null || colsDefs.get(cfName) == null) + continue; + + for (CQLRow colRow : colsDefs.get(cfName)) { + ColumnMetadata cm = ColumnMetadata.build(tm, colRow); + } } } diff --git a/driver-core/src/main/java/com/datastax/driver/core/ColumnMetadata.java b/driver-core/src/main/java/com/datastax/driver/core/ColumnMetadata.java index 5af5c859ff6..6751858815f 100644 --- a/driver-core/src/main/java/com/datastax/driver/core/ColumnMetadata.java +++ b/driver-core/src/main/java/com/datastax/driver/core/ColumnMetadata.java @@ -132,6 +132,9 @@ public String asCQLQuery() { } private static IndexMetadata build(ColumnMetadata column, CQLRow row) { + if (row == null) + return null; + String type = row.getString(INDEX_TYPE); if (type == null) return null; diff --git a/driver-core/src/main/java/com/datastax/driver/core/ControlConnection.java b/driver-core/src/main/java/com/datastax/driver/core/ControlConnection.java index abec4de2c7b..273e3e11ae2 100644 --- a/driver-core/src/main/java/com/datastax/driver/core/ControlConnection.java +++ b/driver-core/src/main/java/com/datastax/driver/core/ControlConnection.java @@ -14,6 +14,8 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import com.datastax.driver.core.exceptions.NoHostAvailableException; + class ControlConnection implements Host.StateListener { private static final Logger logger = LoggerFactory.getLogger(ControlConnection.class); @@ -37,14 +39,23 @@ public ControlConnection(Cluster.Manager cluster) { this.balancingPolicy = RoundRobinPolicy.Factory.INSTANCE.create(cluster.metadata.allHosts()); } - public void reconnect() { + // Only for the initial connection. 
Does not schedule retries if it fails + public void connect() throws NoHostAvailableException { + setNewConnection(reconnectInternal()); + } + + private void reconnect() { try { setNewConnection(reconnectInternal()); - } catch (ConnectionException e) { + } catch (NoHostAvailableException e) { logger.error("[Control connection] Cannot connect to any host, scheduling retry"); new AbstractReconnectionHandler(cluster.reconnectionExecutor, reconnectionPolicyFactory.create(), reconnectionAttempt) { protected Connection tryReconnect() throws ConnectionException { - return reconnectInternal(); + try { + return reconnectInternal(); + } catch (NoHostAvailableException e) { + throw new ConnectionException(null, e.getMessage()); + } } protected void onReconnection(Connection connection) { @@ -71,23 +82,27 @@ private void setNewConnection(Connection newConnection) { old.close(); } - private Connection reconnectInternal() throws ConnectionException { + private Connection reconnectInternal() throws NoHostAvailableException { Iterator iter = balancingPolicy.newQueryPlan(); + Map errors = null; while (iter.hasNext()) { Host host = iter.next(); try { return tryConnect(host); } catch (ConnectionException e) { + if (errors == null) + errors = new HashMap(); + errors.put(e.address, e.getMessage()); + if (iter.hasNext()) { logger.debug(String.format("[Control connection] Failed connecting to %s, trying next host", host)); } else { logger.debug(String.format("[Control connection] Failed connecting to %s, no more host to try", host)); - throw e; } } } - throw new ConnectionException(null, "Cannot connect to any host"); + throw new NoHostAvailableException(errors == null ? Collections.emptyMap(): errors); } private Connection tryConnect(Host host) throws ConnectionException { @@ -97,28 +112,45 @@ private Connection tryConnect(Host host) throws ConnectionException { List evs = Arrays.asList(new Event.Type[]{ Event.Type.TOPOLOGY_CHANGE, Event.Type.STATUS_CHANGE, - //Event.Type.SCHEMA_CHANGE, + Event.Type.SCHEMA_CHANGE, }); connection.write(new RegisterMessage(evs)); logger.trace("[Control connection] Refreshing schema"); - refreshSchema(connection); + refreshSchema(connection, null, null); refreshNodeList(connection); return connection; } - private void refreshSchema(Connection connection) { + public void refreshSchema(String keyspace, String table) { + refreshSchema(connectionRef.get(), keyspace, table); + } + + private void refreshSchema(Connection connection, String keyspace, String table) { // Make sure we're up to date on schema try { - ResultSet.Future ksFuture = new ResultSet.Future(null, new QueryMessage(SELECT_KEYSPACES)); - ResultSet.Future cfFuture = new ResultSet.Future(null, new QueryMessage(SELECT_COLUMN_FAMILIES)); - ResultSet.Future colsFuture = new ResultSet.Future(null, new QueryMessage(SELECT_COLUMNS)); - connection.write(ksFuture.callback); + logger.trace(String.format("[Control connection] Refreshing schema for %s.%s", keyspace, table)); + + String whereClause = ""; + if (keyspace != null) { + whereClause = " WHERE keyspace_name = '" + keyspace + "'"; + if (table != null) + whereClause += " AND columnfamily_name = '" + table + "'"; + } + + ResultSet.Future ksFuture = table == null + ? 
new ResultSet.Future(null, new QueryMessage(SELECT_KEYSPACES + whereClause)) + : null; + ResultSet.Future cfFuture = new ResultSet.Future(null, new QueryMessage(SELECT_COLUMN_FAMILIES + whereClause)); + ResultSet.Future colsFuture = new ResultSet.Future(null, new QueryMessage(SELECT_COLUMNS + whereClause)); + + if (ksFuture != null) + connection.write(ksFuture.callback); connection.write(cfFuture.callback); connection.write(colsFuture.callback); // TODO: we should probably do something more fancy, like check if the schema changed and notify whoever wants to be notified - cluster.metadata.rebuildSchema(ksFuture.get(), cfFuture.get(), colsFuture.get()); + cluster.metadata.rebuildSchema(keyspace, table, ksFuture == null ? null : ksFuture.get(), cfFuture.get(), colsFuture.get()); } catch (ConnectionException e) { logger.debug(String.format("[Control connection] Connection error when refeshing schema (%s)", e.getMessage())); reconnect(); diff --git a/driver-core/src/main/java/com/datastax/driver/core/KeyspaceMetadata.java b/driver-core/src/main/java/com/datastax/driver/core/KeyspaceMetadata.java index 6ac9220e3d4..ebbc7307db3 100644 --- a/driver-core/src/main/java/com/datastax/driver/core/KeyspaceMetadata.java +++ b/driver-core/src/main/java/com/datastax/driver/core/KeyspaceMetadata.java @@ -1,5 +1,7 @@ package com.datastax.driver.core; +import java.util.concurrent.ConcurrentHashMap; + import java.util.*; /** @@ -15,7 +17,7 @@ public class KeyspaceMetadata { private final String name; private final boolean durableWrites; private final Map replication = new HashMap(); - private final Map tables = new HashMap(); + private final Map tables = new ConcurrentHashMap(); private KeyspaceMetadata(String name, boolean durableWrites) { this.name = name; diff --git a/driver-core/src/main/java/com/datastax/driver/core/PreparedStatement.java b/driver-core/src/main/java/com/datastax/driver/core/PreparedStatement.java index d106ab050b5..3176f1c50b2 100644 --- a/driver-core/src/main/java/com/datastax/driver/core/PreparedStatement.java +++ b/driver-core/src/main/java/com/datastax/driver/core/PreparedStatement.java @@ -2,6 +2,8 @@ import org.apache.cassandra.transport.messages.ResultMessage; +import com.datastax.driver.core.exceptions.DriverInternalError; + /** * Represents a prepared statement, a query with bound variables that has been * prepared (pre-parsed) by the database. 
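Two details of the refreshSchema() change above are easy to miss: the generated WHERE clause narrows all three system-table queries to the affected keyspace or table, and when a single table is refreshed the keyspace query is skipped entirely (ksFuture stays null, and rebuildSchema() is handed null in its place). The clause construction, isolated as a plain sketch; note the caller never passes a non-null table with a null keyspace:

    // null/null      -> full refresh (empty clause)
    // keyspace only  -> one keyspace's rows
    // keyspace+table -> a single table's rows
    static String whereClause(String keyspace, String table) {
        if (keyspace == null)
            return "";
        String clause = " WHERE keyspace_name = '" + keyspace + "'";
        if (table != null)
            clause += " AND columnfamily_name = '" + table + "'";
        return clause;
    }

String concatenation is tolerable here only because the names come from server-issued schema events rather than user input.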
@@ -30,12 +32,8 @@ static PreparedStatement fromMessage(ResultMessage msg) { defs[i] = Columns.Definition.fromTransportSpecification(pmsg.metadata.names.get(i)); return new PreparedStatement(new Columns(defs), pmsg.statementId.bytes); - case VOID: - case ROWS: - case SET_KEYSPACE: - throw new RuntimeException("ResultSet received when prepared statement received was expected"); default: - throw new AssertionError(); + throw new DriverInternalError(String.format("%s response received when prepared statement received was expected", msg.kind)); } } diff --git a/driver-core/src/main/java/com/datastax/driver/core/ResultSet.java b/driver-core/src/main/java/com/datastax/driver/core/ResultSet.java index e6e4bc5ef85..da52a238e91 100644 --- a/driver-core/src/main/java/com/datastax/driver/core/ResultSet.java +++ b/driver-core/src/main/java/com/datastax/driver/core/ResultSet.java @@ -51,6 +51,7 @@ private static ResultSet fromMessage(ResultMessage msg) { return new ResultSet(new Columns(defs), new ArrayDeque(r.result.rows)); case SET_KEYSPACE: + case SCHEMA_CHANGE: return EMPTY; case PREPARED: throw new RuntimeException("Prepared statement received when a ResultSet was expected"); @@ -166,13 +167,38 @@ public void onSet(Message.Response response) { switch (response.type) { case RESULT: ResultMessage rm = (ResultMessage)response; - if (rm.kind == ResultMessage.Kind.SET_KEYSPACE) { - // TODO: I think there is a problem if someone set - // a keyspace, then drop it. But that basically - // means we should reset the keyspace to null in that case. - - // propagate the keyspace change to other connections - session.poolsConfiguration.setKeyspace(((ResultMessage.SetKeyspace)rm).keyspace); + switch (rm.kind) { + case SET_KEYSPACE: + // TODO: I think there is a problem if someone set + // a keyspace, then drop it. But that basically + // means we should reset the keyspace to null in that case. 
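Context for the propagation line that follows: a USE statement changes the keyspace of the one connection that executed it, so the SET_KEYSPACE result must be fanned out through the shared pool configuration or every other pooled connection would keep targeting the old keyspace. Conceptually, with hypothetical types rather than the driver's:

    import java.util.List;
    import java.util.concurrent.CopyOnWriteArrayList;

    class PoolConfig {
        private final List<PooledConnection> connections =
                new CopyOnWriteArrayList<PooledConnection>();
        private volatile String keyspace;

        // Called when any one connection observes a SET_KEYSPACE result.
        void setKeyspace(String ks) {
            keyspace = ks;
            for (PooledConnection c : connections)
                c.use(ks);   // bring every sibling connection in line
        }
    }

    interface PooledConnection {
        void use(String keyspace);
    }

The TODO above still applies: if that keyspace is later dropped, the remembered value should arguably be reset to null.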
+ + // propagate the keyspace change to other connections + session.poolsConfiguration.setKeyspace(((ResultMessage.SetKeyspace)rm).keyspace); + break; + case SCHEMA_CHANGE: + ResultMessage.SchemaChange scc = (ResultMessage.SchemaChange)rm; + switch (scc.change) { + case CREATED: + if (scc.columnFamily.isEmpty()) + session.cluster.manager.submitSchemaRefresh(null, null); + else + session.cluster.manager.submitSchemaRefresh(scc.keyspace, null); + break; + case DROPPED: + if (scc.columnFamily.isEmpty()) + session.cluster.manager.submitSchemaRefresh(null, null); + else + session.cluster.manager.submitSchemaRefresh(scc.keyspace, null); + break; + case UPDATED: + if (scc.columnFamily.isEmpty()) + session.cluster.manager.submitSchemaRefresh(scc.keyspace, null); + else + session.cluster.manager.submitSchemaRefresh(scc.keyspace, scc.columnFamily); + break; + } + break; } set(ResultSet.fromMessage(rm)); break; diff --git a/driver-core/src/main/java/com/datastax/driver/core/TableMetadata.java b/driver-core/src/main/java/com/datastax/driver/core/TableMetadata.java index 211374ca0a8..691562413b4 100644 --- a/driver-core/src/main/java/com/datastax/driver/core/TableMetadata.java +++ b/driver-core/src/main/java/com/datastax/driver/core/TableMetadata.java @@ -279,7 +279,7 @@ public String asCQLQuery() { private String asCQLQuery(boolean formatted) { StringBuilder sb = new StringBuilder(); - sb.append("CREATE TABLE ").append(name); + sb.append("CREATE TABLE ").append(name).append(" ("); newLine(sb, formatted); for (ColumnMetadata cm : columns.values()) newLine(sb.append(spaces(4, formatted)).append(cm), formatted); @@ -303,17 +303,15 @@ private String asCQLQuery(boolean formatted) { newLine(sb, formatted); // end PK - newLine(sb, formatted); - // Options - sb.append(" WITH read_repair_chance = ").append(options.readRepair); + sb.append(") WITH read_repair_chance = ").append(options.readRepair); and(sb, formatted).append("local_read_repair_chance = ").append(options.localReadRepair); and(sb, formatted).append("replicate_on_write = ").append(options.replicateOnWrite); and(sb, formatted).append("gc_grace_seconds = ").append(options.gcGrace); and(sb, formatted).append("bloom_filter_fp_chance = ").append(options.bfFpChance); and(sb, formatted).append("caching = ").append(options.caching); if (options.comment != null) - and(sb, formatted).append("comment = ").append(options.comment); + and(sb, formatted).append("comment = '").append(options.comment).append("'"); // TODO: finish (compaction and compression) newLine(sb, formatted); diff --git a/driver-core/src/test/java/com/datastax/driver/core/SessionTest.java b/driver-core/src/test/java/com/datastax/driver/core/SessionTest.java index 3c23005f0e3..4de1b297452 100644 --- a/driver-core/src/test/java/com/datastax/driver/core/SessionTest.java +++ b/driver-core/src/test/java/com/datastax/driver/core/SessionTest.java @@ -61,58 +61,58 @@ public static void classSetUp() { // assertEquals(0.2, r.getFloat("f"), 0.01); //} - @Test - public void PreparedStatementTest() throws Exception { + //@Test + //public void PreparedStatementTest() throws Exception { - Cluster cluster = new Cluster.Builder().addContactPoint("localhost").build(); - Session session = cluster.connect(); + // Cluster cluster = new Cluster.Builder().addContactPoint("localhost").build(); + // Session session = cluster.connect(); - try - { - session.execute("CREATE KEYSPACE test_ks WITH replication = { 'class' : 'SimpleStrategy', 'replication_factor' : 1 }"); - session.execute("USE test_ks"); - 
session.execute("CREATE TABLE test_2 (k text, i int, f float, PRIMARY KEY(k, i))"); - } catch (Exception e) { - // Skip if already created - session.execute("USE test_ks"); - } + // try + // { + // session.execute("CREATE KEYSPACE test_ks WITH replication = { 'class' : 'SimpleStrategy', 'replication_factor' : 1 }"); + // session.execute("USE test_ks"); + // session.execute("CREATE TABLE test_2 (k text, i int, f float, PRIMARY KEY(k, i))"); + // } catch (Exception e) { + // // Skip if already created + // session.execute("USE test_ks"); + // } - PreparedStatement insertStmt = session.prepare("INSERT INTO test_2 (k, i, f) VALUES (?, ?, ?)"); - PreparedStatement selectStmt = session.prepare("SELECT * FROM test_2 WHERE k = ?"); + // PreparedStatement insertStmt = session.prepare("INSERT INTO test_2 (k, i, f) VALUES (?, ?, ?)"); + // PreparedStatement selectStmt = session.prepare("SELECT * FROM test_2 WHERE k = ?"); - ResultSet rs; - BoundStatement bs; + // ResultSet rs; + // BoundStatement bs; - bs = insertStmt.newBoundStatement().setString(0, "prep").setInt("i", 1).setFloat(2, 0.1f); - rs = session.executePrepared(bs); + // bs = insertStmt.newBoundStatement().setString(0, "prep").setInt("i", 1).setFloat(2, 0.1f); + // rs = session.executePrepared(bs); - bs = insertStmt.newBoundStatement().setString(0, "prep").setFloat("f", 0.2f).setInt(1, 2); - rs = session.executePrepared(bs); + // bs = insertStmt.newBoundStatement().setString(0, "prep").setFloat("f", 0.2f).setInt(1, 2); + // rs = session.executePrepared(bs); - session.executePrepared(insertStmt.bind("prep", 3, 42.0f)); + // session.executePrepared(insertStmt.bind("prep", 3, 42.0f)); - bs = selectStmt.newBoundStatement().setString("k", "prep"); - rs = session.executePrepared(bs); + // bs = selectStmt.newBoundStatement().setString("k", "prep"); + // rs = session.executePrepared(bs); - List l = rs.fetchAll(); - assertEquals(3, l.size()); + // List l = rs.fetchAll(); + // assertEquals(3, l.size()); - CQLRow r; - r = l.get(0); - assertEquals("prep", r.getString(0)); - assertEquals(1, r.getInt("i")); - assertEquals(0.1, r.getFloat("f"), 0.01); + // CQLRow r; + // r = l.get(0); + // assertEquals("prep", r.getString(0)); + // assertEquals(1, r.getInt("i")); + // assertEquals(0.1, r.getFloat("f"), 0.01); - r = l.get(1); - assertEquals("prep", r.getString("k")); - assertEquals(2, r.getInt("i")); - assertEquals(0.2, r.getFloat("f"), 0.01); + // r = l.get(1); + // assertEquals("prep", r.getString("k")); + // assertEquals(2, r.getInt("i")); + // assertEquals(0.2, r.getFloat("f"), 0.01); - r = l.get(2); - assertEquals("prep", r.getString("k")); - assertEquals(3, r.getInt("i")); - assertEquals(42.0f, r.getFloat("f"), 0.01); - } + // r = l.get(2); + // assertEquals("prep", r.getString("k")); + // assertEquals(3, r.getInt("i")); + // assertEquals(42.0f, r.getFloat("f"), 0.01); + //} //@Test //public void CollectionsTest() throws Exception { @@ -186,4 +186,28 @@ public void PreparedStatementTest() throws Exception { // Thread.currentThread().sleep(1000); // } //} + + @Test + public void SchemaTest() throws Exception { + + Cluster cluster = new Cluster.Builder().addContactPoints("127.0.0.1").build(); + Session session = cluster.connect(); + + try { + session.execute("CREATE KEYSPACE test_ks WITH replication = { 'class' : 'SimpleStrategy', 'replication_factor' : 1 }"); + // We should deal with that sleep + try { Thread.sleep(2000); } catch (Exception e) {} + session.execute("USE test_ks"); + session.execute("CREATE TABLE test (k text PRIMARY KEY, i int, f 
float)"); + } catch (AlreadyExistsException e) { + // Skip if already exists + session.execute("USE test_ks"); + } + + for (int i = 0; i < 10000; ++i) { + System.out.println("--- Schema " + i + " ---"); + System.out.println(cluster.getMetadata().getKeyspace("test_ks").exportAsString()); + Thread.currentThread().sleep(4000); + } + } } From 7a08e8acb671904783b3bd1068165acd1bafbb9e Mon Sep 17 00:00:00 2001 From: Sylvain Lebresne Date: Mon, 8 Oct 2012 20:05:15 +0200 Subject: [PATCH 042/719] Cleanups (make method naming more coherent) --- .../datastax/driver/core/BoundStatement.java | 36 +++--- .../java/com/datastax/driver/core/CQLRow.java | 22 ++-- .../com/datastax/driver/core/Cluster.java | 10 +- .../datastax/driver/core/ClusterMetadata.java | 2 +- .../java/com/datastax/driver/core/Codec.java | 2 +- .../com/datastax/driver/core/Columns.java | 28 ++--- .../driver/core/ControlConnection.java | 2 +- .../driver/core/ConvictionPolicy.java | 2 + .../com/datastax/driver/core/DataType.java | 14 +-- .../java/com/datastax/driver/core/Host.java | 4 +- .../driver/core/KeyspaceMetadata.java | 4 +- .../driver/core/LoadBalancingPolicy.java | 105 ++++++++++++++++++ .../driver/core/PreparedStatement.java | 2 +- .../com/datastax/driver/core/ResultSet.java | 2 +- .../driver/core/RoundRobinPolicy.java | 103 ----------------- .../com/datastax/driver/core/Session.java | 4 +- .../datastax/driver/core/TableMetadata.java | 18 +-- .../exceptions/AlreadyExistsException.java | 4 +- .../exceptions/NoHostAvailableException.java | 2 +- .../exceptions/QueryTimeoutException.java | 6 +- .../core/exceptions/UnavailableException.java | 6 +- 21 files changed, 191 insertions(+), 187 deletions(-) delete mode 100644 driver-core/src/main/java/com/datastax/driver/core/RoundRobinPolicy.java diff --git a/driver-core/src/main/java/com/datastax/driver/core/BoundStatement.java b/driver-core/src/main/java/com/datastax/driver/core/BoundStatement.java index 8984c36e7f3..0f17e51f0de 100644 --- a/driver-core/src/main/java/com/datastax/driver/core/BoundStatement.java +++ b/driver-core/src/main/java/com/datastax/driver/core/BoundStatement.java @@ -24,7 +24,7 @@ public class BoundStatement { BoundStatement(PreparedStatement statement) { this.statement = statement; - this.values = new ByteBuffer[statement.variables().count()]; + this.values = new ByteBuffer[statement.getVariables().count()]; this.remaining = values.length; } @@ -43,7 +43,7 @@ public PreparedStatement preparedStatement() { * * @return whether all variables are bound. */ - public boolean ready() { + public boolean isReady() { return remaining == 0; } @@ -90,14 +90,14 @@ public boolean isSet(String name) { */ public BoundStatement bind(Object... 
values) { - if (values.length > statement.variables().count()) - throw new IllegalArgumentException(String.format("Prepared statement has only %d variables, %d values provided", statement.variables().count(), values.length)); + if (values.length > statement.getVariables().count()) + throw new IllegalArgumentException(String.format("Prepared statement has only %d variables, %d values provided", statement.getVariables().count(), values.length)); for (int i = 0; i < values.length; i++) { Object toSet = values[i]; - DataType columnType = statement.variables().type(i); - switch (columnType.kind()) + DataType columnType = statement.getVariables().getType(i); + switch (columnType.getKind()) { case NATIVE: if (!Codec.isCompatible(columnType.asNative(), toSet.getClass())) @@ -642,9 +642,9 @@ public BoundStatement setInet(String name, InetAddress v) { * column {@code i}. */ public BoundStatement setList(int i, List v) { - DataType type = metadata().type(i); - if (type.kind() != DataType.Kind.COLLECTION || type.asCollection().collectionType() != DataType.Collection.Type.LIST) - throw new InvalidTypeException(String.format("Column %s is of type %s, cannot set to a list", metadata().name(i), type)); + DataType type = metadata().getType(i); + if (type.getKind() != DataType.Kind.COLLECTION || type.asCollection().collectionType() != DataType.Collection.Type.LIST) + throw new InvalidTypeException(String.format("Column %s is of type %s, cannot set to a list", metadata().getName(i), type)); // If the list is empty, it will never fail validation, but otherwise we should check the list given if of the right type if (!v.isEmpty()) { @@ -653,7 +653,7 @@ public BoundStatement setList(int i, List v) { DataType.Native eltType = (DataType.Native)((DataType.Collection.List)type).getElementsType(); if (!Codec.isCompatible(eltType, klass)) - throw new InvalidTypeException(String.format("Column %s is a %s, cannot set to a list of %s", metadata().name(i), type, klass)); + throw new InvalidTypeException(String.format("Column %s is a %s, cannot set to a list of %s", metadata().getName(i), type, klass)); } return setValue(i, Codec.>getCodec(type).decompose(v)); @@ -685,9 +685,9 @@ public BoundStatement setList(String name, List v) { * elements of column {@code i}. */ public BoundStatement setMap(int i, Map v) { - DataType type = metadata().type(i); - if (type.kind() != DataType.Kind.COLLECTION || type.asCollection().collectionType() != DataType.Collection.Type.MAP) - throw new InvalidTypeException(String.format("Column %s is of type %s, cannot set to a map", metadata().name(i), type)); + DataType type = metadata().getType(i); + if (type.getKind() != DataType.Kind.COLLECTION || type.asCollection().collectionType() != DataType.Collection.Type.MAP) + throw new InvalidTypeException(String.format("Column %s is of type %s, cannot set to a map", metadata().getName(i), type)); if (!v.isEmpty()) { // Ugly? 
Yes @@ -699,7 +699,7 @@ public BoundStatement setMap(int i, Map v) { DataType.Native keysType = (DataType.Native)mapType.getKeysType(); DataType.Native valuesType = (DataType.Native)mapType.getValuesType(); if (!Codec.isCompatible(keysType, keysClass) || !Codec.isCompatible(valuesType, valuesClass)) - throw new InvalidTypeException(String.format("Column %s is a %s, cannot set to a map of %s -> %s", metadata().name(i), type, keysType, valuesType)); + throw new InvalidTypeException(String.format("Column %s is a %s, cannot set to a map of %s -> %s", metadata().getName(i), type, keysType, valuesType)); } return setValue(i, Codec.>getCodec(type).decompose(v)); @@ -731,9 +731,9 @@ public BoundStatement setMap(String name, Map v) { * column {@code i}. */ public BoundStatement setSet(int i, Set v) { - DataType type = metadata().type(i); - if (type.kind() != DataType.Kind.COLLECTION || type.asCollection().collectionType() != DataType.Collection.Type.SET) - throw new InvalidTypeException(String.format("Column %s is of type %s, cannot set to a set", metadata().name(i), type)); + DataType type = metadata().getType(i); + if (type.getKind() != DataType.Kind.COLLECTION || type.asCollection().collectionType() != DataType.Collection.Type.SET) + throw new InvalidTypeException(String.format("Column %s is of type %s, cannot set to a set", metadata().getName(i), type)); if (!v.isEmpty()) { // Ugly? Yes @@ -741,7 +741,7 @@ public BoundStatement setSet(int i, Set v) { DataType.Native eltType = (DataType.Native)((DataType.Collection.Set)type).getElementsType(); if (!Codec.isCompatible(eltType, klass)) - throw new InvalidTypeException(String.format("Column %s is a %s, cannot set to a set of %s", metadata().name(i), type, klass)); + throw new InvalidTypeException(String.format("Column %s is a %s, cannot set to a set of %s", metadata().getName(i), type, klass)); } return setValue(i, Codec.>getCodec(type).decompose(v)); diff --git a/driver-core/src/main/java/com/datastax/driver/core/CQLRow.java b/driver-core/src/main/java/com/datastax/driver/core/CQLRow.java index b98439e666c..433e34500d7 100644 --- a/driver-core/src/main/java/com/datastax/driver/core/CQLRow.java +++ b/driver-core/src/main/java/com/datastax/driver/core/CQLRow.java @@ -35,7 +35,7 @@ static CQLRow fromData(Columns metadata, List data) { * * @return the columns contained in this CQLRow. */ - public Columns columns() { + public Columns getColumns() { return metadata; } @@ -576,13 +576,13 @@ public List getList(int i, Class elementsClass) { // might want to improve that, though that reach into the // ListType.compose() method. - DataType type = metadata.type(i); + DataType type = metadata.getType(i); if (!(type instanceof DataType.Collection.List)) - throw new InvalidTypeException(String.format("Column %s is not of list type", metadata.name(i))); + throw new InvalidTypeException(String.format("Column %s is not of list type", metadata.getName(i))); DataType.Native eltType = (DataType.Native)((DataType.Collection.List)type).getElementsType(); if (!Codec.isCompatible(eltType, elementsClass)) - throw new InvalidTypeException(String.format("Column %s is a %s, cannot be retrieve as a list of %s", metadata.name(i), type, elementsClass)); + throw new InvalidTypeException(String.format("Column %s is a %s, cannot be retrieve as a list of %s", metadata.getName(i), type, elementsClass)); ByteBuffer value = data.get(i); if (value == null) @@ -626,13 +626,13 @@ public List getList(String name, Class elementsClass) { * elements are not of class {@code elementsClass}. 
*/ public Set getSet(int i, Class elementsClass) { - DataType type = metadata.type(i); + DataType type = metadata.getType(i); if (!(type instanceof DataType.Collection.Set)) - throw new InvalidTypeException(String.format("Column %s is not of set type", metadata.name(i))); + throw new InvalidTypeException(String.format("Column %s is not of set type", metadata.getName(i))); DataType.Native eltType = (DataType.Native)((DataType.Collection.Set)type).getElementsType(); if (!Codec.isCompatible(eltType, elementsClass)) - throw new InvalidTypeException(String.format("Column %s is a %s, cannot be retrieve as a set of %s", metadata.name(i), type, elementsClass)); + throw new InvalidTypeException(String.format("Column %s is a %s, cannot be retrieve as a set of %s", metadata.getName(i), type, elementsClass)); ByteBuffer value = data.get(i); if (value == null) @@ -677,15 +677,15 @@ public Set getSet(String name, Class elementsClass) { * class {@code valuesClass}. */ public Map getMap(int i, Class keysClass, Class valuesClass) { - DataType type = metadata.type(i); + DataType type = metadata.getType(i); if (!(type instanceof DataType.Collection.Map)) - throw new InvalidTypeException(String.format("Column %s is not of map type", metadata.name(i))); + throw new InvalidTypeException(String.format("Column %s is not of map type", metadata.getName(i))); DataType.Collection.Map mapType = (DataType.Collection.Map)type; DataType.Native keysType = (DataType.Native)mapType.getKeysType(); DataType.Native valuesType = (DataType.Native)mapType.getValuesType(); if (!Codec.isCompatible(keysType, keysClass) || !Codec.isCompatible(valuesType, valuesClass)) - throw new InvalidTypeException(String.format("Column %s is a %s, cannot be retrieve as a map of %s -> %s", metadata.name(i), type, keysClass, valuesClass)); + throw new InvalidTypeException(String.format("Column %s is a %s, cannot be retrieve as a map of %s -> %s", metadata.getName(i), type, keysClass, valuesClass)); ByteBuffer value = data.get(i); if (value == null) @@ -726,7 +726,7 @@ public String toString() { if (bb == null) sb.append("NULL"); else - sb.append(Codec.getCodec(metadata.type(i)).getString(bb)); + sb.append(Codec.getCodec(metadata.getType(i)).getString(bb)); } sb.append("]"); return sb.toString(); diff --git a/driver-core/src/main/java/com/datastax/driver/core/Cluster.java b/driver-core/src/main/java/com/datastax/driver/core/Cluster.java index 4e0df2aa3f0..4803e122e55 100644 --- a/driver-core/src/main/java/com/datastax/driver/core/Cluster.java +++ b/driver-core/src/main/java/com/datastax/driver/core/Cluster.java @@ -61,7 +61,7 @@ private Cluster(List contactPoints) throws NoHostAvailableExc * can be reached. */ public static Cluster buildFrom(Configuration config) throws NoHostAvailableException { - return new Cluster(config.contactPoints()); + return new Cluster(config.getContactPoints()); } /** @@ -141,7 +141,7 @@ public interface Configuration { * @return the initial Cassandra contact points. See {@link Builder#addContactPoint} * for more details on contact points. 
*/ - public List contactPoints(); + public List getContactPoints(); } /** @@ -151,7 +151,7 @@ public static class Builder implements Configuration { private List addresses = new ArrayList(); - public List contactPoints() { + public List getContactPoints() { return addresses; } @@ -295,7 +295,7 @@ class Manager implements Host.StateListener, Connection.DefaultResponseHandler { private final ControlConnection controlConnection; // TODO: make configurable - final LoadBalancingPolicy.Factory loadBalancingFactory = RoundRobinPolicy.Factory.INSTANCE; + final LoadBalancingPolicy.Factory loadBalancingFactory = LoadBalancingPolicy.RoundRobin.Factory.INSTANCE; final ScheduledExecutorService reconnectionExecutor = Executors.newScheduledThreadPool(2, new NamedThreadFactory("Reconnection")); final ScheduledExecutorService scheduledTasksExecutor = Executors.newScheduledThreadPool(1, new NamedThreadFactory("Scheduled Tasks")); @@ -350,7 +350,7 @@ protected Connection tryReconnect() throws ConnectionException { protected void onReconnection(Connection connection) { logger.debug(String.format("Successful reconnection to %s, setting host UP", host)); - host.monitor().reset(); + host.getMonitor().reset(); } protected boolean onConnectionException(ConnectionException e, long nextDelayMs) { diff --git a/driver-core/src/main/java/com/datastax/driver/core/ClusterMetadata.java b/driver-core/src/main/java/com/datastax/driver/core/ClusterMetadata.java index 3b6746317e5..e2d2c0e1c01 100644 --- a/driver-core/src/main/java/com/datastax/driver/core/ClusterMetadata.java +++ b/driver-core/src/main/java/com/datastax/driver/core/ClusterMetadata.java @@ -116,7 +116,7 @@ Host add(InetSocketAddress address) { Host previous = hosts.putIfAbsent(address, newHost); if (previous == null) { - newHost.monitor().register(cluster); + newHost.getMonitor().register(cluster); return newHost; } else diff --git a/driver-core/src/main/java/com/datastax/driver/core/Codec.java b/driver-core/src/main/java/com/datastax/driver/core/Codec.java index 58a7c37bddc..73c9aa49cd7 100644 --- a/driver-core/src/main/java/com/datastax/driver/core/Codec.java +++ b/driver-core/src/main/java/com/datastax/driver/core/Codec.java @@ -38,7 +38,7 @@ class Codec { private Codec() {} public static AbstractType getCodec(DataType type) { - switch (type.kind()) { + switch (type.getKind()) { case NATIVE: return (AbstractType)nativeCodec(type.asNative()); case COLLECTION: return (AbstractType)collectionCodec(type.asCollection()); case CUSTOM: return (AbstractType)customCodec(type.asCustom()); diff --git a/driver-core/src/main/java/com/datastax/driver/core/Columns.java b/driver-core/src/main/java/com/datastax/driver/core/Columns.java index 2610293ee1d..9def43ef7b9 100644 --- a/driver-core/src/main/java/com/datastax/driver/core/Columns.java +++ b/driver-core/src/main/java/com/datastax/driver/core/Columns.java @@ -72,7 +72,7 @@ public List asList() { * @return the list of the names for the columns defined in these metadata. * The names in the returned list will be in the order of this metadata. */ - public List names() { + public List getNames() { List names = new ArrayList(byIdx.length); for (Definition def : byIdx) names.add(def.name); @@ -85,7 +85,7 @@ public List names() { * @return the list of the types for the columns defined in these metadata. * The types in the returned list will be in the order of this metadata. 
*/ - public List types() { + public List getTypes() { List types = new ArrayList(byIdx.length); for (Definition def : byIdx) types.add(def.type); @@ -99,7 +99,7 @@ public List types() { * * @throws IndexOutOfBoundsException if {@code i < 0} or {@code i >= count()} */ - public String name(int i) { + public String getName(int i) { return byIdx[i].name; } @@ -110,7 +110,7 @@ public String name(int i) { * * @throws IndexOutOfBoundsException if {@code i < 0} or {@code i >= count()} */ - public DataType type(int i) { + public DataType getType(int i) { return byIdx[i].type; } @@ -121,8 +121,8 @@ public DataType type(int i) { * * @throws IllegalArgumentException if {@code name} is not one of the columns in this metadata. */ - public DataType type(String name) { - return type(getIdx(name)); + public DataType geType(String name) { + return getType(getIdx(name)); } /** @@ -132,7 +132,7 @@ public DataType type(String name) { * * @throws IndexOutOfBoundsException if {@code i < 0} or {@code i >= count()} */ - public String keyspace(int i) { + public String getKeyspace(int i) { return byIdx[i].keyspace; } @@ -143,8 +143,8 @@ public String keyspace(int i) { * * @throws IllegalArgumentException if {@code name} is not one of the columns in this metadata. */ - public String keyspace(String name) { - return keyspace(getIdx(name)); + public String getKeyspace(String name) { + return getKeyspace(getIdx(name)); } /** @@ -154,7 +154,7 @@ public String keyspace(String name) { * * @throws IndexOutOfBoundsException if {@code i < 0} or {@code i >= count()} */ - public String table(int i) { + public String getTable(int i) { return byIdx[i].table; } @@ -165,8 +165,8 @@ public String table(int i) { * * @throws IllegalArgumentException if {@code name} is not one of the columns in this metadata. */ - public String table(String name) { - return table(getIdx(name)); + public String getTable(String name) { + return getTable(getIdx(name)); } public String toString() { @@ -196,12 +196,12 @@ void checkBounds(int i) { } DataType.Native checkType(int i, DataType.Native... types) { - DataType defined = type(i); + DataType defined = getType(i); for (DataType.Native type : types) if (type == defined) return type; - throw new InvalidTypeException(String.format("Column %s is of type %s", name(i), defined)); + throw new InvalidTypeException(String.format("Column %s is of type %s", getName(i), defined)); } /** diff --git a/driver-core/src/main/java/com/datastax/driver/core/ControlConnection.java b/driver-core/src/main/java/com/datastax/driver/core/ControlConnection.java index 273e3e11ae2..f9029aa3afe 100644 --- a/driver-core/src/main/java/com/datastax/driver/core/ControlConnection.java +++ b/driver-core/src/main/java/com/datastax/driver/core/ControlConnection.java @@ -36,7 +36,7 @@ class ControlConnection implements Host.StateListener { public ControlConnection(Cluster.Manager cluster) { this.cluster = cluster; - this.balancingPolicy = RoundRobinPolicy.Factory.INSTANCE.create(cluster.metadata.allHosts()); + this.balancingPolicy = LoadBalancingPolicy.RoundRobin.Factory.INSTANCE.create(cluster.metadata.allHosts()); } // Only for the initial connection. 
Does not schedule retries if it fails diff --git a/driver-core/src/main/java/com/datastax/driver/core/ConvictionPolicy.java b/driver-core/src/main/java/com/datastax/driver/core/ConvictionPolicy.java index cbb3099eee7..3d55da25854 100644 --- a/driver-core/src/main/java/com/datastax/driver/core/ConvictionPolicy.java +++ b/driver-core/src/main/java/com/datastax/driver/core/ConvictionPolicy.java @@ -2,6 +2,8 @@ /** * The policy with which to decide whether a host should be considered down. + * TODO: not sure it's worth exposing this at this point. But if we do, we + * would need to expose ConnectionException */ public interface ConvictionPolicy { diff --git a/driver-core/src/main/java/com/datastax/driver/core/DataType.java b/driver-core/src/main/java/com/datastax/driver/core/DataType.java index e8a6fb8e44b..eeb8dda75b4 100644 --- a/driver-core/src/main/java/com/datastax/driver/core/DataType.java +++ b/driver-core/src/main/java/com/datastax/driver/core/DataType.java @@ -26,7 +26,7 @@ public enum Kind { NATIVE, COLLECTION, CUSTOM } * * @return this type {@link Kind}. */ - public Kind kind(); + public Kind getKind(); /** * Returns this type as a {@link Native} type. @@ -34,7 +34,7 @@ public enum Kind { NATIVE, COLLECTION, CUSTOM } * @return this type as a {@link Native} type. * * @throws IllegalStateException if this type is not a {@link Native} type. - * You should use {@link #kind} to check if this type is a native one + * You should use {@link #getKind} to check if this type is a native one * before calling this method. */ public Native asNative(); @@ -45,7 +45,7 @@ public enum Kind { NATIVE, COLLECTION, CUSTOM } * @return this type as a {@link Collection} type. * * @throws IllegalStateException if this type is not a {@link Collection} - * type. You should use {@link #kind} to check if this type is a collection + * type. You should use {@link #getKind} to check if this type is a collection * one before calling this method. */ public Collection asCollection(); @@ -56,7 +56,7 @@ public enum Kind { NATIVE, COLLECTION, CUSTOM } * @return this type as a {@link Custom} type. * * @throws IllegalStateException if this type is not a {@link Custom} type. - * You should use {@link #kind} to check if this type is a custom one + * You should use {@link #getKind} to check if this type is a custom one * before calling this method. */ public Custom asCustom(); @@ -83,7 +83,7 @@ public enum Native implements DataType { VARINT, TIMEUUID; - public Kind kind() { return Kind.NATIVE; } + public Kind getKind() { return Kind.NATIVE; } public Native asNative() { return this; } public Collection asCollection() { throw new IllegalStateException("Not a collection type, but a native one"); } @@ -109,7 +109,7 @@ protected Collection(Type type) { this.type = type; } - public Kind kind() { return Kind.COLLECTION; } + public Kind getKind() { return Kind.COLLECTION; } /** * The type of collection. 
@@ -234,7 +234,7 @@ public String toString() { public static class Custom implements DataType { // TODO - public Kind kind() { return Kind.CUSTOM; } + public Kind getKind() { return Kind.CUSTOM; } public Native asNative() { throw new IllegalStateException("Not a native type, but a custom one"); } public Collection asCollection() { throw new IllegalStateException("Not a collection type, but a custom one"); } diff --git a/driver-core/src/main/java/com/datastax/driver/core/Host.java b/driver-core/src/main/java/com/datastax/driver/core/Host.java index 031f9b0aed4..2ebebc4e900 100644 --- a/driver-core/src/main/java/com/datastax/driver/core/Host.java +++ b/driver-core/src/main/java/com/datastax/driver/core/Host.java @@ -47,7 +47,7 @@ public InetSocketAddress getAddress() { * * @return the host {@link HealthMonitor}. */ - public HealthMonitor monitor() { + public HealthMonitor getMonitor() { return monitor; } @@ -133,7 +133,7 @@ private void setDown() { * Reset the monitor, setting the host as up and informing the * registered listener that the node is up. */ - public void reset() { + void reset() { isUp = true; policy.reset(); for (Host.StateListener listener : listeners) diff --git a/driver-core/src/main/java/com/datastax/driver/core/KeyspaceMetadata.java b/driver-core/src/main/java/com/datastax/driver/core/KeyspaceMetadata.java index ebbc7307db3..2d8338e8808 100644 --- a/driver-core/src/main/java/com/datastax/driver/core/KeyspaceMetadata.java +++ b/driver-core/src/main/java/com/datastax/driver/core/KeyspaceMetadata.java @@ -59,7 +59,7 @@ public boolean isDurableWrites() { * @return a map containing the replication options for this keyspace. */ public Map getReplication() { - return new HashMap(replication); + return Collections.unmodifiableMap(replication); } /** @@ -80,7 +80,7 @@ public TableMetadata getTable(String name) { * keyspace. */ public Collection getTables() { - return tables.values(); + return Collections.unmodifiableCollection(tables.values()); } /** diff --git a/driver-core/src/main/java/com/datastax/driver/core/LoadBalancingPolicy.java b/driver-core/src/main/java/com/datastax/driver/core/LoadBalancingPolicy.java index 87e550705b0..008883f4f2e 100644 --- a/driver-core/src/main/java/com/datastax/driver/core/LoadBalancingPolicy.java +++ b/driver-core/src/main/java/com/datastax/driver/core/LoadBalancingPolicy.java @@ -2,6 +2,8 @@ import java.util.Collection; import java.util.Iterator; +import java.util.Random; +import java.util.concurrent.atomic.AtomicInteger; /** * The policy that decides which Cassandra hosts to contact for each new query. @@ -44,4 +46,107 @@ public interface Factory { */ public LoadBalancingPolicy create(Collection hosts); } + + /** + * A Round-robin load balancing policy. + * + * This policy queries nodes in a round-robin fashion. For a given query, + * if an host fail, the next one (following the round-robin order) is + * tried, until all hosts have been tried. 
+ */ + public static class RoundRobin implements LoadBalancingPolicy { + + private volatile Host[] liveHosts; + private final AtomicInteger index = new AtomicInteger(); + + private RoundRobin(Collection hosts) { + this.liveHosts = hosts.toArray(new Host[hosts.size()]); + this.index.set(new Random().nextInt(Math.max(hosts.size(), 1))); + } + + public Iterator newQueryPlan() { + + final Host[] hosts = liveHosts; + final int startIdx = index.getAndIncrement(); + + // Overflow protection; not theoretically thread safe but should be good enough + if (startIdx > Integer.MAX_VALUE - 10000) + index.set(0); + + return new Iterator() { + + private int idx = startIdx; + private int remaining = hosts.length; + + public boolean hasNext() { + return remaining > 0; + } + + public Host next() { + Host h = hosts[idx++ % hosts.length]; + remaining--; + return h; + } + + public void remove() { + throw new UnsupportedOperationException(); + } + }; + } + + public synchronized void onUp(Host host) { + + for (Host h : liveHosts) + if (h.equals(host)) + return; + + Host[] newHosts = new Host[liveHosts.length + 1]; + System.arraycopy(liveHosts, 0, newHosts, 0, liveHosts.length); + newHosts[newHosts.length - 1] = host; + liveHosts = newHosts; + } + + public synchronized void onDown(Host host) { + int idx = -1; + for (int i = 0; i < liveHosts.length; i++) { + if (liveHosts[i].equals(host)) { + idx = i; + break; + } + } + + if (idx == -1) + return; + + Host[] newHosts = new Host[liveHosts.length - 1]; + if (idx == 0) { + System.arraycopy(liveHosts, 1, newHosts, 0, newHosts.length); + } else if (idx == liveHosts.length - 1) { + System.arraycopy(liveHosts, 0, newHosts, 0, newHosts.length); + } else { + System.arraycopy(liveHosts, 0, newHosts, 0, idx); + System.arraycopy(liveHosts, idx + 1, newHosts, idx, liveHosts.length - idx - 1); + } + liveHosts = newHosts; + } + + public void onAdd(Host host) { + onUp(host); + } + + public void onRemove(Host host) { + onDown(host); + } + + public static class Factory implements LoadBalancingPolicy.Factory { + + public static final Factory INSTANCE = new Factory(); + + private Factory() {} + + public LoadBalancingPolicy create(Collection hosts) { + return new RoundRobin(hosts); + } + } + } } diff --git a/driver-core/src/main/java/com/datastax/driver/core/PreparedStatement.java b/driver-core/src/main/java/com/datastax/driver/core/PreparedStatement.java index 3176f1c50b2..db4b48b3768 100644 --- a/driver-core/src/main/java/com/datastax/driver/core/PreparedStatement.java +++ b/driver-core/src/main/java/com/datastax/driver/core/PreparedStatement.java @@ -42,7 +42,7 @@ static PreparedStatement fromMessage(ResultMessage msg) { * * @return the variables bounded in this prepared statement. */ - public Columns variables() { + public Columns getVariables() { return metadata; } diff --git a/driver-core/src/main/java/com/datastax/driver/core/ResultSet.java b/driver-core/src/main/java/com/datastax/driver/core/ResultSet.java index da52a238e91..734bf1c925f 100644 --- a/driver-core/src/main/java/com/datastax/driver/core/ResultSet.java +++ b/driver-core/src/main/java/com/datastax/driver/core/ResultSet.java @@ -66,7 +66,7 @@ private static ResultSet fromMessage(ResultMessage msg) { * * @return the columns returned in this ResultSet. 
*/ - public Columns columns() { + public Columns getColumns() { return metadata; } diff --git a/driver-core/src/main/java/com/datastax/driver/core/RoundRobinPolicy.java b/driver-core/src/main/java/com/datastax/driver/core/RoundRobinPolicy.java deleted file mode 100644 index d0bfe5aa607..00000000000 --- a/driver-core/src/main/java/com/datastax/driver/core/RoundRobinPolicy.java +++ /dev/null @@ -1,103 +0,0 @@ -package com.datastax.driver.core; - -import java.util.*; -import java.util.concurrent.atomic.AtomicInteger; - -import com.datastax.driver.core.Host; -import com.datastax.driver.core.LoadBalancingPolicy; - -public class RoundRobinPolicy implements LoadBalancingPolicy { - - private volatile Host[] liveHosts; - private final AtomicInteger index = new AtomicInteger(); - - private RoundRobinPolicy(Collection hosts) { - this.liveHosts = hosts.toArray(new Host[hosts.size()]); - this.index.set(new Random().nextInt(Math.max(hosts.size(), 1))); - } - - public Iterator newQueryPlan() { - - final Host[] hosts = liveHosts; - final int startIdx = index.getAndIncrement(); - - // Overflow protection; not theoretically thread safe but should be good enough - if (startIdx > Integer.MAX_VALUE - 10000) - index.set(0); - - return new Iterator() { - - private int idx = startIdx; - private int remaining = hosts.length; - - public boolean hasNext() { - return remaining > 0; - } - - public Host next() { - Host h = hosts[idx++ % hosts.length]; - remaining--; - return h; - } - - public void remove() { - throw new UnsupportedOperationException(); - } - }; - } - - public synchronized void onUp(Host host) { - - for (Host h : liveHosts) - if (h.equals(host)) - return; - - Host[] newHosts = new Host[liveHosts.length + 1]; - System.arraycopy(liveHosts, 0, newHosts, 0, liveHosts.length); - newHosts[newHosts.length - 1] = host; - liveHosts = newHosts; - } - - public synchronized void onDown(Host host) { - int idx = -1; - for (int i = 0; i < liveHosts.length; i++) { - if (liveHosts[i].equals(host)) { - idx = i; - break; - } - } - - if (idx == -1) - return; - - Host[] newHosts = new Host[liveHosts.length - 1]; - if (idx == 0) { - System.arraycopy(liveHosts, 1, newHosts, 0, newHosts.length); - } else if (idx == liveHosts.length - 1) { - System.arraycopy(liveHosts, 0, newHosts, 0, newHosts.length); - } else { - System.arraycopy(liveHosts, 0, newHosts, 0, idx); - System.arraycopy(liveHosts, idx + 1, newHosts, idx, liveHosts.length - idx - 1); - } - liveHosts = newHosts; - } - - public void onAdd(Host host) { - onUp(host); - } - - public void onRemove(Host host) { - onDown(host); - } - - public static class Factory implements LoadBalancingPolicy.Factory { - - public static final Factory INSTANCE = new Factory(); - - private Factory() {} - - public LoadBalancingPolicy create(Collection hosts) { - return new RoundRobinPolicy(hosts); - } - } -} diff --git a/driver-core/src/main/java/com/datastax/driver/core/Session.java b/driver-core/src/main/java/com/datastax/driver/core/Session.java index 96eb1722180..3d91aa32660 100644 --- a/driver-core/src/main/java/com/datastax/driver/core/Session.java +++ b/driver-core/src/main/java/com/datastax/driver/core/Session.java @@ -178,7 +178,7 @@ public ResultSet executePrepared(BoundStatement stmt) throws NoHostAvailableExce * @throws IllegalStateException if {@code !stmt.ready()}. 
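     * <p>
     * Example (hypothetical usage; relies on the future's blocking {@code get()}):
     * <pre>
     *   ResultSet.Future future = session.executePreparedAsync(stmt);
     *   // ... do other work while the query executes ...
     *   ResultSet rs = future.get();
     * </pre>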
 */
    public ResultSet.Future executePreparedAsync(BoundStatement stmt) {
-        if (!stmt.ready())
+        if (!stmt.isReady())
             throw new IllegalStateException("Some bind variables haven't been bound in the provided statement");
 
         return manager.executeQuery(new ExecuteMessage(stmt.statement.id, Arrays.asList(stmt.values)));
@@ -233,7 +233,7 @@ public Manager(Cluster cluster, Collection hosts) {
         }
 
         private HostConnectionPool addHost(Host host) {
-            return pools.put(host, new HostConnectionPool(host, host.monitor().signaler, cluster.manager.connectionFactory, poolsConfiguration));
+            return pools.put(host, new HostConnectionPool(host, host.getMonitor().signaler, cluster.manager.connectionFactory, poolsConfiguration));
         }
 
         public void onUp(Host host) {
diff --git a/driver-core/src/main/java/com/datastax/driver/core/TableMetadata.java b/driver-core/src/main/java/com/datastax/driver/core/TableMetadata.java
index 691562413b4..fbd568ef482 100644
--- a/driver-core/src/main/java/com/datastax/driver/core/TableMetadata.java
+++ b/driver-core/src/main/java/com/datastax/driver/core/TableMetadata.java
@@ -389,7 +389,7 @@ public static class Options {
          *
          * @return the commentary set for this table, or {@code null} if none has been set.
          */
-        public String comment() {
+        public String getComment() {
             return comment;
         }
 
@@ -398,7 +398,7 @@ public String comment() {
          *
          * @return the read repair chance set for this table (in [0.0, 1.0]).
          */
-        public double readRepairChance() {
+        public double getReadRepairChance() {
             return readRepair;
         }
 
@@ -407,7 +407,7 @@ public double readRepairChance() {
          *
         * @return the local read repair chance set for this table (in [0.0, 1.0]).
          */
-        public double localReadRepairChance() {
+        public double getLocalReadRepairChance() {
             return localReadRepair;
         }
 
@@ -418,7 +418,7 @@
          *
          * @return whether replicateOnWrite is set for this table.
          */
-        public boolean replicateOnWrite() {
+        public boolean getReplicateOnWrite() {
             return replicateOnWrite;
         }
 
@@ -427,7 +427,7 @@ public boolean replicateOnWrite() {
          *
          * @return the tombstone garbage collection grace time in seconds for this table.
          */
-        public int gcGraceInSeconds() {
+        public int getGcGraceInSeconds() {
             return gcGrace;
         }
 
@@ -436,7 +436,7 @@ public int gcGraceInSeconds() {
          *
          * @return the bloom filter false positive chance for this table (in [0.0, 1.0]).
          */
-        public double bloomFilterFalsePositiveChance() {
+        public double getBloomFilterFalsePositiveChance() {
             return bfFpChance;
         }
 
@@ -445,7 +445,7 @@ public double bloomFilterFalsePositiveChance() {
          *
          * @return the caching option for this table.
          */
-        public String caching() {
+        public String getCaching() {
             return caching;
         }
 
@@ -454,7 +454,7 @@ public String caching() {
          *
          * @return a map containing the compaction options for this table.
          */
-        public Map compaction() {
+        public Map getCompaction() {
             return new HashMap(compaction);
         }
 
@@ -463,7 +463,7 @@ public Map compaction() {
          *
          * @return a map containing the compression options for this table.
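         * <p>
         * Example (hypothetical usage; {@code getOptions()} is assumed here as
         * the accessor exposing these {@code Options} on a {@code TableMetadata}):
         * <pre>
         *   Map compression = table.getOptions().getCompression();
         *   Object compressor = compression.get("sstable_compression");
         * </pre>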
 */
-        public Map compression() {
+        public Map getCompression() {
             return new HashMap(compression);
         }
     }
diff --git a/driver-core/src/main/java/com/datastax/driver/core/exceptions/AlreadyExistsException.java b/driver-core/src/main/java/com/datastax/driver/core/exceptions/AlreadyExistsException.java
index ad4df45a35e..d2a301f11bb 100644
--- a/driver-core/src/main/java/com/datastax/driver/core/exceptions/AlreadyExistsException.java
+++ b/driver-core/src/main/java/com/datastax/driver/core/exceptions/AlreadyExistsException.java
@@ -42,7 +42,7 @@ public boolean wasTableCreation() {
      * the table creation attempt (in which case {@link #table} will return the
      * name of said table).
      */
-    public String keyspace() {
+    public String getKeyspace() {
         return keyspace;
     }
 
@@ -53,7 +53,7 @@
      * of this name already exists, or {@code null} if the query was a keyspace
      * creation query.
      */
-    public String table() {
+    public String getTable() {
         return table.isEmpty() ? null : table;
     }
 }
diff --git a/driver-core/src/main/java/com/datastax/driver/core/exceptions/NoHostAvailableException.java b/driver-core/src/main/java/com/datastax/driver/core/exceptions/NoHostAvailableException.java
index 3398a22063d..eb22dd5bde6 100644
--- a/driver-core/src/main/java/com/datastax/driver/core/exceptions/NoHostAvailableException.java
+++ b/driver-core/src/main/java/com/datastax/driver/core/exceptions/NoHostAvailableException.java
@@ -33,7 +33,7 @@ public NoHostAvailableException(Map errors) {
      * @return a map containing for each tried host a description of the error
      * triggered when trying it.
      */
-    public Map errors() {
+    public Map getErrors() {
         return new HashMap(errors);
     }
 
diff --git a/driver-core/src/main/java/com/datastax/driver/core/exceptions/QueryTimeoutException.java b/driver-core/src/main/java/com/datastax/driver/core/exceptions/QueryTimeoutException.java
index 77c15d02cb4..f46973e5409 100644
--- a/driver-core/src/main/java/com/datastax/driver/core/exceptions/QueryTimeoutException.java
+++ b/driver-core/src/main/java/com/datastax/driver/core/exceptions/QueryTimeoutException.java
@@ -27,7 +27,7 @@ protected QueryTimeoutException(String msg, ConsistencyLevel consistency, int re
      *
      * @return the consistency level of the operation that timed out.
      */
-    public ConsistencyLevel consistencyLevel() {
+    public ConsistencyLevel getConsistencyLevel() {
         return consistency;
     }
 
@@ -38,7 +38,7 @@ public ConsistencyLevel consistencyLevel() {
      *
      * @return the number of replicas that had acknowledged/responded to the
      * operation before it timed out.
      */
-    public int receivedAcknowledgements() {
+    public int getReceivedAcknowledgements() {
         return received;
     }
 
@@ -49,7 +49,7 @@ public int receivedAcknowledgements() {
      *
      * @return The minimum number of replica acknowledgements/responses that
      * were required to fulfill the operation.
      */
-    public int requiredAcknowledgements() {
+    public int getRequiredAcknowledgements() {
         return required;
     }
 }
diff --git a/driver-core/src/main/java/com/datastax/driver/core/exceptions/UnavailableException.java b/driver-core/src/main/java/com/datastax/driver/core/exceptions/UnavailableException.java
index 83173498b74..8ba0c26cd55 100644
--- a/driver-core/src/main/java/com/datastax/driver/core/exceptions/UnavailableException.java
+++ b/driver-core/src/main/java/com/datastax/driver/core/exceptions/UnavailableException.java
@@ -24,7 +24,7 @@ public UnavailableException(ConsistencyLevel consistency, int required, int aliv
      *
      * @return the consistency level of the operation triggering this unavailable exception.
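     * <p>
     * Example (hypothetical usage):
     * <pre>
     *   try {
     *       session.execute(query);
     *   } catch (UnavailableException e) {
     *       logger.warn(String.format("Only %d of %d required replicas alive (consistency %s)",
     *                                 e.getAliveReplicas(), e.getRequiredReplicas(), e.getConsistency()));
     *   }
     * </pre>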
 */
-    public ConsistencyLevel consistency() {
+    public ConsistencyLevel getConsistency() {
         return consistency;
     }
 
@@ -35,7 +35,7 @@ public ConsistencyLevel consistency() {
      * @return the number of replica acknowledgements/responses required to perform the
      * operation.
      */
-    public int requiredReplicas() {
+    public int getRequiredReplicas() {
         return required;
     }
 
@@ -46,7 +46,7 @@ public int requiredReplicas() {
      * @return The number of replica that were known to be alive by the Cassandra
      * coordinator node when it tried to execute the operation.
      */
-    public int aliveReplicas() {
+    public int getAliveReplicas() {
         return alive;
     }
 }

From e240d4d9ae7014e14e7869a45ea6d3b9c2920a4c Mon Sep 17 00:00:00 2001
From: Sylvain Lebresne
Date: Tue, 9 Oct 2012 16:05:20 +0200
Subject: [PATCH 043/719] Prepare prepared queries on all nodes

---
 .../com/datastax/driver/core/Cluster.java     | 43 ++++++++++++++++++
 .../com/datastax/driver/core/Connection.java  | 12 +++--
 .../driver/core/PreparedStatement.java        |  9 ++--
 .../com/datastax/driver/core/ResultSet.java   |  3 +-
 .../driver/core/RetryingCallback.java         | 42 +++++++++++++++---
 .../com/datastax/driver/core/Session.java     | 44 ++++++++++++++++---
 6 files changed, 135 insertions(+), 18 deletions(-)

diff --git a/driver-core/src/main/java/com/datastax/driver/core/Cluster.java b/driver-core/src/main/java/com/datastax/driver/core/Cluster.java
index 4803e122e55..190a17e995f 100644
--- a/driver-core/src/main/java/com/datastax/driver/core/Cluster.java
+++ b/driver-core/src/main/java/com/datastax/driver/core/Cluster.java
@@ -5,9 +5,11 @@
 import java.util.*;
 import java.util.concurrent.*;
 
+import org.apache.cassandra.utils.MD5Digest;
 import org.apache.cassandra.transport.Event;
 import org.apache.cassandra.transport.Message;
 import org.apache.cassandra.transport.messages.EventMessage;
+import org.apache.cassandra.transport.messages.PrepareMessage;
 import org.apache.cassandra.transport.messages.QueryMessage;
 
 import com.datastax.driver.core.exceptions.*;
@@ -303,6 +305,13 @@ class Manager implements Host.StateListener, Connection.DefaultResponseHandler {
         // TODO: give a name to the threads of this executor
         final ExecutorService executor = Executors.newCachedThreadPool(new NamedThreadFactory("Cassandra Java Driver worker"));
 
+        // All the queries that have been prepared (we keep them so we can
+        // re-prepare them when a node fails or a new one joins the cluster).
+        // Note: we could move this down to the session level, but since
+        // prepared statements are global to a node, this would yield a slightly
+        // less clear behavior.
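+        // (Keys are the MD5Digest statement ids the server returns in
+        // ResultMessage.Prepared; values are the corresponding query strings.
+        // prepareAllQueries() below replays a PrepareMessage for each entry on
+        // a host that joins or comes back up.)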
+        final Map preparedQueries = new ConcurrentHashMap();
+
         private Manager(List contactPoints) throws NoHostAvailableException {
             this.metadata = new ClusterMetadata(this);
             this.contactPoints = contactPoints;
@@ -329,6 +338,8 @@ public void onUp(Host host) {
             if (scheduledAttempt != null)
                 scheduledAttempt.cancel(false);
 
+            prepareAllQueries(host);
+
             controlConnection.onUp(host);
             for (Session s : sessions)
                 s.manager.onUp(host);
@@ -368,6 +379,7 @@ protected boolean onUnknownException(Exception e, long nextDelayMs) {
         public void onAdd(Host host) {
             logger.trace(String.format("Adding new host %s", host));
 
+            prepareAllQueries(host);
             controlConnection.onAdd(host);
             for (Session s : sessions)
                 s.manager.onAdd(host);
@@ -394,6 +406,37 @@ public void removeHost(Host host) {
             onRemove(host);
         }
 
+        // Prepare a query on all nodes
+        public void prepare(MD5Digest digest, String query, InetSocketAddress toExclude) {
+            preparedQueries.put(digest, query);
+            for (Session s : sessions)
+                s.manager.prepare(query, toExclude);
+        }
+
+        private void prepareAllQueries(Host host) {
+            if (preparedQueries.isEmpty())
+                return;
+
+            try {
+                Connection connection = connectionFactory.open(host);
+                List futures = new ArrayList(preparedQueries.size());
+                for (String query : preparedQueries.values()) {
+                    futures.add(connection.write(new PrepareMessage(query)));
+                }
+                for (Connection.Future future : futures) {
+                    try {
+                        future.get();
+                    } catch (InterruptedException e) {
+                        logger.debug("Interrupted while preparing queries on new/newly up host", e);
+                    } catch (ExecutionException e) {
+                        logger.debug("Unexpected error while preparing queries on new/newly up host", e);
+                    }
+                }
+            } catch (ConnectionException e) {
+                // Ignore, not a big deal
+            }
+        }
+
         // TODO: take a lock or something so that if a getSchema() is called,
         // we wait for that to be finished. And maybe avoid multiple refresh at
         // the same time.
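Taken together with the Session changes below, the intended behavior is that a
statement prepared once keeps working after a node restarts or a new node
joins. A minimal client-side sketch (hypothetical usage; newBoundStatement()
is assumed here as the way to obtain a BoundStatement from a
PreparedStatement):

    PreparedStatement stmt = session.prepare("SELECT * FROM users WHERE id = ?");
    // If a node restarts and loses its server-side prepared statements, the
    // driver either re-prepares it in onUp()/onAdd() above, or recovers via
    // the UNPREPARED error path in RetryingCallback and retries transparently.
    ResultSet rs = session.executePrepared(stmt.newBoundStatement().bind(42));
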
diff --git a/driver-core/src/main/java/com/datastax/driver/core/Connection.java b/driver-core/src/main/java/com/datastax/driver/core/Connection.java index cdbc048f63e..52bd9881fc3 100644 --- a/driver-core/src/main/java/com/datastax/driver/core/Connection.java +++ b/driver-core/src/main/java/com/datastax/driver/core/Connection.java @@ -359,7 +359,7 @@ public void messageReceived(ChannelHandlerContext ctx, MessageEvent e) { if (handler == null) // TODO: we should handle those with a default handler throw new RuntimeException("No handler set for " + streamId + ", handlers = " + pending); - handler.callback.onSet(response); + handler.callback.onSet(Connection.this, response); } } @@ -388,6 +388,7 @@ public void errorOutAllHandler(ConnectionException ce) { static class Future extends SimpleFuture implements ResponseCallback { private final Message.Request request; + private volatile InetSocketAddress address; public Future(Message.Request request) { this.request = request; @@ -397,18 +398,23 @@ public Message.Request request() { return request; } - public void onSet(Message.Response response) { + public void onSet(Connection connection, Message.Response response) { + this.address = connection.address; super.set(response); } public void onException(Exception exception) { super.setException(exception); } + + public InetSocketAddress getAddress() { + return address; + } } interface ResponseCallback { public Message.Request request(); - public void onSet(Message.Response response); + public void onSet(Connection connection, Message.Response response); public void onException(Exception exception); } diff --git a/driver-core/src/main/java/com/datastax/driver/core/PreparedStatement.java b/driver-core/src/main/java/com/datastax/driver/core/PreparedStatement.java index db4b48b3768..d3342d28546 100644 --- a/driver-core/src/main/java/com/datastax/driver/core/PreparedStatement.java +++ b/driver-core/src/main/java/com/datastax/driver/core/PreparedStatement.java @@ -1,5 +1,6 @@ package com.datastax.driver.core; +import org.apache.cassandra.utils.MD5Digest; import org.apache.cassandra.transport.messages.ResultMessage; import com.datastax.driver.core.exceptions.DriverInternalError; @@ -16,14 +17,14 @@ public class PreparedStatement { final Columns metadata; - final byte[] id; + final MD5Digest id; - private PreparedStatement(Columns metadata, byte[] id) { + private PreparedStatement(Columns metadata, MD5Digest id) { this.metadata = metadata; this.id = id; } - static PreparedStatement fromMessage(ResultMessage msg) { + static PreparedStatement fromMessage(ResultMessage.Prepared msg) { switch (msg.kind) { case PREPARED: ResultMessage.Prepared pmsg = (ResultMessage.Prepared)msg; @@ -31,7 +32,7 @@ static PreparedStatement fromMessage(ResultMessage msg) { for (int i = 0; i < defs.length; i++) defs[i] = Columns.Definition.fromTransportSpecification(pmsg.metadata.names.get(i)); - return new PreparedStatement(new Columns(defs), pmsg.statementId.bytes); + return new PreparedStatement(new Columns(defs), pmsg.statementId); default: throw new DriverInternalError(String.format("%s response received when prepared statement received was expected", msg.kind)); } diff --git a/driver-core/src/main/java/com/datastax/driver/core/ResultSet.java b/driver-core/src/main/java/com/datastax/driver/core/ResultSet.java index 734bf1c925f..0853ec4a3ac 100644 --- a/driver-core/src/main/java/com/datastax/driver/core/ResultSet.java +++ b/driver-core/src/main/java/com/datastax/driver/core/ResultSet.java @@ -1,5 +1,6 @@ package 
com.datastax.driver.core; +import java.net.InetSocketAddress; import java.nio.ByteBuffer; import java.util.*; import java.util.concurrent.ExecutionException; @@ -162,7 +163,7 @@ public Message.Request request() { return request; } - public void onSet(Message.Response response) { + public void onSet(Connection connection, Message.Response response) { try { switch (response.type) { case RESULT: diff --git a/driver-core/src/main/java/com/datastax/driver/core/RetryingCallback.java b/driver-core/src/main/java/com/datastax/driver/core/RetryingCallback.java index 6fb314bb863..a2baf051bc9 100644 --- a/driver-core/src/main/java/com/datastax/driver/core/RetryingCallback.java +++ b/driver-core/src/main/java/com/datastax/driver/core/RetryingCallback.java @@ -6,6 +6,7 @@ import java.util.HashMap; import java.util.Map; import java.util.concurrent.TimeUnit; +import java.util.concurrent.ExecutionException; import com.datastax.driver.core.exceptions.*; import com.datastax.driver.core.utils.SimpleFuture; @@ -13,8 +14,10 @@ import org.apache.cassandra.transport.Message; import org.apache.cassandra.transport.messages.ErrorMessage; import org.apache.cassandra.transport.messages.ExecuteMessage; +import org.apache.cassandra.transport.messages.PrepareMessage; import org.apache.cassandra.transport.messages.QueryMessage; import org.apache.cassandra.exceptions.UnavailableException; +import org.apache.cassandra.exceptions.PreparedQueryNotFoundException; import org.apache.cassandra.exceptions.ReadTimeoutException; import org.apache.cassandra.exceptions.WriteTimeoutException; @@ -106,16 +109,15 @@ public Message.Request request() { return callback.request(); } - public void onSet(Message.Response response) { + public void onSet(Connection connection, Message.Response response) { switch (response.type) { case RESULT: - callback.onSet(response); + callback.onSet(connection, response); break; case ERROR: ErrorMessage err = (ErrorMessage)response; boolean retry = false; switch (err.error.code()) { - // TODO: Handle cases take into account by the retry policy case READ_TIMEOUT: assert err.error instanceof ReadTimeoutException; ReadTimeoutException rte = (ReadTimeoutException)err.error; @@ -145,17 +147,47 @@ public void onSet(Message.Response response) { if (queryRetries == 0) retry = true; break; + case UNPREPARED: + assert err.error instanceof PreparedQueryNotFoundException; + PreparedQueryNotFoundException pqnf = (PreparedQueryNotFoundException)err.error; + String toPrepare = manager.cluster.manager.preparedQueries.get(pqnf.id); + if (toPrepare == null) { + // This shouldn't happen + String msg = String.format("Tried to execute unknown prepared query %s", pqnf.id); + logger.error(msg); + callback.onException(new DriverInternalError(msg)); + return; + } + + try { + Message.Response prepareResponse = connection.write(new PrepareMessage(toPrepare)).get(); + // TODO check return ? 
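+                            // (Sketch of what the TODO above could check, roughly:
+                            //    if (!(prepareResponse instanceof ResultMessage.Prepared)
+                            //        || !((ResultMessage.Prepared)prepareResponse).statementId.equals(pqnf.id))
+                            //        treat it as an error instead of retrying.
+                            //  For now the response is assumed to be a successful PREPARED result.)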
+ retry = true; + } catch (InterruptedException e) { + logError(new ConnectionException(connection.address, "Interrupted while preparing query to execute")); + retry(false); + return; + } catch (ExecutionException e) { + logError(new ConnectionException(connection.address, "Unexpected problem while preparing query to execute: " + e.getCause().getMessage())); + retry(false); + return; + } catch (ConnectionException e) { + logger.debug("Connection exception while preparing missing statement", e); + logError(e); + retry(false); + return; + } } if (retry) { ++queryRetries; retry(true); } else { - callback.onSet(response); + callback.onSet(connection, response); } break; default: - callback.onSet(response); + callback.onSet(connection, response); break; } } diff --git a/driver-core/src/main/java/com/datastax/driver/core/Session.java b/driver-core/src/main/java/com/datastax/driver/core/Session.java index 3d91aa32660..84b62e22db6 100644 --- a/driver-core/src/main/java/com/datastax/driver/core/Session.java +++ b/driver-core/src/main/java/com/datastax/driver/core/Session.java @@ -21,8 +21,8 @@ * it makes sense), etc... *
* Session instances are thread-safe and usually a single instance is enough - * per application. However, a given session can only use set to one keyspace - * at a time, so this is really more one instance per keyspace used. + * per application. However, a given session can only be set to one keyspace + * at a time, so one instance per keyspace is necessary. */ public class Session { @@ -127,7 +127,7 @@ public PreparedStatement prepare(String query) { try { Connection.Future future = new Connection.Future(new PrepareMessage(query)); manager.execute(future); - return toPreparedStatement(future); + return toPreparedStatement(query, future); } catch (Exception e) { throw new RuntimeException(e); } @@ -184,12 +184,20 @@ public ResultSet.Future executePreparedAsync(BoundStatement stmt) { return manager.executeQuery(new ExecuteMessage(stmt.statement.id, Arrays.asList(stmt.values))); } - private PreparedStatement toPreparedStatement(Connection.Future future) { + private PreparedStatement toPreparedStatement(String query, Connection.Future future) { try { Message.Response response = future.get(); switch (response.type) { case RESULT: - return PreparedStatement.fromMessage((ResultMessage)response); + ResultMessage rm = (ResultMessage)response; + switch (rm.kind) { + case PREPARED: + ResultMessage.Prepared pmsg = (ResultMessage.Prepared)rm; + manager.cluster.manager.prepare(pmsg.statementId, query, future.getAddress()); + return PreparedStatement.fromMessage(pmsg); + default: + throw new DriverInternalError(String.format("%s response received when prepared statement received was expected", rm.kind)); + } case ERROR: // TODO: handle errors logger.info("Got " + response); @@ -300,6 +308,32 @@ public void execute(Connection.ResponseCallback callback) { new RetryingCallback(this, callback).sendRequest(); } + public void prepare(String query, InetSocketAddress toExclude) { + for (Map.Entry entry : pools.entrySet()) { + if (entry.getKey().getAddress().equals(toExclude)) + continue; + + // Let's not wait too long if we can't get a connection. Things + // will fix themselves once the user tries a query anyway. 
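+                // (In particular, a host skipped here will still be prepared
+                // lazily later, through the UNPREPARED error path in
+                // RetryingCallback, the first time the statement is executed on it.)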
+ Connection c = null; + try { + c = entry.getValue().borrowConnection(200, TimeUnit.MILLISECONDS); + c.write(new PrepareMessage(query)).get(); + } catch (ConnectionException e) { + // Again, not being able to prepare the query right now is no big deal, so just ignore + } catch (InterruptedException e) { + // Same as above + } catch (ExecutionException e) { + // We shouldn't really get exception while preparing a + // query, so log this (but ignore otherwise as it's not a big deal) + logger.error(String.format("Unexpected error while preparing query (%s) on %s", query, entry.getKey()), e); + } finally { + if (c != null) + entry.getValue().returnConnection(c); + } + } + } + public ResultSet.Future executeQuery(Message.Request msg) { ResultSet.Future future = new ResultSet.Future(this, msg); execute(future.callback); From 07a687ede92ba60b3f135c1ef1c1c0fb708a9dc1 Mon Sep 17 00:00:00 2001 From: Sylvain Lebresne Date: Mon, 15 Oct 2012 17:22:36 +0200 Subject: [PATCH 044/719] Minor cleanups --- .../datastax/driver/core/BoundStatement.java | 50 +++++++++---------- .../java/com/datastax/driver/core/CQLRow.java | 50 +++++++++---------- .../{Columns.java => ColumnDefinitions.java} | 48 ++++-------------- .../driver/core/PreparedStatement.java | 12 ++--- .../com/datastax/driver/core/ResultSet.java | 14 +++--- 5 files changed, 74 insertions(+), 100 deletions(-) rename driver-core/src/main/java/com/datastax/driver/core/{Columns.java => ColumnDefinitions.java} (83%) diff --git a/driver-core/src/main/java/com/datastax/driver/core/BoundStatement.java b/driver-core/src/main/java/com/datastax/driver/core/BoundStatement.java index 0f17e51f0de..d6fd03b66d3 100644 --- a/driver-core/src/main/java/com/datastax/driver/core/BoundStatement.java +++ b/driver-core/src/main/java/com/datastax/driver/core/BoundStatement.java @@ -24,7 +24,7 @@ public class BoundStatement { BoundStatement(PreparedStatement statement) { this.statement = statement; - this.values = new ByteBuffer[statement.getVariables().count()]; + this.values = new ByteBuffer[statement.getVariables().size()]; this.remaining = values.length; } @@ -50,7 +50,7 @@ public boolean isReady() { /** * Returns whether the {@code i}th variable has been bound to a value. * - * @throws IndexOutOfBoundsException if {@code i < 0 || i >= this.preparedStatement().variables().count()}. + * @throws IndexOutOfBoundsException if {@code i < 0 || i >= this.preparedStatement().variables().size()}. * @return whether the {@code i}th variable has been bound to a value. */ public boolean isSet(int i) { @@ -90,8 +90,8 @@ public boolean isSet(String name) { */ public BoundStatement bind(Object... values) { - if (values.length > statement.getVariables().count()) - throw new IllegalArgumentException(String.format("Prepared statement has only %d variables, %d values provided", statement.getVariables().count(), values.length)); + if (values.length > statement.getVariables().size()) + throw new IllegalArgumentException(String.format("Prepared statement has only %d variables, %d values provided", statement.getVariables().size(), values.length)); for (int i = 0; i < values.length; i++) { @@ -170,7 +170,7 @@ public BoundStatement bind(Object... values) { * * @return this BoundStatement. * - * @throws IndexOutOfBoundsException if {@code i < 0 || i >= this.preparedStatement().variables().count()}. + * @throws IndexOutOfBoundsException if {@code i < 0 || i >= this.preparedStatement().variables().size()}. * @throws InvalidTypeException if column {@code i} is not of type BOOLEAN. 
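     * <p>
     * Example (hypothetical usage; {@code bs} is a BoundStatement obtained from
     * a PreparedStatement):
     * <pre>
     *   bs.setBool(0, true).setInt("age", 42);
     * </pre>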
*/ public BoundStatement setBool(int i, boolean v) { @@ -196,7 +196,7 @@ public BoundStatement setBool(String name, boolean v) { * * @return this BoundStatement. * - * @throws IndexOutOfBoundsException if {@code i < 0 || i >= this.preparedStatement().variables().count()}. + * @throws IndexOutOfBoundsException if {@code i < 0 || i >= this.preparedStatement().variables().size()}. * @throws InvalidTypeException if column {@code i} is of neither of the * following types: INT, TIMESTAMP, BIGINT, COUNTER or VARINT. */ @@ -240,7 +240,7 @@ public BoundStatement setInt(String name, int v) { * * @return this BoundStatement. * - * @throws IndexOutOfBoundsException if {@code i < 0 || i >= this.preparedStatement().variables().count()}. + * @throws IndexOutOfBoundsException if {@code i < 0 || i >= this.preparedStatement().variables().size()}. * @throws InvalidTypeException if column {@code i} is of neither of the * following types: BIGINT, TIMESTAMP, COUNTER or VARINT. */ @@ -281,7 +281,7 @@ public BoundStatement setLong(String name, long v) { * * @return this BoundStatement. * - * @throws IndexOutOfBoundsException if {@code i < 0 || i >= this.preparedStatement().variables().count()}. + * @throws IndexOutOfBoundsException if {@code i < 0 || i >= this.preparedStatement().variables().size()}. * @throws InvalidTypeException if column {@code i} is not of type TIMESTAMP. */ public BoundStatement setDate(int i, Date v) { @@ -307,7 +307,7 @@ public BoundStatement setDate(String name, Date v) { * * @return this BoundStatement. * - * @throws IndexOutOfBoundsException if {@code i < 0 || i >= this.preparedStatement().variables().count()}. + * @throws IndexOutOfBoundsException if {@code i < 0 || i >= this.preparedStatement().variables().size()}. * @throws InvalidTypeException if column {@code i} is of neither of the * following types: FLOAT, DOUBLE or DECIMAL. */ @@ -347,7 +347,7 @@ public BoundStatement setFloat(String name, float v) { * * @return this BoundStatement. * - * @throws IndexOutOfBoundsException if {@code i < 0 || i >= this.preparedStatement().variables().count()}. + * @throws IndexOutOfBoundsException if {@code i < 0 || i >= this.preparedStatement().variables().size()}. * @throws InvalidTypeException if column {@code i} is of neither of the * following types: DOUBLE or DECIMAL. */ @@ -383,7 +383,7 @@ public BoundStatement setDouble(String name, double v) { * * @return this BoundStatement. * - * @throws IndexOutOfBoundsException if {@code i < 0 || i >= this.preparedStatement().variables().count()}. + * @throws IndexOutOfBoundsException if {@code i < 0 || i >= this.preparedStatement().variables().size()}. * @throws InvalidTypeException if column {@code i} is of neither of the * following types: VARCHAR, TEXT or ASCII. */ @@ -425,7 +425,7 @@ public BoundStatement setString(String name, String v) { * * @return this BoundStatement. * - * @throws IndexOutOfBoundsException if {@code i < 0 || i >= this.preparedStatement().variables().count()}. + * @throws IndexOutOfBoundsException if {@code i < 0 || i >= this.preparedStatement().variables().size()}. * @throws InvalidTypeException if column {@code i} is not of type BLOB. */ public BoundStatement setByteBuffer(int i, ByteBuffer v) { @@ -460,7 +460,7 @@ public BoundStatement setByteBuffer(String name, ByteBuffer v) { * * @return this BoundStatement. * - * @throws IndexOutOfBoundsException if {@code i < 0 || i >= this.preparedStatement().variables().count()}. 
+ * @throws IndexOutOfBoundsException if {@code i < 0 || i >= this.preparedStatement().variables().size()}. */ public BoundStatement setByteBufferUnsafe(int i, ByteBuffer v) { return setValue(i, v.duplicate()); @@ -492,7 +492,7 @@ public BoundStatement setByteBufferUnsafe(String name, ByteBuffer v) { * * @return this BoundStatement. * - * @throws IndexOutOfBoundsException if {@code i < 0 || i >= this.preparedStatement().variables().count()}. + * @throws IndexOutOfBoundsException if {@code i < 0 || i >= this.preparedStatement().variables().size()}. * @throws InvalidTypeException if column {@code i} is not of type BLOB. */ public BoundStatement setBytes(int i, byte[] v) { @@ -521,10 +521,10 @@ public BoundStatement setBytes(String name, byte[] v) { * * @return this BoundStatement. * - * @throws IndexOutOfBoundsException if {@code i < 0 || i >= this.preparedStatement().variables().count()}. + * @throws IndexOutOfBoundsException if {@code i < 0 || i >= this.preparedStatement().variables().size()}. * @throws InvalidTypeException if column {@code i} is not of type VARINT. */ - public BoundStatement setVarInt(int i, BigInteger v) { + public BoundStatement setVarint(int i, BigInteger v) { metadata().checkType(i, DataType.Native.VARINT); return setValue(i, IntegerType.instance.decompose(v)); } @@ -538,8 +538,8 @@ public BoundStatement setVarInt(int i, BigInteger v) { * variable, i.e. if {@code !this.preparedStatement().variables().names().contains(name)}. * @throws InvalidTypeException if column {@code name} is not of type VARINT. */ - public BoundStatement setVarInt(String name, BigInteger v) { - return setVarInt(metadata().getIdx(name), v); + public BoundStatement setVarint(String name, BigInteger v) { + return setVarint(metadata().getIdx(name), v); } /** @@ -547,7 +547,7 @@ public BoundStatement setVarInt(String name, BigInteger v) { * * @return this BoundStatement. * - * @throws IndexOutOfBoundsException if {@code i < 0 || i >= this.preparedStatement().variables().count()}. + * @throws IndexOutOfBoundsException if {@code i < 0 || i >= this.preparedStatement().variables().size()}. * @throws InvalidTypeException if column {@code i} is not of type DECIMAL. */ public BoundStatement setDecimal(int i, BigDecimal v) { @@ -573,7 +573,7 @@ public BoundStatement setDecimal(String name, BigDecimal v) { * * @return this BoundStatement. * - * @throws IndexOutOfBoundsException if {@code i < 0 || i >= this.preparedStatement().variables().count()}. + * @throws IndexOutOfBoundsException if {@code i < 0 || i >= this.preparedStatement().variables().size()}. * @throws InvalidTypeException if column {@code i} is not of type UUID or * TIMEUUID, or if columm {@code i} is of type TIMEUUID but {@code v} is * not a type 1 UUID. @@ -610,7 +610,7 @@ public BoundStatement setUUID(String name, UUID v) { * * @return this BoundStatement. * - * @throws IndexOutOfBoundsException if {@code i < 0 || i >= this.preparedStatement().variables().count()}. + * @throws IndexOutOfBoundsException if {@code i < 0 || i >= this.preparedStatement().variables().size()}. * @throws InvalidTypeException if column {@code i} is not of type INET. */ public BoundStatement setInet(int i, InetAddress v) { @@ -636,7 +636,7 @@ public BoundStatement setInet(String name, InetAddress v) { * * @return this BoundStatement. * - * @throws IndexOutOfBoundsException if {@code i < 0 || i >= this.preparedStatement().variables().count()}. + * @throws IndexOutOfBoundsException if {@code i < 0 || i >= this.preparedStatement().variables().size()}. 
* @throws InvalidTypeException if column {@code i} is not a list type or * if the elements of {@code v} are not of the type of the elements of * column {@code i}. @@ -679,7 +679,7 @@ public BoundStatement setList(String name, List v) { * * @return this BoundStatement. * - * @throws IndexOutOfBoundsException if {@code i < 0 || i >= this.preparedStatement().variables().count()}. + * @throws IndexOutOfBoundsException if {@code i < 0 || i >= this.preparedStatement().variables().size()}. * @throws InvalidTypeException if column {@code i} is not a map type or * if the elements (keys or values) of {@code v} are not of the type of the * elements of column {@code i}. @@ -725,7 +725,7 @@ public BoundStatement setMap(String name, Map v) { * * @return this BoundStatement. * - * @throws IndexOutOfBoundsException if {@code i < 0 || i >= this.preparedStatement().variables().count()}. + * @throws IndexOutOfBoundsException if {@code i < 0 || i >= this.preparedStatement().variables().size()}. * @throws InvalidTypeException if column {@code i} is not a set type or * if the elements of {@code v} are not of the type of the elements of * column {@code i}. @@ -762,7 +762,7 @@ public BoundStatement setSet(String name, Set v) { return setSet(metadata().getIdx(name), v); } - private Columns metadata() { + private ColumnDefinitions metadata() { return statement.metadata; } diff --git a/driver-core/src/main/java/com/datastax/driver/core/CQLRow.java b/driver-core/src/main/java/com/datastax/driver/core/CQLRow.java index 433e34500d7..d57df122157 100644 --- a/driver-core/src/main/java/com/datastax/driver/core/CQLRow.java +++ b/driver-core/src/main/java/com/datastax/driver/core/CQLRow.java @@ -15,15 +15,15 @@ */ public class CQLRow { - private final Columns metadata; + private final ColumnDefinitions metadata; private final List data; - private CQLRow(Columns metadata, List data) { + private CQLRow(ColumnDefinitions metadata, List data) { this.metadata = metadata; this.data = data; } - static CQLRow fromData(Columns metadata, List data) { + static CQLRow fromData(ColumnDefinitions metadata, List data) { if (data == null) return null; @@ -35,7 +35,7 @@ static CQLRow fromData(Columns metadata, List data) { * * @return the columns contained in this CQLRow. */ - public Columns getColumns() { + public ColumnDefinitions getColumnDefinitions() { return metadata; } @@ -45,7 +45,7 @@ public Columns getColumns() { * @param i the index of the column to check. * @return whether the {@code i}th value of this row is NULL. * - * @throws IndexOutOfBoundsException if {@code i < 0 || i >= this.columns().count()}. + * @throws IndexOutOfBoundsException if {@code i < 0 || i >= this.columns().size()}. */ public boolean isNull(int i) { metadata.checkBounds(i); @@ -72,7 +72,7 @@ public boolean isNull(String name) { * @return the boolean value of the {@code i}th column in this row. If the * value is NULL, {@code false} is returned. * - * @throws IndexOutOfBoundsException if {@code i < 0 || i >= this.columns().count()}. + * @throws IndexOutOfBoundsException if {@code i < 0 || i >= this.columns().size()}. * @throws InvalidTypeException if column {@code i} is not of type BOOLEAN. */ public boolean getBool(int i) { @@ -107,7 +107,7 @@ public boolean getBool(String name) { * @return the value of the {@code i}th column in this row as an integer. If the * value is NULL, {@code 0} is returned. * - * @throws IndexOutOfBoundsException if {@code i < 0 || i >= this.columns().count()}. 
+ * @throws IndexOutOfBoundsException if {@code i < 0 || i >= this.columns().size()}. * @throws InvalidTypeException if column {@code i} is not of type INT. */ public int getInt(int i) { @@ -142,7 +142,7 @@ public int getInt(String name) { * @return the value of the {@code i}th column in this row as a long. If the * value is NULL, {@code 0L} is returned. * - * @throws IndexOutOfBoundsException if {@code i < 0 || i >= this.columns().count()}. + * @throws IndexOutOfBoundsException if {@code i < 0 || i >= this.columns().size()}. * @throws InvalidTypeException if column {@code i} type is not one of: BIGINT, TIMESTAMP, * INT or COUNTER. */ @@ -184,7 +184,7 @@ public long getLong(String name) { * @return the value of the {@code i}th column in this row as a data. If the * value is NULL, {@code null} is returned. * - * @throws IndexOutOfBoundsException if {@code i < 0 || i >= this.columns().count()}. + * @throws IndexOutOfBoundsException if {@code i < 0 || i >= this.columns().size()}. * @throws InvalidTypeException if column {@code i} is not of type TIMESTAMP. */ public Date getDate(int i) { @@ -219,7 +219,7 @@ public Date getDate(String name) { * @return the value of the {@code i}th column in this row as a float. If the * value is NULL, {@code 0.0f} is returned. * - * @throws IndexOutOfBoundsException if {@code i < 0 || i >= this.columns().count()}. + * @throws IndexOutOfBoundsException if {@code i < 0 || i >= this.columns().size()}. * @throws InvalidTypeException if column {@code i} is not of type FLOAT. */ public float getFloat(int i) { @@ -254,7 +254,7 @@ public float getFloat(String name) { * @return the value of the {@code i}th column in this row as a double. If the * value is NULL, {@code 0.0} is returned. * - * @throws IndexOutOfBoundsException if {@code i < 0 || i >= this.columns().count()}. + * @throws IndexOutOfBoundsException if {@code i < 0 || i >= this.columns().size()}. * @throws InvalidTypeException if column {@code i} is not of type * DOUBLE or FLOAT. */ @@ -299,7 +299,7 @@ public double getDouble(String name) { * @return the value of the {@code i}th column in this row as a ByteBuffer. If the * value is NULL, {@code null} is returned. * - * @throws IndexOutOfBoundsException if {@code i < 0 || i >= this.columns().count()}. + * @throws IndexOutOfBoundsException if {@code i < 0 || i >= this.columns().size()}. */ public ByteBuffer getByteBuffer(int i) { metadata.checkBounds(i); @@ -342,7 +342,7 @@ public ByteBuffer getByteBuffer(String name) { * @return the value of the {@code i}th column in this row as a byte array. If the * value is NULL, {@code null} is returned. * - * @throws IndexOutOfBoundsException if {@code i < 0 || i >= this.columns().count()}. + * @throws IndexOutOfBoundsException if {@code i < 0 || i >= this.columns().size()}. */ public byte[] getBytes(int i) { ByteBuffer bb = getByteBuffer(i); @@ -377,7 +377,7 @@ public byte[] getBytes(String name) { * @return the value of the {@code i}th column in this row as a string. If the * value is NULL, {@code null} is returned. * - * @throws IndexOutOfBoundsException if {@code i < 0 || i >= this.columns().count()}. + * @throws IndexOutOfBoundsException if {@code i < 0 || i >= this.columns().size()}. * @throws InvalidTypeException if column {@code i} type is none of: * VARCHAR, TEXT or ASCII. */ @@ -418,10 +418,10 @@ public String getString(String name) { * @return the value of the {@code i}th column in this row as a variable * length integer. If the value is NULL, {@code null} is returned. 
* - * @throws IndexOutOfBoundsException if {@code i < 0 || i >= this.columns().count()}. + * @throws IndexOutOfBoundsException if {@code i < 0 || i >= this.columns().size()}. * @throws InvalidTypeException if column {@code i} is not of type VARINT. */ - public BigInteger getVarInt(int i) { + public BigInteger getVarint(int i) { metadata.checkType(i, DataType.Native.VARINT); ByteBuffer value = data.get(i); @@ -442,8 +442,8 @@ public BigInteger getVarInt(int i) { * ResultSet this row is part of, i.e. if {@code !this.columns().names().contains(name)}. * @throws InvalidTypeException if column {@code name} is not of type VARINT. */ - public BigInteger getVarInt(String name) { - return getVarInt(metadata.getIdx(name)); + public BigInteger getVarint(String name) { + return getVarint(metadata.getIdx(name)); } /** @@ -453,7 +453,7 @@ public BigInteger getVarInt(String name) { * @return the value of the {@code i}th column in this row as a variable * length decimal. If the value is NULL, {@code null} is returned. * - * @throws IndexOutOfBoundsException if {@code i < 0 || i >= this.columns().count()}. + * @throws IndexOutOfBoundsException if {@code i < 0 || i >= this.columns().size()}. * @throws InvalidTypeException if column {@code i} is not of type DECIMAL. */ public BigDecimal getDecimal(int i) { @@ -488,7 +488,7 @@ public BigDecimal getDecimal(String name) { * @return the value of the {@code i}th column in this row as a UUID. * If the value is NULL, {@code null} is returned. * - * @throws IndexOutOfBoundsException if {@code i < 0 || i >= this.columns().count()}. + * @throws IndexOutOfBoundsException if {@code i < 0 || i >= this.columns().size()}. * @throws InvalidTypeException if column {@code i} is not of type UUID * or TIMEUUID. */ @@ -527,7 +527,7 @@ public UUID getUUID(String name) { * @return the value of the {@code i}th column in this row as an InetAddress. * If the value is NULL, {@code null} is returned. * - * @throws IndexOutOfBoundsException if {@code i < 0 || i >= this.columns().count()}. + * @throws IndexOutOfBoundsException if {@code i < 0 || i >= this.columns().size()}. * @throws InvalidTypeException if column {@code i} is not of type INET. */ public InetAddress getInet(int i) { @@ -566,7 +566,7 @@ public InetAddress getInet(String name) { * returned (note that Cassandra makes no difference between an empty list * and column of type list that is not set). * - * @throws IndexOutOfBoundsException if {@code i < 0 || i >= this.columns().count()}. + * @throws IndexOutOfBoundsException if {@code i < 0 || i >= this.columns().size()}. * @throws InvalidTypeException if column {@code i} is not a list or if its * elements are not of class {@code elementsClass}. */ @@ -621,7 +621,7 @@ public List getList(String name, Class elementsClass) { * returned (note that Cassandra makes no difference between an empty set * and column of type set that is not set). * - * @throws IndexOutOfBoundsException if {@code i < 0 || i >= this.columns().count()}. + * @throws IndexOutOfBoundsException if {@code i < 0 || i >= this.columns().size()}. * @throws InvalidTypeException if column {@code i} is not a set or if its * elements are not of class {@code elementsClass}. */ @@ -671,7 +671,7 @@ public Set getSet(String name, Class elementsClass) { * an empty map is returned (note that Cassandra makes no difference * between an empty map and column of type map that is not set). * - * @throws IndexOutOfBoundsException if {@code i < 0 || i >= this.columns().count()}. 
+ * @throws IndexOutOfBoundsException if {@code i < 0 || i >= this.columns().size()}. * @throws InvalidTypeException if column {@code i} is not a map, if its * keys are not of class {@code keysClass} or if its values are not of * class {@code valuesClass}. @@ -719,7 +719,7 @@ public Map getMap(String name, Class keysClass, Class valuesC public String toString() { StringBuilder sb = new StringBuilder(); sb.append("CQLRow["); - for (int i = 0; i < metadata.count(); i++) { + for (int i = 0; i < metadata.size(); i++) { if (i != 0) sb.append(", "); ByteBuffer bb = data.get(i); diff --git a/driver-core/src/main/java/com/datastax/driver/core/Columns.java b/driver-core/src/main/java/com/datastax/driver/core/ColumnDefinitions.java similarity index 83% rename from driver-core/src/main/java/com/datastax/driver/core/Columns.java rename to driver-core/src/main/java/com/datastax/driver/core/ColumnDefinitions.java index 9def43ef7b9..566abdedca5 100644 --- a/driver-core/src/main/java/com/datastax/driver/core/Columns.java +++ b/driver-core/src/main/java/com/datastax/driver/core/ColumnDefinitions.java @@ -10,14 +10,14 @@ * Metadata describing the columns returned in a {@link ResultSet} or a * {@link PreparedStatement}. */ -public class Columns implements Iterable { +public class ColumnDefinitions implements Iterable { - static final Columns EMPTY = new Columns(new Definition[0]); + static final ColumnDefinitions EMPTY = new ColumnDefinitions(new Definition[0]); private final Definition[] byIdx; private final Map byName; - Columns(Definition[] defs) { + ColumnDefinitions(Definition[] defs) { this.byIdx = defs; this.byName = new HashMap(defs.length); @@ -32,7 +32,7 @@ public class Columns implements Iterable { * * @return the number of columns described by this metadata. */ - public int count() { + public int size() { return byIdx.length; } @@ -66,38 +66,12 @@ public List asList() { return Arrays.asList(byIdx); } - /** - * Returns the list of the names for the columns defined in these metadata. - * - * @return the list of the names for the columns defined in these metadata. - * The names in the returned list will be in the order of this metadata. - */ - public List getNames() { - List names = new ArrayList(byIdx.length); - for (Definition def : byIdx) - names.add(def.name); - return names; - } - - /** - * Returns the list of the types for the columns defined in these metadata. - * - * @return the list of the types for the columns defined in these metadata. - * The types in the returned list will be in the order of this metadata. - */ - public List getTypes() { - List types = new ArrayList(byIdx.length); - for (Definition def : byIdx) - types.add(def.type); - return types; - } - /** * Returns the name of the {@code i}th column in this metadata. * * @return the name of the {@code i}th column in this metadata. * - * @throws IndexOutOfBoundsException if {@code i < 0} or {@code i >= count()} + * @throws IndexOutOfBoundsException if {@code i < 0} or {@code i >= size()} */ public String getName(int i) { return byIdx[i].name; @@ -108,7 +82,7 @@ public String getName(int i) { * * @return the type of the {@code i}th column in this metadata. * - * @throws IndexOutOfBoundsException if {@code i < 0} or {@code i >= count()} + * @throws IndexOutOfBoundsException if {@code i < 0} or {@code i >= size()} */ public DataType getType(int i) { return byIdx[i].type; @@ -121,7 +95,7 @@ public DataType getType(int i) { * * @throws IllegalArgumentException if {@code name} is not one of the columns in this metadata. 
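     * <p>
     * Example (hypothetical usage):
     * <pre>
     *   ColumnDefinitions defs = rs.getColumnDefinitions();
     *   for (int i = 0; i < defs.size(); i++)
     *       System.out.println(defs.getName(i) + " " + defs.getType(i));
     * </pre>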
*/ - public DataType geType(String name) { + public DataType getType(String name) { return getType(getIdx(name)); } @@ -130,7 +104,7 @@ public DataType geType(String name) { * * @return the keyspace of the {@code i}th column in this metadata. * - * @throws IndexOutOfBoundsException if {@code i < 0} or {@code i >= count()} + * @throws IndexOutOfBoundsException if {@code i < 0} or {@code i >= size()} */ public String getKeyspace(int i) { return byIdx[i].keyspace; @@ -152,7 +126,7 @@ public String getKeyspace(String name) { * * @return the table of the {@code i}th column in this metadata. * - * @throws IndexOutOfBoundsException if {@code i < 0} or {@code i >= count()} + * @throws IndexOutOfBoundsException if {@code i < 0} or {@code i >= size()} */ public String getTable(int i) { return byIdx[i].table; @@ -172,7 +146,7 @@ public String getTable(String name) { public String toString() { StringBuilder sb = new StringBuilder(); sb.append("Columns["); - for (int i = 0; i < count(); i++) { + for (int i = 0; i < size(); i++) { if (i != 0) sb.append(", "); Definition def = byIdx[i]; @@ -191,7 +165,7 @@ int getIdx(String name) { } void checkBounds(int i) { - if (i < 0 || i >= count()) + if (i < 0 || i >= size()) throw new ArrayIndexOutOfBoundsException(i); } diff --git a/driver-core/src/main/java/com/datastax/driver/core/PreparedStatement.java b/driver-core/src/main/java/com/datastax/driver/core/PreparedStatement.java index d3342d28546..a8856f88cd7 100644 --- a/driver-core/src/main/java/com/datastax/driver/core/PreparedStatement.java +++ b/driver-core/src/main/java/com/datastax/driver/core/PreparedStatement.java @@ -16,10 +16,10 @@ */ public class PreparedStatement { - final Columns metadata; + final ColumnDefinitions metadata; final MD5Digest id; - private PreparedStatement(Columns metadata, MD5Digest id) { + private PreparedStatement(ColumnDefinitions metadata, MD5Digest id) { this.metadata = metadata; this.id = id; } @@ -28,11 +28,11 @@ static PreparedStatement fromMessage(ResultMessage.Prepared msg) { switch (msg.kind) { case PREPARED: ResultMessage.Prepared pmsg = (ResultMessage.Prepared)msg; - Columns.Definition[] defs = new Columns.Definition[pmsg.metadata.names.size()]; + ColumnDefinitions.Definition[] defs = new ColumnDefinitions.Definition[pmsg.metadata.names.size()]; for (int i = 0; i < defs.length; i++) - defs[i] = Columns.Definition.fromTransportSpecification(pmsg.metadata.names.get(i)); + defs[i] = ColumnDefinitions.Definition.fromTransportSpecification(pmsg.metadata.names.get(i)); - return new PreparedStatement(new Columns(defs), pmsg.statementId); + return new PreparedStatement(new ColumnDefinitions(defs), pmsg.statementId); default: throw new DriverInternalError(String.format("%s response received when prepared statement received was expected", msg.kind)); } @@ -43,7 +43,7 @@ static PreparedStatement fromMessage(ResultMessage.Prepared msg) { * * @return the variables bounded in this prepared statement. 
*/ - public Columns getVariables() { + public ColumnDefinitions getVariables() { return metadata; } diff --git a/driver-core/src/main/java/com/datastax/driver/core/ResultSet.java b/driver-core/src/main/java/com/datastax/driver/core/ResultSet.java index 0853ec4a3ac..a310cff3470 100644 --- a/driver-core/src/main/java/com/datastax/driver/core/ResultSet.java +++ b/driver-core/src/main/java/com/datastax/driver/core/ResultSet.java @@ -29,12 +29,12 @@ public class ResultSet implements Iterable { private static final Logger logger = LoggerFactory.getLogger(ResultSet.class); - private static final ResultSet EMPTY = new ResultSet(Columns.EMPTY, new ArrayDeque(0)); + private static final ResultSet EMPTY = new ResultSet(ColumnDefinitions.EMPTY, new ArrayDeque(0)); - private final Columns metadata; + private final ColumnDefinitions metadata; private final Queue> rows; - private ResultSet(Columns metadata, Queue> rows) { + private ResultSet(ColumnDefinitions metadata, Queue> rows) { this.metadata = metadata; this.rows = rows; @@ -46,11 +46,11 @@ private static ResultSet fromMessage(ResultMessage msg) { return EMPTY; case ROWS: ResultMessage.Rows r = (ResultMessage.Rows)msg; - Columns.Definition[] defs = new Columns.Definition[r.result.metadata.names.size()]; + ColumnDefinitions.Definition[] defs = new ColumnDefinitions.Definition[r.result.metadata.names.size()]; for (int i = 0; i < defs.length; i++) - defs[i] = Columns.Definition.fromTransportSpecification(r.result.metadata.names.get(i)); + defs[i] = ColumnDefinitions.Definition.fromTransportSpecification(r.result.metadata.names.get(i)); - return new ResultSet(new Columns(defs), new ArrayDeque(r.result.rows)); + return new ResultSet(new ColumnDefinitions(defs), new ArrayDeque(r.result.rows)); case SET_KEYSPACE: case SCHEMA_CHANGE: return EMPTY; @@ -67,7 +67,7 @@ private static ResultSet fromMessage(ResultMessage msg) { * * @return the columns returned in this ResultSet. */ - public Columns getColumns() { + public ColumnDefinitions getColumnDefinitions() { return metadata; } From 0b816ff4fb168a92cd11533df983c4810ab04edc Mon Sep 17 00:00:00 2001 From: Sylvain Lebresne Date: Mon, 15 Oct 2012 19:38:55 +0200 Subject: [PATCH 045/719] Rip out beginning of support for custom types (we'll see later ... maybe) --- .../datastax/driver/core/BoundStatement.java | 122 ++++++++---------- .../java/com/datastax/driver/core/Codec.java | 16 +-- .../com/datastax/driver/core/DataType.java | 85 +++++------- 3 files changed, 92 insertions(+), 131 deletions(-) diff --git a/driver-core/src/main/java/com/datastax/driver/core/BoundStatement.java b/driver-core/src/main/java/com/datastax/driver/core/BoundStatement.java index d6fd03b66d3..b7e9077b5e1 100644 --- a/driver-core/src/main/java/com/datastax/driver/core/BoundStatement.java +++ b/driver-core/src/main/java/com/datastax/driver/core/BoundStatement.java @@ -97,68 +97,60 @@ public BoundStatement bind(Object... 
values) { { Object toSet = values[i]; DataType columnType = statement.getVariables().getType(i); - switch (columnType.getKind()) - { - case NATIVE: - if (!Codec.isCompatible(columnType.asNative(), toSet.getClass())) - throw new InvalidTypeException(String.format("Invalid type for value %d, column type is %s but %s provided", i, columnType, toSet.getClass())); - break; - case COLLECTION: - switch (columnType.asCollection().collectionType()) - { - case LIST: - if (!(toSet instanceof List)) - throw new InvalidTypeException(String.format("Invalid type for value %d, column is a list but %s provided", i, toSet.getClass())); - - List l = (List)toSet; - // If the list is empty, it will never fail validation, but otherwise we should check the list given if of the right type - if (!l.isEmpty()) { - // Ugly? Yes - Class klass = l.get(0).getClass(); - DataType.Native eltType = (DataType.Native)((DataType.Collection.List)columnType).getElementsType(); - if (!Codec.isCompatible(eltType, klass)) - throw new InvalidTypeException(String.format("Invalid type for value %d, column type is %s but provided list value are %s", i, columnType, klass)); - } - break; - case SET: - if (!(toSet instanceof Set)) - throw new InvalidTypeException(String.format("Invalid type for value %d, column is a set but %s provided", i, toSet.getClass())); - - Set s = (Set)toSet; - // If the list is empty, it will never fail validation, but otherwise we should check the list given if of the right type - if (!s.isEmpty()) { - // Ugly? Yes - Class klass = s.iterator().next().getClass(); - DataType.Native eltType = (DataType.Native)((DataType.Collection.List)columnType).getElementsType(); - if (!Codec.isCompatible(eltType, klass)) - throw new InvalidTypeException(String.format("Invalid type for value %d, column type is %s but provided set value are %s", i, columnType, klass)); - } - break; - case MAP: - if (!(toSet instanceof Map)) - throw new InvalidTypeException(String.format("Invalid type for value %d, column is a map but %s provided", i, toSet.getClass())); - - Map m = (Map)toSet; - // If the list is empty, it will never fail validation, but otherwise we should check the list given if of the right type - if (!m.isEmpty()) { - // Ugly? Yes - Map.Entry entry = (Map.Entry)m.entrySet().iterator().next(); - Class keysClass = entry.getKey().getClass(); - Class valuesClass = entry.getValue().getClass(); - - DataType.Collection.Map mapType = (DataType.Collection.Map)columnType; - DataType.Native keysType = (DataType.Native)mapType.getKeysType(); - DataType.Native valuesType = (DataType.Native)mapType.getValuesType(); - if (!Codec.isCompatible(keysType, keysClass) || !Codec.isCompatible(valuesType, valuesClass)) - throw new InvalidTypeException(String.format("Invalid type for value %d, column type %s conflicts with provided type %s", i, mapType, toSet.getClass())); - } - break; - - } - break; - case CUSTOM: - // TODO: Not sure how to handle that though - throw new UnsupportedOperationException(); + if (columnType.isCollection()) { + switch (columnType.asCollection().getKind()) { + case LIST: + if (!(toSet instanceof List)) + throw new InvalidTypeException(String.format("Invalid type for value %d, column is a list but %s provided", i, toSet.getClass())); + + List l = (List)toSet; + // If the list is empty, it will never fail validation, but otherwise we should check the list given if of the right type + if (!l.isEmpty()) { + // Ugly? 
Yes
+                        Class klass = l.get(0).getClass();
+                        DataType.Native eltType = (DataType.Native)((DataType.Collection.List)columnType).getElementsType();
+                        if (!Codec.isCompatible(eltType, klass))
+                            throw new InvalidTypeException(String.format("Invalid type for value %d, column type is %s but provided list values are %s", i, columnType, klass));
+                    }
+                    break;
+                case SET:
+                    if (!(toSet instanceof Set))
+                        throw new InvalidTypeException(String.format("Invalid type for value %d, column is a set but %s provided", i, toSet.getClass()));
+
+                    Set s = (Set)toSet;
+                    // If the set is empty, it will never fail validation, but otherwise we should check that the set given is of the right type
+                    if (!s.isEmpty()) {
+                        // Ugly? Yes
+                        Class klass = s.iterator().next().getClass();
+                        DataType.Native eltType = (DataType.Native)((DataType.Collection.Set)columnType).getElementsType();
+                        if (!Codec.isCompatible(eltType, klass))
+                            throw new InvalidTypeException(String.format("Invalid type for value %d, column type is %s but provided set values are %s", i, columnType, klass));
+                    }
+                    break;
+                case MAP:
+                    if (!(toSet instanceof Map))
+                        throw new InvalidTypeException(String.format("Invalid type for value %d, column is a map but %s provided", i, toSet.getClass()));
+
+                    Map m = (Map)toSet;
+                    // If the map is empty, it will never fail validation, but otherwise we should check that the keys and values given are of the right type
+                    if (!m.isEmpty()) {
+                        // Ugly? Yes
+                        Map.Entry entry = (Map.Entry)m.entrySet().iterator().next();
+                        Class keysClass = entry.getKey().getClass();
+                        Class valuesClass = entry.getValue().getClass();
+
+                        DataType.Collection.Map mapType = (DataType.Collection.Map)columnType;
+                        DataType.Native keysType = (DataType.Native)mapType.getKeysType();
+                        DataType.Native valuesType = (DataType.Native)mapType.getValuesType();
+                        if (!Codec.isCompatible(keysType, keysClass) || !Codec.isCompatible(valuesType, valuesClass))
+                            throw new InvalidTypeException(String.format("Invalid type for value %d, column type %s conflicts with provided type %s", i, mapType, toSet.getClass()));
+                    }
+                    break;
+
+            }
+        } else {
+            if (!Codec.isCompatible(columnType.asNative(), toSet.getClass()))
+                throw new InvalidTypeException(String.format("Invalid type for value %d, column type is %s but %s provided", i, columnType, toSet.getClass()));
         }
         setValue(i, Codec.getCodec(columnType).decompose(toSet));
     }
@@ -643,7 +635,7 @@ public BoundStatement setInet(String name, InetAddress v) {
      */
     public BoundStatement setList(int i, List v) {
         DataType type = metadata().getType(i);
-        if (type.getKind() != DataType.Kind.COLLECTION || type.asCollection().collectionType() != DataType.Collection.Type.LIST)
+        if (!type.isCollection() || type.asCollection().getKind() != DataType.Collection.Kind.LIST)
             throw new InvalidTypeException(String.format("Column %s is of type %s, cannot set to a list", metadata().getName(i), type));
 
         // If the list is empty, it will never fail validation, but otherwise we should check the list given if of the right type
@@ -686,7 +678,7 @@ public BoundStatement setList(String name, List v) {
      */
     public BoundStatement setMap(int i, Map v) {
         DataType type = metadata().getType(i);
-        if (type.getKind() != DataType.Kind.COLLECTION || type.asCollection().collectionType() != DataType.Collection.Type.MAP)
+        if (!type.isCollection() || type.asCollection().getKind() != DataType.Collection.Kind.MAP)
             throw new InvalidTypeException(String.format("Column %s is of type %s, cannot set to a map", metadata().getName(i), type));
 
         if (!v.isEmpty()) {
@@ -732,7 +724,7 @@ public BoundStatement setMap(String name, Map v) {
      */
     public BoundStatement setSet(int i, Set v) {
         DataType type = metadata().getType(i);
-        if (type.getKind() != DataType.Kind.COLLECTION || type.asCollection().collectionType() != DataType.Collection.Type.SET)
+        if (!type.isCollection() || type.asCollection().getKind() != DataType.Collection.Kind.SET)
             throw new InvalidTypeException(String.format("Column %s is of type %s, cannot set to a set", metadata().getName(i), type));
 
         if (!v.isEmpty()) {
diff --git a/driver-core/src/main/java/com/datastax/driver/core/Codec.java b/driver-core/src/main/java/com/datastax/driver/core/Codec.java
index 73c9aa49cd7..d3f50941445 100644
--- a/driver-core/src/main/java/com/datastax/driver/core/Codec.java
+++ b/driver-core/src/main/java/com/datastax/driver/core/Codec.java
@@ -38,12 +38,10 @@ class Codec {
 
     private Codec() {}
 
     public static AbstractType getCodec(DataType type) {
-        switch (type.getKind()) {
-            case NATIVE: return (AbstractType)nativeCodec(type.asNative());
-            case COLLECTION: return (AbstractType)collectionCodec(type.asCollection());
-            case CUSTOM: return (AbstractType)customCodec(type.asCustom());
-            default: throw new RuntimeException("Unknow data type kind");
-        }
+        if (type.isCollection())
+            return (AbstractType)collectionCodec(type.asCollection());
+        else
+            return (AbstractType)nativeCodec(type.asNative());
     }
 
     private static AbstractType nativeCodec(DataType.Native type) {
@@ -71,7 +69,7 @@ private static AbstractType nativeCodec(DataType.Native type) {
 
     private static AbstractType collectionCodec(DataType.Collection type) {
 
-        switch (type.collectionType()) {
+        switch (type.getKind()) {
             case LIST:
                 AbstractType listElts = getCodec(((DataType.Collection.List)type).getElementsType());
                 return ListType.getInstance(listElts);
@@ -88,10 +86,6 @@ private static AbstractType collectionCodec(DataType.Collection type) {
         }
     }
 
-    private static AbstractType customCodec(DataType.Custom type) {
-        return null;
-    }
-
     public static DataType rawTypeToDataType(AbstractType rawType) {
         DataType type = rawNativeMap.get(rawType);
         if (type != null)
diff --git a/driver-core/src/main/java/com/datastax/driver/core/DataType.java b/driver-core/src/main/java/com/datastax/driver/core/DataType.java
index eeb8dda75b4..2d9c7163ee5 100644
--- a/driver-core/src/main/java/com/datastax/driver/core/DataType.java
+++ b/driver-core/src/main/java/com/datastax/driver/core/DataType.java
@@ -6,27 +6,26 @@
 public interface DataType {
 
     /**
-     * The three kind of type supported by Cassandra.
+     * A Cassandra type.
      *
-     * The {@code NATIVE} types supported by Cassandra are described in the
-     * CQL documentation,
-     * and more information on such type can be obtained using the {#asNative}
-     * method.
+     * There are two families of types: the native ones and the collection
+     * ones. One can decide whether a type is a native or a collection one
+     * using the {@link #isCollection} method.
      *
-     * The {@code COLLECTION} types the maps, lists and sets. More information
-     * on such type can be obtained using the {#asCollection} method.
+     * The {@code NATIVE} types are described in the
+     * CQL documentation.
      *
-     * The {@code CUSTOM} types are user defined types. More information on
-     * such type can be obtained using the {#asCustom} method.
+     * The {@code COLLECTION} types are the maps, lists and sets.
      */
     public enum Kind { NATIVE, COLLECTION, CUSTOM }
 
     /**
-     * Returns this type {@link Kind}.
+     * Returns whether this type is a collection type.
      *
-     * @return this type {@link Kind}.
+ * @return {@code true} if the type is a collection one, {@code false} if + * it is a native type. */ - public Kind getKind(); + public boolean isCollection(); /** * Returns this type as a {@link Native} type. @@ -34,7 +33,7 @@ public enum Kind { NATIVE, COLLECTION, CUSTOM } * @return this type as a {@link Native} type. * * @throws IllegalStateException if this type is not a {@link Native} type. - * You should use {@link #getKind} to check if this type is a native one + * You should use {@link #isCollection} to check if this type is a native one * before calling this method. */ public Native asNative(); @@ -45,22 +44,11 @@ public enum Kind { NATIVE, COLLECTION, CUSTOM } * @return this type as a {@link Collection} type. * * @throws IllegalStateException if this type is not a {@link Collection} - * type. You should use {@link #getKind} to check if this type is a collection - * one before calling this method. + * type. You should use {@link #isCollection} to check if this type is a + * collection one before calling this method. */ public Collection asCollection(); - /** - * Returns this type as a {@link Custom} type. - * - * @return this type as a {@link Custom} type. - * - * @throws IllegalStateException if this type is not a {@link Custom} type. - * You should use {@link #getKind} to check if this type is a custom one - * before calling this method. - */ - public Custom asCustom(); - /** * Native types supported by cassandra. */ @@ -83,11 +71,9 @@ public enum Native implements DataType { VARINT, TIMEUUID; - public Kind getKind() { return Kind.NATIVE; } - + public boolean isCollection() { return false; } public Native asNative() { return this; } public Collection asCollection() { throw new IllegalStateException("Not a collection type, but a native one"); } - public Custom asCustom() { throw new IllegalStateException("Not a custom type, but a native one"); } @Override public String toString() { @@ -100,27 +86,29 @@ public String toString() { */ public static abstract class Collection implements DataType { - // TODO: Type is a very ugly/confusing name - public enum Type { LIST, SET, MAP }; + /** + * The kind of collection a collection type represents. + */ + public enum Kind { LIST, SET, MAP }; - private final Type type; + private final Kind kind; - protected Collection(Type type) { - this.type = type; + protected Collection(Kind kind) { + this.kind = kind; } - public Kind getKind() { return Kind.COLLECTION; } + public boolean isCollection() { return true; } /** - * The type of collection. + * The kind of collection this type represents. * - * @return the type of collection. + * @return the kind of collection (list, set or map) this type + * represents. */ - public Type collectionType() { return type; } + public Kind getKind() { return kind; } public Native asNative() { throw new IllegalStateException("Not a native type, but a collection one"); } public Collection asCollection() { return this; } - public Custom asCustom() { throw new IllegalStateException("Not a custom type, but a collection one"); } /** * The type of lists. @@ -134,7 +122,7 @@ public static class List extends Collection { * @param elementsType the type of the elements of the list. */ public List(DataType elementsType) { - super(Type.LIST); + super(Kind.LIST); this.elementsType = elementsType; } @@ -165,7 +153,7 @@ public static class Set extends Collection { * @param elementsType the type of the elements of the set. 
*/ public Set(DataType elementsType) { - super(Type.SET); + super(Kind.SET); this.elementsType = elementsType; } @@ -198,7 +186,7 @@ public static class Map extends Collection { * @param valuesType the type of the keys of the map. */ public Map(DataType keysType, DataType valuesType) { - super(Type.MAP); + super(Kind.MAP); this.keysType = keysType; this.valuesType = valuesType; } @@ -227,17 +215,4 @@ public String toString() { } } } - - /** - * A used defined custom type. - */ - public static class Custom implements DataType { - // TODO - - public Kind getKind() { return Kind.CUSTOM; } - - public Native asNative() { throw new IllegalStateException("Not a native type, but a custom one"); } - public Collection asCollection() { throw new IllegalStateException("Not a collection type, but a custom one"); } - public Custom asCustom() { return this; } - } } From 76b2e7d775db98ad919ac22b2e865da97e29ca70 Mon Sep 17 00:00:00 2001 From: Sylvain Lebresne Date: Tue, 16 Oct 2012 10:15:18 +0200 Subject: [PATCH 046/719] Fix thread safety of StreamIdHandler --- .../com/datastax/driver/core/Connection.java | 2 +- .../driver/core/StreamIdGenerator.java | 69 +++++++++++++++++++ .../datastax/driver/core/StreamIdHandler.java | 31 --------- .../driver/core/StreamIdGeneratorTest.java | 36 ++++++++++ 4 files changed, 106 insertions(+), 32 deletions(-) create mode 100644 driver-core/src/main/java/com/datastax/driver/core/StreamIdGenerator.java delete mode 100644 driver-core/src/main/java/com/datastax/driver/core/StreamIdHandler.java create mode 100644 driver-core/src/test/java/com/datastax/driver/core/StreamIdGeneratorTest.java diff --git a/driver-core/src/main/java/com/datastax/driver/core/Connection.java b/driver-core/src/main/java/com/datastax/driver/core/Connection.java index 52bd9881fc3..40018e83e11 100644 --- a/driver-core/src/main/java/com/datastax/driver/core/Connection.java +++ b/driver-core/src/main/java/com/datastax/driver/core/Connection.java @@ -326,7 +326,7 @@ public DefaultResponseHandler defaultHandler() { // dispatcher that assume synchronous? private class Dispatcher extends SimpleChannelUpstreamHandler { - public final StreamIdHandler streamIdHandler = new StreamIdHandler(); + public final StreamIdGenerator streamIdHandler = new StreamIdGenerator(); private final ConcurrentMap pending = new ConcurrentHashMap(); public void add(ResponseHandler handler) { diff --git a/driver-core/src/main/java/com/datastax/driver/core/StreamIdGenerator.java b/driver-core/src/main/java/com/datastax/driver/core/StreamIdGenerator.java new file mode 100644 index 00000000000..8b6cd036e92 --- /dev/null +++ b/driver-core/src/main/java/com/datastax/driver/core/StreamIdGenerator.java @@ -0,0 +1,69 @@ +package com.datastax.driver.core; + +import java.util.BitSet; +import java.util.concurrent.atomic.AtomicLongArray; + +/** + * Handle assigning stream id to message. + */ +class StreamIdGenerator { + + private static final long MAX_UNSIGNED_LONG = -1L; + + // Stream IDs are one byte long, signed and we only handle positive values + // (negative stream IDs are for server side initiated streams). So we have + // 128 different stream IDs and two longs are enough. 
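+    // (To make the layout concrete: a set bit marks a free stream id, so
+    // id 70 lives in word 70 / 64 = 1 at bit 70 % 64 = 6; next() claims the
+    // lowest set bit of word 0 first and only falls back to word 1, adding
+    // an offset of 64, once the first 64 ids are all in use.)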
+ private final AtomicLongArray bits = new AtomicLongArray(2); + + public StreamIdGenerator() { + bits.set(0, MAX_UNSIGNED_LONG); + bits.set(1, MAX_UNSIGNED_LONG); + } + + public int next() { + int id = atomicGetAndSetFirstAvailable(0); + if (id >= 0) + return id; + + id = atomicGetAndSetFirstAvailable(1); + if (id >= 0) + return 64 + id; + + // TODO: Throw a BusyConnectionException and handle it in the connection pool + throw new IllegalStateException(); + } + + public void release(int streamId) { + if (streamId < 64) { + atomicClear(0, streamId); + } else { + atomicClear(1, streamId - 64); + } + } + + // Returns >= 0 if found and set an id, -1 if no bits are available. + public int atomicGetAndSetFirstAvailable(int idx) { + while (true) { + long l = bits.get(idx); + if (l == 0) + return -1; + + // Find the position of the right-most 1-bit + int id = Long.numberOfTrailingZeros(Long.lowestOneBit(l)); + if (bits.compareAndSet(idx, l, l ^ mask(id))) + return id; + } + } + + public void atomicClear(int idx, int toClear) { + while (true) { + long l = bits.get(idx); + if (bits.compareAndSet(idx, l, l | mask(toClear))) + return; + } + } + + private static long mask(int id) { + return 1L << id; + } +} diff --git a/driver-core/src/main/java/com/datastax/driver/core/StreamIdHandler.java b/driver-core/src/main/java/com/datastax/driver/core/StreamIdHandler.java deleted file mode 100644 index c1cf830573d..00000000000 --- a/driver-core/src/main/java/com/datastax/driver/core/StreamIdHandler.java +++ /dev/null @@ -1,31 +0,0 @@ -package com.datastax.driver.core; - -import java.util.BitSet; -import java.util.concurrent.atomic.AtomicInteger; - -/** - * Handle assigning stream id to message. - */ -class StreamIdHandler { - - // Stream ids are one byte long, signed and we only handle positive values ourselves. - private static final int STREAM_ID_COUNT = 128; - - // Keep one bit to know which one is in use. 
- private final BitSet usedIds = new BitSet(STREAM_ID_COUNT); - private final AtomicInteger idx = new AtomicInteger(0); - - public int next() { - int next = idx.getAndIncrement() % STREAM_ID_COUNT; - // Note: we could be fancier, and "search" for the next available idx, - // though that could be race prone, so doing the simplest thing for now - if (usedIds.get(next)) - // TODO: Throw a BusyConnectionException and handle it in the connection pool - throw new IllegalStateException(); - return next; - } - - public void release(int streamId) { - usedIds.clear(streamId); - } -} diff --git a/driver-core/src/test/java/com/datastax/driver/core/StreamIdGeneratorTest.java b/driver-core/src/test/java/com/datastax/driver/core/StreamIdGeneratorTest.java new file mode 100644 index 00000000000..fc8168ab951 --- /dev/null +++ b/driver-core/src/test/java/com/datastax/driver/core/StreamIdGeneratorTest.java @@ -0,0 +1,36 @@ +package com.datastax.driver.core; + +import org.junit.Test; +import static junit.framework.Assert.*; + +public class StreamIdGeneratorTest { + + @Test + public void SimpleGenIdTest() { + + StreamIdGenerator generator = new StreamIdGenerator(); + + assertEquals(0, generator.next()); + assertEquals(1, generator.next()); + generator.release(0); + assertEquals(0, generator.next()); + assertEquals(2, generator.next()); + assertEquals(3, generator.next()); + generator.release(1); + assertEquals(1, generator.next()); + assertEquals(4, generator.next()); + + for (int i = 5; i < 128; i++) + assertEquals(i, generator.next()); + + generator.release(100); + assertEquals(100, generator.next()); + + try { + generator.next(); + fail("No more streamId should be available"); + } catch (IllegalStateException e) { + // Ok, expected + } + } +} From 5609cbe4029aefac204a975e344f472cca8b16fc Mon Sep 17 00:00:00 2001 From: Sylvain Lebresne Date: Tue, 16 Oct 2012 19:36:44 +0200 Subject: [PATCH 047/719] Handle exceptions correctly in prepareStatement --- .../com/datastax/driver/core/ResultSet.java | 14 +++-- .../com/datastax/driver/core/Session.java | 58 ++++++++++++------- 2 files changed, 45 insertions(+), 27 deletions(-) diff --git a/driver-core/src/main/java/com/datastax/driver/core/ResultSet.java b/driver-core/src/main/java/com/datastax/driver/core/ResultSet.java index a310cff3470..c36f64137d2 100644 --- a/driver-core/src/main/java/com/datastax/driver/core/ResultSet.java +++ b/driver-core/src/main/java/com/datastax/driver/core/ResultSet.java @@ -250,7 +250,7 @@ public ResultSet getUninterruptibly() throws NoHostAvailableException, QueryExec } } } catch (ExecutionException e) { - extractCause(e); + extractCauseFromExecutionException(e); throw new AssertionError(); } } @@ -293,13 +293,16 @@ public ResultSet getUninterruptibly(long timeout, TimeUnit unit) throws NoHostAv } } } catch (ExecutionException e) { - extractCause(e); + extractCauseFromExecutionException(e); throw new AssertionError(); } } - private static void extractCause(ExecutionException e) throws NoHostAvailableException, QueryExecutionException { - Throwable cause = e.getCause(); + static void extractCauseFromExecutionException(ExecutionException e) throws NoHostAvailableException, QueryExecutionException { + extractCause(e.getCause()); + } + + static void extractCause(Throwable cause) throws NoHostAvailableException, QueryExecutionException { if (cause instanceof NoHostAvailableException) throw (NoHostAvailableException)cause; else if (cause instanceof QueryExecutionException) @@ -310,8 +313,9 @@ else if (cause instanceof 
DriverUncheckedException) throw new DriverInternalError("Unexpected exception thrown", cause); } + // TODO: Convert to some internal exception - private Exception convertException(org.apache.cassandra.exceptions.TransportException te) { + static Exception convertException(org.apache.cassandra.exceptions.TransportException te) { switch (te.code()) { case SERVER_ERROR: diff --git a/driver-core/src/main/java/com/datastax/driver/core/Session.java b/driver-core/src/main/java/com/datastax/driver/core/Session.java index 84b62e22db6..49569f7b51c 100644 --- a/driver-core/src/main/java/com/datastax/driver/core/Session.java +++ b/driver-core/src/main/java/com/datastax/driver/core/Session.java @@ -121,16 +121,14 @@ public ResultSet.Future executeAsync(CQLQuery query) { * * @param query the CQL query to prepare * @return the prepared statement corresponding to {@code query}. + * + * @throws NoHostAvailableException if no host in the cluster can be + * contacted successfully to execute this query. */ - public PreparedStatement prepare(String query) { - // TODO: Deal with exceptions - try { - Connection.Future future = new Connection.Future(new PrepareMessage(query)); - manager.execute(future); - return toPreparedStatement(query, future); - } catch (Exception e) { - throw new RuntimeException(e); - } + public PreparedStatement prepare(String query) throws NoHostAvailableException { + Connection.Future future = new Connection.Future(new PrepareMessage(query)); + manager.execute(future); + return toPreparedStatement(query, future); } /** @@ -139,9 +137,12 @@ public PreparedStatement prepare(String query) { * @param query the CQL query to prepare * @return the prepared statement corresponding to {@code query}. * + * @throws NoHostAvailableException if no host in the cluster can be + * contacted successfully to execute this query. 
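+     * (For reference, this overload is strictly equivalent to calling
+     * {@code prepare(query.toString())}.)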
+ * * @see #prepare(String) */ - public PreparedStatement prepare(CQLQuery query) { + public PreparedStatement prepare(CQLQuery query) throws NoHostAvailableException { return prepare(query.toString()); } @@ -184,9 +185,24 @@ public ResultSet.Future executePreparedAsync(BoundStatement stmt) { return manager.executeQuery(new ExecuteMessage(stmt.statement.id, Arrays.asList(stmt.values))); } - private PreparedStatement toPreparedStatement(String query, Connection.Future future) { + private PreparedStatement toPreparedStatement(String query, Connection.Future future) throws NoHostAvailableException { + try { - Message.Response response = future.get(); + Message.Response response = null; + try { + while (response == null) { + try { + response = future.get(); + } catch (InterruptedException e) { + // TODO: decide wether we want to expose Interrupted exceptions or not + } + } + } catch (ExecutionException e) { + ResultSet.Future.extractCauseFromExecutionException(e); + throw new AssertionError(); + } + + assert response != null; switch (response.type) { case RESULT: ResultMessage rm = (ResultMessage)response; @@ -196,20 +212,18 @@ private PreparedStatement toPreparedStatement(String query, Connection.Future fu manager.cluster.manager.prepare(pmsg.statementId, query, future.getAddress()); return PreparedStatement.fromMessage(pmsg); default: - throw new DriverInternalError(String.format("%s response received when prepared statement received was expected", rm.kind)); + throw new DriverInternalError(String.format("%s response received when prepared statement was expected", rm.kind)); } case ERROR: - // TODO: handle errors - logger.info("Got " + response); - return null; + ResultSet.Future.extractCause(ResultSet.Future.convertException(((ErrorMessage)response).error)); + break; default: - // TODO: handle errors (set the connection to defunct as this mean it is in a bad state) - logger.info("Got " + response); - return null; + throw new DriverInternalError(String.format("%s response received when prepared statement was expected", response.type)); } - } catch (Exception e) { - // TODO: do better - throw new RuntimeException(e); + throw new AssertionError(); + } catch (QueryExecutionException e) { + // Preparing a statement cannot throw any of the QueryExecutionException + throw new DriverInternalError("Received unexpected QueryExecutionException while preparing statement", e); } } From a79b610d6211a9af6500b9fbccff0af445342f48 Mon Sep 17 00:00:00 2001 From: Sylvain Lebresne Date: Tue, 16 Oct 2012 19:37:50 +0200 Subject: [PATCH 048/719] Add dc/rack info to Host. 
Incomplete until #4814 --- .../com/datastax/driver/core/Cluster.java | 3 +- .../java/com/datastax/driver/core/Codec.java | 7 ++-- .../datastax/driver/core/ColumnMetadata.java | 11 +++--- .../driver/core/ControlConnection.java | 36 ++++++++++++------- .../java/com/datastax/driver/core/Host.java | 36 +++++++++++++++++++ 5 files changed, 69 insertions(+), 24 deletions(-) diff --git a/driver-core/src/main/java/com/datastax/driver/core/Cluster.java b/driver-core/src/main/java/com/datastax/driver/core/Cluster.java index 190a17e995f..937fb6582f6 100644 --- a/driver-core/src/main/java/com/datastax/driver/core/Cluster.java +++ b/driver-core/src/main/java/com/datastax/driver/core/Cluster.java @@ -392,10 +392,11 @@ public void onRemove(Host host) { s.manager.onRemove(host); } - public void addHost(InetSocketAddress address, boolean signal) { + public Host addHost(InetSocketAddress address, boolean signal) { Host newHost = metadata.add(address); if (newHost != null && signal) onAdd(newHost); + return newHost; } public void removeHost(Host host) { diff --git a/driver-core/src/main/java/com/datastax/driver/core/Codec.java b/driver-core/src/main/java/com/datastax/driver/core/Codec.java index d3f50941445..5793e6db53d 100644 --- a/driver-core/src/main/java/com/datastax/driver/core/Codec.java +++ b/driver-core/src/main/java/com/datastax/driver/core/Codec.java @@ -7,6 +7,7 @@ import java.util.*; import com.datastax.driver.core.DataType; +import com.datastax.driver.core.exceptions.DriverInternalError; import org.apache.cassandra.cql3.ColumnSpecification; import org.apache.cassandra.db.marshal.*; @@ -105,12 +106,10 @@ public static DataType rawTypeToDataType(AbstractType rawType) { DataType mapValues = rawTypeToDataType(mt.values); return new DataType.Collection.Map(mapKeys, mapValues); default: - throw new RuntimeException("Unknown collection type"); + throw new DriverInternalError("Unknown collection type"); } } - - // TODO: handle custom - return null; + throw new DriverInternalError("Unknown type: " + rawType); } public static boolean isCompatible(DataType.Native type, Class klass) { diff --git a/driver-core/src/main/java/com/datastax/driver/core/ColumnMetadata.java b/driver-core/src/main/java/com/datastax/driver/core/ColumnMetadata.java index 6751858815f..fed1d36889f 100644 --- a/driver-core/src/main/java/com/datastax/driver/core/ColumnMetadata.java +++ b/driver-core/src/main/java/com/datastax/driver/core/ColumnMetadata.java @@ -88,9 +88,9 @@ public static class IndexMetadata { private final ColumnMetadata column; private final String name; - // It doesn't make sense to expose the index type for CQL3 at this - // point (the notion don't exist yet in CQL), but keeping it internally - // so we don't forget it exists + // It doesn't make sense to expose the index type, not the index + // options for CQL3 at this point since the notion doesn't exist yet in CQL3. + // But keeping the variable internally so we don't forget it exists. private final String type; private final Map options = new HashMap(); @@ -121,8 +121,8 @@ public String getName() { /** * Returns a CQL query representing this index. * - * This method returns a single 'CREATE INDEX' query with the options - * corresponding to this index definition. + * This method returns a single 'CREATE INDEX' query corresponding to + * this index definition. * * @return the 'CREATE INDEX' query corresponding to this index. 
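+     * For example, a query of the form
+     * {@code CREATE INDEX myIndex ON myTable (myColumn)} (the names used
+     * here are purely illustrative).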
*/ @@ -140,7 +140,6 @@ private static IndexMetadata build(ColumnMetadata column, CQLRow row) { return null; IndexMetadata index = new IndexMetadata(column, type, row.getString(INDEX_NAME)); - // TODO: handle options return index; } } diff --git a/driver-core/src/main/java/com/datastax/driver/core/ControlConnection.java b/driver-core/src/main/java/com/datastax/driver/core/ControlConnection.java index f9029aa3afe..ccffc3054fb 100644 --- a/driver-core/src/main/java/com/datastax/driver/core/ControlConnection.java +++ b/driver-core/src/main/java/com/datastax/driver/core/ControlConnection.java @@ -24,7 +24,9 @@ class ControlConnection implements Host.StateListener { private static final String SELECT_COLUMN_FAMILIES = "SELECT * FROM system.schema_columnfamilies"; private static final String SELECT_COLUMNS = "SELECT * FROM system.schema_columns"; - private static final String SELECT_PEERS = "SELECT peer FROM system.peers"; + private static final String SELECT_PEERS = "SELECT peer, data_center, rack FROM system.peers"; + // TODO: We need to be able to fetch the local datacenter + //private static final String SELECT_LOCAL = "SELECT peer, data_center, rack FROM system.peers"; private final AtomicReference connectionRef = new AtomicReference(); @@ -167,28 +169,36 @@ private void refreshNodeList(Connection connection) { // Make sure we're up to date on node list try { ResultSet.Future peersFuture = new ResultSet.Future(null, new QueryMessage(SELECT_PEERS)); + //ResultSet.Future localFuture = new ResultSet.Future(null, new QueryMessage(SELECT_LOCAL)); connection.write(peersFuture.callback); - Set knownHosts = new HashSet(); - for (Host host : cluster.metadata.allHosts()) - knownHosts.add(host.getAddress()); + List foundHosts = new ArrayList(); + List dcs = new ArrayList(); + List racks = new ArrayList(); - Set foundHosts = new HashSet(); - // The node on which we're connected won't be in the peer table, so let's just add it manually - foundHosts.add(connection.address); for (CQLRow row : peersFuture.get()) { - if (!row.isNull("peer")) + if (!row.isNull("peer")) { // TODO: find what port people are using foundHosts.add(new InetSocketAddress(row.getInet("peer"), Cluster.DEFAULT_PORT)); + dcs.add(row.getString("data_center")); + dcs.add(row.getString("rack")); + } } - // Adds all those we don't know about - for (InetSocketAddress address : Sets.difference(foundHosts, knownHosts)) - cluster.addHost(address, true); + for (int i = 0; i < foundHosts.size(); i++) { + Host host = cluster.metadata.getHost(foundHosts.get(i)); + if (host == null) { + // We don't know that node, add it. 
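+                    // (This relies on the Cluster.addHost change above:
+                    // signal=true fires the onAdd notification right away,
+                    // and addHost now returns the Host so its dc/rack info
+                    // can be set just below.)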
+ host = cluster.addHost(foundHosts.get(i), true); + } + host.setLocationInfo(dcs.get(i), racks.get(i)); + } // Removes all those that seems to have been removed (since we lost the control connection) - for (InetSocketAddress address : Sets.difference(knownHosts, foundHosts)) - cluster.removeHost(cluster.metadata.getHost(address)); + Set foundHostsSet = new HashSet(foundHosts); + for (Host host : cluster.metadata.allHosts()) + if (!host.getAddress().equals(connection.address) && !foundHostsSet.contains(host.getAddress())) + cluster.removeHost(host); } catch (ConnectionException e) { logger.debug(String.format("[Control connection] Connection error when refeshing hosts list (%s)", e.getMessage())); diff --git a/driver-core/src/main/java/com/datastax/driver/core/Host.java b/driver-core/src/main/java/com/datastax/driver/core/Host.java index 2ebebc4e900..0c8560fe4a8 100644 --- a/driver-core/src/main/java/com/datastax/driver/core/Host.java +++ b/driver-core/src/main/java/com/datastax/driver/core/Host.java @@ -16,6 +16,9 @@ public class Host { private final InetSocketAddress address; private final HealthMonitor monitor; + private volatile String datacenter; + private volatile String rack; + // Tracks reconnection attempts to that host so we avoid adding multiple tasks final AtomicReference reconnectionAttempt = new AtomicReference(); @@ -29,6 +32,11 @@ public class Host { this.monitor = new HealthMonitor(policy.create(this)); } + void setLocationInfo(String datacenter, String rack) { + this.datacenter = datacenter; + this.rack = rack; + } + /** * Returns the node address. * @@ -38,6 +46,34 @@ public InetSocketAddress getAddress() { return address; } + /** + * Returns the name of the datacenter this host is part of. + * + * The returned datacenter name is the one as known by Cassandra. Also note + * that it is possible for this information to not be available. In that + * case this method returns {@code null} and caller should always expect + * that possibility. + * + * @return the Cassandra datacenter name. + */ + public String getDatacenter() { + return datacenter; + } + + /** + * Returns the name of the rack this host is part of. + * + * The returned rack name is the one as known by Cassandra. Also note that + * it is possible for this information to not be available. In that case + * this method returns {@code null} and caller should always expect that + * possibility. + * + * @return the Cassandra rack name. + */ + public String getRack() { + return rack; + } + /** * Returns the health monitor for this host. 
* From 06e4a5022dcca50615a2b3e8ab37d92d12534d9e Mon Sep 17 00:00:00 2001 From: Sylvain Lebresne Date: Wed, 17 Oct 2012 18:24:41 +0200 Subject: [PATCH 049/719] Updates for move of the CL to the protocol level --- .../com/datastax/driver/core/CQLQuery.java | 9 -- .../com/datastax/driver/core/Connection.java | 2 +- .../driver/core/ConsistencyLevel.java | 19 +++ .../driver/core/ControlConnection.java | 8 +- .../com/datastax/driver/core/Session.java | 141 +++++++++++------- 5 files changed, 111 insertions(+), 68 deletions(-) delete mode 100644 driver-core/src/main/java/com/datastax/driver/core/CQLQuery.java diff --git a/driver-core/src/main/java/com/datastax/driver/core/CQLQuery.java b/driver-core/src/main/java/com/datastax/driver/core/CQLQuery.java deleted file mode 100644 index efa8f67684e..00000000000 --- a/driver-core/src/main/java/com/datastax/driver/core/CQLQuery.java +++ /dev/null @@ -1,9 +0,0 @@ -package com.datastax.driver.core; - -/** - * A marker interface for classes representing a CQL query. - * - * This interface require no specific method, but the toString() method of a - * class implementing CQLQuery must return a CQL query string. - */ -public interface CQLQuery {} diff --git a/driver-core/src/main/java/com/datastax/driver/core/Connection.java b/driver-core/src/main/java/com/datastax/driver/core/Connection.java index 40018e83e11..d168b423b76 100644 --- a/driver-core/src/main/java/com/datastax/driver/core/Connection.java +++ b/driver-core/src/main/java/com/datastax/driver/core/Connection.java @@ -151,7 +151,7 @@ public void setKeyspace(String keyspace) throws ConnectionException { try { logger.trace(String.format("[%s] Setting keyspace %s", name, keyspace)); - Message.Response response = write(new QueryMessage("USE " + keyspace)).get(); + Message.Response response = write(new QueryMessage("USE " + keyspace, ConsistencyLevel.DEFAULT_CASSANDRA_CL)).get(); switch (response.type) { case RESULT: this.keyspace = keyspace; diff --git a/driver-core/src/main/java/com/datastax/driver/core/ConsistencyLevel.java b/driver-core/src/main/java/com/datastax/driver/core/ConsistencyLevel.java index 7e947a191c0..fcf67c7995f 100644 --- a/driver-core/src/main/java/com/datastax/driver/core/ConsistencyLevel.java +++ b/driver-core/src/main/java/com/datastax/driver/core/ConsistencyLevel.java @@ -11,6 +11,8 @@ public enum ConsistencyLevel LOCAL_QUORUM, EACH_QUORUM; + static org.apache.cassandra.db.ConsistencyLevel DEFAULT_CASSANDRA_CL = org.apache.cassandra.db.ConsistencyLevel.ONE; + static ConsistencyLevel from(org.apache.cassandra.db.ConsistencyLevel cl) { switch (cl) { case ANY: return ANY; @@ -24,4 +26,21 @@ static ConsistencyLevel from(org.apache.cassandra.db.ConsistencyLevel cl) { } throw new AssertionError(); } + + static org.apache.cassandra.db.ConsistencyLevel toCassandraCL(ConsistencyLevel cl) { + if (cl == null) + return org.apache.cassandra.db.ConsistencyLevel.ONE; + + switch (cl) { + case ANY: return org.apache.cassandra.db.ConsistencyLevel.ANY; + case ONE: return org.apache.cassandra.db.ConsistencyLevel.ONE; + case TWO: return org.apache.cassandra.db.ConsistencyLevel.TWO; + case THREE: return org.apache.cassandra.db.ConsistencyLevel.THREE; + case QUORUM: return org.apache.cassandra.db.ConsistencyLevel.QUORUM; + case ALL: return org.apache.cassandra.db.ConsistencyLevel.ALL; + case LOCAL_QUORUM: return org.apache.cassandra.db.ConsistencyLevel.LOCAL_QUORUM; + case EACH_QUORUM: return org.apache.cassandra.db.ConsistencyLevel.EACH_QUORUM; + } + throw new AssertionError(); + } } diff --git 
a/driver-core/src/main/java/com/datastax/driver/core/ControlConnection.java b/driver-core/src/main/java/com/datastax/driver/core/ControlConnection.java index ccffc3054fb..7866a3283c2 100644 --- a/driver-core/src/main/java/com/datastax/driver/core/ControlConnection.java +++ b/driver-core/src/main/java/com/datastax/driver/core/ControlConnection.java @@ -141,10 +141,10 @@ private void refreshSchema(Connection connection, String keyspace, String table) } ResultSet.Future ksFuture = table == null - ? new ResultSet.Future(null, new QueryMessage(SELECT_KEYSPACES + whereClause)) + ? new ResultSet.Future(null, new QueryMessage(SELECT_KEYSPACES + whereClause, ConsistencyLevel.DEFAULT_CASSANDRA_CL)) : null; - ResultSet.Future cfFuture = new ResultSet.Future(null, new QueryMessage(SELECT_COLUMN_FAMILIES + whereClause)); - ResultSet.Future colsFuture = new ResultSet.Future(null, new QueryMessage(SELECT_COLUMNS + whereClause)); + ResultSet.Future cfFuture = new ResultSet.Future(null, new QueryMessage(SELECT_COLUMN_FAMILIES + whereClause, ConsistencyLevel.DEFAULT_CASSANDRA_CL)); + ResultSet.Future colsFuture = new ResultSet.Future(null, new QueryMessage(SELECT_COLUMNS + whereClause, ConsistencyLevel.DEFAULT_CASSANDRA_CL)); if (ksFuture != null) connection.write(ksFuture.callback); @@ -168,7 +168,7 @@ private void refreshSchema(Connection connection, String keyspace, String table) private void refreshNodeList(Connection connection) { // Make sure we're up to date on node list try { - ResultSet.Future peersFuture = new ResultSet.Future(null, new QueryMessage(SELECT_PEERS)); + ResultSet.Future peersFuture = new ResultSet.Future(null, new QueryMessage(SELECT_PEERS, ConsistencyLevel.DEFAULT_CASSANDRA_CL)); //ResultSet.Future localFuture = new ResultSet.Future(null, new QueryMessage(SELECT_LOCAL)); connection.write(peersFuture.callback); diff --git a/driver-core/src/main/java/com/datastax/driver/core/Session.java b/driver-core/src/main/java/com/datastax/driver/core/Session.java index 49569f7b51c..e2fffbde176 100644 --- a/driver-core/src/main/java/com/datastax/driver/core/Session.java +++ b/driver-core/src/main/java/com/datastax/driver/core/Session.java @@ -38,16 +38,11 @@ public class Session { /** * Execute the provided query. * - * This method blocks until at least some result has been received from the - * database. However, for SELECT queries, it does not guarantee that the - * result has been received in full. But it does guarantee that some - * response has been received from the database, and in particular - * guarantee that if the request is invalid, an exception will be thrown - * by this method. + * This method is a shortcut for {@code execute(query, null)}. * - * @param query the CQL query to execute - * @return the result of the query. That result will never be null be can - * be empty and will be for any non SELECT query. + * @param query the CQL query to execute. + * @return the result of the query. That result will never be null but can + * be empty (and will be for any non SELECT query). * * @throws NoHostAvailableException if no host in the cluster can be * contacted successfully to execute this query. @@ -56,28 +51,49 @@ public class Session { * the query with the requested consistency level successfully. */ public ResultSet execute(String query) throws NoHostAvailableException, QueryExecutionException { - return executeAsync(query).getUninterruptibly(); + return execute(query, null); } /** * Execute the provided query. * - * This method works exactly as {@link #execute(String)}. 
+ * This method blocks until at least some result has been received from the + * database. However, for SELECT queries, it does not guarantee that the + * result has been received in full. But it does guarantee that some + * response has been received from the database, and in particular + * guarantee that if the request is invalid, an exception will be thrown + * by this method. * - * @param query the CQL query to execute - * @return the result of the query. That result will never be null be can - * be empty and will be for any non SELECT query. + * @param query the CQL query to execute. + * @param consistency the consistency level for the operation. If the query + * doesn't need a consistency level (USE, CREATE, ALTER, DROP and TRUNCATE + * queries for instance), this argument is ignored and null can be + * provided. However, if null is provided while the query requires a + * consistency level, the default consistency level of ONE is used. + * @return the result of the query. That result will never be null but can + * be empty (and will be for any non SELECT query). * * @throws NoHostAvailableException if no host in the cluster can be * contacted successfully to execute this query. * @throws QueryExecutionException if the query triggered an execution * exception, i.e. an exception thrown by Cassandra when it cannot execute * the query with the requested consistency level successfully. + */ + public ResultSet execute(String query, ConsistencyLevel consistency) throws NoHostAvailableException, QueryExecutionException { + return executeAsync(query, consistency).getUninterruptibly(); + } + + /** + * Execute the provided query asynchronously. * - * @see #execute(String) + * This method is a shortcut for {@code executeAsync(query, null)}. + * + * @param query the CQL query to execute. + * @return the result of the query. That result will never be null but can + * be empty (and will be for any non SELECT query). */ - public ResultSet execute(CQLQuery query) throws NoHostAvailableException, QueryExecutionException { - return execute(query.toString()); + public ResultSet.Future executeAsync(String query) { + return executeAsync(query, null); } /** @@ -93,27 +109,17 @@ public ResultSet execute(CQLQuery query) throws NoHostAvailableException, QueryE * DELETE), you will need to access the ResultSet (i.e. call any of its * method) to make sure the query was successful. * - * @param query the CQL query to execute - * @return the result of the query. That result will never be null be can - * be empty and will be for any non SELECT query. - */ - public ResultSet.Future executeAsync(String query) { - return manager.executeQuery(new QueryMessage(query)); - } - - /** - * Execute the provided query asynchronously. - * - * This method works exactly as {@link #executeAsync(String)}. - * - * @param query the CQL query to execute - * @return the result of the query. That result will never be null be can - * be empty and will be for any non SELECT query. - * - * @see #executeAsync(String) + * @param query the CQL query to execute. + * @param consistency the consistency level for the operation. If the query + * doesn't need a consistency level (USE, CREATE, ALTER, DROP and TRUNCATE + * queries for instance), this argument is ignored and null can be + * provided. However, if null is provided while the query requires a + * consistency level, the default consistency level of ONE is used. + * @return the result of the query. That result will never be null but can + * be empty (and will be for any non SELECT query). 
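+     * For example (the table and variable names here are illustrative):
+     * <pre>
+     *   ResultSet.Future future = session.executeAsync("SELECT * FROM users", ConsistencyLevel.QUORUM);
+     *   ResultSet rows = future.getUninterruptibly();
+     * </pre>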
*/ - public ResultSet.Future executeAsync(CQLQuery query) { - return executeAsync(query.toString()); + public ResultSet.Future executeAsync(String query, ConsistencyLevel consistency) { + return manager.executeQuery(new QueryMessage(query, ConsistencyLevel.toCassandraCL(consistency))); } /** @@ -132,18 +138,19 @@ public PreparedStatement prepare(String query) throws NoHostAvailableException { } /** - * Prepare the provided query. + * Execute a prepared statement that had values provided for its bound + * variables. * - * @param query the CQL query to prepare - * @return the prepared statement corresponding to {@code query}. + * This method is a shortcut for {@code executePrepared(stmt, null)}. * - * @throws NoHostAvailableException if no host in the cluster can be - * contacted successfully to execute this query. + * @param stmt the prepared statement with values for its bound variables. + * @return the result of the query. That result will never be null but can + * be empty (and will be for any non SELECT query). * - * @see #prepare(String) + * @throws IllegalStateException if {@code !stmt.ready()}. */ - public PreparedStatement prepare(CQLQuery query) throws NoHostAvailableException { - return prepare(query.toString()); + public ResultSet executePrepared(BoundStatement stmt) throws NoHostAvailableException, QueryExecutionException { + return executePrepared(stmt); } /** @@ -155,13 +162,34 @@ public PreparedStatement prepare(CQLQuery query) throws NoHostAvailableException * database. * * @param stmt the prepared statement with values for its bound variables. - * @return the result of the query. That result will never be null be can - * be empty and will be for any non SELECT query. + * @param consistency the consistency level for the operation. If the query + * doesn't need a consistency level (USE, CREATE, ALTER, DROP and TRUNCATE + * queries for instance), this argument is ignored and null can be + * provided. However, if null is provided while the query requires a + * consistency level, the default consistency level of ONE is used. + * @return the result of the query. That result will never be null but can + * be empty (and will be for any non SELECT query). * * @throws IllegalStateException if {@code !stmt.ready()}. */ - public ResultSet executePrepared(BoundStatement stmt) throws NoHostAvailableException, QueryExecutionException { - return executePreparedAsync(stmt).getUninterruptibly(); + public ResultSet executePrepared(BoundStatement stmt, ConsistencyLevel consistency) throws NoHostAvailableException, QueryExecutionException { + return executePreparedAsync(stmt, consistency).getUninterruptibly(); + } + + /** + * Execute a prepared statement that had values provided for its bound + * variables asynchronously. + * + * This method is a shortcut for {@code executePreparedAsync(stmt, null)}. + * + * @param stmt the prepared statement with values for its bound variables. + * @return the result of the query. That result will never be null but can + * be empty (and will be for any non SELECT query). + * + * @throws IllegalStateException if {@code !stmt.ready()}. + */ + public ResultSet.Future executePreparedAsync(BoundStatement stmt) { + return executePreparedAsync(stmt, null); } /** @@ -173,16 +201,21 @@ public ResultSet executePrepared(BoundStatement stmt) throws NoHostAvailableExce * the database. * * @param stmt the prepared statement with values for its bound variables. - * @return the result of the query. 
That result will never be null be can - * be empty and will be for any non SELECT query. + * @param consistency the consistency level for the operation. If the query + * doesn't need a consistency level (USE, CREATE, ALTER, DROP and TRUNCATE + * queries for instance), this argument is ignored and null can be + * provided. However, if null is provided while the query requires a + * consistency level, the default consistency level of ONE is used. + * @return the result of the query. That result will never be null but can + * be empty (and will be for any non SELECT query). * * @throws IllegalStateException if {@code !stmt.ready()}. */ - public ResultSet.Future executePreparedAsync(BoundStatement stmt) { + public ResultSet.Future executePreparedAsync(BoundStatement stmt, ConsistencyLevel consistency) { if (!stmt.isReady()) throw new IllegalStateException("Some bind variables haven't been bound in the provided statement"); - return manager.executeQuery(new ExecuteMessage(stmt.statement.id, Arrays.asList(stmt.values))); + return manager.executeQuery(new ExecuteMessage(stmt.statement.id, Arrays.asList(stmt.values), ConsistencyLevel.toCassandraCL(consistency))); } private PreparedStatement toPreparedStatement(String query, Connection.Future future) throws NoHostAvailableException { @@ -296,7 +329,7 @@ public void onRemove(Host host) { public void setKeyspace(String keyspace) throws NoHostAvailableException { try { - executeQuery(new QueryMessage("use " + keyspace)).get(); + executeQuery(new QueryMessage("use " + keyspace, ConsistencyLevel.DEFAULT_CASSANDRA_CL)).get(); } catch (InterruptedException e) { // TODO: do we want to handle interrupted exception in a better way? throw new DriverInternalError("Hey! I was waiting!", e); From d7ce4fcd61a204f7a2ee93bda5c979d76c82a2e2 Mon Sep 17 00:00:00 2001 From: Sylvain Lebresne Date: Thu, 18 Oct 2012 20:37:16 +0200 Subject: [PATCH 050/719] Refactor connection pooling to be (hopefully) better and datacenter aware --- .../java/com/datastax/driver/core/CQLRow.java | 2 +- .../com/datastax/driver/core/Cluster.java | 6 +- .../com/datastax/driver/core/Connection.java | 19 +- .../driver/core/ControlConnection.java | 18 +- .../java/com/datastax/driver/core/Host.java | 18 +- .../driver/core/HostConnectionPool.java | 324 ++++++++++------ .../datastax/driver/core/HostDistance.java | 24 ++ .../driver/core/LoadBalancingPolicy.java | 359 +++++++++++++++--- .../driver/core/RetryingCallback.java | 24 +- .../com/datastax/driver/core/Session.java | 31 +- .../com/datastax/driver/core/SessionTest.java | 62 +-- 11 files changed, 648 insertions(+), 239 deletions(-) create mode 100644 driver-core/src/main/java/com/datastax/driver/core/HostDistance.java diff --git a/driver-core/src/main/java/com/datastax/driver/core/CQLRow.java b/driver-core/src/main/java/com/datastax/driver/core/CQLRow.java index d57df122157..f9df27379a3 100644 --- a/driver-core/src/main/java/com/datastax/driver/core/CQLRow.java +++ b/driver-core/src/main/java/com/datastax/driver/core/CQLRow.java @@ -49,7 +49,7 @@ public ColumnDefinitions getColumnDefinitions() { */ public boolean isNull(int i) { metadata.checkBounds(i); - return data.get(i) != null; + return data.get(i) == null; } /** diff --git a/driver-core/src/main/java/com/datastax/driver/core/Cluster.java b/driver-core/src/main/java/com/datastax/driver/core/Cluster.java index 937fb6582f6..7cf03eb29f9 100644 --- a/driver-core/src/main/java/com/datastax/driver/core/Cluster.java +++ b/driver-core/src/main/java/com/datastax/driver/core/Cluster.java @@ -297,12 
+297,12 @@ class Manager implements Host.StateListener, Connection.DefaultResponseHandler {
         private final ControlConnection controlConnection;
 
         // TODO: make configurable
-        final LoadBalancingPolicy.Factory loadBalancingFactory = LoadBalancingPolicy.RoundRobin.Factory.INSTANCE;
+        final LoadBalancingPolicy.Factory loadBalancingFactory = LoadBalancingPolicy.DCAwareRoundRobin.Factory.create("dc1", 1);
+        //final LoadBalancingPolicy.Factory loadBalancingFactory = LoadBalancingPolicy.RoundRobin.Factory.INSTANCE;
 
         final ScheduledExecutorService reconnectionExecutor = Executors.newScheduledThreadPool(2, new NamedThreadFactory("Reconnection"));
         final ScheduledExecutorService scheduledTasksExecutor = Executors.newScheduledThreadPool(1, new NamedThreadFactory("Scheduled Tasks"));
 
-        // TODO: give a name to the threads of this executor
         final ExecutorService executor = Executors.newCachedThreadPool(new NamedThreadFactory("Cassandra Java Driver worker"));
 
         // All the queries that have been prepared (we keep them so we can
@@ -438,7 +438,7 @@ private void prepareAllQueries(Host host) {
             }
         }
 
-        // TODO: take a lot or something so that if a a getSchema() is called,
+        // TODO: take a lock or something so that if a getSchema() is called,
         // we wait for that to be finished. And maybe avoid multiple refresh at
         // the same time.
         public void submitSchemaRefresh(final String keyspace, final String table) {
diff --git a/driver-core/src/main/java/com/datastax/driver/core/Connection.java b/driver-core/src/main/java/com/datastax/driver/core/Connection.java
index d168b423b76..11dcb9cec46 100644
--- a/driver-core/src/main/java/com/datastax/driver/core/Connection.java
+++ b/driver-core/src/main/java/com/datastax/driver/core/Connection.java
@@ -26,6 +26,8 @@
  */
 class Connection extends org.apache.cassandra.transport.Connection {
 
+    public static final int MAX_STREAM_PER_CONNECTION = 128;
+
     private static final Logger logger = LoggerFactory.getLogger(Connection.class);
 
     // TODO: that doesn't belong here
@@ -44,7 +46,10 @@ public void closeAll() {}
     private final Factory factory;
     private final Dispatcher dispatcher = new Dispatcher();
 
-    private AtomicInteger inFlight = new AtomicInteger(0);
+    // Used by connection pooling to count how many requests are "in flight" on that connection.
+    public final AtomicInteger inFlight = new AtomicInteger(0);
+
+    private final AtomicInteger writer = new AtomicInteger(0);
 
     private volatile boolean isClosed;
     private volatile String keyspace;
@@ -71,7 +76,7 @@ private Connection(String name, InetSocketAddress address, Factory factory) thro
 
         ChannelFuture future = bootstrap.connect(address);
 
-        inFlight.incrementAndGet();
+        writer.incrementAndGet();
         try {
             // Wait until the connection attempt succeeds or fails.
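+            // (awaitUninterruptibly() blocks the calling thread; the writer
+            // counter bumped just above is what close() polls on, so that
+            // in-flight operations like this one drain before the connection
+            // is torn down.)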
this.channel = future.awaitUninterruptibly().getChannel(); @@ -81,7 +86,7 @@ private Connection(String name, InetSocketAddress address, Factory factory) thro throw new TransportException(address, "Cannot connect", future.getCause()); } } finally { - inFlight.decrementAndGet(); + writer.decrementAndGet(); } logger.trace(String.format("[%s] Connection opened successfully", name)); @@ -202,7 +207,7 @@ public void write(ResponseCallback callback) throws ConnectionException { request.attach(this); // We only support synchronous mode so far - inFlight.incrementAndGet(); + writer.incrementAndGet(); try { ResponseHandler handler = new ResponseHandler(dispatcher, callback); @@ -231,7 +236,7 @@ public void write(ResponseCallback callback) throws ConnectionException { logger.trace(String.format("[%s] request sent successfully", name)); } finally { - inFlight.decrementAndGet(); + writer.decrementAndGet(); } } @@ -248,7 +253,7 @@ public void close() { if (!isDefunct) { try { // Busy waiting, we just wait for request to be fully written, shouldn't take long - while (inFlight.get() > 0) + while (writer.get() > 0) Thread.sleep(10); } catch (InterruptedException e) { throw new RuntimeException(e); @@ -368,7 +373,7 @@ public void exceptionCaught(ChannelHandlerContext ctx, ExceptionEvent e) { logger.trace(String.format("[%s] connection error", name), e.getCause()); // Ignore exception while writting, this will be handled by write() directly - if (inFlight.get() > 0) + if (writer.get() > 0) return; defunct(new TransportException(address, "Unexpected exception triggered", e.getCause())); diff --git a/driver-core/src/main/java/com/datastax/driver/core/ControlConnection.java b/driver-core/src/main/java/com/datastax/driver/core/ControlConnection.java index 7866a3283c2..4c2edf9d97e 100644 --- a/driver-core/src/main/java/com/datastax/driver/core/ControlConnection.java +++ b/driver-core/src/main/java/com/datastax/driver/core/ControlConnection.java @@ -25,8 +25,7 @@ class ControlConnection implements Host.StateListener { private static final String SELECT_COLUMNS = "SELECT * FROM system.schema_columns"; private static final String SELECT_PEERS = "SELECT peer, data_center, rack FROM system.peers"; - // TODO: We need to be able to fetch the local datacenter - //private static final String SELECT_LOCAL = "SELECT peer, data_center, rack FROM system.peers"; + private static final String SELECT_LOCAL = "SELECT data_center, rack FROM system.local WHERE key='local'"; private final AtomicReference connectionRef = new AtomicReference(); @@ -118,7 +117,6 @@ private Connection tryConnect(Host host) throws ConnectionException { }); connection.write(new RegisterMessage(evs)); - logger.trace("[Control connection] Refreshing schema"); refreshSchema(connection, null, null); refreshNodeList(connection); return connection; @@ -169,8 +167,9 @@ private void refreshNodeList(Connection connection) { // Make sure we're up to date on node list try { ResultSet.Future peersFuture = new ResultSet.Future(null, new QueryMessage(SELECT_PEERS, ConsistencyLevel.DEFAULT_CASSANDRA_CL)); - //ResultSet.Future localFuture = new ResultSet.Future(null, new QueryMessage(SELECT_LOCAL)); + ResultSet.Future localFuture = new ResultSet.Future(null, new QueryMessage(SELECT_LOCAL, ConsistencyLevel.DEFAULT_CASSANDRA_CL)); connection.write(peersFuture.callback); + connection.write(localFuture.callback); List foundHosts = new ArrayList(); List dcs = new ArrayList(); @@ -181,7 +180,7 @@ private void refreshNodeList(Connection connection) { // TODO: find what port 
people are using foundHosts.add(new InetSocketAddress(row.getInet("peer"), Cluster.DEFAULT_PORT)); dcs.add(row.getString("data_center")); - dcs.add(row.getString("rack")); + racks.add(row.getString("rack")); } } @@ -200,6 +199,15 @@ private void refreshNodeList(Connection connection) { if (!host.getAddress().equals(connection.address) && !foundHostsSet.contains(host.getAddress())) cluster.removeHost(host); + // Update DC and rack for the one node we are connected to + Host host = cluster.metadata.getHost(connection.address); + // In theory host can't be null. However there is no point in risking a NPE in case we + // have a race between a node removal and this. + if (host != null) { + for (CQLRow row : localFuture.get()) + host.setLocationInfo(row.getString("data_center"), row.getString("rack")); + } + } catch (ConnectionException e) { logger.debug(String.format("[Control connection] Connection error when refeshing hosts list (%s)", e.getMessage())); reconnect(); diff --git a/driver-core/src/main/java/com/datastax/driver/core/Host.java b/driver-core/src/main/java/com/datastax/driver/core/Host.java index 0c8560fe4a8..105475fb9c4 100644 --- a/driver-core/src/main/java/com/datastax/driver/core/Host.java +++ b/driver-core/src/main/java/com/datastax/driver/core/Host.java @@ -115,15 +115,9 @@ public class HealthMonitor { private Set listeners = new CopyOnWriteArraySet(); private volatile boolean isUp; - // This is a hack (I did not find a much cleaner option) to not expose - // signalConnectionFailure publicly but still being able to call it - // from other packages (typically from HostConnectionPool). - final Signaler signaler; - HealthMonitor(ConvictionPolicy policy) { this.policy = policy; this.isUp = true; - this.signaler = new Signaler(); } /** @@ -176,13 +170,11 @@ void reset() { listener.onUp(Host.this); } - public class Signaler { - public boolean signalConnectionFailure(ConnectionException exception) { - boolean isDown = policy.addFailure(exception); - if (isDown) - setDown(); - return isDown; - } + boolean signalConnectionFailure(ConnectionException exception) { + boolean isDown = policy.addFailure(exception); + if (isDown) + setDown(); + return isDown; } } diff --git a/driver-core/src/main/java/com/datastax/driver/core/HostConnectionPool.java b/driver-core/src/main/java/com/datastax/driver/core/HostConnectionPool.java index 68635250318..c82e451248d 100644 --- a/driver-core/src/main/java/com/datastax/driver/core/HostConnectionPool.java +++ b/driver-core/src/main/java/com/datastax/driver/core/HostConnectionPool.java @@ -1,174 +1,260 @@ package com.datastax.driver.core; +import java.util.*; import java.util.concurrent.*; import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.AtomicInteger; +import java.util.concurrent.locks.*; import org.slf4j.Logger; import org.slf4j.LoggerFactory; +// TODO: We should allow changing the core pool size (i.e. 
have a method that +// adds new connection or trash existing one) class HostConnectionPool { - private static final Logger logger = LoggerFactory.getLogger(HostConnectionPool.class); - private final Host host; - private final Host.HealthMonitor.Signaler failureSignaler; - + public final Host host; + public volatile HostDistance hostDistance; private final Connection.Factory factory; + private final Configuration configuration; + private final Session.Manager manager; + private final List connections; + private final AtomicInteger open; private final AtomicBoolean isShutdown = new AtomicBoolean(); + private final Set trash = new CopyOnWriteArraySet(); - private final BlockingQueue available = new LinkedBlockingQueue(); - - private final AtomicInteger open = new AtomicInteger(0); - private final AtomicInteger borrowed = new AtomicInteger(0); - private final AtomicInteger waitingThreads = new AtomicInteger(0); - - private final Configuration configuration; + private final Lock waitLock = new ReentrantLock(true); + private final Condition hasAvailableConnection = waitLock.newCondition(); - // TODO: We could share that executor across pools - private final ExecutorService openExecutor = Executors.newCachedThreadPool(); private final Runnable newConnectionTask; - public HostConnectionPool(Host host, Host.HealthMonitor.Signaler signaler, Connection.Factory factory, Configuration configuration) { + public HostConnectionPool(Host host, HostDistance hostDistance, Connection.Factory factory, Configuration configuration, Session.Manager manager) throws ConnectionException { this.host = host; - this.failureSignaler = signaler; + this.hostDistance = hostDistance; this.factory = factory; this.configuration = configuration; + this.manager = manager; this.newConnectionTask = new Runnable() { public void run() { - // If when we execute there is still some waiting threads, create a connection - if (waitingThreads.get() > 0) - addConnectionIfUnderMaximum(); + addConnectionIfUnderMaximum(); } }; // Create initial core connections - for (int i = 0; i < configuration.coreConnections; i++) - if (!addConnection()) - break; + List l = new ArrayList(configuration.getCoreConnectionsPerHost(hostDistance)); + for (int i = 0; i < configuration.getCoreConnectionsPerHost(hostDistance); i++) + l.add(factory.open(host)); + this.connections = new CopyOnWriteArrayList(l); + this.open = new AtomicInteger(connections.size()); logger.trace(String.format("Created connection pool to host %s", host)); } - public Connection borrowConnection(long timeout, TimeUnit unit) throws ConnectionException { + public Connection borrowConnection(long timeout, TimeUnit unit) throws ConnectionException, TimeoutException { if (isShutdown.get()) // TODO: have a specific exception throw new ConnectionException(host.getAddress(), "Pool is shutdown"); - Connection connection = available.poll(); - if (connection == null) - { - // Request the opening of a connection, unless we already know there is too much - if (open.get() < configuration.maxConnections) - openExecutor.submit(newConnectionTask); + if (connections.isEmpty()) { + for (int i = 0; i < configuration.getCoreConnectionsPerHost(hostDistance); i++) + spawnNewConnection(); + return waitForConnection(timeout, unit); + } - connection = waitForConnection(timeout, unit); + int minInFlight = Integer.MAX_VALUE; + Connection leastBusy = null; + for (Connection connection : connections) { + int inFlight = connection.inFlight.get(); + if (inFlight < minInFlight) { + minInFlight = inFlight; + leastBusy = 
connection; + } } - borrowed.incrementAndGet(); - connection.setKeyspace(configuration.keyspace); - return connection; - } + if (minInFlight >= configuration.getMaxStreamsPerConnectionTreshold(hostDistance) && connections.size() < configuration.getMaxConnectionPerHost(hostDistance)) + spawnNewConnection(); - private boolean addConnectionIfUnderMaximum() { + while (true) { + int inFlight = leastBusy.inFlight.get(); - // First, make sure we don't cross the allowed limit of open connections - for(;;) { - int opened = open.get(); - if (opened >= configuration.maxConnections) - return false; + if (inFlight >= Connection.MAX_STREAM_PER_CONNECTION) { + leastBusy = waitForConnection(timeout, unit); + break; + } - if (open.compareAndSet(opened, opened + 1)) + if (leastBusy.inFlight.compareAndSet(inFlight, inFlight + 1)) break; } - return addConnection(); + leastBusy.setKeyspace(configuration.keyspace); + return leastBusy; } - private boolean addConnection() { + private static long elapsed(long start, TimeUnit unit) { + return unit.convert(System.currentTimeMillis() - start, TimeUnit.MILLISECONDS); + } + + private void awaitAvailableConnection(long timeout, TimeUnit unit) { + waitLock.lock(); try { - available.offer(factory.open(host)); + hasAvailableConnection.await(timeout, unit); + } catch (InterruptedException e) { + // TODO: Do we want to stop ignoring that? + } finally { + waitLock.unlock(); + } + } - if (isShutdown.get()) { - discardAvailableConnections(); - return false; - } else { - return true; - } - } catch (ConnectionException e) { - logger.debug("Connection error to " + host + ", signaling monitor"); - if (failureSignaler.signalConnectionFailure(e)) - shutdown(); - return false; + private void signalAvailableConnection() { + waitLock.lock(); + try { + hasAvailableConnection.signal(); + } finally { + waitLock.unlock(); } } - // This is guaranteed to either return a connection or throw an exception - private Connection waitForConnection(long timeout, TimeUnit unit) throws ConnectionException { - waitingThreads.incrementAndGet(); + private void signalAllAvailableConnection() { + waitLock.lock(); try { - Connection connection = available.poll(timeout, unit); - if (connection == null) - // TODO: maybe create a special exception for that - throw new ConnectionException(host.getAddress(), "No free connection available"); - return connection; - } catch (InterruptedException e) { - throw new RuntimeException(); + hasAvailableConnection.signal(); } finally { - waitingThreads.decrementAndGet(); + waitLock.unlock(); } } + + private Connection waitForConnection(long timeout, TimeUnit unit) throws ConnectionException, TimeoutException { + long start = System.currentTimeMillis(); + long remaining = timeout; + do { + awaitAvailableConnection(remaining, unit); + + if (isShutdown()) + throw new ConnectionException(host.getAddress(), "Pool is shutdown"); + + int minInFlight = Integer.MAX_VALUE; + Connection leastBusy = null; + for (Connection connection : connections) { + int inFlight = connection.inFlight.get(); + if (inFlight < minInFlight) { + minInFlight = inFlight; + leastBusy = connection; + } + } + + while (true) { + int inFlight = leastBusy.inFlight.get(); + + if (inFlight >= Connection.MAX_STREAM_PER_CONNECTION) + break; + + if (leastBusy.inFlight.compareAndSet(inFlight, inFlight + 1)) + return leastBusy; + } + + remaining = timeout - elapsed(start, unit); + } while (remaining > 0); + + throw new TimeoutException(); + } + public void returnConnection(Connection connection) { - 
borrowed.decrementAndGet(); + int inFlight = connection.inFlight.decrementAndGet(); if (connection.isDefunct()) { - if (failureSignaler.signalConnectionFailure(connection.lastException())) + if (host.getMonitor().signalConnectionFailure(connection.lastException())) shutdown(); - // TODO: make the close async - connection.close(); - open.decrementAndGet(); - return; - } + else + replace(connection); + } else { - // Return the connection as available if we have <= core connections opened, or if we have waiting threads. - // Otherwise, close it (but if some other thread beats us at closing, keep available) - if (waitingThreads.get() > 0 || open.get() <= configuration.coreConnections || !closeConnectionIfIdle(connection)) { - available.offer(connection); + if (trash.contains(connection) && inFlight == 0) { + if (trash.remove(connection)) + close(connection); + return; + } - // Sanity check - if (isShutdown.get()) - discardAvailableConnections(); + if (connections.size() > configuration.getCoreConnectionsPerHost(hostDistance) && inFlight <= configuration.getMinStreamsPerConnectionTreshold(hostDistance)) { + trashConnection(connection); + } else { + signalAvailableConnection(); + } } } - public boolean closeConnectionIfIdle(Connection connection) { - for (;;) { + private boolean trashConnection(Connection connection) { + // First, make sure we don't go below core connections + for(;;) { int opened = open.get(); - if (opened <= configuration.coreConnections) { + if (opened <= configuration.getCoreConnectionsPerHost(hostDistance)) return false; - } - assert opened > 0; + if (open.compareAndSet(opened, opened - 1)) break; } - // TODO: maybe we should do the close asynchronously? - connection.close(); - open.decrementAndGet(); + trash.add(connection); + connections.remove(connection); + + if (connection.inFlight.get() == 0 && trash.remove(connection)) + close(connection); return true; } - // Open connections if there is < core and close some if there is > core and some are idle - public void ensureCoreSize() { - int opened = open.get(); - if (opened < configuration.coreConnections) { - while (addConnectionIfUnderMaximum()); - } else { - Connection connection = available.poll(); - while (connection != null && closeConnectionIfIdle(connection)) - connection = available.poll(); + private boolean addConnectionIfUnderMaximum() { + + // First, make sure we don't cross the allowed limit of open connections + for(;;) { + int opened = open.get(); + if (opened >= configuration.getMaxConnectionPerHost(hostDistance)) + return false; + + if (open.compareAndSet(opened, opened + 1)) + break; + } + + if (isShutdown()) { + open.decrementAndGet(); + return false; } + + // Now really open the connection + try { + connections.add(factory.open(host)); + signalAvailableConnection(); + return true; + } catch (ConnectionException e) { + open.decrementAndGet(); + logger.debug("Connection error to " + host + " while creating additional connection"); + if (host.getMonitor().signalConnectionFailure(e)) + shutdown(); + return false; + } + } + + private void spawnNewConnection() { + manager.cluster.manager.executor.submit(newConnectionTask); + } + + private void replace(final Connection connection) { + connections.remove(connection); + + manager.cluster.manager.executor.submit(new Runnable() { + public void run() { + connection.close(); + addConnectionIfUnderMaximum(); + } + }); + } + + private void close(final Connection connection) { + manager.cluster.manager.executor.submit(new Runnable() { + public void run() { + 
connection.close(); + } + }); } public boolean isShutdown() { @@ -181,47 +267,55 @@ public void shutdown() { logger.debug("Shutting down pool"); - // TODO: we can have threads waiting for connection on the queue. - // It would be nice to be able to wake them up here (otherwise they - // will have to wait for the timeout). One option would be to feed some - // fake connections object to available that borrow would recognize - + // Wake up all threads that waits + signalAllAvailableConnection(); discardAvailableConnections(); } private void discardAvailableConnections() { - while (!available.isEmpty()) { - // TODO: If we make the close async, wait for it here - available.poll().close(); + for (Connection connection : connections) { + connection.close(); open.decrementAndGet(); } } + // TODO: move that out an make that configurable public static class Configuration { private volatile String keyspace; - private volatile int coreConnections = 2; - private volatile int maxConnections = 100; - public void setKeyspace(String keyspace) { this.keyspace = keyspace; } - public void setCoreConnections(int value) { - coreConnections = value; + public int getMinStreamsPerConnectionTreshold(HostDistance distance) { + return 25; } - public int getCoreConnections() { - return coreConnections; + public int getMaxStreamsPerConnectionTreshold(HostDistance distance) { + return 100; } - public void setMaxConnections(int value) { - maxConnections = value; + public int getCoreConnectionsPerHost(HostDistance distance) { + switch (distance) { + case LOCAL: + return 2; + case REMOTE: + return 1; + default: + return 0; + } } - public int getMaxConnections() { - return maxConnections; + public int getMaxConnectionPerHost(HostDistance distance) { + switch (distance) { + case LOCAL: + return 10; + case REMOTE: + return 3; + default: + return 0; + } } } } diff --git a/driver-core/src/main/java/com/datastax/driver/core/HostDistance.java b/driver-core/src/main/java/com/datastax/driver/core/HostDistance.java new file mode 100644 index 00000000000..4644650435a --- /dev/null +++ b/driver-core/src/main/java/com/datastax/driver/core/HostDistance.java @@ -0,0 +1,24 @@ +package com.datastax.driver.core; + +/** + * The distance to a Cassandra node as assigned by a + * {@link LoadBalancingPolicy} (through its {@code distance} method). + * + * The distance assigned to an host influence how many connections the driver + * maintains towards this host. If for a given host the assigned {@code HostDistance} + * is {@code LOCAL} or {@code REMOTE}, some connections will be maintained by + * the driver to this host. More active connections will be kept to + * {@code LOCAL} host than to a {@code REMOTE} one (and thus well behaving + * {@code LoadBalancingPolicy} should assign a {@code REMOTE} distance only to + * hosts that are the less often queried). + *
<p>
+ * However, if an host is assigned the distance {@code IGNORED}, no connection + * to that host will maintained active. In other words, {@code IGNORED} should + * be assigned to hosts that should not be used by this driver (because they + * are in a remote datacenter for instance). + */ +public enum HostDistance { + LOCAL, + REMOTE, + IGNORED +} diff --git a/driver-core/src/main/java/com/datastax/driver/core/LoadBalancingPolicy.java b/driver-core/src/main/java/com/datastax/driver/core/LoadBalancingPolicy.java index 008883f4f2e..793f0250ae9 100644 --- a/driver-core/src/main/java/com/datastax/driver/core/LoadBalancingPolicy.java +++ b/driver-core/src/main/java/com/datastax/driver/core/LoadBalancingPolicy.java @@ -1,16 +1,22 @@ package com.datastax.driver.core; -import java.util.Collection; -import java.util.Iterator; -import java.util.Random; +import java.util.*; +import java.util.concurrent.*; import java.util.concurrent.atomic.AtomicInteger; +import com.google.common.collect.AbstractIterator; + /** * The policy that decides which Cassandra hosts to contact for each new query. * - * The main method to implement is {@link LoadBalancingPolicy#newQueryPlan} and - * is used for each query to find which host to query first, and which hosts to - * use as failover. + * Two methods need to be implemented: + *
<ul>
+ *   <li>{@link LoadBalancingPolicy#distance}: returns the "distance" of an
+ * host for that balancing policy.</li>
+ *   <li>{@link LoadBalancingPolicy#newQueryPlan}: it is used for each query to
+ * find which host to query first, and which hosts to use as failover.</li>
+ * </ul>
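+ * <p>
+ * As a minimal sketch (hypothetical, not part of this patch), a policy
+ * could implement these two methods as follows, treating every host as
+ * local and querying hosts in a fixed order ({@code liveHosts} being some
+ * collection of live hosts kept by the policy):
+ * <pre>
+ * public HostDistance distance(Host host) { return HostDistance.LOCAL; }
+ * public Iterator&lt;Host&gt; newQueryPlan() { return liveHosts.iterator(); }
+ * </pre>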
+ * * * The {@code LoadBalancingPolicy} is a {@link Host.StateListener} and is thus * informed of hosts up/down events. For efficiency purposes, the policy is @@ -19,10 +25,28 @@ public interface LoadBalancingPolicy extends Host.StateListener { /** - * Returns the hosts to use for a given query. + * Returns the distance assigned by this policy to the provided host. + * + * The distance of an host influence how much connections are kept to the + * node (see {@Link HostDistance}). A policy should assign a {@code + * LOCAL} distance to nodes that are susceptible to be returned first by + * {@code newQueryPlan} and it is useless for {@code newQueryPlan} to + * return hosts to which it assigns an {@code IGNORED} distance. + * + * The host distance is primarily used to prevent keeping too many + * connections to host in remote datacenters when the policy itself always + * picks host in the local datacenter first. + * + * @param the host of which to return the distance of. + * @return the HostDistance to {@host}. + */ + public HostDistance distance(Host host); + + /** + * Returns the hosts to use for a new query. * * Each new query will call this method. The first host in the result will - * then be used to perform the query. In the even of a connection problem + * then be used to perform the query. In the event of a connection problem * (the queried host is down or appear to be so), the next host will be * used. If all hosts of the returned {@code Iterator} are down, the query * will fail. @@ -53,81 +77,258 @@ public interface Factory { * This policy queries nodes in a round-robin fashion. For a given query, * if an host fail, the next one (following the round-robin order) is * tried, until all hosts have been tried. + * + * This policy is not datacenter aware and will include every known + * Cassandra host in its round robin algorithm. If you use multiple + * datacenter this will be inefficient and you will want to use the + * DCAwareRoundRobin load balancing policy instead. */ public static class RoundRobin implements LoadBalancingPolicy { - private volatile Host[] liveHosts; + private final CopyOnWriteArrayList liveHosts; private final AtomicInteger index = new AtomicInteger(); private RoundRobin(Collection hosts) { - this.liveHosts = hosts.toArray(new Host[hosts.size()]); + this.liveHosts = new CopyOnWriteArrayList(hosts); this.index.set(new Random().nextInt(Math.max(hosts.size(), 1))); } + /** + * Return the HostDistance for the provided host. + * + * This policy consider all nodes as local. This is generally the right + * thing to do in a single datacenter deployement. If you use multiple + * datacenter, see {@link DCAwareRoundRobin} instead. + * + * @param the host of which to return the distance of. + * @return the HostDistance to {@host}. + */ + public HostDistance distance(Host host) { + return HostDistance.LOCAL; + } + + /** + * Returns the hosts to use for a new query. + * + * The returned plan will try each known host of the cluster. Upon each + * call to this method, the ith host of the plans returned will cycle + * over all the host of the cluster in a round-robin fashion. + * + * @return a new query plan, i.e. an iterator indicating which host to + * try first for querying, which one to use as failover, etc... + */ public Iterator newQueryPlan() { - final Host[] hosts = liveHosts; + // We clone liveHosts because we want a version of the list that + // cannot change concurrently of the query plan iterator (this + // would be racy). 
We use clone() as it don't involve a copy of the + // underlying array (and thus we rely on liveHosts being a CopyOnWriteArrayList). + final List hosts = (List)liveHosts.clone(); final int startIdx = index.getAndIncrement(); // Overflow protection; not theoretically thread safe but should be good enough if (startIdx > Integer.MAX_VALUE - 10000) index.set(0); - return new Iterator() { + return new AbstractIterator() { private int idx = startIdx; - private int remaining = hosts.length; + private int remaining = hosts.size(); - public boolean hasNext() { - return remaining > 0; - } + protected Host computeNext() { + if (remaining <= 0) + return endOfData(); - public Host next() { - Host h = hosts[idx++ % hosts.length]; remaining--; - return h; - } - - public void remove() { - throw new UnsupportedOperationException(); + return hosts.get(idx++ % hosts.size()); } }; } - public synchronized void onUp(Host host) { + public void onUp(Host host) { + liveHosts.addIfAbsent(host); + } - for (Host h : liveHosts) - if (h.equals(host)) - return; + public void onDown(Host host) { + liveHosts.remove(host); + } - Host[] newHosts = new Host[liveHosts.length + 1]; - System.arraycopy(liveHosts, 0, newHosts, 0, liveHosts.length); - newHosts[newHosts.length - 1] = host; - liveHosts = newHosts; + public void onAdd(Host host) { + onUp(host); } - public synchronized void onDown(Host host) { - int idx = -1; - for (int i = 0; i < liveHosts.length; i++) { - if (liveHosts[i].equals(host)) { - idx = i; - break; - } + public void onRemove(Host host) { + onDown(host); + } + + /** + * A {@code LoadBalancingPolicy.Factory} that creates RoundRobin + * policies (on the whole cluster). + */ + public static class Factory implements LoadBalancingPolicy.Factory { + + public static final Factory INSTANCE = new Factory(); + + private Factory() {} + + public LoadBalancingPolicy create(Collection hosts) { + return new RoundRobin(hosts); } + } + } - if (idx == -1) - return; - - Host[] newHosts = new Host[liveHosts.length - 1]; - if (idx == 0) { - System.arraycopy(liveHosts, 1, newHosts, 0, newHosts.length); - } else if (idx == liveHosts.length - 1) { - System.arraycopy(liveHosts, 0, newHosts, 0, newHosts.length); - } else { - System.arraycopy(liveHosts, 0, newHosts, 0, idx); - System.arraycopy(liveHosts, idx + 1, newHosts, idx, liveHosts.length - idx - 1); + /** + * A data-center aware Round-robin load balancing policy. + * + * This policy provides round-robin queries over the node of the local + * datacenter. It also includes in the query plans returned a configurable + * number of hosts in the remote datacenters, but those are always tried + * after the local nodes. In other words, this policy guarantees that no + * host in a remote datacenter will be queried unless no host in the local + * datacenter can be reached. + * + * If used with a single datacenter, this policy is equivalent to the + * {@code LoadBalancingPolicy.RoundRobin} policy, but its DC awareness + * incurs a slight overhead so the {@code LoadBalancingPolicy.RoundRobin} + * policy could be prefered to this policy in that case. 
+ */ + public static class DCAwareRoundRobin implements LoadBalancingPolicy { + + private final ConcurrentMap> perDcLiveHosts = new ConcurrentHashMap>(); + private final AtomicInteger index = new AtomicInteger(); + private final String localDc; + private final int usedHostsPerRemoteDc; + + private DCAwareRoundRobin(Collection hosts, String localDc, int usedHostsPerRemoteDc) { + this.index.set(new Random().nextInt(Math.max(hosts.size(), 1))); + this.localDc = localDc; + this.usedHostsPerRemoteDc = usedHostsPerRemoteDc; + + for (Host host : hosts) { + String dc = dc(host); + CopyOnWriteArrayList prev = perDcLiveHosts.get(dc); + if (prev == null) + perDcLiveHosts.put(dc, new CopyOnWriteArrayList(Collections.singletonList(host))); + else + prev.addIfAbsent(host); } - liveHosts = newHosts; + } + + private String dc(Host host) { + String dc = host.getDatacenter(); + return dc == null ? localDc : dc; + } + + /** + * Return the HostDistance for the provided host. + * + * This policy consider nodes in the local datacenter as {@code LOCAL}. + * For each remote datacenter, it considers a configurable number of + * hosts as {@code REMOTE} and the rest is {@code IGNORED}. + * + * To configure how many host in each remote datacenter is considered + * {@code REMOTE}, see {@link Factory#create(String, int)}. + * + * @param the host of which to return the distance of. + * @return the HostDistance to {@host}. + */ + public HostDistance distance(Host host) { + String dc = dc(host); + if (dc.equals(localDc)) + return HostDistance.LOCAL; + + CopyOnWriteArrayList dcHosts = perDcLiveHosts.get(dc); + if (dcHosts == null || usedHostsPerRemoteDc == 0) + return HostDistance.IGNORED; + + // We need to clone, otherwise our subList call is not thread safe + dcHosts = (CopyOnWriteArrayList)dcHosts.clone(); + return dcHosts.subList(0, Math.min(dcHosts.size(), usedHostsPerRemoteDc)).contains(host) + ? HostDistance.REMOTE + : HostDistance.IGNORED; + } + + /** + * Returns the hosts to use for a new query. + * + * The returned plan will always try each known host in the local + * datacenter first, and then, if none of the local host is reacheable, + * will try up to a configurable number of other host per remote datacenter. + * The order of the local node in the returned query plan will follow a + * Round-robin algorithm. + * + * @return a new query plan, i.e. an iterator indicating which host to + * try first for querying, which one to use as failover, etc... + */ + public Iterator newQueryPlan() { + + CopyOnWriteArrayList localLiveHosts = perDcLiveHosts.get(localDc); + final List hosts = localLiveHosts == null ? 
Collections.emptyList() : (List)localLiveHosts.clone(); + final int startIdx = index.getAndIncrement(); + + // Overflow protection; not theoretically thread safe but should be good enough + if (startIdx > Integer.MAX_VALUE - 10000) + index.set(0); + + return new AbstractIterator() { + + private int idx = startIdx; + private int remainingLocal = hosts.size(); + + // For remote Dcs + private Iterator remoteDcs; + private List currentDcHosts; + private int currentDcRemaining; + + protected Host computeNext() { + if (remainingLocal > 0) { + remainingLocal--; + return hosts.get(idx++ % hosts.size()); + } + + if (currentDcHosts != null && currentDcRemaining > 0) { + currentDcRemaining--; + return currentDcHosts.get(idx++ % currentDcHosts.size()); + } + + if (remoteDcs == null) { + Set copy = new HashSet(perDcLiveHosts.keySet()); + copy.remove(localDc); + remoteDcs = copy.iterator(); + } + + if (!remoteDcs.hasNext()) + return endOfData(); + + String nextRemoteDc = remoteDcs.next(); + CopyOnWriteArrayList nextDcHosts = perDcLiveHosts.get(nextRemoteDc); + if (nextDcHosts != null) { + currentDcHosts = (List)nextDcHosts.clone(); + currentDcRemaining = Math.min(usedHostsPerRemoteDc, currentDcHosts.size()); + } + + return computeNext(); + } + }; + } + + public void onUp(Host host) { + String dc = dc(host); + CopyOnWriteArrayList dcHosts = perDcLiveHosts.get(dc); + if (dcHosts == null) { + CopyOnWriteArrayList newMap = new CopyOnWriteArrayList(Collections.singletonList(host)); + dcHosts = perDcLiveHosts.putIfAbsent(dc, newMap); + // If we've successfully put our new host, we're good, otherwise we've been beaten so continue + if (dcHosts == null) + return; + } + dcHosts.addIfAbsent(host); + } + + public void onDown(Host host) { + CopyOnWriteArrayList dcHosts = perDcLiveHosts.get(dc(host)); + if (dcHosts != null) + dcHosts.remove(host); } public void onAdd(Host host) { @@ -138,14 +339,66 @@ public void onRemove(Host host) { onDown(host); } + /** + * A {@code LoadBalancingPolicy.Factory} that creates DCAwareRoundRobin + * policies. + */ public static class Factory implements LoadBalancingPolicy.Factory { - public static final Factory INSTANCE = new Factory(); + public static final int DEFAULT_USED_HOSTS_PER_REMOTE_DC = 0; - private Factory() {} + private final String localDc; + private final int usedHostsPerRemoteDc; + + private Factory(String localDc, int usedHostsPerRemoteDc) { + this.localDc = localDc; + this.usedHostsPerRemoteDc = usedHostsPerRemoteDc; + } + + /** + * Creates a new DCAwareRoundRobin policy factory given the name of + * the local datacenter. + * + * The name of the local datacenter provided must be the local + * datacenter name as known by Cassandra. + * + * The policy created by the returned factory will ignore all + * remote hosts. In other words, this is equivalent to + * {@code create(localDc, 0)}. + * + * @param localDc the name of the local datacenter (as known by + * Cassandra). + * @return the newly created factory. + */ + public static Factory create(String localDc) { + return new Factory(localDc, DEFAULT_USED_HOSTS_PER_REMOTE_DC); + } + + /** + * Creates a new DCAwareRoundRobin policy factory given the name of + * the local datacenter that use the provided number of host per + * remote datacenter as failover for the local hosts. + * + * The name of the local datacenter provided must be the local + * datacenter name as known by Cassandra. + * + * @param localDc the name of the local datacenter (as known by + * Cassandra). 
+ * @param usedHostsPerRemoteDc the number of host per remote + * datacenter that policies created by the returned factory should + * consider. Created policies {@code distance} method will return a + * {@code HostDistance.REMOTE} distance for only {@code + * usedHostsPerRemoteDc} hosts per remote datacenter. Other hosts + * of the remote datacenters will be ignored (and thus no + * connections to them will be maintained). + * @return the newly created factory. + */ + public static Factory create(String localDc, int usedHostsPerRemoteDc) { + return new Factory(localDc, usedHostsPerRemoteDc); + } public LoadBalancingPolicy create(Collection hosts) { - return new RoundRobin(hosts); + return new DCAwareRoundRobin(hosts, localDc, usedHostsPerRemoteDc); } } } diff --git a/driver-core/src/main/java/com/datastax/driver/core/RetryingCallback.java b/driver-core/src/main/java/com/datastax/driver/core/RetryingCallback.java index a2baf051bc9..4f0f65fcd95 100644 --- a/driver-core/src/main/java/com/datastax/driver/core/RetryingCallback.java +++ b/driver-core/src/main/java/com/datastax/driver/core/RetryingCallback.java @@ -5,6 +5,7 @@ import java.util.Iterator; import java.util.HashMap; import java.util.Map; +import java.util.concurrent.TimeoutException; import java.util.concurrent.TimeUnit; import java.util.concurrent.ExecutionException; @@ -68,7 +69,7 @@ private boolean query(Host host) { return false; try { - Connection connection = pool.borrowConnection(manager.DEFAULT_CONNECTION_TIMEOUT, TimeUnit.MILLISECONDS); + Connection connection = pool.borrowConnection(manager.DEFAULT_PER_HOST_CONNECTION_TIMEOUT, TimeUnit.MILLISECONDS); current = host; try { connection.write(this); @@ -78,16 +79,20 @@ private boolean query(Host host) { } } catch (ConnectionException e) { // If we have any problem with the connection, move to the next node. - logError(e); + logError(host.getAddress(), e.getMessage()); + return false; + } catch (TimeoutException e) { + // We timeout, log it but move to the next node. + logError(host.getAddress(), "Timeout while trying to acquire available connection"); return false; } } - private void logError(ConnectionException e) { - logger.debug(String.format("Error querying %s, trying next host (error is: %s)", e.address, e.getMessage())); + private void logError(InetSocketAddress address, String msg) { + logger.debug(String.format("Error querying %s, trying next host (error is: %s)", address, msg)); if (errors == null) errors = new HashMap(); - errors.put(e.address, e.getMessage()); + errors.put(address, msg); } private void retry(final boolean retryCurrent) { @@ -164,16 +169,16 @@ public void onSet(Connection connection, Message.Response response) { // TODO check return ? 
retry = true; } catch (InterruptedException e) { - logError(new ConnectionException(connection.address, "Interrupted while preparing query to execute")); + logError(connection.address, "Interrupted while preparing query to execute"); retry(false); return; } catch (ExecutionException e) { - logError(new ConnectionException(connection.address, "Unexpected problem while preparing query to execute: " + e.getCause().getMessage())); + logError(connection.address, "Unexpected problem while preparing query to execute: " + e.getCause().getMessage()); retry(false); return; } catch (ConnectionException e) { logger.debug("Connection exception while preparing missing statement", e); - logError(e); + logError(e.address, e.getMessage()); retry(false); return; } @@ -195,7 +200,8 @@ public void onSet(Connection connection, Message.Response response) { public void onException(Exception exception) { if (exception instanceof ConnectionException) { - logError((ConnectionException)exception); + ConnectionException ce = (ConnectionException)exception; + logError(ce.address, ce.getMessage()); retry(false); return; } diff --git a/driver-core/src/main/java/com/datastax/driver/core/Session.java b/driver-core/src/main/java/com/datastax/driver/core/Session.java index e2fffbde176..e3553b729cd 100644 --- a/driver-core/src/main/java/com/datastax/driver/core/Session.java +++ b/driver-core/src/main/java/com/datastax/driver/core/Session.java @@ -273,7 +273,7 @@ static class Manager implements Host.StateListener { final HostConnectionPool.Configuration poolsConfiguration; // TODO: Make that configurable - final long DEFAULT_CONNECTION_TIMEOUT = 3000; + final long DEFAULT_PER_HOST_CONNECTION_TIMEOUT = 3000; public Manager(Cluster cluster, Collection hosts) { this.cluster = cluster; @@ -288,7 +288,17 @@ public Manager(Cluster cluster, Collection hosts) { } private HostConnectionPool addHost(Host host) { - return pools.put(host, new HostConnectionPool(host, host.getMonitor().signaler, cluster.manager.connectionFactory, poolsConfiguration)); + try { + HostDistance distance = loadBalancer.distance(host); + if (distance == HostDistance.IGNORED) + return pools.get(host); + else + return pools.put(host, new HostConnectionPool(host, distance, cluster.manager.connectionFactory, poolsConfiguration, this)); + } catch (ConnectionException e) { + logger.debug(String.format("Error creating pool to %s (%s)", host, e.getMessage())); + host.getMonitor().signalConnectionFailure(e); + return pools.get(host); + } } public void onUp(Host host) { @@ -307,6 +317,21 @@ public void onDown(Host host) { // This should not be necessary but it's harmless if (pool != null) pool.shutdown(); + + // If we've remove a host, the loadBalancer is allowed to change his mind on host distances. 
+ for (Host h : cluster.getMetadata().allHosts()) { + if (!h.getMonitor().isUp()) + continue; + + HostDistance dist = loadBalancer.distance(h); + if (dist != HostDistance.IGNORED) { + HostConnectionPool p = pools.get(h); + if (p == null) + addHost(host); + else + p.hostDistance = dist; + } + } } public void onAdd(Host host) { @@ -368,6 +393,8 @@ public void prepare(String query, InetSocketAddress toExclude) { c.write(new PrepareMessage(query)).get(); } catch (ConnectionException e) { // Again, not being able to prepare the query right now is no big deal, so just ignore + } catch (TimeoutException e) { + // Same as above } catch (InterruptedException e) { // Same as above } catch (ExecutionException e) { diff --git a/driver-core/src/test/java/com/datastax/driver/core/SessionTest.java b/driver-core/src/test/java/com/datastax/driver/core/SessionTest.java index 4de1b297452..412dec4e4fa 100644 --- a/driver-core/src/test/java/com/datastax/driver/core/SessionTest.java +++ b/driver-core/src/test/java/com/datastax/driver/core/SessionTest.java @@ -161,40 +161,14 @@ public static void classSetUp() { // } //} - //@Test - //public void MultiNodeContinuousExecuteTest() throws Exception { - - // Cluster cluster = new Cluster.Builder().addContactPoints("127.0.0.1").build(); - // Session session = cluster.connect(); - - // try { - // session.execute("CREATE KEYSPACE test_ks WITH replication = { 'class' : 'SimpleStrategy', 'replication_factor' : 2 }"); - // // We should deal with that sleep - // try { Thread.sleep(2000); } catch (Exception e) {} - // session.execute("USE test_ks"); - // session.execute("CREATE TABLE test (k text PRIMARY KEY, i int, f float)"); - // } catch (AlreadyExistsException e) { - // // Skip if already exists - // session.execute("USE test_ks"); - // } - - // //System.out.println("--- Schema ---\n" + cluster.getMetadata()); - - // for (int i = 0; i < 10000; ++i) { - // System.out.println(">> " + i); - // session.execute(String.format("INSERT INTO test (k, i, f) VALUES ('k%d', %d, %d.2)", i, i, i)); - // Thread.currentThread().sleep(1000); - // } - //} - @Test - public void SchemaTest() throws Exception { + public void MultiNodeContinuousExecuteTest() throws Exception { Cluster cluster = new Cluster.Builder().addContactPoints("127.0.0.1").build(); Session session = cluster.connect(); try { - session.execute("CREATE KEYSPACE test_ks WITH replication = { 'class' : 'SimpleStrategy', 'replication_factor' : 1 }"); + session.execute("CREATE KEYSPACE test_ks WITH replication = { 'class' : 'SimpleStrategy', 'replication_factor' : 2 }"); // We should deal with that sleep try { Thread.sleep(2000); } catch (Exception e) {} session.execute("USE test_ks"); @@ -204,10 +178,36 @@ public void SchemaTest() throws Exception { session.execute("USE test_ks"); } + //System.out.println("--- Schema ---\n" + cluster.getMetadata()); + for (int i = 0; i < 10000; ++i) { - System.out.println("--- Schema " + i + " ---"); - System.out.println(cluster.getMetadata().getKeyspace("test_ks").exportAsString()); - Thread.currentThread().sleep(4000); + System.out.println(">> " + i); + session.execute(String.format("INSERT INTO test (k, i, f) VALUES ('k%d', %d, %d.2)", i, i, i)); + Thread.currentThread().sleep(1000); } } + + //@Test + //public void SchemaTest() throws Exception { + + // Cluster cluster = new Cluster.Builder().addContactPoints("127.0.0.1").build(); + // Session session = cluster.connect(); + + // try { + // session.execute("CREATE KEYSPACE test_ks WITH replication = { 'class' : 'SimpleStrategy', 
'replication_factor' : 1 }"); + // // We should deal with that sleep + // try { Thread.sleep(2000); } catch (Exception e) {} + // session.execute("USE test_ks"); + // session.execute("CREATE TABLE test (k text PRIMARY KEY, i int, f float)"); + // } catch (AlreadyExistsException e) { + // // Skip if already exists + // session.execute("USE test_ks"); + // } + + // for (int i = 0; i < 10000; ++i) { + // System.out.println("--- Schema " + i + " ---"); + // System.out.println(cluster.getMetadata().getKeyspace("test_ks").exportAsString()); + // Thread.currentThread().sleep(4000); + // } + //} } From a4bb33026edec773e90caf0085e5685b406f4305 Mon Sep 17 00:00:00 2001 From: Sylvain Lebresne Date: Fri, 19 Oct 2012 14:48:47 +0200 Subject: [PATCH 051/719] More complete retry policies --- .../driver/core/LoadBalancingPolicy.java | 43 +- .../com/datastax/driver/core/ResultSet.java | 5 +- .../com/datastax/driver/core/RetryPolicy.java | 458 +++++++++++++++++- .../driver/core/RetryingCallback.java | 70 ++- .../com/datastax/driver/core/Session.java | 2 +- .../com/datastax/driver/core/WriteType.java | 44 ++ .../exceptions/WriteTimeoutException.java | 15 +- 7 files changed, 567 insertions(+), 70 deletions(-) create mode 100644 driver-core/src/main/java/com/datastax/driver/core/WriteType.java diff --git a/driver-core/src/main/java/com/datastax/driver/core/LoadBalancingPolicy.java b/driver-core/src/main/java/com/datastax/driver/core/LoadBalancingPolicy.java index 793f0250ae9..d456d70d7ab 100644 --- a/driver-core/src/main/java/com/datastax/driver/core/LoadBalancingPolicy.java +++ b/driver-core/src/main/java/com/datastax/driver/core/LoadBalancingPolicy.java @@ -8,7 +8,7 @@ /** * The policy that decides which Cassandra hosts to contact for each new query. - * + *
<p>
  * Two methods need to be implemented:
  * <ul>
  *   <li>{@link LoadBalancingPolicy#distance}: returns the "distance" of an
  * host for that balancing policy.</li>
  *   <li>{@link LoadBalancingPolicy#newQueryPlan}: it is used for each query to
  * find which host to query first, and which hosts to use as failover.</li>
  * </ul>
- *
- *
+ * <p>
* The {@code LoadBalancingPolicy} is a {@link Host.StateListener} and is thus * informed of hosts up/down events. For efficiency purposes, the policy is * expected to exclude down hosts from query plans. @@ -26,25 +25,25 @@ public interface LoadBalancingPolicy extends Host.StateListener { /** * Returns the distance assigned by this policy to the provided host. - * + *
<p>
* The distance of an host influence how much connections are kept to the - * node (see {@Link HostDistance}). A policy should assign a {@code + * node (see {@link HostDistance}). A policy should assign a {@code * LOCAL} distance to nodes that are susceptible to be returned first by * {@code newQueryPlan} and it is useless for {@code newQueryPlan} to * return hosts to which it assigns an {@code IGNORED} distance. - * + *
<p>
* The host distance is primarily used to prevent keeping too many * connections to host in remote datacenters when the policy itself always * picks host in the local datacenter first. * - * @param the host of which to return the distance of. - * @return the HostDistance to {@host}. + * @param host the host of which to return the distance of. + * @return the HostDistance to {@code host}. */ public HostDistance distance(Host host); /** * Returns the hosts to use for a new query. - * + *
<p>
* Each new query will call this method. The first host in the result will * then be used to perform the query. In the event of a connection problem * (the queried host is down or appear to be so), the next host will be @@ -73,11 +72,11 @@ public interface Factory { /** * A Round-robin load balancing policy. - * + *
<p>
* This policy queries nodes in a round-robin fashion. For a given query, * if an host fail, the next one (following the round-robin order) is * tried, until all hosts have been tried. - * + *
<p>
* This policy is not datacenter aware and will include every known * Cassandra host in its round robin algorithm. If you use multiple * datacenter this will be inefficient and you will want to use the @@ -95,13 +94,13 @@ private RoundRobin(Collection hosts) { /** * Return the HostDistance for the provided host. - * + *
<p>
* This policy consider all nodes as local. This is generally the right * thing to do in a single datacenter deployement. If you use multiple * datacenter, see {@link DCAwareRoundRobin} instead. * - * @param the host of which to return the distance of. - * @return the HostDistance to {@host}. + * @param host the host of which to return the distance of. + * @return the HostDistance to {@code host}. */ public HostDistance distance(Host host) { return HostDistance.LOCAL; @@ -109,7 +108,7 @@ public HostDistance distance(Host host) { /** * Returns the hosts to use for a new query. - * + *
<p>
* The returned plan will try each known host of the cluster. Upon each * call to this method, the ith host of the plans returned will cycle * over all the host of the cluster in a round-robin fashion. @@ -179,14 +178,14 @@ public LoadBalancingPolicy create(Collection hosts) { /** * A data-center aware Round-robin load balancing policy. - * + *
<p>
* This policy provides round-robin queries over the node of the local * datacenter. It also includes in the query plans returned a configurable * number of hosts in the remote datacenters, but those are always tried * after the local nodes. In other words, this policy guarantees that no * host in a remote datacenter will be queried unless no host in the local * datacenter can be reached. - * + *
<p>
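 * For example (illustrative only; "DC1" stands for whatever datacenter
 * name Cassandra reports), such a policy is obtained through its factory,
 * here keeping one failover host per remote datacenter:
 * <pre>
 * LoadBalancingPolicy.Factory factory = DCAwareRoundRobin.Factory.create("DC1", 1);
 * </pre>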
* If used with a single datacenter, this policy is equivalent to the * {@code LoadBalancingPolicy.RoundRobin} policy, but its DC awareness * incurs a slight overhead so the {@code LoadBalancingPolicy.RoundRobin} @@ -221,16 +220,16 @@ private String dc(Host host) { /** * Return the HostDistance for the provided host. - * + *
<p>
* This policy consider nodes in the local datacenter as {@code LOCAL}. * For each remote datacenter, it considers a configurable number of * hosts as {@code REMOTE} and the rest is {@code IGNORED}. - * + *
<p>
* To configure how many host in each remote datacenter is considered * {@code REMOTE}, see {@link Factory#create(String, int)}. * - * @param the host of which to return the distance of. - * @return the HostDistance to {@host}. + * @param host the host of which to return the distance of. + * @return the HostDistance to {@code host}. */ public HostDistance distance(Host host) { String dc = dc(host); @@ -250,7 +249,7 @@ public HostDistance distance(Host host) { /** * Returns the hosts to use for a new query. - * + *
<p>
* The returned plan will always try each known host in the local * datacenter first, and then, if none of the local host is reacheable, * will try up to a configurable number of other host per remote datacenter. diff --git a/driver-core/src/main/java/com/datastax/driver/core/ResultSet.java b/driver-core/src/main/java/com/datastax/driver/core/ResultSet.java index c36f64137d2..ece30211dae 100644 --- a/driver-core/src/main/java/com/datastax/driver/core/ResultSet.java +++ b/driver-core/src/main/java/com/datastax/driver/core/ResultSet.java @@ -313,10 +313,7 @@ else if (cause instanceof DriverUncheckedException) throw new DriverInternalError("Unexpected exception thrown", cause); } - - // TODO: Convert to some internal exception static Exception convertException(org.apache.cassandra.exceptions.TransportException te) { - switch (te.code()) { case SERVER_ERROR: return new DriverInternalError("An unexpected error occured server side: " + te.getMessage()); @@ -335,7 +332,7 @@ static Exception convertException(org.apache.cassandra.exceptions.TransportExcep return new TruncateException(te.getMessage()); case WRITE_TIMEOUT: org.apache.cassandra.exceptions.WriteTimeoutException wte = (org.apache.cassandra.exceptions.WriteTimeoutException)te; - return new WriteTimeoutException(ConsistencyLevel.from(wte.consistency), wte.received, wte.blockFor); + return new WriteTimeoutException(ConsistencyLevel.from(wte.consistency), WriteType.from(wte.writeType), wte.received, wte.blockFor); case READ_TIMEOUT: org.apache.cassandra.exceptions.ReadTimeoutException rte = (org.apache.cassandra.exceptions.ReadTimeoutException)te; return new ReadTimeoutException(ConsistencyLevel.from(rte.consistency), rte.received, rte.blockFor, rte.dataPresent); diff --git a/driver-core/src/main/java/com/datastax/driver/core/RetryPolicy.java b/driver-core/src/main/java/com/datastax/driver/core/RetryPolicy.java index 3ffd4d53364..a9e4be676a6 100644 --- a/driver-core/src/main/java/com/datastax/driver/core/RetryPolicy.java +++ b/driver-core/src/main/java/com/datastax/driver/core/RetryPolicy.java @@ -1,39 +1,463 @@ package com.datastax.driver.core; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; +import org.apache.log4j.Level; + /** - * A policy that defines a default comportment to adopt when a request returns + * A policy that defines a default behavior to adopt when a request returns * a TimeoutException or an UnavailableException. * - * TODO: is that really useful to have such details if one cannot modify the request? - * TODO: Fix javadoc + * Such policy allows to centralize the handling of query retries, allowing to + * minimize the need for exception catching/handling in business code. */ public interface RetryPolicy { - public boolean onReadTimeout(ConsistencyLevel cl, int required, int received, boolean dataPresent, int nbRetry); + /** + * A retry decision to adopt on a Cassandra exception (read/write timeout + * or unavailable exception). + *
<p>
+ * There are three possible decisions:
+ * <ul>
+ *   <li>RETHROW: no retry should be attempted and an exception should be thrown</li>
+ *   <li>RETRY: the operation will be retried. The consistency level of the
+ * retry should be specified.</li>
+ *   <li>IGNORE: no retry should be attempted and the exception should be
+ * ignored. In that case, the operation that triggered the Cassandra
+ * exception will return an empty result set.</li>
+ * </ul>
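+ * <p>
+ * As an illustrative sketch (hypothetical, not part of this interface), a
+ * custom policy could combine these decisions, for instance retrying once
+ * at the same consistency level before rethrowing:
+ * <pre>
+ * public RetryDecision onUnavailable(ConsistencyLevel cl, int required, int alive, int nbRetry) {
+ *     return nbRetry == 0 ? RetryDecision.retry(cl) : RetryDecision.rethrow();
+ * }
+ * </pre>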
+ */ + public static class RetryDecision { + static enum Type { RETRY, RETHROW, IGNORE }; + final Type type; + final ConsistencyLevel retryCL; + + private RetryDecision(Type type, ConsistencyLevel retryCL) { + this.type = type; + this.retryCL = retryCL; + } + + /** + * Creates a RETHROW retry decision. + * + * @return a RETHROW retry decision. + */ + public static RetryDecision rethrow() { + return new RetryDecision(Type.RETHROW, null); + } + + /** + * Creates a RETRY retry decision using the provided consistency level. + * + * @param consistency the consistency level to use for the retry. + * @return a RETRY with consistency level {@code consistency} retry decision. + */ + public static RetryDecision retry(ConsistencyLevel consistency) { + return new RetryDecision(Type.RETRY, consistency); + } + + /** + * Creates an IGNORE retry decision. + * + * @return an IGNORE retry decision. + */ + public static RetryDecision ignore() { + return new RetryDecision(Type.IGNORE, null); + } + } + + /** + * Defines whether to retry and at which consistency level on a read timeout. + *
<p>
+ * Note that this method may be called even if + * {@code requiredResponses >= receivedResponses} if {@code dataPresent} is + * {@code false} (see + * {@link com.datastax.driver.core.exceptions.ReadTimeoutException#wasDataRetrieved}). + * + * @param cl the original consistency level of the read that timeouted. + * @param requiredResponses the number of responses that were required to + * achieve the requested consistency level. + * @param receivedResponses the number of responses that had been received + * by the time the timeout exception was raised. + * @param dataRetrieved whether actual data (by opposition to data checksum) + * was present in the received responses. + * @param nbRetry the number of retry already performed for this operation. + * @return the retry decision. If {@code RetryDecision.RETHROW} is returned, + * a {@link com.datastax.driver.core.exceptions.ReadTimeoutException} will + * be thrown for the operation. + */ + public RetryDecision onReadTimeout(ConsistencyLevel cl, int requiredResponses, int receivedResponses, boolean dataRetrieved, int nbRetry); - public boolean onWriteTimeout(ConsistencyLevel cl, int required, int received, int nbRetry); + /** + * Defines whether to retry and at which consistency level on a write timeout. + * + * @param cl the original consistency level of the write that timeouted. + * @param writeType the type of the write that timeouted. + * @param requiredAcks the number of acknowledgments that were required to + * achieve the requested consistency level. + * @param receivedAcks the number of acknowledgments that had been received + * by the time the timeout exception was raised. + * @param nbRetry the number of retry already performed for this operation. + * @return the retry decision. If {@code RetryDecision.RETHROW} is returned, + * a {@link com.datastax.driver.core.exceptions.WriteTimeoutException} will + * be thrown for the operation. + */ + public RetryDecision onWriteTimeout(ConsistencyLevel cl, WriteType writeType, int requiredAcks, int receivedAcks, int nbRetry); - public boolean onUnavailable(ConsistencyLevel cl, int required, int alive, int nbRetry); + /** + * Defines whether to retry and at which consistency level on an + * unavailable exception. + * + * @param cl the original consistency level for the operation. + * @param requiredReplica the number of replica that should have been + * (known) alive for the operation to be attempted. + * @param aliveReplica the number of replica that were know to be alive by + * the coordinator of the operation. + * @param nbRetry the number of retry already performed for this operation. + * @return the retry decision. If {@code RetryDecision.RETHROW} is returned, + * an {@link com.datastax.driver.core.exceptions.UnavailableException} will + * be thrown for the operation. + */ + public RetryDecision onUnavailable(ConsistencyLevel cl, int requiredReplica, int aliveReplica, int nbRetry); - public static class DefaultPolicy implements RetryPolicy { + /** + * The default retry policy. + *
<p>
+ * This policy retries queries in only two cases:
+ * <ul>
+ *   <li>On a read timeout, if enough replicas replied but data was not retrieved.</li>
+ *   <li>On a write timeout, if we timeout while writing the distributed log used by batch statements.</li>
+ * </ul>
+ * <p>
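+ * Schematically, the read timeout rule (mirroring the {@code onReadTimeout}
+ * implementation below) boils down to:
+ * <pre>
+ * if (nbRetry != 0)
+ *     return RetryDecision.rethrow();
+ * return receivedResponses >= requiredResponses && !dataRetrieved
+ *      ? RetryDecision.retry(cl)
+ *      : RetryDecision.rethrow();
+ * </pre>
+ * <p>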
+ * This retry policy is conservative in that it will never retry with a + * different consistency level than the one of the initial operation. + */ + public static class Default implements RetryPolicy { + + public static final Default INSTANCE = new Default(); + + private Default() {} + + /** + * Defines whether to retry and at which consistency level on a read timeout. + *
<p>
+ * This method triggers a maximum of one retry, and only if enough + * replica had responded to the read request but data was not retrieved + * amongst those. Indeed, that case usually means that enough replica + * are alive to satisfy the consistency but the coordinator picked a + * dead one for data retrieval, not having detecte that replica as dead + * yet. The reasoning for retrying then is that by the time we get the + * timeout the dead replica will likely have been detected as dead and + * the retry has a high change of success. + * + * @param cl the original consistency level of the read that timeouted. + * @param requiredResponses the number of responses that were required to + * achieve the requested consistency level. + * @param receivedResponses the number of responses that had been received + * by the time the timeout exception was raised. + * @param dataRetrieved whether actual data (by opposition to data checksum) + * was present in the received responses. + * @param nbRetry the number of retry already performed for this operation. + * @return {@code RetryDecision.retry(cl)} if no retry attempt has yet been tried and + * {@code receivedResponses >= requiredResponses && !dataRetrieved}, {@code RetryDecision.rethrow()} otherwise. + */ + public RetryDecision onReadTimeout(ConsistencyLevel cl, int requiredResponses, int receivedResponses, boolean dataRetrieved, int nbRetry) { + if (nbRetry != 0) + return RetryDecision.rethrow(); + + return receivedResponses >= requiredResponses && !dataRetrieved ? RetryDecision.retry(cl) : RetryDecision.rethrow(); + } + + /** + * Defines whether to retry and at which consistency level on a write timeout. + *
<p>
+ * This method triggers a maximum of one retry, and only in the case of + * a {@code WriteType.BATCH_LOG} write. The reasoning for the retry in + * that case is that write to the distributed batch log is tried by the + * coordinator of the write against a small subset of all the node alive + * in the local datacenter. Hence, a timeout usually means that none of + * the nodes in that subset were alive but the coordinator hasn't + * detected them as dead. By the time we get the timeout the dead + * nodes will likely have been detected as dead and the retry has thus a + * high change of success. + * + * @param cl the original consistency level of the write that timeouted. + * @param writeType the type of the write that timeouted. + * @param requiredAcks the number of acknowledgments that were required to + * achieve the requested consistency level. + * @param receivedAcks the number of acknowledgments that had been received + * by the time the timeout exception was raised. + * @param nbRetry the number of retry already performed for this operation. + * @return {@code RetryDecision.retry(cl)} if no retry attempt has yet been tried and + * {@code writeType == WriteType.BATCH_LOG}, {@code RetryDecision.rethrow()} otherwise. + */ + public RetryDecision onWriteTimeout(ConsistencyLevel cl, WriteType writeType, int requiredAcks, int receivedAcks, int nbRetry) { + if (nbRetry != 0) + return RetryDecision.rethrow(); + + // If the batch log write failed, retry the operation as this might just be we were unlucky at picking candidtes + return writeType == WriteType.BATCH_LOG ? RetryDecision.retry(cl) : RetryDecision.rethrow(); + } + + /** + * Defines whether to retry and at which consistency level on an + * unavailable exception. + *
<p>
+ * This method never retries as a retry on an unavailable exception + * using the same consistency level has almost no change of success. + * + * @param cl the original consistency level for the operation. + * @param requiredReplica the number of replica that should have been + * (known) alive for the operation to be attempted. + * @param aliveReplica the number of replica that were know to be alive by + * the coordinator of the operation. + * @param nbRetry the number of retry already performed for this operation. + * @return {@code RetryDecision.rethrow()}. + */ + public RetryDecision onUnavailable(ConsistencyLevel cl, int requiredReplica, int aliveReplica, int nbRetry) { + return RetryDecision.rethrow(); + } + } - public static final DefaultPolicy INSTANCE = new DefaultPolicy(); + /** + * A retry policy that sometimes retry with a lower consistency level than + * the one initially requested. + *
<p>
+ * BEWARE: This policy may retry queries using a lower consistency + * level than the one initially requested. By doing so, it may break + * consistency guarantees. In other words, if you use this retry policy, + * there is cases (documented below) where a read at {@code QUORUM} + * may not see a preceding write at {@code QUORUM}. Do not use this + * policy unless you have understood the cases where this can happen and + * are ok with that. It is also highly recommended to always wrap this + * policy into {@link RetryPolicy.RetryLogger} to log the occurences of + * such consistency break. + *
<p>
+ * This policy implements the same retries as the {@link Default} policy.
+ * But on top of that, it also retries in the following cases:
+ * <ul>
+ *   <li>On a read timeout: if the number of replicas that responded is
+ * greater than one but lower than is required by the requested
+ * consistency level, the operation is retried at a lower consistency
+ * level.</li>
+ *   <li>On a write timeout: if the operation is an {@code
+ * WriteType.UNLOGGED_BATCH} and at least one replica acknowledged the
+ * write, the operation is retried at a lower consistency level.
+ * Furthermore, for other operations, if at least one replica acknowledged
+ * the write, the timeout is ignored.</li>
+ *   <li>On an unavailable exception: if at least one replica is alive, the
+ * operation is retried at a lower consistency level.</li>
+ * </ul>
+ * <p>

+     * The reasoning behind this retry policy is the following. If, based
+     * on the information the Cassandra coordinator node returns, retrying the
+     * operation with the initially requested consistency has a chance to
+     * succeed, do it. Otherwise, if based on that information we know the
+     * initially requested consistency level cannot currently be achieved, then:
+     * <ul>
+     *   <li>For writes, ignore the exception (thus silently failing the
+     *   consistency requirement) if we know the write has been persisted on at
+     *   least one replica.</li>
+     *   <li>For reads, try reading at a lower consistency level (thus silently
+     *   failing the consistency requirement).</li>
+     * </ul>
+     * In other words, this policy implements the idea that if the requested
+     * consistency level cannot be achieved, the next best thing for writes is
+     * to make sure the data is persisted, and that reading something is better
+     * than reading nothing, even if there is a risk of reading stale data.
+     */
+    public static class DowngradingConsistency implements RetryPolicy {

-    private DefaultPolicy() {}
+        public static final DowngradingConsistency INSTANCE = new DowngradingConsistency();

-    public boolean onReadTimeout(ConsistencyLevel cl, int required, int received, boolean dataPresent, int nbRetry) {
-        if (nbRetry > 1)
-            return false;
+        private DowngradingConsistency() {}
+
+        private RetryDecision maxLikelyToWorkCL(int knownOk) {
+            if (knownOk >= 3)
+                return RetryDecision.retry(ConsistencyLevel.THREE);
+            else if (knownOk >= 2)
+                return RetryDecision.retry(ConsistencyLevel.TWO);
+            else if (knownOk >= 1)
+                return RetryDecision.retry(ConsistencyLevel.ONE);
+            else
+                return RetryDecision.rethrow();
+        }
+
+        /**
+         * Defines whether to retry and at which consistency level on a read timeout.
+         * <p>
+         * This method triggers a maximum of one retry. If fewer replicas
+         * responded than required by the consistency level (but at least one
+         * replica did respond), the operation is retried at a lower
+         * consistency level. If enough replicas responded but data was not
+         * retrieved, the operation is retried with the initial consistency
+         * level. Otherwise, an exception is thrown.
+         *
+         * @param cl the original consistency level of the read that timed out.
+         * @param requiredResponses the number of responses that were required to
+         * achieve the requested consistency level.
+         * @param receivedResponses the number of responses that had been received
+         * by the time the timeout exception was raised.
+         * @param dataRetrieved whether actual data (as opposed to data checksums)
+         * was present in the received responses.
+         * @param nbRetry the number of retries already performed for this operation.
+         * @return a RetryDecision as defined above.
+         */
+        public RetryDecision onReadTimeout(ConsistencyLevel cl, int requiredResponses, int receivedResponses, boolean dataRetrieved, int nbRetry) {
+            if (nbRetry != 0)
+                return RetryDecision.rethrow();
+
+            if (receivedResponses < requiredResponses) {
+                // Tries the biggest CL that is expected to work
+                return maxLikelyToWorkCL(receivedResponses);
+            }
+
+            return !dataRetrieved ? RetryDecision.retry(cl) : RetryDecision.rethrow();
+        }
+
+        /**
+         * Defines whether to retry and at which consistency level on a write timeout.
+         * <p>
+         * This method triggers a maximum of one retry. If {@code writeType ==
+         * WriteType.BATCH_LOG}, the write is retried with the initial
+         * consistency level. If {@code writeType == WriteType.UNLOGGED_BATCH}
+         * and at least one replica acknowledged, the write is retried with a
+         * lower consistency level (with an unlogged batch, a write timeout can
+         * always mean that part of the batch hasn't been persisted at
+         * all, even if {@code receivedAcks > 0}). For other {@code writeType}s,
+         * if we know the write has been persisted on at least one replica, we
+         * ignore the exception. Otherwise, an exception is thrown.
+         *
+         * @param cl the original consistency level of the write that timed out.
+         * @param writeType the type of the write that timed out.
+         * @param requiredAcks the number of acknowledgments that were required to
+         * achieve the requested consistency level.
+         * @param receivedAcks the number of acknowledgments that had been received
+         * by the time the timeout exception was raised.
+         * @param nbRetry the number of retries already performed for this operation.
+         * @return a RetryDecision as defined above.
+         */
+        public RetryDecision onWriteTimeout(ConsistencyLevel cl, WriteType writeType, int requiredAcks, int receivedAcks, int nbRetry) {
+            if (nbRetry != 0)
+                return RetryDecision.rethrow();
+
+            switch (writeType) {
+                case SIMPLE:
+                case BATCH:
+                    // Since we provide atomicity there is no point in retrying
+                    return RetryDecision.ignore();
+                case COUNTER:
+                    // We should not retry counters, period!
+                    return RetryDecision.ignore();
+                case UNLOGGED_BATCH:
+                    // Since only part of the batch could have been persisted,
+                    // retry with whatever consistency should allow to persist all
+                    return maxLikelyToWorkCL(receivedAcks);
+                case BATCH_LOG:
+                    return RetryDecision.retry(cl);
+            }
+            return RetryDecision.rethrow();
+        }
+
+        /**
+         * Defines whether to retry and at which consistency level on an
+         * unavailable exception.
+         * <p>
+         * This method triggers a maximum of one retry. If at least one replica
+         * is known to be alive, the operation is retried at a lower consistency
+         * level.
+         *
+         * @param cl the original consistency level for the operation.
+         * @param requiredReplica the number of replicas that should have been
+         * (known) alive for the operation to be attempted.
+         * @param aliveReplica the number of replicas that were known to be alive by
+         * the coordinator of the operation.
+         * @param nbRetry the number of retries already performed for this operation.
+         * @return a RetryDecision as defined above.
+         */
+        public RetryDecision onUnavailable(ConsistencyLevel cl, int requiredReplica, int aliveReplica, int nbRetry) {
+            if (nbRetry != 0)
+                return RetryDecision.rethrow();
+
+            // Tries the biggest CL that is expected to work
+            return maxLikelyToWorkCL(aliveReplica);
+        }
+    }
+
+    /**
+     * A retry policy that wraps another policy, logging the decisions made by its sub-policy.
+     * <p>
+     * Note that this policy only logs the IGNORE and RETRY decisions (since
+     * RETHROW decisions just amount to propagating the Cassandra exception). The
+     * logging is done at the INFO level.
+     */
+    public static class RetryLogger implements RetryPolicy {
+
+        private static final Logger logger = LoggerFactory.getLogger(RetryLogger.class);
+        private final RetryPolicy policy;
+
+        private RetryLogger(RetryPolicy policy) {
+            this.policy = policy;
+        }
+
+        /**
+         * Creates a new {@code RetryPolicy} that logs the decisions of {@code policy}.
+         *
+         * @param policy the policy to wrap. The policy created by this method
+         * will return the same decisions as {@code policy} but will log them.
+         * @return the newly created logging policy.
+         */
+        public static RetryPolicy wrap(RetryPolicy policy) {
+            return new RetryLogger(policy);
+        }
+
+        private static ConsistencyLevel cl(ConsistencyLevel cl, RetryDecision decision) {
+            return decision.retryCL == null ? cl : decision.retryCL;
+        }

-    return received >= required && !dataPresent;
+        public RetryDecision onReadTimeout(ConsistencyLevel cl, int requiredResponses, int receivedResponses, boolean dataRetrieved, int nbRetry) {
+            RetryDecision decision = policy.onReadTimeout(cl, requiredResponses, receivedResponses, dataRetrieved, nbRetry);
+            switch (decision.type) {
+                case IGNORE:
+                    String f1 = "Ignoring read timeout (initial consistency: %s, required responses: %d, received responses: %d, data retrieved: %b, retries: %d)";
+                    logger.info(String.format(f1, cl, requiredResponses, receivedResponses, dataRetrieved, nbRetry));
+                    break;
+                case RETRY:
+                    String f2 = "Retrying on read timeout at consistency %s (initial consistency: %s, required responses: %d, received responses: %d, data retrieved: %b, retries: %d)";
+                    logger.info(String.format(f2, cl(cl, decision), cl, requiredResponses, receivedResponses, dataRetrieved, nbRetry));
+                    break;
+            }
+            return decision;
         }

-    public boolean onWriteTimeout(ConsistencyLevel cl, int required, int received, int nbRetry) {
-        return false;
+        public RetryDecision onWriteTimeout(ConsistencyLevel cl, WriteType writeType, int requiredAcks, int receivedAcks, int nbRetry) {
+            RetryDecision decision = policy.onWriteTimeout(cl, writeType, requiredAcks, receivedAcks, nbRetry);
+            switch (decision.type) {
+                case IGNORE:
+                    String f1 = "Ignoring write timeout (initial consistency: %s, write type: %s, required acknowledgments: %d, received acknowledgments: %d, retries: %d)";
+                    logger.info(String.format(f1, cl, writeType, requiredAcks, receivedAcks, nbRetry));
+                    break;
+                case RETRY:
+                    String f2 = "Retrying on write timeout at consistency %s (initial consistency: %s, write type: %s, required acknowledgments: %d, received acknowledgments: %d, retries: %d)";
+                    logger.info(String.format(f2, cl(cl, decision), cl, writeType, requiredAcks, receivedAcks, nbRetry));
+                    break;
+            }
+            return decision;
         }

-    public boolean onUnavailable(ConsistencyLevel cl, int required, int alive, int nbRetry) {
-        return false;
+        public RetryDecision onUnavailable(ConsistencyLevel cl, int requiredReplica, int aliveReplica, int nbRetry) {
+            RetryDecision decision = policy.onUnavailable(cl, requiredReplica, aliveReplica, nbRetry);
+            switch (decision.type) {
+                case IGNORE:
+                    String f1 = "Ignoring unavailable exception (initial consistency: %s, required replica: %d, alive replica: %d, retries: %d)";
+                    logger.info(String.format(f1, cl, requiredReplica, aliveReplica, nbRetry));
+                    break;
+                case RETRY:
+                    String f2 = "Retrying on unavailable exception at consistency %s (initial consistency: %s, required replica: %d, alive replica: %d, retries: %d)";
+                    logger.info(String.format(f2, cl(cl, decision), cl, requiredReplica, aliveReplica, nbRetry));
+                    break;
+            }
+            return decision;
         }
     }
 }
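[Editor's note: to illustrate the interface this patch introduces, here is a minimal sketch of a custom policy written against it. The class name is hypothetical and not part of the patch; the types used (RetryDecision, WriteType, ConsistencyLevel) are the ones defined above.]

    // Editor's sketch: retry once at the original consistency level on any
    // timeout, never on unavailable exceptions.
    public class OnceOnTimeoutPolicy implements RetryPolicy {

        public RetryDecision onReadTimeout(ConsistencyLevel cl, int requiredResponses, int receivedResponses, boolean dataRetrieved, int nbRetry) {
            // First timeout: try again at the same CL; afterwards, give up.
            return nbRetry == 0 ? RetryDecision.retry(cl) : RetryDecision.rethrow();
        }

        public RetryDecision onWriteTimeout(ConsistencyLevel cl, WriteType writeType, int requiredAcks, int receivedAcks, int nbRetry) {
            // Never replay counter writes (see the WriteType enum below).
            if (writeType == WriteType.COUNTER)
                return RetryDecision.rethrow();
            return nbRetry == 0 ? RetryDecision.retry(cl) : RetryDecision.rethrow();
        }

        public RetryDecision onUnavailable(ConsistencyLevel cl, int requiredReplica, int aliveReplica, int nbRetry) {
            // As the Default policy argues, retrying at the same CL is unlikely to help.
            return RetryDecision.rethrow();
        }
    }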
diff --git a/driver-core/src/main/java/com/datastax/driver/core/RetryingCallback.java b/driver-core/src/main/java/com/datastax/driver/core/RetryingCallback.java
index 4f0f65fcd95..eb585bb2d66 100644
--- a/driver-core/src/main/java/com/datastax/driver/core/RetryingCallback.java
+++ b/driver-core/src/main/java/com/datastax/driver/core/RetryingCallback.java
@@ -17,6 +17,7 @@
 import org.apache.cassandra.transport.messages.ExecuteMessage;
 import org.apache.cassandra.transport.messages.PrepareMessage;
 import org.apache.cassandra.transport.messages.QueryMessage;
+import org.apache.cassandra.transport.messages.ResultMessage;
 import org.apache.cassandra.exceptions.UnavailableException;
 import org.apache.cassandra.exceptions.PreparedQueryNotFoundException;
 import org.apache.cassandra.exceptions.ReadTimeoutException;
@@ -40,8 +41,8 @@ class RetryingCallback implements Connection.ResponseCallback {
     private final Iterator queryPlan;
     private volatile Host current;

-    private final boolean isQuery;
     private volatile int queryRetries;
+    private volatile ConsistencyLevel retryConsistencyLevel;

     private volatile Map errors;

@@ -50,7 +51,6 @@ public RetryingCallback(Session.Manager manager, Connection.ResponseCallback cal
         this.callback = callback;

         this.queryPlan = manager.loadBalancer.newQueryPlan();
-        this.isQuery = request() instanceof QueryMessage || request() instanceof ExecuteMessage;
     }

     public void sendRequest() {
@@ -95,8 +95,9 @@ private void logError(InetSocketAddress address, String msg) {
         errors.put(address, msg);
     }

-    private void retry(final boolean retryCurrent) {
+    private void retry(final boolean retryCurrent, ConsistencyLevel newConsistencyLevel) {
         final Host h = current;
+        this.retryConsistencyLevel = newConsistencyLevel;

         // We should not retry on the current thread as this will be an IO thread.
         manager.cluster.manager.executor.execute(new Runnable() {
@@ -111,7 +112,22 @@ public void run() {
     }

     public Message.Request request() {
-        return callback.request();
+
+        Message.Request request = callback.request();
+        if (retryConsistencyLevel != null) {
+            org.apache.cassandra.db.ConsistencyLevel cl = ConsistencyLevel.toCassandraCL(retryConsistencyLevel);
+            if (request instanceof QueryMessage) {
+                QueryMessage qm = (QueryMessage)request;
+                if (qm.consistency != cl)
+                    request = new QueryMessage(qm.query, cl);
+            }
+            else if (request instanceof ExecuteMessage) {
+                ExecuteMessage em = (ExecuteMessage)request;
+                if (em.consistency != cl)
+                    request = new ExecuteMessage(em.statementId, em.values, cl);
+            }
+        }
+        return request;
     }

     public void onSet(Connection connection, Message.Response response) {
@@ -121,7 +137,7 @@ public void onSet(Connection connection, Message.Response response) {
                 break;
             case ERROR:
                 ErrorMessage err = (ErrorMessage)response;
-                boolean retry = false;
+                RetryPolicy.RetryDecision retry = null;
                 switch (err.error.code()) {
                     case READ_TIMEOUT:
                         assert err.error instanceof ReadTimeoutException;
@@ -133,7 +149,7 @@
                         assert err.error instanceof WriteTimeoutException;
                         WriteTimeoutException wte = (WriteTimeoutException)err.error;
                         ConsistencyLevel wcl = ConsistencyLevel.from(wte.consistency);
-                        retry = manager.retryPolicy.onWriteTimeout(wcl, wte.received, wte.blockFor, queryRetries);
+                        retry = manager.retryPolicy.onWriteTimeout(wcl, WriteType.from(wte.writeType), wte.received, wte.blockFor, queryRetries);
                         break;
                     case UNAVAILABLE:
                         assert err.error instanceof UnavailableException;
@@ -142,16 +158,15 @@
                         retry = manager.retryPolicy.onUnavailable(ucl, ue.required, ue.alive, queryRetries);
                         break;
                     case OVERLOADED:
-                        // TODO: maybe we could make that part of the retrying policy?
-                        if (queryRetries == 0)
-                            retry = true;
-                        break;
+                        // Try another node
+                        retry(false, null);
+                        return;
                     case IS_BOOTSTRAPPING:
                         // TODO: log error as this shouldn't happen
-                        // retry once
-                        if (queryRetries == 0)
-                            retry = true;
-                        break;
+                        // Try another node
+                        logger.error(String.format("Query sent to %s but it is bootstrapping. This shouldn't happen but trying next host.", connection.address));
+                        retry(false, null);
+                        return;
                     case UNPREPARED:
                         assert err.error instanceof PreparedQueryNotFoundException;
                         PreparedQueryNotFoundException pqnf = (PreparedQueryNotFoundException)err.error;
@@ -167,29 +182,34 @@
                         try {
                             Message.Response prepareResponse = connection.write(new PrepareMessage(toPrepare)).get();
                             // TODO check return ?
-                            retry = true;
+                            retry = RetryPolicy.RetryDecision.retry(null);
                         } catch (InterruptedException e) {
                             logError(connection.address, "Interrupted while preparing query to execute");
-                            retry(false);
+                            retry(false, null);
                             return;
                         } catch (ExecutionException e) {
                             logError(connection.address, "Unexpected problem while preparing query to execute: " + e.getCause().getMessage());
-                            retry(false);
+                            retry(false, null);
                             return;
                         } catch (ConnectionException e) {
                             logger.debug("Connection exception while preparing missing statement", e);
                             logError(e.address, e.getMessage());
-                            retry(false);
+                            retry(false, null);
                             return;
                         }
                 }

-                if (retry) {
-                    ++queryRetries;
-                    retry(true);
-                } else {
-                    callback.onSet(connection, response);
+                switch (retry.type) {
+                    case RETRY:
+                        ++queryRetries;
+                        retry(true, retry.retryCL);
+                        break;
+                    case RETHROW:
+                        callback.onSet(connection, response);
+                        break;
+                    case IGNORE:
+                        callback.onSet(connection, ResultMessage.Void.instance());
+                        break;
                 }
-                break;

             default:
                 callback.onSet(connection, response);
@@ -202,7 +222,7 @@ public void onException(Exception exception) {
         if (exception instanceof ConnectionException) {
             ConnectionException ce = (ConnectionException)exception;
             logError(ce.address, ce.getMessage());
-            retry(false);
+            retry(false, null);
             return;
         }
diff --git a/driver-core/src/main/java/com/datastax/driver/core/Session.java b/driver-core/src/main/java/com/datastax/driver/core/Session.java
index e3553b729cd..96080a355f0 100644
--- a/driver-core/src/main/java/com/datastax/driver/core/Session.java
+++ b/driver-core/src/main/java/com/datastax/driver/core/Session.java
@@ -268,7 +268,7 @@ static class Manager implements Host.StateListener {
         final LoadBalancingPolicy loadBalancer;

         // TODO: make that configurable
-        final RetryPolicy retryPolicy = RetryPolicy.DefaultPolicy.INSTANCE;
+        final RetryPolicy retryPolicy = RetryPolicy.Default.INSTANCE;

         final HostConnectionPool.Configuration poolsConfiguration;

diff --git a/driver-core/src/main/java/com/datastax/driver/core/WriteType.java b/driver-core/src/main/java/com/datastax/driver/core/WriteType.java
new file mode 100644
index 00000000000..cdc49a00474
--- /dev/null
+++ b/driver-core/src/main/java/com/datastax/driver/core/WriteType.java
@@ -0,0 +1,44 @@
+package com.datastax.driver.core;
+
+/**
+ * The type of a Cassandra write query.
+ *
+ * This information is returned by Cassandra when a write timeout is raised, to
+ * indicate what type of write timed out. It is useful for deciding
+ * which retry policy to adopt.
+ */
+public enum WriteType
+{
+    /** A write to a single partition key. Such writes are guaranteed to be atomic and isolated. */
+    SIMPLE,
+    /** A write to multiple partition keys that used the distributed batch log to ensure atomicity. */
+    BATCH,
+    /** A write to multiple partition keys that doesn't use the distributed batch log. Atomicity for such writes is not guaranteed. */
+    UNLOGGED_BATCH,
+    /** A counter write (for one or multiple partition keys). Such writes should not be replayed to avoid overcounting. */
+    COUNTER,
+    /** The initial write to the distributed batch log that Cassandra performs internally before a BATCH write. */
+    BATCH_LOG;
+
+    static WriteType from(org.apache.cassandra.db.WriteType writeType) {
+        switch (writeType) {
+            case SIMPLE: return SIMPLE;
+            case BATCH: return BATCH;
+            case UNLOGGED_BATCH: return UNLOGGED_BATCH;
+            case COUNTER: return COUNTER;
+            case BATCH_LOG: return BATCH_LOG;
+        }
+        throw new AssertionError();
+    }
+
+    static org.apache.cassandra.db.WriteType toCassandraWriteType(WriteType writeType) {
+        switch (writeType) {
+            case SIMPLE: return org.apache.cassandra.db.WriteType.SIMPLE;
+            case BATCH: return org.apache.cassandra.db.WriteType.BATCH;
+            case UNLOGGED_BATCH: return org.apache.cassandra.db.WriteType.UNLOGGED_BATCH;
+            case COUNTER: return org.apache.cassandra.db.WriteType.COUNTER;
+            case BATCH_LOG: return org.apache.cassandra.db.WriteType.BATCH_LOG;
+        }
+        throw new AssertionError();
+    }
+}
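[Editor's note: the enum above exists precisely so that callers can reason about replay safety. A sketch of the kind of decision it enables; the helper is hypothetical and not part of the patch, and the idempotency claims follow the enum's own Javadoc.]

    // Editor's sketch: whether blindly resubmitting a timed-out write is safe,
    // based only on its WriteType. Counter writes may be applied twice
    // (overcounting), so they are never safe to replay.
    static boolean safeToResubmit(WriteType type) {
        switch (type) {
            case COUNTER:
                return false; // replay risks overcounting
            case SIMPLE:
            case BATCH:
            case UNLOGGED_BATCH:
            case BATCH_LOG:
                return true;  // regular writes are last-write-wins, so replay is harmless
            default:
                return false;
        }
    }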
diff --git a/driver-core/src/main/java/com/datastax/driver/core/exceptions/WriteTimeoutException.java b/driver-core/src/main/java/com/datastax/driver/core/exceptions/WriteTimeoutException.java
index 66d16a8803a..5b35985c078 100644
--- a/driver-core/src/main/java/com/datastax/driver/core/exceptions/WriteTimeoutException.java
+++ b/driver-core/src/main/java/com/datastax/driver/core/exceptions/WriteTimeoutException.java
@@ -1,16 +1,29 @@
 package com.datastax.driver.core.exceptions;

 import com.datastax.driver.core.ConsistencyLevel;
+import com.datastax.driver.core.WriteType;

 /**
  * A Cassandra timeout during a write query.
  */
 public class WriteTimeoutException extends QueryTimeoutException {

-    public WriteTimeoutException(ConsistencyLevel consistency, int received, int required) {
+    private final WriteType writeType;
+
+    public WriteTimeoutException(ConsistencyLevel consistency, WriteType writeType, int received, int required) {
         super(String.format("Cassandra timeout during write query at consistency %s (%d replica acknowledged the write over %d required)", consistency, received, required),
               consistency, received, required);
+        this.writeType = writeType;
+    }
+
+    /**
+     * The type of the write for which a timeout was raised.
+     *
+     * @return the type of the write for which a timeout was raised.
+     */
+    public WriteType getWriteType() {
+        return writeType;
     }
 }

From 978968c011084fcd315477bbaea9b0ef2dcba4ab Mon Sep 17 00:00:00 2001
From: Sylvain Lebresne
Date: Fri, 19 Oct 2012 15:06:49 +0200
Subject: [PATCH 052/719] Make QueryExecutionException unchecked

---
 .../com/datastax/driver/core/ResultSet.java   | 12 ++++++---
 .../com/datastax/driver/core/Session.java     | 26 ++++++++++++++++---
 .../exceptions/QueryExecutionException.java   |  2 +-
 3 files changed, 31 insertions(+), 9 deletions(-)

diff --git a/driver-core/src/main/java/com/datastax/driver/core/ResultSet.java b/driver-core/src/main/java/com/datastax/driver/core/ResultSet.java
index ece30211dae..ca76e9d5f45 100644
--- a/driver-core/src/main/java/com/datastax/driver/core/ResultSet.java
+++ b/driver-core/src/main/java/com/datastax/driver/core/ResultSet.java
@@ -239,8 +239,10 @@ public void onException(Exception exception) {
          * @throws QueryExecutionException if the query triggered an execution
          * exception, i.e. an exception thrown by Cassandra when it cannot execute
          * the query with the requested consistency level successfully.
+         * @throws QueryValidationException if the query is invalid (syntax error,
+         * unauthorized or any other validation problem).
          */
-        public ResultSet getUninterruptibly() throws NoHostAvailableException, QueryExecutionException {
+        public ResultSet getUninterruptibly() throws NoHostAvailableException {
             try {
                 while (true) {
                     try {
@@ -273,11 +275,13 @@ public ResultSet getUninterruptibly() throws NoHostAvailableException, QueryExec
          * @throws QueryExecutionException if the query triggered an execution
          * exception, i.e. an exception thrown by Cassandra when it cannot execute
          * the query with the requested consistency level successfully.
+         * @throws QueryValidationException if the query is invalid (syntax error,
+         * unauthorized or any other validation problem).
          * @throws TimeoutException if the wait timed out (Note that this is
          * different from a Cassandra timeout, which is a {@code
          * QueryExecutionException}).
          */
-        public ResultSet getUninterruptibly(long timeout, TimeUnit unit) throws NoHostAvailableException, QueryExecutionException, TimeoutException {
+        public ResultSet getUninterruptibly(long timeout, TimeUnit unit) throws NoHostAvailableException, TimeoutException {
             long start = System.nanoTime();
             long timeoutNanos = unit.toNanos(timeout);
             try {
@@ -298,11 +302,11 @@ public ResultSet getUninterruptibly(long timeout, TimeUnit unit) throws NoHostAv
         }
     }

-    static void extractCauseFromExecutionException(ExecutionException e) throws NoHostAvailableException, QueryExecutionException {
+    static void extractCauseFromExecutionException(ExecutionException e) throws NoHostAvailableException {
         extractCause(e.getCause());
     }

-    static void extractCause(Throwable cause) throws NoHostAvailableException, QueryExecutionException {
+    static void extractCause(Throwable cause) throws NoHostAvailableException {
         if (cause instanceof NoHostAvailableException)
             throw (NoHostAvailableException)cause;
         else if (cause instanceof QueryExecutionException)
diff --git a/driver-core/src/main/java/com/datastax/driver/core/Session.java b/driver-core/src/main/java/com/datastax/driver/core/Session.java
index 96080a355f0..f215c5a9f98 100644
--- a/driver-core/src/main/java/com/datastax/driver/core/Session.java
+++ b/driver-core/src/main/java/com/datastax/driver/core/Session.java
@@ -49,8 +49,10 @@ public class Session {
      * @throws QueryExecutionException if the query triggered an execution
      * exception, i.e. an exception thrown by Cassandra when it cannot execute
      * the query with the requested consistency level successfully.
+     * @throws QueryValidationException if the query is invalid (syntax error,
+     * unauthorized or any other validation problem).
      */
-    public ResultSet execute(String query) throws NoHostAvailableException, QueryExecutionException {
+    public ResultSet execute(String query) throws NoHostAvailableException {
         return execute(query, null);
     }

@@ -78,8 +80,10 @@ public ResultSet execute(String query) throws NoHostAvailableException, QueryExe
      * @throws QueryExecutionException if the query triggered an execution
      * exception, i.e. an exception thrown by Cassandra when it cannot execute
      * the query with the requested consistency level successfully.
+     * @throws QueryValidationException if the query is invalid (syntax error,
+     * unauthorized or any other validation problem).
      */
-    public ResultSet execute(String query, ConsistencyLevel consistency) throws NoHostAvailableException, QueryExecutionException {
+    public ResultSet execute(String query, ConsistencyLevel consistency) throws NoHostAvailableException {
         return executeAsync(query, consistency).getUninterruptibly();
     }

@@ -148,8 +152,15 @@ public PreparedStatement prepare(String query) throws NoHostAvailableException {
      * be empty (and will be for any non SELECT query).
      *
      * @throws IllegalStateException if {@code !stmt.ready()}.
+     * @throws NoHostAvailableException if no host in the cluster can be
+     * contacted successfully to execute this query.
+     * @throws QueryExecutionException if the query triggered an execution
+     * exception, i.e. an exception thrown by Cassandra when it cannot execute
+     * the query with the requested consistency level successfully.
+     * @throws QueryValidationException if the query is invalid (syntax error,
+     * unauthorized or any other validation problem).
      */
-    public ResultSet executePrepared(BoundStatement stmt) throws NoHostAvailableException, QueryExecutionException {
+    public ResultSet executePrepared(BoundStatement stmt) throws NoHostAvailableException {
         return executePrepared(stmt, null);
     }

@@ -171,8 +182,15 @@ public ResultSet executePrepared(BoundStatement stmt) throws NoHostAvailableExce
      * be empty (and will be for any non SELECT query).
      *
      * @throws IllegalStateException if {@code !stmt.ready()}.
+     * @throws NoHostAvailableException if no host in the cluster can be
+     * contacted successfully to execute this query.
+     * @throws QueryExecutionException if the query triggered an execution
+     * exception, i.e. an exception thrown by Cassandra when it cannot execute
+     * the query with the requested consistency level successfully.
+     * @throws QueryValidationException if the query is invalid (syntax error,
+     * unauthorized or any other validation problem).
      */
-    public ResultSet executePrepared(BoundStatement stmt, ConsistencyLevel consistency) throws NoHostAvailableException, QueryExecutionException {
+    public ResultSet executePrepared(BoundStatement stmt, ConsistencyLevel consistency) throws NoHostAvailableException {
         return executePreparedAsync(stmt, consistency).getUninterruptibly();
     }

diff --git a/driver-core/src/main/java/com/datastax/driver/core/exceptions/QueryExecutionException.java b/driver-core/src/main/java/com/datastax/driver/core/exceptions/QueryExecutionException.java
index fb4c7c6dd7e..c789270e848 100644
--- a/driver-core/src/main/java/com/datastax/driver/core/exceptions/QueryExecutionException.java
+++ b/driver-core/src/main/java/com/datastax/driver/core/exceptions/QueryExecutionException.java
@@ -6,7 +6,7 @@
  * This corresponds to the exceptions that Cassandra throws when a (valid) query
  * cannot be executed (TimeoutException, UnavailableException, ...).
  */
-public class QueryExecutionException extends DriverException {
+public class QueryExecutionException extends DriverUncheckedException {

     protected QueryExecutionException(String msg) {
         super(msg);
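[Editor's note: with QueryExecutionException now unchecked, call sites no longer have to declare it. A short usage sketch under the new signatures; the query string is a hypothetical example.]

    // Editor's sketch: NoHostAvailableException stays checked, while
    // execution-time failures (timeouts, unavailability) are now unchecked
    // and may be caught selectively or simply left to propagate.
    try {
        ResultSet rows = session.execute("SELECT * FROM users");
    } catch (NoHostAvailableException e) {
        // no node could be contacted at all
    } catch (QueryExecutionException e) {
        // optional: Cassandra could not execute the (valid) query
    }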
From a48a5e61f28643c8a68384ce4754fe656d7bb8e4 Mon Sep 17 00:00:00 2001
From: Sylvain Lebresne
Date: Wed, 24 Oct 2012 16:57:00 +0200
Subject: [PATCH 053/719] Expose configuration options

---
 .../com/datastax/driver/core/Cluster.java     | 166 +++++--
 .../com/datastax/driver/core/Connection.java  |  40 +-
 .../driver/core/ConnectionsConfiguration.java | 415 ++++++++++++++++++
 .../driver/core/ConvictionPolicy.java         |  39 +-
 .../driver/core/HostConnectionPool.java       |  69 +--
 .../com/datastax/driver/core/Policies.java    |  33 ++
 .../com/datastax/driver/core/ResultSet.java   |   2 +-
 .../driver/core/RetryingCallback.java         |   9 +-
 .../com/datastax/driver/core/Session.java     |  23 +-
 .../driver/core/SimpleConvictionPolicy.java   |  28 --
 .../com/datastax/driver/core/SessionTest.java |   2 +-
 11 files changed, 689 insertions(+), 137 deletions(-)
 create mode 100644 driver-core/src/main/java/com/datastax/driver/core/ConnectionsConfiguration.java
 create mode 100644 driver-core/src/main/java/com/datastax/driver/core/Policies.java
 delete mode 100644 driver-core/src/main/java/com/datastax/driver/core/SimpleConvictionPolicy.java

diff --git a/driver-core/src/main/java/com/datastax/driver/core/Cluster.java b/driver-core/src/main/java/com/datastax/driver/core/Cluster.java
index 7cf03eb29f9..65307a4ef1a 100644
--- a/driver-core/src/main/java/com/datastax/driver/core/Cluster.java
+++ b/driver-core/src/main/java/com/datastax/driver/core/Cluster.java
@@ -46,24 +46,24 @@ public class Cluster {

     final Manager manager;

-    private Cluster(List contactPoints) throws NoHostAvailableException {
-        this.manager = new Manager(contactPoints);
+    private Cluster(List contactPoints, Policies policies) throws NoHostAvailableException {
+        this.manager = new Manager(contactPoints, policies);
     }

     /**
-     * Build a new cluster based on the provided configuration.
-     *
+     * Build a new cluster based on the provided initializer.
+     * <p>
      * Note that for building a cluster programmatically, Cluster.Builder
      * provides a slightly less verbose shortcut with {@link Builder#build}.
      *
-     * @param config the Cluster.Configuration to use
+     * @param initializer the Cluster.Initializer to use
      * @return the newly created Cluster instance
      *
      * @throws NoHostAvailableException if no host amongst the contact points
      * can be reached.
      */
-    public static Cluster buildFrom(Configuration config) throws NoHostAvailableException {
-        return new Cluster(config.getContactPoints());
+    public static Cluster buildFrom(Initializer initializer) throws NoHostAvailableException {
+        return new Cluster(initializer.getContactPoints(), initializer.getPolicies());
     }

     /**
@@ -104,17 +104,17 @@ public Session connect(String keyspace) throws NoHostAvailableException {
         return session;
     }

     /**
      * Creates a new session on this cluster and sets a keyspace to use.
      *
      * @param authInfo The authorisation credentials to use to connect to
      * Cassandra nodes.
      * @return a new session on this cluster set to keyspace
      * {@code keyspaceName}.
      *
      * @throws NoHostAvailableException if no host can be contacted to set the
      * {@code keyspace}.
      */
    // Session session = connect(authInfo);
    // session.manager.setKeyspace(keyspace);
    // return session;

     /**
      * Returns read-only metadata on the connected cluster.
-     *
+     * <p>
      * This includes the known nodes (with their status as seen by the driver)
      * as well as the schema definitions.
      *
      * @return the cluster metadata.
      */
     public ClusterMetadata getMetadata() {
         return manager.metadata;
     }

     /**
-     * Configuration for {@link Cluster} instances.
+     * Initializer for {@link Cluster} instances.
      */
-    public interface Configuration {
+    public interface Initializer {

         /**
          * Returns the initial Cassandra hosts to connect to.
          *
          * @return the initial Cassandra contact points. See {@link Builder#addContactPoint}
          * for more details on contact points.
          */
         public List getContactPoints();
+
+        /**
+         * Returns the policies to use for this cluster.
+         *
+         * @return the policies to use for this cluster.
+         */
+        public Policies getPolicies();
     }

     /**
      * Helper class to build {@link Cluster} instances.
      */
-    public static class Builder implements Configuration {
+    public static class Builder implements Initializer {
+
+        private final List addresses = new ArrayList();

-        private List addresses = new ArrayList();
+        private LoadBalancingPolicy.Factory loadBalancingPolicyFactory;
+        private ReconnectionPolicy.Factory reconnectionPolicyFactory;
+        private RetryPolicy retryPolicy;

         public List getContactPoints() {
             return addresses;
         }
@@ -261,7 +272,67 @@ public Builder addContactPoints(InetSocketAddress... addresses) {
         }

         /**
-         * Build the cluster with the configured set of initial contact points.
+         * Configure the load balancing policy (factory) to use for the new cluster.
+         * <p>
+         * If no load balancing policy factory is set through this method,
+         * {@link Policies#DEFAULT_LOAD_BALANCING_POLICY_FACTORY} will be used instead.
+         *
+         * @param factory the load balancing policy factory to use
+         * @return this Builder
+         */
+        public Builder withLoadBalancingPolicyFactory(LoadBalancingPolicy.Factory factory) {
+            this.loadBalancingPolicyFactory = factory;
+            return this;
+        }
+
+        /**
+         * Configure the reconnection policy (factory) to use for the new cluster.
+         * <p>
+         * If no reconnection policy factory is set through this method,
+         * {@link Policies#DEFAULT_RECONNECTION_POLICY_FACTORY} will be used instead.
+         *
+         * @param factory the reconnection policy factory to use
+         * @return this Builder
+         */
+        public Builder withReconnectionPolicyFactory(ReconnectionPolicy.Factory factory) {
+            this.reconnectionPolicyFactory = factory;
+            return this;
+        }
+
+        /**
+         * Configure the retry policy to use for the new cluster.
+         * <p>
+         * If no retry policy is set through this method,
+         * {@link Policies#DEFAULT_RETRY_POLICY} will be used instead.
+         *
+         * @param policy the retry policy to use
+         * @return this Builder
+         */
+        public Builder withRetryPolicy(RetryPolicy policy) {
+            this.retryPolicy = policy;
+            return this;
+        }
+
+        /**
+         * Returns the policies to use for this cluster.
+         * <p>
+         * The policies used are the ones set by the {@code with*} methods of
+         * this builder, or the default ones defined in {@link Policies} for
+         * those policies that haven't been explicitly set.
+         *
+         * @return the policies to use for this cluster.
+         */
+        public Policies getPolicies() {
+            return new Policies(
+                loadBalancingPolicyFactory == null ? Policies.DEFAULT_LOAD_BALANCING_POLICY_FACTORY : loadBalancingPolicyFactory,
+                reconnectionPolicyFactory == null ? Policies.DEFAULT_RECONNECTION_POLICY_FACTORY : reconnectionPolicyFactory,
+                retryPolicy == null ? Policies.DEFAULT_RETRY_POLICY : retryPolicy
+            );
+        }
+
         /**
-         * Build the cluster with the configured set of initial contact points.
+         * Build the cluster with the configured set of initial contact points
+         * and policies.
          *
          * This is a shorthand for {@code Cluster.buildFrom(this)}.
         *
@@ -275,6 +346,38 @@ public Cluster build() throws NoHostAvailableException {
         }
     }

+    /**
+     * The configuration of the cluster.
+     */
+    public static class Configuration {
+
+        private final Policies policies;
+        private final ConnectionsConfiguration connections = new ConnectionsConfiguration();
+
+        private Configuration(Policies policies) {
+            this.policies = policies;
+        }
+
+        /**
+         * The policies set for the cluster.
+         *
+         * @return the policies set for the cluster.
+         */
+        public Policies getPolicies() {
+            return policies;
+        }
+
+        /**
+         * Configuration related to the connections the driver maintains to the
+         * Cassandra hosts.
+         *
+         * @return the configuration of the connections to Cassandra hosts.
+         */
+        public ConnectionsConfiguration getConnectionsConfiguration() {
+            return connections;
+        }
+    }
+
     /**
      * The sessions and hosts managed by this Cluster instance.
      *
@@ -286,19 +389,15 @@ class Manager implements Host.StateListener, Connection.DefaultResponseHandler {

         // Initial contacts point
         final List contactPoints;
-
         private final Set sessions = new CopyOnWriteArraySet();
+
         final ClusterMetadata metadata;
+        final Configuration configuration;

         final Connection.Factory connectionFactory;
         private final ControlConnection controlConnection;

-        // TODO: Make that configurable
-        final ConvictionPolicy.Factory convictionPolicyFactory = new SimpleConvictionPolicy.Factory();
-        final ReconnectionPolicy.Factory reconnectionPolicyFactory = ReconnectionPolicy.Exponential.makeFactory(2 * 1000, 5 * 60 * 1000);
+        final ConvictionPolicy.Factory convictionPolicyFactory = new ConvictionPolicy.Simple.Factory();

-        // TODO: make configurable
-        final LoadBalancingPolicy.Factory loadBalancingFactory = LoadBalancingPolicy.DCAwareRoundRobin.Factory.create("dc1", 1);
-        //final LoadBalancingPolicy.Factory loadBalancingFactory = LoadBalancingPolicy.RoundRobin.Factory.INSTANCE;

         final ScheduledExecutorService reconnectionExecutor = Executors.newScheduledThreadPool(2, new NamedThreadFactory("Reconnection"));
         final ScheduledExecutorService scheduledTasksExecutor = Executors.newScheduledThreadPool(1, new NamedThreadFactory("Scheduled Tasks"));
@@ -312,7 +411,8 @@ class Manager implements Host.StateListener, Connection.DefaultResponseHandler {
         // less clear behavior.
         final Map preparedQueries = new ConcurrentHashMap();

-        private Manager(List contactPoints) throws NoHostAvailableException {
+        private Manager(List contactPoints, Policies policies) throws NoHostAvailableException {
+            this.configuration = new Configuration(policies);
             this.metadata = new ClusterMetadata(this);
             this.contactPoints = contactPoints;
             this.connectionFactory = new Connection.Factory(this);
@@ -353,7 +453,7 @@ public void onDown(final Host host) {
             // Note: we basically waste the first successful reconnection, but it's probably not a big deal
             logger.debug(String.format("%s is down, scheduling connection retries", host));

-            new AbstractReconnectionHandler(reconnectionExecutor, reconnectionPolicyFactory.create(), host.reconnectionAttempt) {
+            new AbstractReconnectionHandler(reconnectionExecutor, configuration.getPolicies().getReconnectionPolicyFactory().create(), host.reconnectionAttempt) {

                 protected Connection tryReconnect() throws ConnectionException {
                     return connectionFactory.open(host);
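[Editor's note: a sketch of what cluster construction looks like with the new Initializer/Builder split. It assumes the Builder is instantiated directly (the patch shows no factory method), and the contact point and port are placeholders. The policy factories used appear elsewhere in this patch series.]

    // Editor's sketch: build a Cluster with explicit policies via the Builder.
    // build() throws the checked NoHostAvailableException.
    Cluster cluster = new Cluster.Builder()
        .addContactPoints(new InetSocketAddress("127.0.0.1", 9042))
        .withLoadBalancingPolicyFactory(LoadBalancingPolicy.DCAwareRoundRobin.Factory.create("dc1", 1))
        .withRetryPolicy(RetryPolicy.RetryLogger.wrap(RetryPolicy.DowngradingConsistency.INSTANCE))
        .build();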
diff --git a/driver-core/src/main/java/com/datastax/driver/core/Connection.java b/driver-core/src/main/java/com/datastax/driver/core/Connection.java
index 11dcb9cec46..8bf1ac30414 100644
--- a/driver-core/src/main/java/com/datastax/driver/core/Connection.java
+++ b/driver-core/src/main/java/com/datastax/driver/core/Connection.java
@@ -104,10 +104,12 @@ private void initializeTransport() throws ConnectionException {

         // TODO: we will need to get fancy about handling protocol version at
         // some point, but keep it simple for now.
-        // TODO: we need to allow setting the compression to use
         Map options = new HashMap() {{
             put(StartupMessage.CQL_VERSION, CQL_VERSION);
         }};
+        ConnectionsConfiguration.ProtocolOptions.Compression compression = factory.configuration.getProtocolOptions().getCompression();
+        if (compression != ConnectionsConfiguration.ProtocolOptions.Compression.NONE)
+            options.put(StartupMessage.COMPRESSION, compression.toString());
         StartupMessage startup = new StartupMessage(options);
         try {
             Message.Response response = write(startup).get();
@@ -275,15 +277,20 @@ public boolean isClosed() {

     public static class Factory {

-        // TODO We could share those amongst factories
         private final ExecutorService bossExecutor = Executors.newCachedThreadPool();
         private final ExecutorService workerExecutor = Executors.newCachedThreadPool();

         private final ConcurrentMap idGenerators = new ConcurrentHashMap();
         private final DefaultResponseHandler defaultHandler;
+        private final ConnectionsConfiguration configuration;

-        public Factory(DefaultResponseHandler defaultHandler) {
+        public Factory(Cluster.Manager manager) {
+            this(manager, manager.configuration.getConnectionsConfiguration());
+        }
+
+        private Factory(DefaultResponseHandler defaultHandler, ConnectionsConfiguration configuration) {
             this.defaultHandler = defaultHandler;
+            this.configuration = configuration;
         }

         /**
@@ -295,7 +302,7 @@ public Factory(DefaultResponseHandler defaultHandler) {
         */
         public Connection open(Host host) throws ConnectionException {
             InetSocketAddress address = host.getAddress();
-            String name =address.toString() + "-" + getIdGenerator(host).getAndIncrement();
+            String name = address.toString() + "-" + getIdGenerator(host).getAndIncrement();
             return new Connection(name, address, this);
         }

@@ -313,10 +320,27 @@ private AtomicInteger getIdGenerator(Host host) {
         private ClientBootstrap bootstrap() {
             ClientBootstrap b = new ClientBootstrap(new NioClientSocketChannelFactory(bossExecutor, workerExecutor));

-            // TODO: handle this better (use SocketChannelConfig)
-            b.setOption("connectTimeoutMillis", 10000);
-            b.setOption("tcpNoDelay", true);
-            b.setOption("keepAlive", true);
+            ConnectionsConfiguration.SocketOptions options = configuration.getSocketOptions();
+
+            b.setOption("connectTimeoutMillis", options.getConnectTimeoutMillis());
+            Boolean keepAlive = options.getKeepAlive();
+            if (keepAlive != null)
+                b.setOption("keepAlive", keepAlive);
+            Boolean reuseAddress = options.getReuseAddress();
+            if (reuseAddress != null)
+                b.setOption("reuseAddress", reuseAddress);
+            Integer soLinger = options.getSoLinger();
+            if (soLinger != null)
+                b.setOption("soLinger", soLinger);
+            Boolean tcpNoDelay = options.getTcpNoDelay();
+            if (tcpNoDelay != null)
+                b.setOption("tcpNoDelay", tcpNoDelay);
+            Integer receiveBufferSize = options.getReceiveBufferSize();
+            if (receiveBufferSize != null)
+                b.setOption("receiveBufferSize", receiveBufferSize);
+            Integer sendBufferSize = options.getSendBufferSize();
+            if (sendBufferSize != null)
+                b.setOption("sendBufferSize", sendBufferSize);
             return b;
         }
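[Editor's note: the bootstrap above reads its values from the SocketOptions introduced in the new ConnectionsConfiguration class just below. A sketch of tuning them; the values are hypothetical, and a Cluster.Configuration handle named config is assumed.]

    // Editor's sketch: only options that have been set are passed on to Netty.
    ConnectionsConfiguration.SocketOptions socket =
        config.getConnectionsConfiguration().getSocketOptions();
    socket.setConnectTimeoutMillis(3000); // overrides the 5000 ms default
    socket.setTcpNoDelay(true);
    socket.setKeepAlive(true);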
diff --git a/driver-core/src/main/java/com/datastax/driver/core/ConnectionsConfiguration.java b/driver-core/src/main/java/com/datastax/driver/core/ConnectionsConfiguration.java
new file mode 100644
index 00000000000..36056e14cd4
--- /dev/null
+++ b/driver-core/src/main/java/com/datastax/driver/core/ConnectionsConfiguration.java
@@ -0,0 +1,415 @@
+package com.datastax.driver.core;
+
+/**
+ * Handles all configuration related to the connections to the Cassandra hosts.
+ *
+ * This handles setting:
+ * <ul>
+ *   <li>low-level tcp configuration options (tcpNoDelay, keepAlive, ...).</li>
+ *   <li>Cassandra binary protocol level configuration (compression).</li>
+ *   <li>Connection pooling configurations.</li>
+ * </ul>
+ */
+public class ConnectionsConfiguration {
+
+    private final SocketOptions socketOptions = new SocketOptions();
+    private final ProtocolOptions protocolOptions = new ProtocolOptions();
+    private final PoolingOptions poolingOptions = new PoolingOptions();
+
+    /**
+     * The socket options.
+     *
+     * @return the socket options.
+     */
+    public SocketOptions getSocketOptions() {
+        return socketOptions;
+    }
+
+    /**
+     * The protocol options.
+     *
+     * @return the protocol options.
+     */
+    public ProtocolOptions getProtocolOptions() {
+        return protocolOptions;
+    }
+
+    /**
+     * The pooling options.
+     *
+     * @return the pooling options.
+     */
+    public PoolingOptions getPoolingOptions() {
+        return poolingOptions;
+    }
+
+    /**
+     * Options to configure low-level socket options for the connections kept
+     * to the Cassandra hosts.
+     */
+    public static class SocketOptions {
+
+        public static final int DEFAULT_CONNECT_TIMEOUT_MILLIS = 5000;
+
+        private volatile int connectTimeoutMillis = DEFAULT_CONNECT_TIMEOUT_MILLIS;
+        private volatile Boolean keepAlive;
+        private volatile Boolean reuseAddress;
+        private volatile Integer soLinger;
+        private volatile Boolean tcpNoDelay;
+        private volatile Integer receiveBufferSize;
+        private volatile Integer sendBufferSize;
+
+        public int getConnectTimeoutMillis() {
+            return connectTimeoutMillis;
+        }
+
+        public void setConnectTimeoutMillis(int connectTimeoutMillis) {
+            this.connectTimeoutMillis = connectTimeoutMillis;
+        }
+
+        public Boolean getKeepAlive() {
+            return keepAlive;
+        }
+
+        public void setKeepAlive(boolean keepAlive) {
+            this.keepAlive = keepAlive;
+        }
+
+        public Boolean getReuseAddress() {
+            return reuseAddress;
+        }
+
+        public void setReuseAddress(boolean reuseAddress) {
+            this.reuseAddress = reuseAddress;
+        }
+
+        public Integer getSoLinger() {
+            return soLinger;
+        }
+
+        public void setSoLinger(int soLinger) {
+            this.soLinger = soLinger;
+        }
+
+        public Boolean getTcpNoDelay() {
+            return tcpNoDelay;
+        }
+
+        public void setTcpNoDelay(boolean tcpNoDelay) {
+            this.tcpNoDelay = tcpNoDelay;
+        }
+
+        public Integer getReceiveBufferSize() {
+            return receiveBufferSize;
+        }
+
+        public void setReceiveBufferSize(int receiveBufferSize) {
+            this.receiveBufferSize = receiveBufferSize;
+        }
+
+        public Integer getSendBufferSize() {
+            return sendBufferSize;
+        }
+
+        public void setSendBufferSize(int sendBufferSize) {
+            this.sendBufferSize = sendBufferSize;
+        }
+    }
+
+    /**
+     * Options of the Cassandra native binary protocol.
+     */
+    public static class ProtocolOptions {
+
+        /**
+         * Compression supported by the Cassandra binary protocol.
+         */
+        public enum Compression {
+            /** No compression */
+            NONE(""),
+            /** Snappy compression */
+            SNAPPY("snappy");
+
+            final String protocolName;
+
+            private Compression(String protocolName) {
+                this.protocolName = protocolName;
+            }
+
+            @Override
+            public String toString() {
+                return protocolName;
+            }
+        };
+
+        private volatile Compression compression = Compression.NONE;
+
+        /**
+         * Returns the compression used by the protocol.
+         * <p>
+         * The default compression is {@code Compression.NONE}.
+         *
+         * @return the compression used.
+         */
+        public Compression getCompression() {
+            return compression;
+        }
+
+        /**
+         * Sets the compression to use.
+         * <p>
+         * Note that while this setting can be changed at any time, it will
+         * only apply to newly created connections.
+         *
+         * @param compression the compression algorithm to use (or {@code
+         * Compression.NONE} to disable compression).
+         * @return this {@code ProtocolOptions} object.
+         */
+        public ProtocolOptions setCompression(Compression compression) {
+            this.compression = compression;
+            return this;
+        }
+    }
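[Editor's note: enabling compression is then a one-liner; per the Javadoc above it only affects connections opened after the call. As before, a Configuration handle named config is assumed.]

    // Editor's sketch: switch the native protocol to Snappy compression.
    config.getConnectionsConfiguration().getProtocolOptions()
          .setCompression(ConnectionsConfiguration.ProtocolOptions.Compression.SNAPPY);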
+    /**
+     * Options related to connection pooling.
+     * <p>
+     * The driver uses connections in an asynchronous way, meaning that
+     * multiple requests can be submitted on the same connection at the same
+     * time. The driver therefore only needs to maintain a relatively
+     * small number of connections to each Cassandra host. These options allow
+     * control over exactly how many connections are kept.
+     * <p>
+     * For each host, the driver keeps a core number of connections open at all
+     * times ({@link PoolingOptions#getCoreConnectionsPerHost}). If the
+     * utilisation of those connections reaches a configurable threshold
+     * ({@link PoolingOptions#getMaxSimultaneousRequestsPerConnectionTreshold}),
+     * more connections are created, up to a configurable maximum number of
+     * connections ({@link PoolingOptions#getMaxConnectionPerHost}). Once more
+     * than the core number of connections have been created, connections in excess are
+     * reclaimed if the utilisation of opened connections drops below the
+     * configured threshold ({@link PoolingOptions#getMinSimultaneousRequestsPerConnectionTreshold}).
+     * <p>
+     * Each of these parameters can be separately set for {@code LOCAL} and
+     * {@code REMOTE} hosts ({@link HostDistance}). For {@code IGNORED} hosts,
+     * the default for all those settings is 0 and cannot be changed.
+     */
+    public static class PoolingOptions {
+
+        // Note: we could use an enumMap or similar, but synchronization would
+        // be more costly so let's stick to volatile fields for now.
+        private static final int DEFAULT_MIN_REQUESTS = 25;
+        private static final int DEFAULT_MAX_REQUESTS = 100;
+
+        private static final int DEFAULT_CORE_POOL_LOCAL = 2;
+        private static final int DEFAULT_CORE_POOL_REMOTE = 1;
+
+        private static final int DEFAULT_MAX_POOL_LOCAL = 8;
+        private static final int DEFAULT_MAX_POOL_REMOTE = 2;
+
+        private volatile int minSimultaneousRequestsForLocal = DEFAULT_MIN_REQUESTS;
+        private volatile int minSimultaneousRequestsForRemote = DEFAULT_MIN_REQUESTS;
+
+        private volatile int maxSimultaneousRequestsForLocal = DEFAULT_MAX_REQUESTS;
+        private volatile int maxSimultaneousRequestsForRemote = DEFAULT_MAX_REQUESTS;
+
+        private volatile int coreConnectionsForLocal = DEFAULT_CORE_POOL_LOCAL;
+        private volatile int coreConnectionsForRemote = DEFAULT_CORE_POOL_REMOTE;
+
+        private volatile int maxConnectionsForLocal = DEFAULT_MAX_POOL_LOCAL;
+        private volatile int maxConnectionsForRemote = DEFAULT_MAX_POOL_REMOTE;
+
+        /**
+         * Number of simultaneous requests on a connection below which
+         * connections in excess are reclaimed.
+         * <p>
+         * If an opened connection to a host at distance {@code distance}
+         * handles fewer than this number of simultaneous requests and there are
+         * more than {@link #getCoreConnectionsPerHost} connections open to this
+         * host, the connection is closed.
+         * <p>
+         * The default value for this option is 25 for {@code LOCAL} and
+         * {@code REMOTE} hosts.
+         *
+         * @param distance the {@code HostDistance} for which to return this threshold.
+         * @return the configured threshold, or the default one if none has been set.
+         */
+        public int getMinSimultaneousRequestsPerConnectionTreshold(HostDistance distance) {
+            switch (distance) {
+                case LOCAL:
+                    return minSimultaneousRequestsForLocal;
+                case REMOTE:
+                    return minSimultaneousRequestsForRemote;
+                default:
+                    return 0;
+            }
+        }
+
+        /**
+         * Sets the number of simultaneous requests on a connection below which
+         * connections in excess are reclaimed.
+         *
+         * @param distance the {@code HostDistance} for which to configure this threshold.
+         * @param minSimultaneousRequests the value to set.
+         * @return this {@code PoolingOptions}.
+         *
+         * @throws IllegalArgumentException if {@code distance == HostDistance.IGNORED}.
+         */
+        public PoolingOptions setMinSimultaneousRequestsPerConnectionTreshold(HostDistance distance, int minSimultaneousRequests) {
+            switch (distance) {
+                case LOCAL:
+                    minSimultaneousRequestsForLocal = minSimultaneousRequests;
+                    break;
+                case REMOTE:
+                    minSimultaneousRequestsForRemote = minSimultaneousRequests;
+                    break;
+                default:
+                    throw new IllegalArgumentException("Cannot set min streams per connection threshold for " + distance + " hosts");
+            }
+            return this;
+        }
+
+        /**
+         * Number of simultaneous requests on all connections to a host after
+         * which more connections are created.
+         * <p>
+         * If all the connections opened to a host at distance {@code
+         * distance} are handling more than this number of
+         * simultaneous requests and there are fewer than
+         * {@link #getMaxConnectionPerHost} connections open to this host, a
+         * new connection is opened.
+         * <p>
+         * Note that a given connection cannot handle more than 128
+         * simultaneous requests (protocol limitation).
+         * <p>
+         * The default value for this option is 100 for {@code LOCAL} and
+         * {@code REMOTE} hosts.
+         *
+         * @param distance the {@code HostDistance} for which to return this threshold.
+         * @return the configured threshold, or the default one if none has been set.
+         */
+        public int getMaxSimultaneousRequestsPerConnectionTreshold(HostDistance distance) {
+            switch (distance) {
+                case LOCAL:
+                    return maxSimultaneousRequestsForLocal;
+                case REMOTE:
+                    return maxSimultaneousRequestsForRemote;
+                default:
+                    return 0;
+            }
+        }
+
+        /**
+         * Sets the number of simultaneous requests on all connections to a host after
+         * which more connections are created.
+         *
+         * @param distance the {@code HostDistance} for which to configure this threshold.
+         * @param maxSimultaneousRequests the value to set.
+         * @return this {@code PoolingOptions}.
+         *
+         * @throws IllegalArgumentException if {@code distance == HostDistance.IGNORED}.
+         */
+        public PoolingOptions setMaxSimultaneousRequestsPerConnectionTreshold(HostDistance distance, int maxSimultaneousRequests) {
+            switch (distance) {
+                case LOCAL:
+                    maxSimultaneousRequestsForLocal = maxSimultaneousRequests;
+                    break;
+                case REMOTE:
+                    maxSimultaneousRequestsForRemote = maxSimultaneousRequests;
+                    break;
+                default:
+                    throw new IllegalArgumentException("Cannot set max streams per connection threshold for " + distance + " hosts");
+            }
+            return this;
+        }
+
+        /**
+         * The core number of connections per host.
+         * <p>
+         * For the provided {@code distance}, this corresponds to the number of
+         * connections initially created and kept open to each host of that
+         * distance.
+         *
+         * @param distance the {@code HostDistance} for which to return this threshold.
+         * @return the core number of connections per host at distance {@code distance}.
+         */
+        public int getCoreConnectionsPerHost(HostDistance distance) {
+            switch (distance) {
+                case LOCAL:
+                    return coreConnectionsForLocal;
+                case REMOTE:
+                    return coreConnectionsForRemote;
+                default:
+                    return 0;
+            }
+        }
+
+        /**
+         * Sets the core number of connections per host.
+         *
+         * @param distance the {@code HostDistance} for which to set this threshold.
+         * @param coreConnections the value to set
+         * @return this {@code PoolingOptions}.
+         *
+         * @throws IllegalArgumentException if {@code distance == HostDistance.IGNORED}.
+         */
+        public PoolingOptions setCoreConnectionsPerHost(HostDistance distance, int coreConnections) {
+            // TODO: make sure the pools are updated accordingly
+            switch (distance) {
+                case LOCAL:
+                    coreConnectionsForLocal = coreConnections;
+                    break;
+                case REMOTE:
+                    coreConnectionsForRemote = coreConnections;
+                    break;
+                default:
+                    throw new IllegalArgumentException("Cannot set core connections per host for " + distance + " hosts");
+            }
+            return this;
+        }
+
+        /**
+         * The maximum number of connections per host.
+         * <p>
+         * For the provided {@code distance}, this corresponds to the maximum
+         * number of connections that can be created per host at that distance.
+         *
+         * @param distance the {@code HostDistance} for which to return this threshold.
+         * @return the maximum number of connections per host at distance {@code distance}.
+         */
+        public int getMaxConnectionPerHost(HostDistance distance) {
+            switch (distance) {
+                case LOCAL:
+                    return maxConnectionsForLocal;
+                case REMOTE:
+                    return maxConnectionsForRemote;
+                default:
+                    return 0;
+            }
+        }
+
+        /**
+         * Sets the maximum number of connections per host.
+         *
+         * @param distance the {@code HostDistance} for which to set this threshold.
+         * @param maxConnections the value to set
+         * @return this {@code PoolingOptions}.
+         *
+         * @throws IllegalArgumentException if {@code distance == HostDistance.IGNORED}.
+         */
+        public PoolingOptions setMaxConnectionsPerHost(HostDistance distance, int maxConnections) {
+            // TODO: make sure the pools are updated accordingly
+            switch (distance) {
+                case LOCAL:
+                    maxConnectionsForLocal = maxConnections;
+                    break;
+                case REMOTE:
+                    maxConnectionsForRemote = maxConnections;
+                    break;
+                default:
+                    throw new IllegalArgumentException("Cannot set max connections per host for " + distance + " hosts");
+            }
+            return this;
+        }
+    }
+}
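[Editor's note: since every setter returns the PoolingOptions instance, the thresholds described above can be tuned fluently. The values are hypothetical and config is assumed as before.]

    // Editor's sketch: widen the LOCAL pool and raise the spawn threshold.
    config.getConnectionsConfiguration().getPoolingOptions()
          .setCoreConnectionsPerHost(HostDistance.LOCAL, 4)
          .setMaxConnectionsPerHost(HostDistance.LOCAL, 12)
          .setMaxSimultaneousRequestsPerConnectionTreshold(HostDistance.LOCAL, 80);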
diff --git a/driver-core/src/main/java/com/datastax/driver/core/ConvictionPolicy.java b/driver-core/src/main/java/com/datastax/driver/core/ConvictionPolicy.java
index 3d55da25854..d4e8b036692 100644
--- a/driver-core/src/main/java/com/datastax/driver/core/ConvictionPolicy.java
+++ b/driver-core/src/main/java/com/datastax/driver/core/ConvictionPolicy.java
@@ -2,10 +2,13 @@

 /**
  * The policy with which to decide whether a host should be considered down.
- * TODO: not sure it's worth exposing this at this point. But if we do, we
- * would need to expose ConnectionException
+ *
+ * TODO: this class is fully abstract (rather than an interface) because I'm
+ * not sure it's worth exposing (and if we do expose it, we need to expose
+ * ConnectionException). Maybe just exposing, say, a threshold of errors before
+ * convicting a node is enough.
  */
-public interface ConvictionPolicy {
+abstract class ConvictionPolicy {

     /**
      * Called when a connection error occurs on a connection to the host this policy applies to.
@@ -14,12 +17,12 @@
      *
      * @return {@code true} if the host should be considered down.
      */
-    public boolean addFailure(ConnectionException exception);
+    public abstract boolean addFailure(ConnectionException exception);

     /**
      * Called when the host has been detected up.
      */
-    public void reset();
+    public abstract void reset();

     /**
      * Simple factory interface to allow creating {@link ConvictionPolicy} instances.
@@ -34,4 +37,30 @@ public interface Factory {
          */
         public ConvictionPolicy create(Host host);
     }
+
+    public static class Simple extends ConvictionPolicy {
+
+        private final Host host;
+
+        private Simple(Host host) {
+            this.host = host;
+        }
+
+        public boolean addFailure(ConnectionException exception) {
+            return true;
+        }
+
+        public boolean addFailureFromExternalDetector() {
+            return true;
+        }
+
+        public void reset() {}
+
+        public static class Factory implements ConvictionPolicy.Factory {
+
+            public ConvictionPolicy create(Host host) {
+                return new Simple(host);
+            }
+        }
+    }
 }
diff --git a/driver-core/src/main/java/com/datastax/driver/core/HostConnectionPool.java b/driver-core/src/main/java/com/datastax/driver/core/HostConnectionPool.java
index c82e451248d..4de15ddcd57 100644
--- a/driver-core/src/main/java/com/datastax/driver/core/HostConnectionPool.java
+++ b/driver-core/src/main/java/com/datastax/driver/core/HostConnectionPool.java
@@ -16,8 +16,6 @@ class HostConnectionPool {

     public final Host host;
     public volatile HostDistance hostDistance;
-    private final Connection.Factory factory;
-    private final Configuration configuration;
     private final Session.Manager manager;

     private final List connections;
@@ -30,11 +28,9 @@ class HostConnectionPool {

     private final Runnable newConnectionTask;

-    public HostConnectionPool(Host host, HostDistance hostDistance, Connection.Factory factory, Configuration configuration, Session.Manager manager) throws ConnectionException {
+    public HostConnectionPool(Host host, HostDistance hostDistance, Session.Manager manager) throws ConnectionException {
         this.host = host;
         this.hostDistance = hostDistance;
-        this.factory = factory;
-        this.configuration = configuration;
         this.manager = manager;

         this.newConnectionTask = new Runnable() {
@@ -44,22 +40,26 @@ public void run() {
         };

         // Create initial core connections
-        List l = new ArrayList(configuration.getCoreConnectionsPerHost(hostDistance));
-        for (int i = 0; i < configuration.getCoreConnectionsPerHost(hostDistance); i++)
-            l.add(factory.open(host));
+        List l = new ArrayList(options().getCoreConnectionsPerHost(hostDistance));
+        for (int i = 0; i < options().getCoreConnectionsPerHost(hostDistance); i++)
+            l.add(manager.connectionFactory().open(host));
         this.connections = new CopyOnWriteArrayList(l);
         this.open = new AtomicInteger(connections.size());

         logger.trace(String.format("Created connection pool to host %s", host));
     }

+    private ConnectionsConfiguration.PoolingOptions options() {
+        return manager.configuration().getConnectionsConfiguration().getPoolingOptions();
+    }
+
     public Connection borrowConnection(long timeout, TimeUnit unit) throws ConnectionException, TimeoutException {
         if (isShutdown.get())
             // TODO: have a specific exception
             throw new ConnectionException(host.getAddress(), "Pool is shutdown");

         if (connections.isEmpty()) {
-            for (int i = 0; i < configuration.getCoreConnectionsPerHost(hostDistance); i++)
+            for (int i = 0; i < options().getCoreConnectionsPerHost(hostDistance); i++)
                 spawnNewConnection();
             return waitForConnection(timeout, unit);
         }
@@ -74,7 +74,7 @@ public Connection borrowConnection(long timeout, TimeUnit unit) throws Connectio
             }
         }

-        if (minInFlight >= configuration.getMaxStreamsPerConnectionTreshold(hostDistance) && connections.size() < configuration.getMaxConnectionPerHost(hostDistance))
+        if (minInFlight >= options().getMaxSimultaneousRequestsPerConnectionTreshold(hostDistance) && connections.size() < options().getMaxConnectionPerHost(hostDistance))
            spawnNewConnection();

        while (true) {
+88,7 @@ public Connection borrowConnection(long timeout, TimeUnit unit) throws Connectio if (leastBusy.inFlight.compareAndSet(inFlight, inFlight + 1)) break; } - leastBusy.setKeyspace(configuration.keyspace); + leastBusy.setKeyspace(manager.poolsState.keyspace); return leastBusy; } @@ -177,7 +177,7 @@ public void returnConnection(Connection connection) { return; } - if (connections.size() > configuration.getCoreConnectionsPerHost(hostDistance) && inFlight <= configuration.getMinStreamsPerConnectionTreshold(hostDistance)) { + if (connections.size() > options().getCoreConnectionsPerHost(hostDistance) && inFlight <= options().getMinSimultaneousRequestsPerConnectionTreshold(hostDistance)) { trashConnection(connection); } else { signalAvailableConnection(); @@ -189,7 +189,7 @@ private boolean trashConnection(Connection connection) { // First, make sure we don't go below core connections for(;;) { int opened = open.get(); - if (opened <= configuration.getCoreConnectionsPerHost(hostDistance)) + if (opened <= options().getCoreConnectionsPerHost(hostDistance)) return false; if (open.compareAndSet(opened, opened - 1)) @@ -208,7 +208,7 @@ private boolean addConnectionIfUnderMaximum() { // First, make sure we don't cross the allowed limit of open connections for(;;) { int opened = open.get(); - if (opened >= configuration.getMaxConnectionPerHost(hostDistance)) + if (opened >= options().getMaxConnectionPerHost(hostDistance)) return false; if (open.compareAndSet(opened, opened + 1)) @@ -222,7 +222,7 @@ private boolean addConnectionIfUnderMaximum() { // Now really open the connection try { - connections.add(factory.open(host)); + connections.add(manager.connectionFactory().open(host)); signalAvailableConnection(); return true; } catch (ConnectionException e) { @@ -235,13 +235,13 @@ private boolean addConnectionIfUnderMaximum() { } private void spawnNewConnection() { - manager.cluster.manager.executor.submit(newConnectionTask); + manager.executor().submit(newConnectionTask); } private void replace(final Connection connection) { connections.remove(connection); - manager.cluster.manager.executor.submit(new Runnable() { + manager.executor().submit(new Runnable() { public void run() { connection.close(); addConnectionIfUnderMaximum(); @@ -250,7 +250,7 @@ public void run() { } private void close(final Connection connection) { - manager.cluster.manager.executor.submit(new Runnable() { + manager.executor().submit(new Runnable() { public void run() { connection.close(); } @@ -279,43 +279,12 @@ private void discardAvailableConnections() { } } - // TODO: move that out an make that configurable - public static class Configuration { + static class PoolState { private volatile String keyspace; public void setKeyspace(String keyspace) { this.keyspace = keyspace; } - - public int getMinStreamsPerConnectionTreshold(HostDistance distance) { - return 25; - } - - public int getMaxStreamsPerConnectionTreshold(HostDistance distance) { - return 100; - } - - public int getCoreConnectionsPerHost(HostDistance distance) { - switch (distance) { - case LOCAL: - return 2; - case REMOTE: - return 1; - default: - return 0; - } - } - - public int getMaxConnectionPerHost(HostDistance distance) { - switch (distance) { - case LOCAL: - return 10; - case REMOTE: - return 3; - default: - return 0; - } - } } } diff --git a/driver-core/src/main/java/com/datastax/driver/core/Policies.java b/driver-core/src/main/java/com/datastax/driver/core/Policies.java new file mode 100644 index 00000000000..f76412ed60b --- /dev/null +++ 
b/driver-core/src/main/java/com/datastax/driver/core/Policies.java @@ -0,0 +1,33 @@ +package com.datastax.driver.core; + +public class Policies { + + public static final LoadBalancingPolicy.Factory DEFAULT_LOAD_BALANCING_POLICY_FACTORY = LoadBalancingPolicy.RoundRobin.Factory.INSTANCE; + public static final ReconnectionPolicy.Factory DEFAULT_RECONNECTION_POLICY_FACTORY = ReconnectionPolicy.Exponential.makeFactory(2 * 1000, 5 * 60 * 1000); + public static final RetryPolicy DEFAULT_RETRY_POLICY = RetryPolicy.Default.INSTANCE; + + private final LoadBalancingPolicy.Factory loadBalancingPolicyFactory; + private final ReconnectionPolicy.Factory reconnectionPolicyFactory; + private final RetryPolicy retryPolicy; + + public Policies(LoadBalancingPolicy.Factory loadBalancingPolicyFactory, + ReconnectionPolicy.Factory reconnectionPolicyFactory, + RetryPolicy retryPolicy) { + + this.loadBalancingPolicyFactory = loadBalancingPolicyFactory; + this.reconnectionPolicyFactory = reconnectionPolicyFactory; + this.retryPolicy = retryPolicy; + } + + public LoadBalancingPolicy.Factory getLoadBalancingPolicyFactory() { + return loadBalancingPolicyFactory; + } + + public ReconnectionPolicy.Factory getReconnectionPolicyFactory() { + return reconnectionPolicyFactory; + } + + public RetryPolicy getRetryPolicy() { + return retryPolicy; + } +} diff --git a/driver-core/src/main/java/com/datastax/driver/core/ResultSet.java b/driver-core/src/main/java/com/datastax/driver/core/ResultSet.java index ca76e9d5f45..8216f044e75 100644 --- a/driver-core/src/main/java/com/datastax/driver/core/ResultSet.java +++ b/driver-core/src/main/java/com/datastax/driver/core/ResultSet.java @@ -175,7 +175,7 @@ public void onSet(Connection connection, Message.Response response) { // means we should reset the keyspace to null in that case. // propagate the keyspace change to other connections - session.poolsConfiguration.setKeyspace(((ResultMessage.SetKeyspace)rm).keyspace); + session.poolsState.setKeyspace(((ResultMessage.SetKeyspace)rm).keyspace); break; case SCHEMA_CHANGE: ResultMessage.SchemaChange scc = (ResultMessage.SchemaChange)rm; diff --git a/driver-core/src/main/java/com/datastax/driver/core/RetryingCallback.java b/driver-core/src/main/java/com/datastax/driver/core/RetryingCallback.java index eb585bb2d66..984d0f5948f 100644 --- a/driver-core/src/main/java/com/datastax/driver/core/RetryingCallback.java +++ b/driver-core/src/main/java/com/datastax/driver/core/RetryingCallback.java @@ -100,7 +100,7 @@ private void retry(final boolean retryCurrent, ConsistencyLevel newConsistencyLe this.retryConsistencyLevel = newConsistencyLevel; // We should not retry on the current thread as this will be an IO thread. 
- manager.cluster.manager.executor.execute(new Runnable() { + manager.executor().execute(new Runnable() { public void run() { if (retryCurrent) { if (query(h)) @@ -138,24 +138,25 @@ public void onSet(Connection connection, Message.Response response) { case ERROR: ErrorMessage err = (ErrorMessage)response; RetryPolicy.RetryDecision retry = null; + RetryPolicy retryPolicy = manager.configuration().getPolicies().getRetryPolicy(); switch (err.error.code()) { case READ_TIMEOUT: assert err.error instanceof ReadTimeoutException; ReadTimeoutException rte = (ReadTimeoutException)err.error; ConsistencyLevel rcl = ConsistencyLevel.from(rte.consistency); - retry = manager.retryPolicy.onReadTimeout(rcl, rte.received, rte.blockFor, rte.dataPresent, queryRetries); + retry = retryPolicy.onReadTimeout(rcl, rte.received, rte.blockFor, rte.dataPresent, queryRetries); break; case WRITE_TIMEOUT: assert err.error instanceof WriteTimeoutException; WriteTimeoutException wte = (WriteTimeoutException)err.error; ConsistencyLevel wcl = ConsistencyLevel.from(wte.consistency); - retry = manager.retryPolicy.onWriteTimeout(wcl, WriteType.from(wte.writeType), wte.received, wte.blockFor, queryRetries); + retry = retryPolicy.onWriteTimeout(wcl, WriteType.from(wte.writeType), wte.received, wte.blockFor, queryRetries); break; case UNAVAILABLE: assert err.error instanceof UnavailableException; UnavailableException ue = (UnavailableException)err.error; ConsistencyLevel ucl = ConsistencyLevel.from(ue.consistency); - retry = manager.retryPolicy.onUnavailable(ucl, ue.required, ue.alive, queryRetries); + retry = retryPolicy.onUnavailable(ucl, ue.required, ue.alive, queryRetries); break; case OVERLOADED: // Try another node diff --git a/driver-core/src/main/java/com/datastax/driver/core/Session.java b/driver-core/src/main/java/com/datastax/driver/core/Session.java index f215c5a9f98..971fe76a624 100644 --- a/driver-core/src/main/java/com/datastax/driver/core/Session.java +++ b/driver-core/src/main/java/com/datastax/driver/core/Session.java @@ -285,21 +285,30 @@ static class Manager implements Host.StateListener { final ConcurrentMap pools; final LoadBalancingPolicy loadBalancer; - // TODO: make that configurable - final RetryPolicy retryPolicy = RetryPolicy.Default.INSTANCE; - - final HostConnectionPool.Configuration poolsConfiguration; + final HostConnectionPool.PoolState poolsState; // TODO: Make that configurable final long DEFAULT_PER_HOST_CONNECTION_TIMEOUT = 3000; + public Connection.Factory connectionFactory() { + return cluster.manager.connectionFactory; + } + + public Cluster.Configuration configuration() { + return cluster.manager.configuration; + } + + public ExecutorService executor() { + return cluster.manager.executor; + } + public Manager(Cluster cluster, Collection hosts) { this.cluster = cluster; // TODO: consider the use of NonBlockingHashMap this.pools = new ConcurrentHashMap(hosts.size()); - this.loadBalancer = cluster.manager.loadBalancingFactory.create(hosts); - this.poolsConfiguration = new HostConnectionPool.Configuration(); + this.loadBalancer = cluster.manager.configuration.getPolicies().getLoadBalancingPolicyFactory().create(hosts); + this.poolsState = new HostConnectionPool.PoolState(); for (Host host : hosts) addHost(host); @@ -311,7 +320,7 @@ private HostConnectionPool addHost(Host host) { if (distance == HostDistance.IGNORED) return pools.get(host); else - return pools.put(host, new HostConnectionPool(host, distance, cluster.manager.connectionFactory, poolsConfiguration, this)); + return pools.put(host, 
new HostConnectionPool(host, distance, this)); } catch (ConnectionException e) { logger.debug(String.format("Error creating pool to %s (%s)", host, e.getMessage())); host.getMonitor().signalConnectionFailure(e); diff --git a/driver-core/src/main/java/com/datastax/driver/core/SimpleConvictionPolicy.java b/driver-core/src/main/java/com/datastax/driver/core/SimpleConvictionPolicy.java deleted file mode 100644 index d193cd8ebc8..00000000000 --- a/driver-core/src/main/java/com/datastax/driver/core/SimpleConvictionPolicy.java +++ /dev/null @@ -1,28 +0,0 @@ -package com.datastax.driver.core; - -public class SimpleConvictionPolicy implements ConvictionPolicy { - - private final Host host; - - private SimpleConvictionPolicy(Host host) { - this.host = host; - } - - public boolean addFailure(ConnectionException exception) { - // TODO: be kinder - return true; - } - - public boolean addFailureFromExternalDetector() { - return true; - } - - public void reset() {} - - public static class Factory implements ConvictionPolicy.Factory { - - public ConvictionPolicy create(Host host) { - return new SimpleConvictionPolicy(host); - } - } -} diff --git a/driver-core/src/test/java/com/datastax/driver/core/SessionTest.java b/driver-core/src/test/java/com/datastax/driver/core/SessionTest.java index 412dec4e4fa..ddb4613bf4b 100644 --- a/driver-core/src/test/java/com/datastax/driver/core/SessionTest.java +++ b/driver-core/src/test/java/com/datastax/driver/core/SessionTest.java @@ -168,7 +168,7 @@ public void MultiNodeContinuousExecuteTest() throws Exception { Session session = cluster.connect(); try { - session.execute("CREATE KEYSPACE test_ks WITH replication = { 'class' : 'SimpleStrategy', 'replication_factor' : 2 }"); + session.execute("CREATE KEYSPACE test_ks WITH replication = { 'class' : 'SimpleStrategy', 'replication_factor' : 1 }"); // We should deal with that sleep try { Thread.sleep(2000); } catch (Exception e) {} session.execute("USE test_ks"); From 35f69e4c88a7b510eee7a8432143c0b20ba6dae0 Mon Sep 17 00:00:00 2001 From: Sylvain Lebresne Date: Wed, 24 Oct 2012 19:18:58 +0200 Subject: [PATCH 054/719] Move configuration related class to specific package --- .../core/AbstractReconnectionHandler.java | 2 + .../com/datastax/driver/core/Cluster.java | 1 + .../com/datastax/driver/core/Connection.java | 8 +- .../driver/core/ConnectionsConfiguration.java | 415 ------------------ .../driver/core/ControlConnection.java | 1 + .../driver/core/HostConnectionPool.java | 4 +- .../driver/core/RetryingCallback.java | 5 +- .../com/datastax/driver/core/Session.java | 1 + .../ConnectionsConfiguration.java | 45 ++ .../LoadBalancingPolicy.java | 4 +- .../core/{ => configuration}/Policies.java | 2 +- .../core/configuration/PoolingOptions.java | 245 +++++++++++ .../core/configuration/ProtocolOptions.java | 56 +++ .../ReconnectionPolicy.java | 2 +- .../core/{ => configuration}/RetryPolicy.java | 33 +- .../core/configuration/SocketOptions.java | 75 ++++ 16 files changed, 470 insertions(+), 429 deletions(-) delete mode 100644 driver-core/src/main/java/com/datastax/driver/core/ConnectionsConfiguration.java create mode 100644 driver-core/src/main/java/com/datastax/driver/core/configuration/ConnectionsConfiguration.java rename driver-core/src/main/java/com/datastax/driver/core/{ => configuration}/LoadBalancingPolicy.java (99%) rename driver-core/src/main/java/com/datastax/driver/core/{ => configuration}/Policies.java (96%) create mode 100644 driver-core/src/main/java/com/datastax/driver/core/configuration/PoolingOptions.java create mode 
100644 driver-core/src/main/java/com/datastax/driver/core/configuration/ProtocolOptions.java rename driver-core/src/main/java/com/datastax/driver/core/{ => configuration}/ReconnectionPolicy.java (99%) rename driver-core/src/main/java/com/datastax/driver/core/{ => configuration}/RetryPolicy.java (96%) create mode 100644 driver-core/src/main/java/com/datastax/driver/core/configuration/SocketOptions.java diff --git a/driver-core/src/main/java/com/datastax/driver/core/AbstractReconnectionHandler.java b/driver-core/src/main/java/com/datastax/driver/core/AbstractReconnectionHandler.java index db0e1a3034a..c24e61bfeac 100644 --- a/driver-core/src/main/java/com/datastax/driver/core/AbstractReconnectionHandler.java +++ b/driver-core/src/main/java/com/datastax/driver/core/AbstractReconnectionHandler.java @@ -6,6 +6,8 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import com.datastax.driver.core.configuration.ReconnectionPolicy; + abstract class AbstractReconnectionHandler implements Runnable { private static final Logger logger = LoggerFactory.getLogger(AbstractReconnectionHandler.class); diff --git a/driver-core/src/main/java/com/datastax/driver/core/Cluster.java b/driver-core/src/main/java/com/datastax/driver/core/Cluster.java index 65307a4ef1a..87b579d3fc1 100644 --- a/driver-core/src/main/java/com/datastax/driver/core/Cluster.java +++ b/driver-core/src/main/java/com/datastax/driver/core/Cluster.java @@ -13,6 +13,7 @@ import org.apache.cassandra.transport.messages.QueryMessage; import com.datastax.driver.core.exceptions.*; +import com.datastax.driver.core.configuration.*; import org.slf4j.Logger; import org.slf4j.LoggerFactory; diff --git a/driver-core/src/main/java/com/datastax/driver/core/Connection.java b/driver-core/src/main/java/com/datastax/driver/core/Connection.java index 8bf1ac30414..651d601340b 100644 --- a/driver-core/src/main/java/com/datastax/driver/core/Connection.java +++ b/driver-core/src/main/java/com/datastax/driver/core/Connection.java @@ -8,7 +8,7 @@ import java.util.concurrent.atomic.AtomicInteger; import java.util.concurrent.atomic.AtomicReference; -import com.datastax.driver.core.Host; +import com.datastax.driver.core.configuration.*; import com.datastax.driver.core.utils.SimpleFuture; import org.apache.cassandra.service.ClientState; @@ -107,8 +107,8 @@ private void initializeTransport() throws ConnectionException { Map options = new HashMap() {{ put(StartupMessage.CQL_VERSION, CQL_VERSION); }}; - ConnectionsConfiguration.ProtocolOptions.Compression compression = factory.configuration.getProtocolOptions().getCompression(); - if (compression != ConnectionsConfiguration.ProtocolOptions.Compression.NONE) + ProtocolOptions.Compression compression = factory.configuration.getProtocolOptions().getCompression(); + if (compression != ProtocolOptions.Compression.NONE) options.put(StartupMessage.COMPRESSION, compression.toString()); StartupMessage startup = new StartupMessage(options); try { @@ -320,7 +320,7 @@ private AtomicInteger getIdGenerator(Host host) { private ClientBootstrap bootstrap() { ClientBootstrap b = new ClientBootstrap(new NioClientSocketChannelFactory(bossExecutor, workerExecutor)); - ConnectionsConfiguration.SocketOptions options = configuration.getSocketOptions(); + SocketOptions options = configuration.getSocketOptions(); b.setOption("connectTimeoutMillis", options.getConnectTimeoutMillis()); Boolean keepAlive = options.getKeepAlive(); diff --git a/driver-core/src/main/java/com/datastax/driver/core/ConnectionsConfiguration.java 
b/driver-core/src/main/java/com/datastax/driver/core/ConnectionsConfiguration.java deleted file mode 100644 index 36056e14cd4..00000000000 --- a/driver-core/src/main/java/com/datastax/driver/core/ConnectionsConfiguration.java +++ /dev/null @@ -1,415 +0,0 @@ -package com.datastax.driver.core; - -/** - * Handle all configuration related of the connections to the Cassandra hosts. - * - * This handle setting: - *

- * <ul>
- *   <li>low-level tcp configuration options (tcpNoDelay, keepAlive, ...).</li>
- *   <li>Cassandra binary protocol level configuration (compression).</li>
- *   <li>Connection pooling configurations.</li>
- * </ul>
- */ -public class ConnectionsConfiguration { - - private final SocketOptions socketOptions = new SocketOptions(); - private final ProtocolOptions protocolOptions = new ProtocolOptions(); - private final PoolingOptions poolingOptions = new PoolingOptions(); - - /** - * The socket options. - * - * @return the socket options. - */ - public SocketOptions getSocketOptions() { - return socketOptions; - } - - /** - * The protocol options. - * - * @return the protocol options. - */ - public ProtocolOptions getProtocolOptions() { - return protocolOptions; - } - - /** - * The pooling options. - * - * @return the pooling options. - */ - public PoolingOptions getPoolingOptions() { - return poolingOptions; - } - - /** - * Options to configure low-level socket options for the connections kept - * to the Cassandra hosts. - */ - public static class SocketOptions { - - public static final int DEFAULT_CONNECT_TIMEOUT_MILLIS = 5000; - - private volatile int connectTimeoutMillis = DEFAULT_CONNECT_TIMEOUT_MILLIS; - private volatile Boolean keepAlive; - private volatile Boolean reuseAddress; - private volatile Integer soLinger; - private volatile Boolean tcpNoDelay; - private volatile Integer receiveBufferSize; - private volatile Integer sendBufferSize; - - public int getConnectTimeoutMillis() { - return connectTimeoutMillis; - } - - public void setConnectTimeoutMillis(int connectTimeoutMillis) { - this.connectTimeoutMillis = connectTimeoutMillis; - } - - public Boolean getKeepAlive() { - return keepAlive; - } - - public void setKeepAlive(boolean keepAlive) { - this.keepAlive = keepAlive; - } - - public Boolean getReuseAddress() { - return reuseAddress; - } - - public void setReuseAddress(boolean reuseAddress) { - this.reuseAddress = reuseAddress; - } - - public Integer getSoLinger() { - return soLinger; - } - - public void setSoLinger(int soLinger) { - this.soLinger = soLinger; - } - - public Boolean getTcpNoDelay() { - return tcpNoDelay; - } - - public void setTcpNoDelay(boolean tcpNoDelay) { - this.tcpNoDelay = tcpNoDelay; - } - - public Integer getReceiveBufferSize() { - return receiveBufferSize; - } - - public void setReceiveBufferSize(int receiveBufferSize) { - this.receiveBufferSize = receiveBufferSize; - } - - public Integer getSendBufferSize() { - return sendBufferSize; - } - - public void setSendBufferSize(int sendBufferSize) { - this.sendBufferSize = sendBufferSize; - } - } - - /** - * Options of the Cassandra native binary protocol. - */ - public static class ProtocolOptions { - - /** - * Compression supported by the Cassandra binary protocol. - */ - public enum Compression { - /** No compression */ - NONE(""), - /** Snappy compression */ - SNAPPY("snappy"); - - final String protocolName; - - private Compression(String protocolName) { - this.protocolName = protocolName; - } - - @Override - public String toString() { - return protocolName; - } - }; - - private volatile Compression compression = Compression.NONE; - - /** - * Returns the compression used by the protocol. - *

- * The default compression is {@code Compression.SNAPPY}. - * - * @return the compression used. - */ - public Compression getCompression() { - return compression; - } - - /** - * Sets the compression to use. - *

- * Note that while this setting can be changed at any time, it will - * only apply to newly created connections. - * - * @param compression the compression algorithm to use (or {@code - * Compression.NONE} to disable compression). - * @return this {@code ProtocolOptions} object. - */ - public ProtocolOptions setCompression(Compression compression) { - this.compression = compression; - return this; - } - } - - /** - * Options related to connection pooling. - *

- * The driver uses connections in an asynchronous way. Meaning that - * multiple requests can be submitted on the same connection at the same - * time. This means that the driver only needs to maintain a relatively - * small number of connections to each Cassandra host. These options allow - * to control how many connections are kept exactly. - *

- * For each host, the driver keeps a core amount of connections open at all - * time ({@link PoolingOptions#getCoreConnectionsPerHost}). If the - * utilisation of those connections reaches a configurable threshold - * ({@link PoolingOptions#getMaxSimultaneousRequestsPerConnectionTreshold}), - * more connections are created up to a configurable maximum number of - * connections ({@link PoolingOptions#getMaxConnectionPerHost}). Once more - * than core connections have been created, connections in excess are - * reclaimed if the utilisation of opened connections drops below the - * configured threshold ({@link PoolingOptions#getMinSimultaneousRequestsPerConnectionTreshold}). - *

- * Each of these parameters can be separately set for {@code LOCAL} and - * {@code REMOTE} hosts ({@link HostDistance}). For {@code IGNORED} hosts, - * the default for all those settings is 0 and cannot be changed. - */ - public static class PoolingOptions { - - // Note: we could use an enumMap or similar, but synchronization would - // be more costly so let's stick to volatile in for now. - private static final int DEFAULT_MIN_REQUESTS = 25; - private static final int DEFAULT_MAX_REQUESTS = 100; - - private static final int DEFAULT_CORE_POOL_LOCAL = 2; - private static final int DEFAULT_CORE_POOL_REMOTE = 1; - - private static final int DEFAULT_MAX_POOL_LOCAL = 8; - private static final int DEFAULT_MAX_POOL_REMOTE = 2; - - private volatile int minSimultaneousRequestsForLocal = DEFAULT_MIN_REQUESTS; - private volatile int minSimultaneousRequestsForRemote = DEFAULT_MIN_REQUESTS; - - private volatile int maxSimultaneousRequestsForLocal = DEFAULT_MAX_REQUESTS; - private volatile int maxSimultaneousRequestsForRemote = DEFAULT_MAX_REQUESTS; - - private volatile int coreConnectionsForLocal = DEFAULT_CORE_POOL_LOCAL; - private volatile int coreConnectionsForRemote = DEFAULT_CORE_POOL_REMOTE; - - private volatile int maxConnectionsForLocal = DEFAULT_MAX_POOL_LOCAL; - private volatile int maxConnectionsForRemote = DEFAULT_MAX_POOL_REMOTE; - - /** - * Number of simultaneous requests on a connection below which - * connections in excess are reclaimed. - *

- * If an opened connection to an host at distance {@code distance} - * handles less than this number of simultaneous requests and there is - * more than {@link #getCoreConnectionsPerHost} connections open to this - * host, the connection is closed. - *

- * The default value for this option is 25 for {@code LOCAL} and - * {@code REMOTE} hosts. - * - * @param distance the {@code HostDistance} for which to return this threshold. - * @return the configured threshold, or the default one if none have been set. - */ - public int getMinSimultaneousRequestsPerConnectionTreshold(HostDistance distance) { - switch (distance) { - case LOCAL: - return minSimultaneousRequestsForLocal; - case REMOTE: - return minSimultaneousRequestsForRemote; - default: - return 0; - } - } - - /** - * Sets the number of simultaneous requests on a connection below which - * connections in excess are reclaimed. - * - * @param distance the {@code HostDistance} for which to configure this threshold. - * @param minSimultaneousRequests the value to set. - * @return this {@code PoolingOptions}. - * - * @throws IllegalArgumentException if {@code distance == HostDistance.IGNORED}. - */ - public PoolingOptions setMinSimultaneousRequestsPerConnectionTreshold(HostDistance distance, int minSimultaneousRequests) { - switch (distance) { - case LOCAL: - minSimultaneousRequestsForLocal = minSimultaneousRequests; - break; - case REMOTE: - minSimultaneousRequestsForRemote = minSimultaneousRequests; - break; - default: - throw new IllegalArgumentException("Cannot set min streams per connection threshold for " + distance + " hosts"); - } - return this; - } - - /** - * Number of simultaneous requests on all connections to an host after - * which more connections are created. - *

- * If all the connections opened to an host at distance {@code - * distance} connection are handling more than this number of - * simultaneous requests and there is less than - * {@link #getMaxConnectionPerHost} connections open to this host, a - * new connection is open. - *

- * Note that a given connection cannot handle more than 128 - * simultaneous requests (protocol limitation). - *

- * The default value for this option is 100 for {@code LOCAL} and - * {@code REMOTE} hosts. - * - * @param distance the {@code HostDistance} for which to return this threshold. - * @return the configured threshold, or the default one if none have been set. - */ - public int getMaxSimultaneousRequestsPerConnectionTreshold(HostDistance distance) { - switch (distance) { - case LOCAL: - return maxSimultaneousRequestsForLocal; - case REMOTE: - return maxSimultaneousRequestsForRemote; - default: - return 0; - } - } - - /** - * Sets number of simultaneous requests on all connections to an host after - * which more connections are created. - * - * @param distance the {@code HostDistance} for which to configure this threshold. - * @param maxSimultaneousRequests the value to set. - * @return this {@code PoolingOptions}. - * - * @throws IllegalArgumentException if {@code distance == HostDistance.IGNORED}. - */ - public PoolingOptions setMaxSimultaneousRequestsPerConnectionTreshold(HostDistance distance, int maxSimultaneousRequests) { - switch (distance) { - case LOCAL: - maxSimultaneousRequestsForLocal = maxSimultaneousRequests; - break; - case REMOTE: - maxSimultaneousRequestsForRemote = maxSimultaneousRequests; - break; - default: - throw new IllegalArgumentException("Cannot set max streams per connection threshold for " + distance + " hosts"); - } - return this; - } - - /** - * The core number of connections per host. - *

- * For the provided {@code distance}, this correspond to the number of - * connections initially created and kept open to each host of that - * distance. - * - * @param distance the {@code HostDistance} for which to return this threshold. - * @return the core number of connections per host at distance {@code distance}. - */ - public int getCoreConnectionsPerHost(HostDistance distance) { - switch (distance) { - case LOCAL: - return coreConnectionsForLocal; - case REMOTE: - return coreConnectionsForRemote; - default: - return 0; - } - } - - /** - * Sets the core number of connections per host. - * - * @param distance the {@code HostDistance} for which to set this threshold. - * @param coreConnections the value to set - * @return this {@code PoolingOptions}. - * - * @throws IllegalArgumentException if {@code distance == HostDistance.IGNORED}. - */ - public PoolingOptions setCoreConnectionsPerHost(HostDistance distance, int coreConnections) { - // TODO: make sure the pools are updated accordingly - switch (distance) { - case LOCAL: - coreConnectionsForLocal = coreConnections; - break; - case REMOTE: - coreConnectionsForRemote = coreConnections; - break; - default: - throw new IllegalArgumentException("Cannot set core connections per host for " + distance + " hosts"); - } - return this; - } - - /** - * The maximum number of connections per host. - *

- * For the provided {@code distance}, this correspond to the maximum - * number of connections that can be created per host at that distance. - * - * @param distance the {@code HostDistance} for which to return this threshold. - * @return the maximum number of connections per host at distance {@code distance}. - */ - public int getMaxConnectionPerHost(HostDistance distance) { - switch (distance) { - case LOCAL: - return maxConnectionsForLocal; - case REMOTE: - return maxConnectionsForRemote; - default: - return 0; - } - } - - /** - * Sets the maximum number of connections per host. - * - * @param distance the {@code HostDistance} for which to set this threshold. - * @param maxConnections the value to set - * @return this {@code PoolingOptions}. - * - * @throws IllegalArgumentException if {@code distance == HostDistance.IGNORED}. - */ - public PoolingOptions setMaxConnectionsPerHost(HostDistance distance, int maxConnections) { - // TODO: make sure the pools are updated accordingly - switch (distance) { - case LOCAL: - maxConnectionsForLocal = maxConnections; - break; - case REMOTE: - maxConnectionsForRemote = maxConnections; - break; - default: - throw new IllegalArgumentException("Cannot set max connections per host for " + distance + " hosts"); - } - return this; - } - } -} diff --git a/driver-core/src/main/java/com/datastax/driver/core/ControlConnection.java b/driver-core/src/main/java/com/datastax/driver/core/ControlConnection.java index 4c2edf9d97e..d3dbaa23760 100644 --- a/driver-core/src/main/java/com/datastax/driver/core/ControlConnection.java +++ b/driver-core/src/main/java/com/datastax/driver/core/ControlConnection.java @@ -14,6 +14,7 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import com.datastax.driver.core.configuration.*; import com.datastax.driver.core.exceptions.NoHostAvailableException; class ControlConnection implements Host.StateListener { diff --git a/driver-core/src/main/java/com/datastax/driver/core/HostConnectionPool.java b/driver-core/src/main/java/com/datastax/driver/core/HostConnectionPool.java index 4de15ddcd57..1d6c15d979b 100644 --- a/driver-core/src/main/java/com/datastax/driver/core/HostConnectionPool.java +++ b/driver-core/src/main/java/com/datastax/driver/core/HostConnectionPool.java @@ -9,6 +9,8 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import com.datastax.driver.core.configuration.*; + // TODO: We should allow changing the core pool size (i.e. 
have a method that // adds new connections or trashes existing ones) class HostConnectionPool { @@ -49,7 +51,7 @@ public void run() { logger.trace(String.format("Created connection pool to host %s", host)); } - private ConnectionsConfiguration.PoolingOptions options() { + private PoolingOptions options() { return manager.configuration().getConnectionsConfiguration().getPoolingOptions(); } diff --git a/driver-core/src/main/java/com/datastax/driver/core/RetryingCallback.java b/driver-core/src/main/java/com/datastax/driver/core/RetryingCallback.java index 984d0f5948f..f9dc0c950c4 100644 --- a/driver-core/src/main/java/com/datastax/driver/core/RetryingCallback.java +++ b/driver-core/src/main/java/com/datastax/driver/core/RetryingCallback.java @@ -9,6 +9,7 @@ import java.util.concurrent.TimeUnit; import java.util.concurrent.ExecutionException; +import com.datastax.driver.core.configuration.RetryPolicy; import com.datastax.driver.core.exceptions.*; import com.datastax.driver.core.utils.SimpleFuture; @@ -199,10 +200,10 @@ public void onSet(Connection connection, Message.Response response) { return; } } - switch (retry.type) { + switch (retry.getType()) { case RETRY: ++queryRetries; - retry(true, retry.retryCL); + retry(true, retry.getRetryConsistencyLevel()); break; case RETHROW: callback.onSet(connection, response); diff --git a/driver-core/src/main/java/com/datastax/driver/core/Session.java b/driver-core/src/main/java/com/datastax/driver/core/Session.java index 971fe76a624..08afec3c00f 100644 --- a/driver-core/src/main/java/com/datastax/driver/core/Session.java +++ b/driver-core/src/main/java/com/datastax/driver/core/Session.java @@ -5,6 +5,7 @@ import java.util.concurrent.*; import com.datastax.driver.core.exceptions.*; +import com.datastax.driver.core.configuration.*; import org.apache.cassandra.transport.Message; import org.apache.cassandra.transport.messages.*; diff --git a/driver-core/src/main/java/com/datastax/driver/core/configuration/ConnectionsConfiguration.java b/driver-core/src/main/java/com/datastax/driver/core/configuration/ConnectionsConfiguration.java new file mode 100644 index 00000000000..f9874c45c0a --- /dev/null +++ b/driver-core/src/main/java/com/datastax/driver/core/configuration/ConnectionsConfiguration.java @@ -0,0 +1,45 @@ +package com.datastax.driver.core.configuration; + +/** + * Handles all configuration related to the connections to the Cassandra hosts. + * + * This handles the following settings: + *

+ * <ul>
+ *   <li>low-level tcp configuration options (tcpNoDelay, keepAlive, ...).</li>
+ *   <li>Cassandra binary protocol level configuration (compression).</li>
+ *   <li>Connection pooling configurations.</li>
+ * </ul>
+ */ +public class ConnectionsConfiguration { + + private final SocketOptions socketOptions = new SocketOptions(); + private final ProtocolOptions protocolOptions = new ProtocolOptions(); + private final PoolingOptions poolingOptions = new PoolingOptions(); + + /** + * The socket options. + * + * @return the socket options. + */ + public SocketOptions getSocketOptions() { + return socketOptions; + } + + /** + * The protocol options. + * + * @return the protocol options. + */ + public ProtocolOptions getProtocolOptions() { + return protocolOptions; + } + + /** + * The pooling options. + * + * @return the pooling options. + */ + public PoolingOptions getPoolingOptions() { + return poolingOptions; + } +} diff --git a/driver-core/src/main/java/com/datastax/driver/core/LoadBalancingPolicy.java b/driver-core/src/main/java/com/datastax/driver/core/configuration/LoadBalancingPolicy.java similarity index 99% rename from driver-core/src/main/java/com/datastax/driver/core/LoadBalancingPolicy.java rename to driver-core/src/main/java/com/datastax/driver/core/configuration/LoadBalancingPolicy.java index d456d70d7ab..9f73c0dcf4e 100644 --- a/driver-core/src/main/java/com/datastax/driver/core/LoadBalancingPolicy.java +++ b/driver-core/src/main/java/com/datastax/driver/core/configuration/LoadBalancingPolicy.java @@ -1,4 +1,4 @@ -package com.datastax.driver.core; +package com.datastax.driver.core.configuration; import java.util.*; import java.util.concurrent.*; @@ -6,6 +6,8 @@ import com.google.common.collect.AbstractIterator; +import com.datastax.driver.core.*; + /** * The policy that decides which Cassandra hosts to contact for each new query. *

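Before the remaining renames, it is worth seeing how the relocated classes above compose from calling code. A minimal sketch, assuming the classes are used directly; this patch does not show how a Cluster.Configuration is assembled from these objects, so the standalone wiring below is illustrative only, using only the constructors and setters shown in the diffs:

import com.datastax.driver.core.HostDistance;
import com.datastax.driver.core.configuration.Policies;
import com.datastax.driver.core.configuration.PoolingOptions;

public class ConfigurationSketch {
    public static void main(String[] args) {
        // Bundle the default policy factories, exactly as the Policies class
        // exposes them.
        Policies policies = new Policies(
                Policies.DEFAULT_LOAD_BALANCING_POLICY_FACTORY,
                Policies.DEFAULT_RECONNECTION_POLICY_FACTORY,
                Policies.DEFAULT_RETRY_POLICY);

        // Tune pooling for LOCAL hosts; each setter returns `this`, so the
        // calls chain.
        PoolingOptions pooling = new PoolingOptions()
                .setCoreConnectionsPerHost(HostDistance.LOCAL, 2)
                .setMaxConnectionsPerHost(HostDistance.LOCAL, 8);

        System.out.println("retry policy: " + policies.getRetryPolicy()
                + ", max local connections: "
                + pooling.getMaxConnectionPerHost(HostDistance.LOCAL));
    }
}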
diff --git a/driver-core/src/main/java/com/datastax/driver/core/Policies.java b/driver-core/src/main/java/com/datastax/driver/core/configuration/Policies.java similarity index 96% rename from driver-core/src/main/java/com/datastax/driver/core/Policies.java rename to driver-core/src/main/java/com/datastax/driver/core/configuration/Policies.java index f76412ed60b..4324cded6e4 100644 --- a/driver-core/src/main/java/com/datastax/driver/core/Policies.java +++ b/driver-core/src/main/java/com/datastax/driver/core/configuration/Policies.java @@ -1,4 +1,4 @@ -package com.datastax.driver.core; +package com.datastax.driver.core.configuration; public class Policies { diff --git a/driver-core/src/main/java/com/datastax/driver/core/configuration/PoolingOptions.java b/driver-core/src/main/java/com/datastax/driver/core/configuration/PoolingOptions.java new file mode 100644 index 00000000000..d4367a0ede4 --- /dev/null +++ b/driver-core/src/main/java/com/datastax/driver/core/configuration/PoolingOptions.java @@ -0,0 +1,245 @@ +package com.datastax.driver.core.configuration; + +import com.datastax.driver.core.HostDistance; + +/** + * Options related to connection pooling. + *

+ * The driver uses connections in an asynchronous way, meaning that + * multiple requests can be submitted on the same connection at the same + * time. As a consequence, the driver only needs to maintain a relatively + * small number of connections to each Cassandra host. These options allow + * control over exactly how many connections are kept open. + *

+ * For each host, the driver keeps a core number of connections open at all + * times ({@link PoolingOptions#getCoreConnectionsPerHost}). If the + * utilisation of those connections reaches a configurable threshold + * ({@link PoolingOptions#getMaxSimultaneousRequestsPerConnectionTreshold}), + * more connections are created, up to a configurable maximum number of + * connections ({@link PoolingOptions#getMaxConnectionPerHost}). Once more + * than the core number of connections have been created, connections in excess are + * reclaimed if the utilisation of open connections drops below the + * configured threshold ({@link PoolingOptions#getMinSimultaneousRequestsPerConnectionTreshold}). + *

+ * Each of these parameters can be separately set for {@code LOCAL} and + * {@code REMOTE} hosts ({@link HostDistance}). For {@code IGNORED} hosts, + * the default for all those settings is 0 and cannot be changed. + */ +public class PoolingOptions { + + // Note: we could use an enumMap or similar, but synchronization would + // be more costly, so let's stick to volatile ints for now. + private static final int DEFAULT_MIN_REQUESTS = 25; + private static final int DEFAULT_MAX_REQUESTS = 100; + + private static final int DEFAULT_CORE_POOL_LOCAL = 2; + private static final int DEFAULT_CORE_POOL_REMOTE = 1; + + private static final int DEFAULT_MAX_POOL_LOCAL = 8; + private static final int DEFAULT_MAX_POOL_REMOTE = 2; + + private volatile int minSimultaneousRequestsForLocal = DEFAULT_MIN_REQUESTS; + private volatile int minSimultaneousRequestsForRemote = DEFAULT_MIN_REQUESTS; + + private volatile int maxSimultaneousRequestsForLocal = DEFAULT_MAX_REQUESTS; + private volatile int maxSimultaneousRequestsForRemote = DEFAULT_MAX_REQUESTS; + + private volatile int coreConnectionsForLocal = DEFAULT_CORE_POOL_LOCAL; + private volatile int coreConnectionsForRemote = DEFAULT_CORE_POOL_REMOTE; + + private volatile int maxConnectionsForLocal = DEFAULT_MAX_POOL_LOCAL; + private volatile int maxConnectionsForRemote = DEFAULT_MAX_POOL_REMOTE; + + /** + * Number of simultaneous requests on a connection below which + * connections in excess are reclaimed. + *

+ * If an open connection to a host at distance {@code distance} + * handles fewer than this number of simultaneous requests and there are + * more than {@link #getCoreConnectionsPerHost} connections open to this + * host, the connection is closed. + *

+ * The default value for this option is 25 for {@code LOCAL} and + * {@code REMOTE} hosts. + * + * @param distance the {@code HostDistance} for which to return this threshold. + * @return the configured threshold, or the default one if none has been set. + */ + public int getMinSimultaneousRequestsPerConnectionTreshold(HostDistance distance) { + switch (distance) { + case LOCAL: + return minSimultaneousRequestsForLocal; + case REMOTE: + return minSimultaneousRequestsForRemote; + default: + return 0; + } + } + + /** + * Sets the number of simultaneous requests on a connection below which + * connections in excess are reclaimed. + * + * @param distance the {@code HostDistance} for which to configure this threshold. + * @param minSimultaneousRequests the value to set. + * @return this {@code PoolingOptions}. + * + * @throws IllegalArgumentException if {@code distance == HostDistance.IGNORED}. + */ + public PoolingOptions setMinSimultaneousRequestsPerConnectionTreshold(HostDistance distance, int minSimultaneousRequests) { + switch (distance) { + case LOCAL: + minSimultaneousRequestsForLocal = minSimultaneousRequests; + break; + case REMOTE: + minSimultaneousRequestsForRemote = minSimultaneousRequests; + break; + default: + throw new IllegalArgumentException("Cannot set min streams per connection threshold for " + distance + " hosts"); + } + return this; + } + + /** + * Number of simultaneous requests on all connections to a host after + * which more connections are created. + *

+ * If all the connections open to a host at distance {@code + * distance} are handling more than this number of + * simultaneous requests and there are fewer than + * {@link #getMaxConnectionPerHost} connections open to this host, a + * new connection is opened. + *

+ * Note that a given connection cannot handle more than 128 + * simultaneous requests (protocol limitation). + *

+ * The default value for this option is 100 for {@code LOCAL} and + * {@code REMOTE} hosts. + * + * @param distance the {@code HostDistance} for which to return this threshold. + * @return the configured threshold, or the default one if none has been set. + */ + public int getMaxSimultaneousRequestsPerConnectionTreshold(HostDistance distance) { + switch (distance) { + case LOCAL: + return maxSimultaneousRequestsForLocal; + case REMOTE: + return maxSimultaneousRequestsForRemote; + default: + return 0; + } + } + + /** + * Sets the number of simultaneous requests on all connections to a host after + * which more connections are created. + * + * @param distance the {@code HostDistance} for which to configure this threshold. + * @param maxSimultaneousRequests the value to set. + * @return this {@code PoolingOptions}. + * + * @throws IllegalArgumentException if {@code distance == HostDistance.IGNORED}. + */ + public PoolingOptions setMaxSimultaneousRequestsPerConnectionTreshold(HostDistance distance, int maxSimultaneousRequests) { + switch (distance) { + case LOCAL: + maxSimultaneousRequestsForLocal = maxSimultaneousRequests; + break; + case REMOTE: + maxSimultaneousRequestsForRemote = maxSimultaneousRequests; + break; + default: + throw new IllegalArgumentException("Cannot set max streams per connection threshold for " + distance + " hosts"); + } + return this; + } + + /** + * The core number of connections per host. + *

+ * For the provided {@code distance}, this corresponds to the number of + * connections initially created and kept open to each host of that + * distance. + * + * @param distance the {@code HostDistance} for which to return this threshold. + * @return the core number of connections per host at distance {@code distance}. + */ + public int getCoreConnectionsPerHost(HostDistance distance) { + switch (distance) { + case LOCAL: + return coreConnectionsForLocal; + case REMOTE: + return coreConnectionsForRemote; + default: + return 0; + } + } + + /** + * Sets the core number of connections per host. + * + * @param distance the {@code HostDistance} for which to set this threshold. + * @param coreConnections the value to set + * @return this {@code PoolingOptions}. + * + * @throws IllegalArgumentException if {@code distance == HostDistance.IGNORED}. + */ + public PoolingOptions setCoreConnectionsPerHost(HostDistance distance, int coreConnections) { + // TODO: make sure the pools are updated accordingly + switch (distance) { + case LOCAL: + coreConnectionsForLocal = coreConnections; + break; + case REMOTE: + coreConnectionsForRemote = coreConnections; + break; + default: + throw new IllegalArgumentException("Cannot set core connections per host for " + distance + " hosts"); + } + return this; + } + + /** + * The maximum number of connections per host. + *

+ * For the provided {@code distance}, this corresponds to the maximum + * number of connections that can be created per host at that distance. + * + * @param distance the {@code HostDistance} for which to return this threshold. + * @return the maximum number of connections per host at distance {@code distance}. + */ + public int getMaxConnectionPerHost(HostDistance distance) { + switch (distance) { + case LOCAL: + return maxConnectionsForLocal; + case REMOTE: + return maxConnectionsForRemote; + default: + return 0; + } + } + + /** + * Sets the maximum number of connections per host. + * + * @param distance the {@code HostDistance} for which to set this threshold. + * @param maxConnections the value to set + * @return this {@code PoolingOptions}. + * + * @throws IllegalArgumentException if {@code distance == HostDistance.IGNORED}. + */ + public PoolingOptions setMaxConnectionsPerHost(HostDistance distance, int maxConnections) { + // TODO: make sure the pools are updated accordingly + switch (distance) { + case LOCAL: + maxConnectionsForLocal = maxConnections; + break; + case REMOTE: + maxConnectionsForRemote = maxConnections; + break; + default: + throw new IllegalArgumentException("Cannot set max connections per host for " + distance + " hosts"); + } + return this; + } +} diff --git a/driver-core/src/main/java/com/datastax/driver/core/configuration/ProtocolOptions.java b/driver-core/src/main/java/com/datastax/driver/core/configuration/ProtocolOptions.java new file mode 100644 index 00000000000..977cf15c8a8 --- /dev/null +++ b/driver-core/src/main/java/com/datastax/driver/core/configuration/ProtocolOptions.java @@ -0,0 +1,56 @@ +package com.datastax.driver.core.configuration; + +/** + * Options of the Cassandra native binary protocol. + */ +public class ProtocolOptions { + + /** + * Compression supported by the Cassandra binary protocol. + */ + public enum Compression { + /** No compression */ + NONE(""), + /** Snappy compression */ + SNAPPY("snappy"); + + final String protocolName; + + private Compression(String protocolName) { + this.protocolName = protocolName; + } + + @Override + public String toString() { + return protocolName; + } + }; + + private volatile Compression compression = Compression.NONE; + + /** + * Returns the compression used by the protocol. + *

+ * The default compression is {@code Compression.NONE}. + * + * @return the compression used. + */ + public Compression getCompression() { + return compression; + } + + /** + * Sets the compression to use. + *

+ * Note that while this setting can be changed at any time, it will + * only apply to newly created connections. + * + * @param compression the compression algorithm to use (or {@code + * Compression.NONE} to disable compression). + * @return this {@code ProtocolOptions} object. + */ + public ProtocolOptions setCompression(Compression compression) { + this.compression = compression; + return this; + } +} diff --git a/driver-core/src/main/java/com/datastax/driver/core/ReconnectionPolicy.java b/driver-core/src/main/java/com/datastax/driver/core/configuration/ReconnectionPolicy.java similarity index 99% rename from driver-core/src/main/java/com/datastax/driver/core/ReconnectionPolicy.java rename to driver-core/src/main/java/com/datastax/driver/core/configuration/ReconnectionPolicy.java index 58af142de13..ac06c304b43 100644 --- a/driver-core/src/main/java/com/datastax/driver/core/ReconnectionPolicy.java +++ b/driver-core/src/main/java/com/datastax/driver/core/configuration/ReconnectionPolicy.java @@ -1,4 +1,4 @@ -package com.datastax.driver.core; +package com.datastax.driver.core.configuration; /** * Policy that decides how often the reconnection to a dead node is attempted. diff --git a/driver-core/src/main/java/com/datastax/driver/core/RetryPolicy.java b/driver-core/src/main/java/com/datastax/driver/core/configuration/RetryPolicy.java similarity index 96% rename from driver-core/src/main/java/com/datastax/driver/core/RetryPolicy.java rename to driver-core/src/main/java/com/datastax/driver/core/configuration/RetryPolicy.java index a9e4be676a6..d5bbae2f7d1 100644 --- a/driver-core/src/main/java/com/datastax/driver/core/RetryPolicy.java +++ b/driver-core/src/main/java/com/datastax/driver/core/configuration/RetryPolicy.java @@ -1,9 +1,11 @@ -package com.datastax.driver.core; +package com.datastax.driver.core.configuration; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import org.apache.log4j.Level; +import com.datastax.driver.core.*; + /** * A policy that defines a default behavior to adopt when a request returns * a TimeoutException or an UnavailableException. @@ -28,15 +30,38 @@ public interface RetryPolicy { * */ public static class RetryDecision { - static enum Type { RETRY, RETHROW, IGNORE }; - final Type type; - final ConsistencyLevel retryCL; + /** + * The type of retry decisions. + */ + public static enum Type { RETRY, RETHROW, IGNORE }; + + private final Type type; + private final ConsistencyLevel retryCL; private RetryDecision(Type type, ConsistencyLevel retryCL) { this.type = type; this.retryCL = retryCL; } + /** + * The type of this retry decision. + * + * @return the type of this retry decision. + */ + public Type getType() { + return type; + } + + /** + * The consistency level for a retry decision. + * + * @return the consistency level for a retry decision or {@code null} + * if this retry decision is an {@code IGNORE} or a {@code RETHROW}. + */ + public ConsistencyLevel getRetryConsistencyLevel() { + return retryCL; + } + /** * Creates a RETHROW retry decision. * diff --git a/driver-core/src/main/java/com/datastax/driver/core/configuration/SocketOptions.java b/driver-core/src/main/java/com/datastax/driver/core/configuration/SocketOptions.java new file mode 100644 index 00000000000..e5d49b395d2 --- /dev/null +++ b/driver-core/src/main/java/com/datastax/driver/core/configuration/SocketOptions.java @@ -0,0 +1,75 @@ +package com.datastax.driver.core.configuration; + +/** + * Options to configure low-level socket options for the connections kept + * to the Cassandra hosts. 
+ */ +public class SocketOptions { + + public static final int DEFAULT_CONNECT_TIMEOUT_MILLIS = 5000; + + private volatile int connectTimeoutMillis = DEFAULT_CONNECT_TIMEOUT_MILLIS; + private volatile Boolean keepAlive; + private volatile Boolean reuseAddress; + private volatile Integer soLinger; + private volatile Boolean tcpNoDelay; + private volatile Integer receiveBufferSize; + private volatile Integer sendBufferSize; + + public int getConnectTimeoutMillis() { + return connectTimeoutMillis; + } + + public void setConnectTimeoutMillis(int connectTimeoutMillis) { + this.connectTimeoutMillis = connectTimeoutMillis; + } + + public Boolean getKeepAlive() { + return keepAlive; + } + + public void setKeepAlive(boolean keepAlive) { + this.keepAlive = keepAlive; + } + + public Boolean getReuseAddress() { + return reuseAddress; + } + + public void setReuseAddress(boolean reuseAddress) { + this.reuseAddress = reuseAddress; + } + + public Integer getSoLinger() { + return soLinger; + } + + public void setSoLinger(int soLinger) { + this.soLinger = soLinger; + } + + public Boolean getTcpNoDelay() { + return tcpNoDelay; + } + + public void setTcpNoDelay(boolean tcpNoDelay) { + this.tcpNoDelay = tcpNoDelay; + } + + public Integer getReceiveBufferSize() { + return receiveBufferSize; + } + + public void setReceiveBufferSize(int receiveBufferSize) { + this.receiveBufferSize = receiveBufferSize; + } + + public Integer getSendBufferSize() { + return sendBufferSize; + } + + public void setSendBufferSize(int sendBufferSize) { + this.sendBufferSize = sendBufferSize; + } +} + From c6a2f1e2cc3cab182b272a43c5ee868e2b6e6ac0 Mon Sep 17 00:00:00 2001 From: Sylvain Lebresne Date: Wed, 24 Oct 2012 20:31:36 +0200 Subject: [PATCH 055/719] Add queryOptions --- .../driver/core/ControlConnection.java | 2 +- .../datastax/driver/core/QueryOptions.java | 35 +++++ .../driver/core/RetryingCallback.java | 4 +- .../com/datastax/driver/core/Session.java | 145 ++++++++++++++---- .../configuration/LoadBalancingPolicy.java | 7 +- 5 files changed, 155 insertions(+), 38 deletions(-) create mode 100644 driver-core/src/main/java/com/datastax/driver/core/QueryOptions.java diff --git a/driver-core/src/main/java/com/datastax/driver/core/ControlConnection.java b/driver-core/src/main/java/com/datastax/driver/core/ControlConnection.java index d3dbaa23760..95998728345 100644 --- a/driver-core/src/main/java/com/datastax/driver/core/ControlConnection.java +++ b/driver-core/src/main/java/com/datastax/driver/core/ControlConnection.java @@ -86,7 +86,7 @@ private void setNewConnection(Connection newConnection) { private Connection reconnectInternal() throws NoHostAvailableException { - Iterator iter = balancingPolicy.newQueryPlan(); + Iterator iter = balancingPolicy.newQueryPlan(new QueryOptions()); Map errors = null; while (iter.hasNext()) { Host host = iter.next(); diff --git a/driver-core/src/main/java/com/datastax/driver/core/QueryOptions.java b/driver-core/src/main/java/com/datastax/driver/core/QueryOptions.java new file mode 100644 index 00000000000..8429a6d5802 --- /dev/null +++ b/driver-core/src/main/java/com/datastax/driver/core/QueryOptions.java @@ -0,0 +1,35 @@ +package com.datastax.driver.core; + +public class QueryOptions { + + protected final ConsistencyLevel consistency; + + /** + * Creates a new query options object with the default consistency level + * (ConsistencyLevel.ONE). + */ + public QueryOptions() { + this(null); + } + + /** + * Creates a new query options object using the provided consistency.
+ * + * @param consistency the consistency level to use for the query. If {@code + * null} is provided and the request requires a consistency level, + * ConsistencyLevel.ONE is used. + */ + public QueryOptions(ConsistencyLevel consistency) { + this.consistency = consistency; + } + + /** + * The consistency level. + * + * @return the consistency level. Returns {@code null} if no consistency + * level has been specified. + */ + public ConsistencyLevel getConsistencyLevel() { + return consistency; + } +} diff --git a/driver-core/src/main/java/com/datastax/driver/core/RetryingCallback.java b/driver-core/src/main/java/com/datastax/driver/core/RetryingCallback.java index f9dc0c950c4..1e0d42da222 100644 --- a/driver-core/src/main/java/com/datastax/driver/core/RetryingCallback.java +++ b/driver-core/src/main/java/com/datastax/driver/core/RetryingCallback.java @@ -47,11 +47,11 @@ class RetryingCallback implements Connection.ResponseCallback { private volatile Map errors; - public RetryingCallback(Session.Manager manager, Connection.ResponseCallback callback) { + public RetryingCallback(Session.Manager manager, Connection.ResponseCallback callback, QueryOptions queryOptions) { this.manager = manager; this.callback = callback; - this.queryPlan = manager.loadBalancer.newQueryPlan(); + this.queryPlan = manager.loadBalancer.newQueryPlan(queryOptions); } public void sendRequest() { diff --git a/driver-core/src/main/java/com/datastax/driver/core/Session.java b/driver-core/src/main/java/com/datastax/driver/core/Session.java index 08afec3c00f..e045d9ca5a7 100644 --- a/driver-core/src/main/java/com/datastax/driver/core/Session.java +++ b/driver-core/src/main/java/com/datastax/driver/core/Session.java @@ -54,18 +54,13 @@ public class Session { * unauthorized or any other validation problem). */ public ResultSet execute(String query) throws NoHostAvailableException { - return execute(query, null); + return execute(query, new QueryOptions()); } /** * Execute the provided query. * - * This method blocks until at least some result has been received from the - * database. However, for SELECT queries, it does not guarantee that the - * result has been received in full. But it does guarantee that some - * response has been received from the database, and in particular - * guarantee that if the request is invalid, an exception will be thrown - * by this method. + * This method is a shortcut for {@code execute(query, new QueryOptions(consistency))}. * * @param query the CQL query to execute. * @param consistency the consistency level for the operation. If the query @@ -85,7 +80,35 @@ public ResultSet execute(String query) throws NoHostAvailableException { * unauthorized or any other validation problem). */ public ResultSet execute(String query, ConsistencyLevel consistency) throws NoHostAvailableException { - return executeAsync(query, consistency).getUninterruptibly(); + return execute(query, new QueryOptions(consistency)); + } + + /** + * Execute the provided query. + * + * This method blocks until at least some result has been received from the + * database. However, for SELECT queries, it does not guarantee that the + * result has been received in full. But it does guarantee that some + * response has been received from the database, and in particular + * guarantee that if the request is invalid, an exception will be thrown + * by this method. + * + * @param query the CQL query to execute. + * @param queryOptions the options to use for this query. This includes at + * least the consistency level for the operation. 
+ * @return the result of the query. That result will never be null but can + * be empty (and will be for any non SELECT query). + * + * @throws NoHostAvailableException if no host in the cluster can be + * contacted successfully to execute this query. + * @throws QueryExecutionException if the query triggered an execution + * exception, i.e. an exception thrown by Cassandra when it cannot execute + * the query with the requested consistency level successfully. + * @throws QueryValidationException if the query is invalid (syntax error, + * unauthorized or any other validation problem). + */ + public ResultSet execute(String query, QueryOptions options) throws NoHostAvailableException { + return executeAsync(query, options).getUninterruptibly(); } /** @@ -98,7 +121,25 @@ public ResultSet execute(String query, ConsistencyLevel consistency) throws NoHo * be empty (and will be for any non SELECT query). */ public ResultSet.Future executeAsync(String query) { - return executeAsync(query, null); + return executeAsync(query, new QueryOptions()); + } + + /** + * Execute the provided query asynchronously. + * + * This method is a shortcut for {@code executeAsync(query, new QueryOptions(consistency))}. + * + * @param query the CQL query to execute. + * @param consistency the consistency level for the operation. If the query + * doesn't need a consistency level (USE, CREATE, ALTER, DROP and TRUNCATE + * queries for instance), this argument is ignored and null can be + * provided. However, if null is provided while the query requires a + * consistency level, the default consistency level of ONE is used. + * @return the result of the query. That result will never be null but can + * be empty (and will be for any non SELECT query). + */ + public ResultSet.Future executeAsync(String query, ConsistencyLevel consistency) { + return executeAsync(query, new QueryOptions(consistency)); } /** @@ -115,16 +156,13 @@ public ResultSet.Future executeAsync(String query) { * method) to make sure the query was successful. * * @param query the CQL query to execute. - * @param consistency the consistency level for the operation. If the query - * doesn't need a consistency level (USE, CREATE, ALTER, DROP and TRUNCATE - * queries for instance), this argument is ignored and null can be - * provided. However, if null is provided while the query requires a - * consistency level, the default consistency level of ONE is used. + * @param options the options to use for this query. This includes at + * least the consistency level for the operation. * @return the result of the query. That result will never be null but can * be empty (and will be for any non SELECT query).
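+ * <p>
+ * As a purely illustrative sketch (it assumes a connected {@code Session}
+ * named {@code session} and a {@code users} table, neither of which is part
+ * of this patch):
+ * <pre>{@code
+ * ResultSet.Future future = session.executeAsync("SELECT * FROM users",
+ *                                                new QueryOptions(ConsistencyLevel.ONE));
+ * ResultSet rows = future.getUninterruptibly(); // blocks until a response is received
+ * }</pre>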
*/ - public ResultSet.Future executeAsync(String query, ConsistencyLevel consistency) { - return manager.executeQuery(new QueryMessage(query, ConsistencyLevel.toCassandraCL(consistency))); + public ResultSet.Future executeAsync(String query, QueryOptions options) { + return manager.executeQuery(new QueryMessage(query, ConsistencyLevel.toCassandraCL(options.getConsistencyLevel())), options); } /** @@ -138,7 +176,7 @@ public ResultSet.Future executeAsync(String query, ConsistencyLevel consistency) */ public PreparedStatement prepare(String query) throws NoHostAvailableException { Connection.Future future = new Connection.Future(new PrepareMessage(query)); - manager.execute(future); + manager.execute(future, new QueryOptions()); return toPreparedStatement(query, future); } @@ -162,16 +200,14 @@ public PreparedStatement prepare(String query) throws NoHostAvailableException { * unauthorized or any other validation problem). */ public ResultSet executePrepared(BoundStatement stmt) throws NoHostAvailableException { - return executePrepared(stmt); + return executePrepared(stmt, new QueryOptions()); } /** * Execute a prepared statement that had values provided for its bound * variables. * - * This method performs like {@link #execute} but for prepared statements. - * It blocks until at least some result has been received from the - * database. + * This method is a shortcut for {@code executePrepared(stmt, new QueryOptions(consistency))}. * * @param stmt the prepared statement with values for its bound variables. * @param consistency the consistency level for the operation. If the query @@ -192,7 +228,34 @@ public ResultSet executePrepared(BoundStatement stmt) throws NoHostAvailableExce * unauthorized or any other validation problem). */ public ResultSet executePrepared(BoundStatement stmt, ConsistencyLevel consistency) throws NoHostAvailableException { - return executePreparedAsync(stmt, consistency).getUninterruptibly(); + return executePrepared(stmt, new QueryOptions(consistency)); + } + + /** + * Execute a prepared statement that had values provided for its bound + * variables. + * + * This method performs like {@link #execute} but for prepared statements. + * It blocks until at least some result has been received from the + * database. + * + * @param stmt the prepared statement with values for its bound variables. + * @param options the options to use for this query. This includes at + * least the consistency level for the operation. + * @return the result of the query. That result will never be null but can + * be empty (and will be for any non SELECT query). + * + * @throws IllegalStateException if {@code !stmt.ready()}. + * @throws NoHostAvailableException if no host in the cluster can be + * contacted successfully to execute this query. + * @throws QueryExecutionException if the query triggered an execution + * exception, i.e. an exception thrown by Cassandra when it cannot execute + * the query with the requested consistency level successfully. + * @throws QueryValidationException if the query is invalid (syntax error, + * unauthorized or any other validation problem). + */ + public ResultSet executePrepared(BoundStatement stmt, QueryOptions options) throws NoHostAvailableException { + return executePreparedAsync(stmt, options).getUninterruptibly(); } /** @@ -208,16 +271,14 @@ public ResultSet executePrepared(BoundStatement stmt, ConsistencyLevel consisten * @throws IllegalStateException if {@code !stmt.ready()}.
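+ * <p>
+ * Illustrative sketch only (the query text is invented for the example, and
+ * obtaining the bound statement is elided since this patch does not show the
+ * {@code BoundStatement} factory):
+ * <pre>{@code
+ * PreparedStatement ps = session.prepare("INSERT INTO users (id, name) VALUES (?, ?)");
+ * BoundStatement bs = ...; // a statement created from ps, with bind(42, "jdoe")
+ * ResultSet.Future f = session.executePreparedAsync(bs);
+ * f.getUninterruptibly(); // surfaces any execution or validation error
+ * }</pre>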
*/ public ResultSet.Future executePreparedAsync(BoundStatement stmt) { - return executePreparedAsync(stmt, null); + return executePreparedAsync(stmt, new QueryOptions()); } /** * Execute a prepared statement that had values provided for its bound * variables asynchronously. * - * This method performs like {@link #executeAsync} but for prepared - * statements. It returns as soon as the query has been successfully sent to - * the database. + * This method is a shortcut for {@code executePreparedAsync(stmt, new QueryOptions(consistency))}. * * @param stmt the prepared statement with values for its bound variables. * @param consistency the consistency level for the operation. If the query @@ -231,10 +292,30 @@ public ResultSet.Future executePreparedAsync(BoundStatement stmt) { * @throws IllegalStateException if {@code !stmt.ready()}. */ public ResultSet.Future executePreparedAsync(BoundStatement stmt, ConsistencyLevel consistency) { + return executePreparedAsync(stmt, new QueryOptions(consistency)); + } + + /** + * Execute a prepared statement that had values provided for its bound + * variables asynchronously. + * + * This method performs like {@link #executeAsync} but for prepared + * statements. It returns as soon as the query has been successfully sent to + * the database. + * + * @param stmt the prepared statement with values for its bound variables. + * @param queryOptions the options to use for this query. This includes at + * least the consistency level for the operation. + * @return the result of the query. That result will never be null but can + * be empty (and will be for any non SELECT query). + * + * @throws IllegalStateException if {@code !stmt.ready()}. + */ + public ResultSet.Future executePreparedAsync(BoundStatement stmt, QueryOptions queryOptions) { if (!stmt.isReady()) throw new IllegalStateException("Some bind variables haven't been bound in the provided statement"); - return manager.executeQuery(new ExecuteMessage(stmt.statement.id, Arrays.asList(stmt.values), ConsistencyLevel.toCassandraCL(consistency))); + return manager.executeQuery(new ExecuteMessage(stmt.statement.id, Arrays.asList(stmt.values), ConsistencyLevel.toCassandraCL(queryOptions.getConsistencyLevel())), queryOptions); } private PreparedStatement toPreparedStatement(String query, Connection.Future future) throws NoHostAvailableException { @@ -382,7 +463,7 @@ public void onRemove(Host host) { public void setKeyspace(String keyspace) throws NoHostAvailableException { try { - executeQuery(new QueryMessage("use " + keyspace, ConsistencyLevel.DEFAULT_CASSANDRA_CL)).get(); + executeQuery(new QueryMessage("use " + keyspace, ConsistencyLevel.DEFAULT_CASSANDRA_CL), new QueryOptions()).get(); } catch (InterruptedException e) { // TODO: do we want to handle interrupted exception in a better way? throw new DriverInternalError("Hey! I was waiting!", e); } @@ -404,8 +485,8 @@ else if (cause instanceof DriverUncheckedException) * This method will find a suitable node to connect to using the * {@link LoadBalancingPolicy} and handle host failover.
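+ * <p>
+ * Schematically, the retry loop this kicks off behaves like the following
+ * simplified sketch (see {@code RetryingCallback.sendRequest} for the real code):
+ * <pre>{@code
+ * Iterator<Host> plan = loadBalancer.newQueryPlan(options);
+ * while (plan.hasNext()) {
+ *     if (query(plan.next())) // borrow a connection and write the request
+ *         return;             // sent; the callback will receive the response
+ * }
+ * callback.onException(null, new NoHostAvailableException(errors));
+ * }</pre>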
*/ - public void execute(Connection.ResponseCallback callback) { - new RetryingCallback(this, callback).sendRequest(); + public void execute(Connection.ResponseCallback callback, QueryOptions options) { + new RetryingCallback(this, callback, options).sendRequest(); } public void prepare(String query, InetSocketAddress toExclude) { @@ -436,9 +517,9 @@ public void prepare(String query, InetSocketAddress toExclude) { } } - public ResultSet.Future executeQuery(Message.Request msg) { + public ResultSet.Future executeQuery(Message.Request msg, QueryOptions options) { ResultSet.Future future = new ResultSet.Future(this, msg); - execute(future.callback); + execute(future.callback, options); return future; } } diff --git a/driver-core/src/main/java/com/datastax/driver/core/configuration/LoadBalancingPolicy.java b/driver-core/src/main/java/com/datastax/driver/core/configuration/LoadBalancingPolicy.java index 9f73c0dcf4e..c319ef04115 100644 --- a/driver-core/src/main/java/com/datastax/driver/core/configuration/LoadBalancingPolicy.java +++ b/driver-core/src/main/java/com/datastax/driver/core/configuration/LoadBalancingPolicy.java @@ -52,11 +52,12 @@ public interface LoadBalancingPolicy extends Host.StateListener { * used. If all hosts of the returned {@code Iterator} are down, the query * will fail. * + * @param queryOptions the options used for the query. * @return an iterator of Host. The query is tried against the hosts * returned by this iterator in order, until the query has been sent * successfully to one of the host. */ - public Iterator newQueryPlan(); + public Iterator newQueryPlan(QueryOptions queryOptions); /** * Simple factory interface to allow creating {@link LoadBalancingPolicy} instances. @@ -118,7 +119,7 @@ public HostDistance distance(Host host) { * @return a new query plan, i.e. an iterator indicating which host to * try first for querying, which one to use as failover, etc... */ - public Iterator newQueryPlan() { + public Iterator newQueryPlan(QueryOptions queryOptions) { // We clone liveHosts because we want a version of the list that // cannot change concurrently of the query plan iterator (this @@ -261,7 +262,7 @@ public HostDistance distance(Host host) { * @return a new query plan, i.e. an iterator indicating which host to * try first for querying, which one to use as failover, etc... */ - public Iterator newQueryPlan() { + public Iterator newQueryPlan(QueryOptions queryOptions) { CopyOnWriteArrayList localLiveHosts = perDcLiveHosts.get(localDc); final List hosts = localLiveHosts == null ? 
Collections.emptyList() : (List)localLiveHosts.clone(); From 17bf75490ba1d53b7a3584e5233700f94066de84 Mon Sep 17 00:00:00 2001 From: Sylvain Lebresne Date: Mon, 29 Oct 2012 19:03:39 +0100 Subject: [PATCH 056/719] Minor validation improvements --- .../com/datastax/driver/core/RetryingCallback.java | 4 +++- .../main/java/com/datastax/driver/core/Session.java | 3 --- .../driver/core/configuration/ReconnectionPolicy.java | 10 ++++++++-- 3 files changed, 11 insertions(+), 6 deletions(-) diff --git a/driver-core/src/main/java/com/datastax/driver/core/RetryingCallback.java b/driver-core/src/main/java/com/datastax/driver/core/RetryingCallback.java index 1e0d42da222..e552b90bf6f 100644 --- a/driver-core/src/main/java/com/datastax/driver/core/RetryingCallback.java +++ b/driver-core/src/main/java/com/datastax/driver/core/RetryingCallback.java @@ -70,7 +70,9 @@ private boolean query(Host host) { return false; try { - Connection connection = pool.borrowConnection(manager.DEFAULT_PER_HOST_CONNECTION_TIMEOUT, TimeUnit.MILLISECONDS); + // Note: this is not perfectly correct to use getConnectTimeoutMillis(), but + // until we provide a fancier way to control query timeouts, it's not a bad solution either + Connection connection = pool.borrowConnection(manager.configuration().getSocketOptions().getConnectTimeoutMillis(), TimeUnit.MILLISECONDS); current = host; try { connection.write(this); diff --git a/driver-core/src/main/java/com/datastax/driver/core/Session.java b/driver-core/src/main/java/com/datastax/driver/core/Session.java index e045d9ca5a7..fb86378fd32 100644 --- a/driver-core/src/main/java/com/datastax/driver/core/Session.java +++ b/driver-core/src/main/java/com/datastax/driver/core/Session.java @@ -369,9 +369,6 @@ static class Manager implements Host.StateListener { final HostConnectionPool.PoolState poolsState; - // TODO: Make that configurable - final long DEFAULT_PER_HOST_CONNECTION_TIMEOUT = 3000; - public Connection.Factory connectionFactory() { return cluster.manager.connectionFactory; } diff --git a/driver-core/src/main/java/com/datastax/driver/core/configuration/ReconnectionPolicy.java b/driver-core/src/main/java/com/datastax/driver/core/configuration/ReconnectionPolicy.java index ac06c304b43..66479f42b6a 100644 --- a/driver-core/src/main/java/com/datastax/driver/core/configuration/ReconnectionPolicy.java +++ b/driver-core/src/main/java/com/datastax/driver/core/configuration/ReconnectionPolicy.java @@ -53,8 +53,10 @@ public static class Constant implements ReconnectionPolicy { private final long delayMs; - // TODO: validate arguments private Constant(long delayMs) { + if (delayMs < 0) + throw new IllegalArgumentException(String.format("Invalid negative delay (got %d) for ReconnectionPolicy", delayMs)); + this.delayMs = delayMs; } @@ -94,8 +96,12 @@ public static class Exponential implements ReconnectionPolicy { private final long maxDelayMs; private int attempts; - // TODO: validate arguments private Exponential(long baseDelayMs, long maxDelayMs) { + if (baseDelayMs < 0 || maxDelayMs < 0) + throw new IllegalArgumentException("Invalid negative delay for ReconnectionPolicy"); + if (maxDelayMs < baseDelayMs) + throw new IllegalArgumentException(String.format("maxDelayMs (got %d) cannot be smaller than baseDelayMs (got %d)", maxDelayMs, baseDelayMs)); + this.baseDelayMs = baseDelayMs; this.maxDelayMs = maxDelayMs; } From 12e0c7846d086e9bba164f2d240ac475ac8b425d Mon Sep 17 00:00:00 2001 From: Sylvain Lebresne Date: Tue, 30 Oct 2012 18:53:17 +0100 Subject: [PATCH 057/719] Some bug fixes and
beginning of stress test example apps --- driver-core/pom.xml | 4 +- .../driver/core/BusyConnectionException.java | 8 + .../com/datastax/driver/core/Cluster.java | 22 +- .../datastax/driver/core/ClusterMetadata.java | 10 + .../com/datastax/driver/core/Connection.java | 39 +++- .../driver/core/ControlConnection.java | 61 +++-- .../driver/core/HostConnectionPool.java | 22 +- .../com/datastax/driver/core/ResultSet.java | 2 +- .../driver/core/RetryingCallback.java | 217 ++++++++++-------- .../com/datastax/driver/core/Session.java | 2 + .../driver/core/StreamIdGenerator.java | 7 +- .../exceptions/QueryValidationException.java | 1 + .../com/datastax/driver/core/SessionTest.java | 8 +- .../driver/core/StreamIdGeneratorTest.java | 4 +- driver-examples/pom.xml | 39 ++++ driver-examples/stress/pom.xml | 46 ++++ .../com/datastax/driver/stress/Consumer.java | 123 ++++++++++ .../com/datastax/driver/stress/Producer.java | 31 +++ .../driver/stress/QueryGenerator.java | 61 +++++ .../com/datastax/driver/stress/Reporter.java | 17 ++ .../com/datastax/driver/stress/Stress.java | 119 ++++++++++ pom.xml | 1 + 22 files changed, 699 insertions(+), 145 deletions(-) create mode 100644 driver-core/src/main/java/com/datastax/driver/core/BusyConnectionException.java create mode 100644 driver-examples/pom.xml create mode 100644 driver-examples/stress/pom.xml create mode 100644 driver-examples/stress/src/main/java/com/datastax/driver/stress/Consumer.java create mode 100644 driver-examples/stress/src/main/java/com/datastax/driver/stress/Producer.java create mode 100644 driver-examples/stress/src/main/java/com/datastax/driver/stress/QueryGenerator.java create mode 100644 driver-examples/stress/src/main/java/com/datastax/driver/stress/Reporter.java create mode 100644 driver-examples/stress/src/main/java/com/datastax/driver/stress/Stress.java diff --git a/driver-core/pom.xml b/driver-core/pom.xml index 40cab65ac90..7a4d84d7b01 100644 --- a/driver-core/pom.xml +++ b/driver-core/pom.xml @@ -13,9 +13,9 @@ - org.jboss.netty + io.netty netty - 3.2.7.Final + 3.5.9.Final diff --git a/driver-core/src/main/java/com/datastax/driver/core/BusyConnectionException.java b/driver-core/src/main/java/com/datastax/driver/core/BusyConnectionException.java new file mode 100644 index 00000000000..bef97b2c7c6 --- /dev/null +++ b/driver-core/src/main/java/com/datastax/driver/core/BusyConnectionException.java @@ -0,0 +1,8 @@ +package com.datastax.driver.core; + +class BusyConnectionException extends Exception +{ + public BusyConnectionException() { + super(); + } +} diff --git a/driver-core/src/main/java/com/datastax/driver/core/Cluster.java b/driver-core/src/main/java/com/datastax/driver/core/Cluster.java index 87b579d3fc1..c919c9c4e8d 100644 --- a/driver-core/src/main/java/com/datastax/driver/core/Cluster.java +++ b/driver-core/src/main/java/com/datastax/driver/core/Cluster.java @@ -15,8 +15,10 @@ import com.datastax.driver.core.exceptions.*; import com.datastax.driver.core.configuration.*; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; +import org.apache.log4j.ConsoleAppender; +import org.apache.log4j.Level; +import org.apache.log4j.Logger; +import org.apache.log4j.PatternLayout; /** * Information and known state of a Cassandra cluster.
@@ -38,7 +40,15 @@ */ public class Cluster { - private static final Logger logger = LoggerFactory.getLogger(Cluster.class); + private static final Logger logger = Logger.getLogger(Cluster.class); + + static { + Logger rootLogger = Logger.getRootLogger(); + if (!rootLogger.getAllAppenders().hasMoreElements()) { + rootLogger.setLevel(Level.DEBUG); + rootLogger.addAppender(new ConsoleAppender(new PatternLayout("%-5p [%t]: %m%n"))); + } + } /** * The default Cassandra port for the native client protocol. @@ -133,6 +143,10 @@ public ClusterMetadata getMetadata() { return manager.metadata; } + public Cluster.Configuration getConfiguration() { + return manager.configuration; + } + /** * Initializer for {@link Cluster} instances. */ @@ -536,6 +550,8 @@ private void prepareAllQueries(Host host) { } } catch (ConnectionException e) { // Ignore, not a big deal + } catch (BusyConnectionException e) { + // Ignore, not a big deal } } diff --git a/driver-core/src/main/java/com/datastax/driver/core/ClusterMetadata.java b/driver-core/src/main/java/com/datastax/driver/core/ClusterMetadata.java index e2d2c0e1c01..51d55c6141a 100644 --- a/driver-core/src/main/java/com/datastax/driver/core/ClusterMetadata.java +++ b/driver-core/src/main/java/com/datastax/driver/core/ClusterMetadata.java @@ -16,6 +16,7 @@ public class ClusterMetadata { private static final Logger logger = LoggerFactory.getLogger(ClusterMetadata.class); private final Cluster.Manager cluster; + volatile String clusterName; private final ConcurrentMap hosts = new ConcurrentHashMap(); private final ConcurrentMap keyspaces = new ConcurrentHashMap(); @@ -138,6 +139,15 @@ Collection allHosts() { return hosts.values(); } + /** + * The Cassandra name for the cluster we are connected to. + * + * @return the Cassandra name for the cluster we are connected to. + */ + public String getClusterName() { + return clusterName; + } + /** * Returns the known hosts of this cluster. * diff --git a/driver-core/src/main/java/com/datastax/driver/core/Connection.java b/driver-core/src/main/java/com/datastax/driver/core/Connection.java index 651d601340b..1108025b38a 100644 --- a/driver-core/src/main/java/com/datastax/driver/core/Connection.java +++ b/driver-core/src/main/java/com/datastax/driver/core/Connection.java @@ -9,6 +9,7 @@ import java.util.concurrent.atomic.AtomicReference; import com.datastax.driver.core.configuration.*; +import com.datastax.driver.core.exceptions.DriverInternalError; import com.datastax.driver.core.utils.SimpleFuture; import org.apache.cassandra.service.ClientState; @@ -21,6 +22,10 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; +// For LoggingHandler +//import org.jboss.netty.handler.logging.LoggingHandler; +//import org.jboss.netty.logging.InternalLogLevel; + /** * A connection to a Cassandra Node.
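+ * <p>
+ * A rough sketch of the request/response cycle this class implements
+ * (illustrative only; {@code request} stands for any {@code Message.Request}):
+ * <pre>{@code
+ * Connection.Future response = connection.write(request); // may throw Connection/BusyConnectionException
+ * Message.Response reply = response.get();                // blocks until the node answers
+ * }</pre>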
*/ @@ -123,10 +128,12 @@ private void initializeTransport() throws ConnectionException { default: throw defunct(new TransportException(address, String.format("Unexpected %s response message from server to a STARTUP message", response.type))); } + } catch (BusyConnectionException e) { + throw new DriverInternalError("Newly created connection should not be busy"); } catch (ExecutionException e) { throw defunct(new ConnectionException(address, "Unexpected error during transport initialization", e.getCause())); } catch (InterruptedException e) { - throw new RuntimeException(e); + throw new DriverInternalError(e); } } @@ -174,6 +181,8 @@ public void setKeyspace(String keyspace) throws ConnectionException { } } catch (ConnectionException e) { throw defunct(e); + } catch (BusyConnectionException e) { + logger.error("Tried to set the keyspace on busy connection. This should not happen but is not critical"); } catch (ExecutionException e) { throw defunct(new ConnectionException(address, "Error while setting keyspace", e)); } catch (InterruptedException e) { @@ -190,13 +199,13 @@ public void setKeyspace(String keyspace) throws ConnectionException { * @throws ConnectionException if the connection is closed * @throws TransportException if an I/O error while sending the request */ - public Future write(Message.Request request) throws ConnectionException { + public Future write(Message.Request request) throws ConnectionException, BusyConnectionException { Future future = new Future(request); write(future); return future; } - public void write(ResponseCallback callback) throws ConnectionException { + public void write(ResponseCallback callback) throws ConnectionException, BusyConnectionException { Message.Request request = callback.request(); @@ -270,6 +279,11 @@ public boolean isClosed() { return isClosed; } + @Override + public String toString() { + return String.format("Connection[%s, inFlight=%d, closed=%b]", name, inFlight.get(), isClosed); + } + // Cruft needed because we reuse server side classes, but we don't care about it public void validateNewMessage(Message.Type type) {}; public void applyStateTransition(Message.Type requestType, Message.Type responseType) {}; @@ -365,12 +379,11 @@ public void add(ResponseHandler handler) { public void removeHandler(int streamId) { pending.remove(streamId); + streamIdHandler.release(streamId); } @Override public void messageReceived(ChannelHandlerContext ctx, MessageEvent e) { - logger.trace(String.format("[%s] received ", e.getMessage())); - if (!(e.getMessage() instanceof Message.Response)) { logger.debug(String.format("[%s] Received unexpected message: %s", name, e.getMessage())); defunct(new TransportException(address, "Unexpected message received: " + e.getMessage())); @@ -378,6 +391,9 @@ public void messageReceived(ChannelHandlerContext ctx, MessageEvent e) { } else { Message.Response response = (Message.Response)e.getMessage(); int streamId = response.getStreamId(); + + logger.trace(String.format("[%s] received: %s", name, e.getMessage())); + if (streamId < 0) { factory.defaultHandler().handle(response); return; @@ -385,9 +401,10 @@ public void messageReceived(ChannelHandlerContext ctx, MessageEvent e) { ResponseHandler handler = pending.remove(streamId); streamIdHandler.release(streamId); - if (handler == null) + if (handler == null) { // TODO: we should handle those with a default handler throw new RuntimeException("No handler set for " + streamId + ", handlers = " + pending); + } handler.callback.onSet(Connection.this, response); } } @@ -407,7 
+424,7 @@ public void errorOutAllHandler(ConnectionException ce) { Iterator iter = pending.values().iterator(); while (iter.hasNext()) { - iter.next().callback.onException(ce); + iter.next().callback.onException(Connection.this, ce); iter.remove(); } } @@ -432,7 +449,7 @@ public void onSet(Connection connection, Message.Response response) { super.set(response); } - public void onException(Exception exception) { + public void onException(Connection connection, Exception exception) { super.setException(exception); } @@ -444,7 +461,7 @@ public InetSocketAddress getAddress() { interface ResponseCallback { public Message.Request request(); public void onSet(Connection connection, Message.Response response); - public void onException(Exception exception); + public void onException(Connection connection, Exception exception); } private static class ResponseHandler { @@ -452,7 +469,7 @@ private static class ResponseHandler { public final int streamId; public final ResponseCallback callback; - public ResponseHandler(Dispatcher dispatcher, ResponseCallback callback) { + public ResponseHandler(Dispatcher dispatcher, ResponseCallback callback) throws BusyConnectionException { this.streamId = dispatcher.streamIdHandler.next(); this.callback = callback; } @@ -494,7 +511,7 @@ public Connection newConnection(org.apache.cassandra.transport.Connection.Tracke public ChannelPipeline getPipeline() throws Exception { ChannelPipeline pipeline = Channels.pipeline(); - //pipeline.addLast("debug", new LoggingHandler()); + //pipeline.addLast("debug", new LoggingHandler(InternalLogLevel.INFO)); pipeline.addLast("frameDecoder", new Frame.Decoder(tracker, cfactory)); pipeline.addLast("frameEncoder", frameEncoder); diff --git a/driver-core/src/main/java/com/datastax/driver/core/ControlConnection.java b/driver-core/src/main/java/com/datastax/driver/core/ControlConnection.java index 95998728345..1d883b2bcab 100644 --- a/driver-core/src/main/java/com/datastax/driver/core/ControlConnection.java +++ b/driver-core/src/main/java/com/datastax/driver/core/ControlConnection.java @@ -15,6 +15,7 @@ import org.slf4j.LoggerFactory; import com.datastax.driver.core.configuration.*; +import com.datastax.driver.core.exceptions.DriverInternalError; import com.datastax.driver.core.exceptions.NoHostAvailableException; class ControlConnection implements Host.StateListener { @@ -26,7 +27,7 @@ class ControlConnection implements Host.StateListener { private static final String SELECT_COLUMNS = "SELECT * FROM system.schema_columns"; private static final String SELECT_PEERS = "SELECT peer, data_center, rack FROM system.peers"; - private static final String SELECT_LOCAL = "SELECT data_center, rack FROM system.local WHERE key='local'"; + private static final String SELECT_LOCAL = "SELECT cluster_name, data_center, rack FROM system.local WHERE key='local'"; private final AtomicReference connectionRef = new AtomicReference(); @@ -110,17 +111,21 @@ private Connection reconnectInternal() throws NoHostAvailableException { private Connection tryConnect(Host host) throws ConnectionException { Connection connection = cluster.connectionFactory.open(host); - logger.trace("[Control connection] Registering for events"); - List evs = Arrays.asList(new Event.Type[]{ - Event.Type.TOPOLOGY_CHANGE, - Event.Type.STATUS_CHANGE, - Event.Type.SCHEMA_CHANGE, - }); - connection.write(new RegisterMessage(evs)); - - refreshSchema(connection, null, null); - refreshNodeList(connection); - return connection; + try { + logger.trace("[Control connection] Registering for 
events"); + List evs = Arrays.asList(new Event.Type[]{ + Event.Type.TOPOLOGY_CHANGE, + Event.Type.STATUS_CHANGE, + Event.Type.SCHEMA_CHANGE, + }); + connection.write(new RegisterMessage(evs)); + + refreshNodeList(connection); + refreshSchema(connection, null, null); + return connection; + } catch (BusyConnectionException e) { + throw new DriverInternalError("Newly created connection should not be busy"); + } } public void refreshSchema(String keyspace, String table) { @@ -155,6 +160,9 @@ private void refreshSchema(Connection connection, String keyspace, String table) } catch (ConnectionException e) { logger.debug(String.format("[Control connection] Connection error when refeshing schema (%s)", e.getMessage())); reconnect(); + } catch (BusyConnectionException e) { + logger.info("[Control connection] Connection is busy, reconnecting"); + reconnect(); } catch (ExecutionException e) { logger.error("[Control connection] Unexpected error while refeshing schema", e); reconnect(); @@ -164,7 +172,7 @@ private void refreshSchema(Connection connection, String keyspace, String table) } } - private void refreshNodeList(Connection connection) { + private void refreshNodeList(Connection connection) throws BusyConnectionException { // Make sure we're up to date on node list try { ResultSet.Future peersFuture = new ResultSet.Future(null, new QueryMessage(SELECT_PEERS, ConsistencyLevel.DEFAULT_CASSANDRA_CL)); @@ -172,6 +180,21 @@ private void refreshNodeList(Connection connection) { connection.write(peersFuture.callback); connection.write(localFuture.callback); + // Update cluster name, DC and rack for the one node we are connected to + CQLRow localRow = localFuture.get().fetchOne(); + if (localRow != null) { + String clusterName = localRow.getString("cluster_name"); + if (clusterName != null) + cluster.metadata.clusterName = clusterName; + + Host host = cluster.metadata.getHost(connection.address); + // In theory host can't be null. However there is no point in risking a NPE in case we + // have a race between a node removal and this. + if (host != null) + host.setLocationInfo(localRow.getString("data_center"), localRow.getString("rack")); + } + + List foundHosts = new ArrayList(); List dcs = new ArrayList(); List racks = new ArrayList(); @@ -200,21 +223,15 @@ private void refreshNodeList(Connection connection) { if (!host.getAddress().equals(connection.address) && !foundHostsSet.contains(host.getAddress())) cluster.removeHost(host); - // Update DC and rack for the one node we are connected to - Host host = cluster.metadata.getHost(connection.address); - // In theory host can't be null. However there is no point in risking a NPE in case we - // have a race between a node removal and this. 
- if (host != null) { - for (CQLRow row : localFuture.get()) - host.setLocationInfo(row.getString("data_center"), row.getString("rack")); - } - } catch (ConnectionException e) { logger.debug(String.format("[Control connection] Connection error when refreshing hosts list (%s)", e.getMessage())); reconnect(); } catch (ExecutionException e) { logger.error("[Control connection] Unexpected error while refreshing hosts list", e); reconnect(); + } catch (BusyConnectionException e) { + logger.info("[Control connection] Connection is busy, reconnecting"); + reconnect(); } catch (InterruptedException e) { // TODO: it's bad to do that but at the same time it's annoying to be interrupted throw new RuntimeException(e); } diff --git a/driver-core/src/main/java/com/datastax/driver/core/HostConnectionPool.java b/driver-core/src/main/java/com/datastax/driver/core/HostConnectionPool.java index 1d6c15d979b..8a257250c35 100644 --- a/driver-core/src/main/java/com/datastax/driver/core/HostConnectionPool.java +++ b/driver-core/src/main/java/com/datastax/driver/core/HostConnectionPool.java @@ -14,8 +14,11 @@ // TODO: We should allow changing the core pool size (i.e. have a method that // adds new connection or trash existing one) class HostConnectionPool { + private static final Logger logger = LoggerFactory.getLogger(HostConnectionPool.class); + private static final int MAX_SIMULTANEOUS_CREATION = 1; + public final Host host; public volatile HostDistance hostDistance; private final Session.Manager manager; @@ -30,6 +33,8 @@ class HostConnectionPool { private final Runnable newConnectionTask; + private final AtomicInteger scheduledForCreation = new AtomicInteger(); + public HostConnectionPool(Host host, HostDistance hostDistance, Session.Manager manager) throws ConnectionException { this.host = host; this.hostDistance = hostDistance; @@ -38,6 +43,7 @@ public HostConnectionPool(Host host, HostDistance hostDistance, Session.Manager this.newConnectionTask = new Runnable() { public void run() { addConnectionIfUnderMaximum(); + scheduledForCreation.decrementAndGet(); } }; @@ -62,7 +68,7 @@ public Connection borrowConnection(long timeout, TimeUnit unit) throws Connectio if (connections.isEmpty()) { for (int i = 0; i < options().getCoreConnectionsPerHost(hostDistance); i++) - spawnNewConnection(); + manager.executor().submit(newConnectionTask); return waitForConnection(timeout, unit); } @@ -77,7 +83,7 @@ public Connection borrowConnection(long timeout, TimeUnit unit) throws Connectio } if (minInFlight >= options().getMaxSimultaneousRequestsPerConnectionTreshold(hostDistance) && connections.size() < options().getMaxConnectionPerHost(hostDistance)) - spawnNewConnection(); + maybeSpawnNewConnection(); while (true) { int inFlight = leastBusy.inFlight.get(); @@ -127,7 +133,6 @@ private void signalAllAvailableConnection() { } } - private Connection waitForConnection(long timeout, TimeUnit unit) throws ConnectionException, TimeoutException { long start = System.currentTimeMillis(); long remaining = timeout; @@ -236,7 +241,16 @@ private boolean addConnectionIfUnderMaximum() { } } - private void spawnNewConnection() { + private void maybeSpawnNewConnection() { + while (true) { + int inCreation = scheduledForCreation.get(); + if (inCreation >= MAX_SIMULTANEOUS_CREATION) + return; + if (scheduledForCreation.compareAndSet(inCreation, inCreation + 1)) + break; + } + + logger.debug("Creating new connection on busy pool to " + host); manager.executor().submit(newConnectionTask); } diff --git
a/driver-core/src/main/java/com/datastax/driver/core/ResultSet.java b/driver-core/src/main/java/com/datastax/driver/core/ResultSet.java index 8216f044e75..75364306228 100644 --- a/driver-core/src/main/java/com/datastax/driver/core/ResultSet.java +++ b/driver-core/src/main/java/com/datastax/driver/core/ResultSet.java @@ -217,7 +217,7 @@ public void onSet(Connection connection, Message.Response response) { } } - public void onException(Exception exception) { + public void onException(Connection connection, Exception exception) { setException(exception); } } diff --git a/driver-core/src/main/java/com/datastax/driver/core/RetryingCallback.java b/driver-core/src/main/java/com/datastax/driver/core/RetryingCallback.java index e552b90bf6f..dbad3528a0e 100644 --- a/driver-core/src/main/java/com/datastax/driver/core/RetryingCallback.java +++ b/driver-core/src/main/java/com/datastax/driver/core/RetryingCallback.java @@ -41,6 +41,7 @@ class RetryingCallback implements Connection.ResponseCallback { private final Iterator queryPlan; private volatile Host current; + private volatile HostConnectionPool currentPool; private volatile int queryRetries; private volatile ConsistencyLevel retryConsistencyLevel; @@ -61,32 +62,42 @@ public void sendRequest() { if (query(host)) return; } - callback.onException(new NoHostAvailableException(errors == null ? Collections.emptyMap() : errors)); + callback.onException(null, new NoHostAvailableException(errors == null ? Collections.emptyMap() : errors)); } private boolean query(Host host) { - HostConnectionPool pool = manager.pools.get(host); - if (pool == null || pool.isShutdown()) + currentPool = manager.pools.get(host); + if (currentPool == null || currentPool.isShutdown()) return false; + Connection connection = null; try { // Note: this is not perfectly correct to use getConnectTimeoutMillis(), but // until we provide a fancier way to control query timeouts, it's not a bad solution either - Connection connection = pool.borrowConnection(manager.configuration().getSocketOptions().getConnectTimeoutMillis(), TimeUnit.MILLISECONDS); + connection = currentPool.borrowConnection(manager.configuration().getConnectionsConfiguration().getSocketOptions().getConnectTimeoutMillis(), TimeUnit.MILLISECONDS); current = host; - try { - connection.write(this); - return true; - } finally { - pool.returnConnection(connection); - } + connection.write(this); + return true; } catch (ConnectionException e) { // If we have any problem with the connection, move to the next node. + currentPool.returnConnection(connection); + logError(host.getAddress(), e.getMessage()); + return false; + } catch (BusyConnectionException e) { + // The pool shouldn't have given us a busy connection unless we've maxed out the pool, so move on to the next host. + currentPool.returnConnection(connection); logError(host.getAddress(), e.getMessage()); return false; } catch (TimeoutException e) { // We timed out; log it and move to the next node.
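+ // Whatever goes wrong below, the borrowed connection must be handed back to the pool exactly once so its in-flight count stays accurate.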
+ currentPool.returnConnection(connection); logError(host.getAddress(), "Timeout while trying to acquire available connection"); return false; + } catch (RuntimeException e) { + currentPool.returnConnection(connection); + logger.error("Unexpected error while querying " + host.getAddress(), e); + logError(host.getAddress(), e.getMessage()); return false; } } @@ -134,94 +145,120 @@ else if (request instanceof ExecuteMessage) { } public void onSet(Connection connection, Message.Response response) { - switch (response.type) { - case RESULT: - callback.onSet(connection, response); - break; - case ERROR: - ErrorMessage err = (ErrorMessage)response; - RetryPolicy.RetryDecision retry = null; - RetryPolicy retryPolicy = manager.configuration().getPolicies().getRetryPolicy(); - switch (err.error.code()) { - case READ_TIMEOUT: - assert err.error instanceof ReadTimeoutException; - ReadTimeoutException rte = (ReadTimeoutException)err.error; - ConsistencyLevel rcl = ConsistencyLevel.from(rte.consistency); - retry = retryPolicy.onReadTimeout(rcl, rte.received, rte.blockFor, rte.dataPresent, queryRetries); - break; - case WRITE_TIMEOUT: - assert err.error instanceof WriteTimeoutException; - WriteTimeoutException wte = (WriteTimeoutException)err.error; - ConsistencyLevel wcl = ConsistencyLevel.from(wte.consistency); - retry = retryPolicy.onWriteTimeout(wcl, WriteType.from(wte.writeType), wte.received, wte.blockFor, queryRetries); - break; - case UNAVAILABLE: - assert err.error instanceof UnavailableException; - UnavailableException ue = (UnavailableException)err.error; - ConsistencyLevel ucl = ConsistencyLevel.from(ue.consistency); - retry = retryPolicy.onUnavailable(ucl, ue.required, ue.alive, queryRetries); - break; - case OVERLOADED: - // Try another node - retry(false, null); - return; - case IS_BOOTSTRAPPING: - // TODO: log error as this shouldn't happen - // Try another node - logger.error("Query sent to %s but it is bootstrapping. This shouldn't happen but trying next host.", connection.address); - retry(false, null); - return; - case UNPREPARED: - assert err.error instanceof PreparedQueryNotFoundException; - PreparedQueryNotFoundException pqnf = (PreparedQueryNotFoundException)err.error; - String toPrepare = manager.cluster.manager.preparedQueries.get(pqnf.id); - if (toPrepare == null) { - // This shouldn't happen - String msg = String.format("Tried to execute unknown prepared query %s", pqnf.id); - logger.error(msg); - callback.onException(new DriverInternalError(msg)); - return; - } - try { - Message.Response prepareResponse = connection.write(new PrepareMessage(toPrepare)).get(); - // TODO check return ?
- retry = RetryPolicy.RetryDecision.retry(null); - } catch (InterruptedException e) { - logError(connection.address, "Interrupted while preparing query to execute"); - retry(false, null); - return; - } catch (ExecutionException e) { - logError(connection.address, "Unexpected problem while preparing query to execute: " + e.getCause().getMessage()); + if (currentPool == null) { + // This should not happen but is probably not reason to fail completely + logger.error("No current pool set; this should not happen"); + } else { + currentPool.returnConnection(connection); + } + + try { + switch (response.type) { + case RESULT: + callback.onSet(connection, response); + break; + case ERROR: + ErrorMessage err = (ErrorMessage)response; + RetryPolicy.RetryDecision retry = null; + RetryPolicy retryPolicy = manager.configuration().getPolicies().getRetryPolicy(); + switch (err.error.code()) { + case READ_TIMEOUT: + assert err.error instanceof ReadTimeoutException; + ReadTimeoutException rte = (ReadTimeoutException)err.error; + ConsistencyLevel rcl = ConsistencyLevel.from(rte.consistency); + retry = retryPolicy.onReadTimeout(rcl, rte.received, rte.blockFor, rte.dataPresent, queryRetries); + break; + case WRITE_TIMEOUT: + assert err.error instanceof WriteTimeoutException; + WriteTimeoutException wte = (WriteTimeoutException)err.error; + ConsistencyLevel wcl = ConsistencyLevel.from(wte.consistency); + retry = retryPolicy.onWriteTimeout(wcl, WriteType.from(wte.writeType), wte.received, wte.blockFor, queryRetries); + break; + case UNAVAILABLE: + assert err.error instanceof UnavailableException; + UnavailableException ue = (UnavailableException)err.error; + ConsistencyLevel ucl = ConsistencyLevel.from(ue.consistency); + retry = retryPolicy.onUnavailable(ucl, ue.required, ue.alive, queryRetries); + break; + case OVERLOADED: + // Try another node retry(false, null); return; - } catch (ConnectionException e) { - logger.debug("Connection exception while preparing missing statement", e); - logError(e.address, e.getMessage()); + case IS_BOOTSTRAPPING: + // TODO: log error as this shouldn't happen + // Try another node + logger.error("Query sent to %s but it is bootstrapping. This shouldn't happen but trying next host.", connection.address); retry(false, null); return; - } - } - switch (retry.getType()) { - case RETRY: - ++queryRetries; - retry(true, retry.getRetryConsistencyLevel()); - break; - case RETHROW: + case UNPREPARED: + assert err.error instanceof PreparedQueryNotFoundException; + PreparedQueryNotFoundException pqnf = (PreparedQueryNotFoundException)err.error; + String toPrepare = manager.cluster.manager.preparedQueries.get(pqnf.id); + if (toPrepare == null) { + // This shouldn't happen + String msg = String.format("Tried to execute unknown prepared query %s", pqnf.id); + logger.error(msg); + callback.onException(connection, new DriverInternalError(msg)); + return; + } + + try { + Message.Response prepareResponse = connection.write(new PrepareMessage(toPrepare)).get(); + // TODO check return ? 
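+ // The prepare round-trip completed; signal a retry of the original request (the prepare response itself is not yet inspected, per the TODO above).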
+ retry = RetryPolicy.RetryDecision.retry(null); + } catch (InterruptedException e) { + logError(connection.address, "Interrupted while preparing query to execute"); + retry(false, null); + return; + } catch (ExecutionException e) { + logError(connection.address, "Unexpected problem while preparing query to execute: " + e.getCause().getMessage()); + retry(false, null); + return; + } catch (ConnectionException e) { + logger.debug("Connection exception while preparing missing statement", e); + logError(e.address, e.getMessage()); + retry(false, null); + return; + } + } + + if (retry == null) callback.onSet(connection, response); + else { + switch (retry.getType()) { + case RETRY: + ++queryRetries; + retry(true, retry.getRetryConsistencyLevel()); + break; + case RETHROW: + callback.onSet(connection, response); + break; + case IGNORE: + callback.onSet(connection, new ResultMessage.Void()); + break; + } + } + break; + default: + callback.onSet(connection, response); + break; + } + } catch (Exception e) { + callback.onException(connection, e); } } - public void onException(Exception exception) { + public void onException(Connection connection, Exception exception) { + + if (connection != null) { + if (currentPool == null) { + // This should not happen but is probably not a reason to fail completely + logger.error("No current pool set; this should not happen"); + } else { + currentPool.returnConnection(connection); + } + } if (exception instanceof ConnectionException) { ConnectionException ce = (ConnectionException)exception; @@ -230,7 +267,7 @@ public void onException(Exception exception) { return; } - callback.onException(exception); + callback.onException(connection, exception); } } diff --git a/driver-core/src/main/java/com/datastax/driver/core/Session.java b/driver-core/src/main/java/com/datastax/driver/core/Session.java index fb86378fd32..f5c0d843382 100644 --- a/driver-core/src/main/java/com/datastax/driver/core/Session.java +++ b/driver-core/src/main/java/com/datastax/driver/core/Session.java @@ -499,6 +499,8 @@ public void prepare(String query, InetSocketAddress toExclude) { c.write(new PrepareMessage(query)).get(); } catch (ConnectionException e) { // Again, not being able to prepare the query right now is no big deal, so just ignore + } catch (BusyConnectionException e) { + // Same as above } catch (TimeoutException e) { // Same as above } catch (InterruptedException e) { diff --git a/driver-core/src/main/java/com/datastax/driver/core/StreamIdGenerator.java b/driver-core/src/main/java/com/datastax/driver/core/StreamIdGenerator.java index 8b6cd036e92..7c60bb1413f 100644 --- a/driver-core/src/main/java/com/datastax/driver/core/StreamIdGenerator.java +++ b/driver-core/src/main/java/com/datastax/driver/core/StreamIdGenerator.java @@ -20,7 +20,7 @@ public StreamIdGenerator() { bits.set(1, MAX_UNSIGNED_LONG); } - public int next() { + public int next() throws BusyConnectionException { int id = atomicGetAndSetFirstAvailable(0); if (id >= 0) return id; @@ -29,8 +29,7 @@ public int next() { if (id >= 0) return 64 + id; - // TODO: Throw a BusyConnectionException and handle it in the connection pool - throw new IllegalStateException(); + throw new BusyConnectionException(); } public void release(int streamId) { @@ -49,7 +48,7 @@ public int atomicGetAndSetFirstAvailable(int idx) { return -1; // Find the position of the right-most 1-bit
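+ // Long.numberOfTrailingZeros already returns the index of the lowest set 1-bit, which makes the extra Long.lowestOneBit call below redundant.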
- int id = Long.numberOfTrailingZeros(Long.lowestOneBit(l)); + int id = Long.numberOfTrailingZeros(l); if (bits.compareAndSet(idx, l, l ^ mask(id))) return id; } diff --git a/driver-core/src/main/java/com/datastax/driver/core/exceptions/QueryValidationException.java b/driver-core/src/main/java/com/datastax/driver/core/exceptions/QueryValidationException.java index 94aa8335a19..2d3322e6bbd 100644 --- a/driver-core/src/main/java/com/datastax/driver/core/exceptions/QueryValidationException.java +++ b/driver-core/src/main/java/com/datastax/driver/core/exceptions/QueryValidationException.java @@ -6,6 +6,7 @@ */ public class QueryValidationException extends DriverUncheckedException { + // TODO: return the query with such exception protected QueryValidationException(String msg) { super(msg); } diff --git a/driver-core/src/test/java/com/datastax/driver/core/SessionTest.java b/driver-core/src/test/java/com/datastax/driver/core/SessionTest.java index ddb4613bf4b..5291287873a 100644 --- a/driver-core/src/test/java/com/datastax/driver/core/SessionTest.java +++ b/driver-core/src/test/java/com/datastax/driver/core/SessionTest.java @@ -15,15 +15,11 @@ public class SessionTest { - // I really think we should make sure the library doesn't complain about - // log4j by default, but for now let's deal with it locally @BeforeClass public static void classSetUp() { Logger rootLogger = Logger.getRootLogger(); - if (!rootLogger.getAllAppenders().hasMoreElements()) { - rootLogger.setLevel(Level.TRACE); - rootLogger.addAppender(new ConsoleAppender(new PatternLayout("%-5p [%t]: %m%n"))); - } + rootLogger.setLevel(Level.TRACE); + rootLogger.addAppender(new ConsoleAppender(new PatternLayout("%-5p [%t]: %m%n"))); } //@Test diff --git a/driver-core/src/test/java/com/datastax/driver/core/StreamIdGeneratorTest.java b/driver-core/src/test/java/com/datastax/driver/core/StreamIdGeneratorTest.java index fc8168ab951..5ee0aeca371 100644 --- a/driver-core/src/test/java/com/datastax/driver/core/StreamIdGeneratorTest.java +++ b/driver-core/src/test/java/com/datastax/driver/core/StreamIdGeneratorTest.java @@ -6,7 +6,7 @@ public class StreamIdGeneratorTest { @Test - public void SimpleGenIdTest() { + public void SimpleGenIdTest() throws Exception { StreamIdGenerator generator = new StreamIdGenerator(); @@ -29,7 +29,7 @@ public void SimpleGenIdTest() { try { generator.next(); fail("No more streamId should be available"); - } catch (IllegalStateException e) { + } catch (BusyConnectionException e) { // Ok, expected } } diff --git a/driver-examples/pom.xml b/driver-examples/pom.xml new file mode 100644 index 00000000000..9acbce78b8c --- /dev/null +++ b/driver-examples/pom.xml @@ -0,0 +1,39 @@ + + 4.0.0 + + com.datastax.cassandra + cassandra-driver-parent + 0.1.0-SNAPSHOT + + com.datastax.cassandra + cassandra-driver-examples-parent + pom + 0.1.0-SNAPSHOT + Cassandra Java Driver Examples + http://www.datastax.com + + + stress + + + + + + + + + maven-compiler-plugin + 2.5.1 + + 1.6 + 1.6 + true + true + true + + + + + + diff --git a/driver-examples/stress/pom.xml b/driver-examples/stress/pom.xml new file mode 100644 index 00000000000..dd9b013750c --- /dev/null +++ b/driver-examples/stress/pom.xml @@ -0,0 +1,46 @@ + + 4.0.0 + + com.datastax.cassandra + cassandra-driver-examples-parent + 0.1.0-SNAPSHOT + + cassandra-driver-examples-stress + jar + Cassandra Java Driver Examples - Stress + http://www.datastax.com + + + + com.datastax.cassandra + cassandra-driver-core + 0.1.0-SNAPSHOT + + + + com.yammer.metrics + metrics-core + 2.1.2 + + + + + 
+ + maven-assembly-plugin + + + + com.datastax.driver.examples.stress.Stress + + + + jar-with-dependencies + + + + + + + diff --git a/driver-examples/stress/src/main/java/com/datastax/driver/stress/Consumer.java b/driver-examples/stress/src/main/java/com/datastax/driver/stress/Consumer.java new file mode 100644 index 00000000000..41aeca976e4 --- /dev/null +++ b/driver-examples/stress/src/main/java/com/datastax/driver/stress/Consumer.java @@ -0,0 +1,123 @@ +package com.datastax.driver.examples.stress; + +import java.util.concurrent.*; + +import com.yammer.metrics.core.Meter; +import com.yammer.metrics.core.TimerContext; + +import com.datastax.driver.core.*; +import com.datastax.driver.core.exceptions.NoHostAvailableException; + +public class Consumer extends Thread { + + protected final Session session; + protected final BlockingQueue workQueue; + protected final Reporter reporter; + + public Consumer(Session session, BlockingQueue workQueue, Reporter reporter) { + super("Consumer Thread"); + this.session = session; + this.workQueue = workQueue; + this.reporter = reporter; + this.setDaemon(true); + } + + public void run() { + try { + + while (true) { + QueryGenerator.Request request = workQueue.take(); + if (request == QueryGenerator.DONE_MARKER) { + shutdown(); + return; + } + + handle(request); + } + + } catch (InterruptedException e) { + System.err.println("Consumer interrupted" + (e.getMessage() != null ? ": " + e.getMessage() : "")); + } catch (NoHostAvailableException e) { + System.err.println("Error during query: " + e.getMessage()); + } + } + + protected void shutdown() {} + + protected void handle(QueryGenerator.Request request) throws NoHostAvailableException { + TimerContext context = reporter.latencies.time(); + try { + request.execute(session); + } finally { + context.stop(); + } + reporter.requests.mark(); + } + + public static class Asynchronous extends Consumer { + + private final BlockingQueue resultQueue; + + public Asynchronous(Session session, BlockingQueue workQueue, Reporter reporter, ResultHandler resultHandler) { + super(session, workQueue, reporter); + this.resultQueue = resultHandler.queue; + } + + protected void handle(QueryGenerator.Request request) throws NoHostAvailableException { + TimerContext context = reporter.latencies.time(); + resultQueue.offer(new Result(request.executeAsync(session), context, reporter.requests)); + } + + protected void shutdown() { + resultQueue.offer(Result.END_MARKER); + } + + private static class Result { + + static final Result END_MARKER = new Result(null, null, null); + + public final ResultSet.Future future; + public final TimerContext context; + public final Meter requests; + + public Result(ResultSet.Future future, TimerContext context, Meter requests) { + this.future = future; + this.context = context; + this.requests = requests; + } + } + + public static class ResultHandler extends Thread { + + private final BlockingQueue queue = new LinkedBlockingQueue(); + + public ResultHandler() { + super("Result Eater Thread"); + this.setDaemon(true); + } + + public void run() { + try { + + while (true) { + Result res = queue.take(); + if (res == Result.END_MARKER) + return; + + try { + res.future.getUninterruptibly(); + } finally { + res.context.stop(); + } + res.requests.mark(); + } + + } catch (InterruptedException e) { + System.err.println("Consumer interrupted" + (e.getMessage() != null ? 
": " + e.getMessage() : "")); + } catch (NoHostAvailableException e) { + System.err.println("Error retrieving result to query: " + e.getMessage()); + } + } + } + } +} diff --git a/driver-examples/stress/src/main/java/com/datastax/driver/stress/Producer.java b/driver-examples/stress/src/main/java/com/datastax/driver/stress/Producer.java new file mode 100644 index 00000000000..e7adb3db282 --- /dev/null +++ b/driver-examples/stress/src/main/java/com/datastax/driver/stress/Producer.java @@ -0,0 +1,31 @@ +package com.datastax.driver.examples.stress; + +import java.util.concurrent.*; + +public class Producer extends Thread { + + private final QueryGenerator generator; + private final BlockingQueue workQueue; + + public Producer(QueryGenerator generator, BlockingQueue workQueue) { + super("Producer Thread"); + this.generator = generator; + this.workQueue = workQueue; + this.setDaemon(true); + } + + public void run() { + + try { + + while (generator.hasNext()) + workQueue.put(generator.next()); + + workQueue.put(QueryGenerator.DONE_MARKER); + + } catch (InterruptedException e) { + System.err.println("Producer interrupted" + (e.getMessage() != null ? ": " + e.getMessage() : "")); + return; + } + } +} diff --git a/driver-examples/stress/src/main/java/com/datastax/driver/stress/QueryGenerator.java b/driver-examples/stress/src/main/java/com/datastax/driver/stress/QueryGenerator.java new file mode 100644 index 00000000000..ea9239496e9 --- /dev/null +++ b/driver-examples/stress/src/main/java/com/datastax/driver/stress/QueryGenerator.java @@ -0,0 +1,61 @@ +package com.datastax.driver.examples.stress; + +import java.util.Iterator; + +import com.datastax.driver.core.*; +import com.datastax.driver.core.exceptions.*; + +public interface QueryGenerator extends Iterator { + + static final Request DONE_MARKER = new Request() { + public ResultSet execute(Session session) throws NoHostAvailableException { return null; } + public ResultSet.Future executeAsync(Session session) throws NoHostAvailableException { return null; }; + }; + + public void createSchema(Session session) throws NoHostAvailableException; + + public interface Request { + + public ResultSet execute(Session session) throws NoHostAvailableException; + + public ResultSet.Future executeAsync(Session session) throws NoHostAvailableException; + + public static class SimpleQuery implements Request { + + private final String query; + private final QueryOptions options; + + public SimpleQuery(String query, QueryOptions options) { + this.query = query; + this.options = options; + } + + public ResultSet execute(Session session) throws NoHostAvailableException { + return session.execute(query, options); + } + + public ResultSet.Future executeAsync(Session session) throws NoHostAvailableException { + return session.executeAsync(query, options); + } + } + + public static class PreparedQuery implements Request { + + private final BoundStatement query; + private final QueryOptions options; + + public PreparedQuery(BoundStatement query, QueryOptions options) { + this.query = query; + this.options = options; + } + + public ResultSet execute(Session session) throws NoHostAvailableException { + return session.executePrepared(query, options); + } + + public ResultSet.Future executeAsync(Session session) throws NoHostAvailableException { + return session.executePreparedAsync(query, options); + } + } + } +} diff --git a/driver-examples/stress/src/main/java/com/datastax/driver/stress/Reporter.java 
b/driver-examples/stress/src/main/java/com/datastax/driver/stress/Reporter.java new file mode 100644 index 00000000000..a934a1856a1 --- /dev/null +++ b/driver-examples/stress/src/main/java/com/datastax/driver/stress/Reporter.java @@ -0,0 +1,17 @@ +package com.datastax.driver.examples.stress; + +import java.util.concurrent.*; + +import com.yammer.metrics.Metrics; +import com.yammer.metrics.core.*; +import com.yammer.metrics.reporting.*; + +public class Reporter { + + public final Meter requests = Metrics.newMeter(Reporter.class, "requests", "requests", TimeUnit.SECONDS); + public final Timer latencies = Metrics.newTimer(Reporter.class, "latencies", TimeUnit.MILLISECONDS, TimeUnit.SECONDS); + + public Reporter() { + ConsoleReporter.enable(5, TimeUnit.SECONDS); + } +} diff --git a/driver-examples/stress/src/main/java/com/datastax/driver/stress/Stress.java b/driver-examples/stress/src/main/java/com/datastax/driver/stress/Stress.java new file mode 100644 index 00000000000..0c2040b7474 --- /dev/null +++ b/driver-examples/stress/src/main/java/com/datastax/driver/stress/Stress.java @@ -0,0 +1,119 @@ +package com.datastax.driver.examples.stress; + +import java.util.*; +import java.util.concurrent.*; + +import com.datastax.driver.core.*; +import com.datastax.driver.core.configuration.*; +import com.datastax.driver.core.exceptions.*; + +/** + * A simple stress tool to demonstrate the use of the driver. + * + * Sample usage: + * stress insert -n 100000 + * stress read -n 10000 + */ +public class Stress { + + public static void main(String[] args) throws Exception { + + if (args.length < 3) { + System.err.println("Missing arguments"); + System.exit(1); + } + + final int ITERATIONS = Integer.parseInt(args[1]); + final int THREADS = Integer.parseInt(args[2]); + + boolean async = false; + + QueryGenerator generator = new QueryGenerator() { + + private int i; + + public void createSchema(Session session) throws NoHostAvailableException { + try { session.execute("CREATE KEYSPACE stress_ks WITH replication = { 'class' : 'SimpleStrategy', 'replication_factor' : 1 }"); } catch (AlreadyExistsException e) { /* It's ok, ignore */ } + session.execute("USE stress_ks"); + + try { + session.execute("CREATE TABLE stress_cf (k int, c int, v int, PRIMARY KEY (k, c))"); + } catch (AlreadyExistsException e) { /* It's ok, ignore */ } + } + + public boolean hasNext() { + return i < ITERATIONS; + } + + public QueryGenerator.Request next() { + String query = String.format("INSERT INTO stress_cf(k, c, v) VALUES (%d, %d, %d)", i, i, i); + ++i; + return new QueryGenerator.Request.SimpleQuery(query, new QueryOptions()); + } + + public void remove() { + throw new UnsupportedOperationException(); + } + }; + + BlockingQueue workQueue = new SynchronousQueue(true); + + try { + // Create session to hosts + Cluster cluster = new Cluster.Builder().addContactPoints("127.0.0.1").build(); + + //PoolingOptions pools = cluster.getConfiguration().getConnectionsConfiguration().getPoolingOptions(); + //pools.setCoreConnectionsPerHost(HostDistance.LOCAL, 2); + //pools.setMaxConnectionsPerHost(HostDistance.LOCAL, 2); + + Session session = cluster.connect(); + + ClusterMetadata metadata = cluster.getMetadata(); + System.out.println(String.format("Connected to cluster '%s' on %s.", metadata.getClusterName(), metadata.getAllHosts())); + + System.out.println("Creating schema..."); + generator.createSchema(session); + + Reporter reporter = new Reporter(); + Producer producer = new Producer(generator, workQueue); + + Consumer[] consumers = new 
Consumer[THREADS]; + Consumer.Asynchronous.ResultHandler resultHandler = async ? new Consumer.Asynchronous.ResultHandler() : null; + for (int i = 0; i < THREADS; i++) { + consumers[i] = async + ? new Consumer.Asynchronous(session, workQueue, reporter, resultHandler) + : new Consumer(session, workQueue, reporter); + } + + System.out.println("Starting to stress test..."); + producer.start(); + if (resultHandler != null) + resultHandler.start(); + for (Consumer consumer : consumers) + consumer.start(); + + producer.join(); + for (Consumer consumer : consumers) + consumer.join(); + if (resultHandler != null) + resultHandler.join(); + + System.out.println("Stress test successful."); + System.exit(0); + + } catch (NoHostAvailableException e) { + System.err.println("No alive hosts to use: " + e.getMessage()); + System.exit(1); + } catch (QueryExecutionException e) { + System.err.println(e.getMessage()); + System.exit(1); + } catch (QueryValidationException e) { + System.err.println("Invalid query: " + e.getMessage()); + System.exit(1); + } catch (Exception e) { + System.err.println("Unexpected error: " + e.getMessage()); + e.printStackTrace(); + System.exit(1); + } + } +} diff --git a/pom.xml b/pom.xml index 900cf8120f7..4a0348bdb75 100644 --- a/pom.xml +++ b/pom.xml @@ -11,6 +11,7 @@ driver-core driver-jdbc + driver-examples From 5f109bf7964dff7db90edc259669847cfca39e4f Mon Sep 17 00:00:00 2001 From: Sylvain Lebresne Date: Wed, 31 Oct 2012 17:00:19 +0100 Subject: [PATCH 058/719] Improve stress tester example --- driver-examples/stress/pom.xml | 6 ++ .../datastax/driver/stress/Generators.java | 73 +++++++++++++++++++ .../driver/stress/QueryGenerator.java | 14 +++- .../com/datastax/driver/stress/Stress.java | 59 ++++++++------- 4 files changed, 123 insertions(+), 29 deletions(-) create mode 100644 driver-examples/stress/src/main/java/com/datastax/driver/stress/Generators.java diff --git a/driver-examples/stress/pom.xml b/driver-examples/stress/pom.xml index dd9b013750c..e2c88baacd4 100644 --- a/driver-examples/stress/pom.xml +++ b/driver-examples/stress/pom.xml @@ -23,6 +23,12 @@ metrics-core 2.1.2 + + + net.sf.jopt-simple + jopt-simple + 4.3 + diff --git a/driver-examples/stress/src/main/java/com/datastax/driver/stress/Generators.java b/driver-examples/stress/src/main/java/com/datastax/driver/stress/Generators.java new file mode 100644 index 00000000000..b743a23e7fe --- /dev/null +++ b/driver-examples/stress/src/main/java/com/datastax/driver/stress/Generators.java @@ -0,0 +1,73 @@ +package com.datastax.driver.examples.stress; + +import com.datastax.driver.core.*; +import com.datastax.driver.core.configuration.*; +import com.datastax.driver.core.exceptions.*; + +public class Generators { + + public static final QueryGenerator.Builder SIMPLE_INSERTER = new QueryGenerator.Builder() { + public QueryGenerator create(final int iterations) { + return new QueryGenerator(iterations) { + private int i; + + public void createSchema(Session session) throws NoHostAvailableException { + try { session.execute("CREATE KEYSPACE stress_ks WITH replication = { 'class' : 'SimpleStrategy', 'replication_factor' : 1 }"); } catch (AlreadyExistsException e) { /* It's ok, ignore */ } + session.execute("USE stress_ks"); + + try { + session.execute("CREATE TABLE stress_cf (k int, c int, v int, PRIMARY KEY (k, c))"); + } catch (AlreadyExistsException e) { /* It's ok, ignore */ } + } + + public boolean hasNext() { + return i < iterations; + } + + public QueryGenerator.Request next() { + String query = String.format("INSERT 
INTO stress_cf(k, c, v) VALUES (%d, %d, %d)", i, i, i); + ++i; + return new QueryGenerator.Request.SimpleQuery(query, new QueryOptions()); + } + + public void remove() { + throw new UnsupportedOperationException(); + } + }; + } + }; + + public static final QueryGenerator.Builder SIMPLE_PREPARED_INSERTER = new QueryGenerator.Builder() { + public QueryGenerator create(final int iterations) { + return new QueryGenerator(iterations) { + private int i; + private PreparedStatement stmt; + + public void createSchema(Session session) throws NoHostAvailableException { + try { session.execute("CREATE KEYSPACE stress_ks WITH replication = { 'class' : 'SimpleStrategy', 'replication_factor' : 1 }"); } catch (AlreadyExistsException e) { /* It's ok, ignore */ } + session.execute("USE stress_ks"); + + try { + session.execute("CREATE TABLE stress_cf (k int, c int, v int, PRIMARY KEY (k, c))"); + } catch (AlreadyExistsException e) { /* It's ok, ignore */ } + + stmt = session.prepare("INSERT INTO stress_cf(k, c, v) VALUES (?, ?, ?)"); + } + + public boolean hasNext() { + return i < iterations; + } + + public QueryGenerator.Request next() { + BoundStatement b = stmt.bind(i, i, i); + ++i; + return new QueryGenerator.Request.PreparedQuery(b, new QueryOptions()); + } + + public void remove() { + throw new UnsupportedOperationException(); + } + }; + } + }; +} diff --git a/driver-examples/stress/src/main/java/com/datastax/driver/stress/QueryGenerator.java b/driver-examples/stress/src/main/java/com/datastax/driver/stress/QueryGenerator.java index ea9239496e9..4dbac563a0b 100644 --- a/driver-examples/stress/src/main/java/com/datastax/driver/stress/QueryGenerator.java +++ b/driver-examples/stress/src/main/java/com/datastax/driver/stress/QueryGenerator.java @@ -5,14 +5,24 @@ import com.datastax.driver.core.*; import com.datastax.driver.core.exceptions.*; -public interface QueryGenerator extends Iterator { +public abstract class QueryGenerator implements Iterator { static final Request DONE_MARKER = new Request() { public ResultSet execute(Session session) throws NoHostAvailableException { return null; } public ResultSet.Future executeAsync(Session session) throws NoHostAvailableException { return null; }; }; - public void createSchema(Session session) throws NoHostAvailableException; + protected final int iterations; + + protected QueryGenerator(int iterations) { + this.iterations = iterations; + } + + public abstract void createSchema(Session session) throws NoHostAvailableException; + + public interface Builder { + public QueryGenerator create(int iterations); + } public interface Request { diff --git a/driver-examples/stress/src/main/java/com/datastax/driver/stress/Stress.java b/driver-examples/stress/src/main/java/com/datastax/driver/stress/Stress.java index 0c2040b7474..6b6d98e7cf0 100644 --- a/driver-examples/stress/src/main/java/com/datastax/driver/stress/Stress.java +++ b/driver-examples/stress/src/main/java/com/datastax/driver/stress/Stress.java @@ -7,6 +7,9 @@ import com.datastax.driver.core.configuration.*; import com.datastax.driver.core.exceptions.*; +import joptsimple.OptionParser; +import joptsimple.OptionSet; + /** * A simple stress tool to demonstrate the use of the driver. 
* @@ -16,45 +19,47 @@ */ public class Stress { + private static final Map generators = new HashMap(); + + public static void register(String name, QueryGenerator.Builder generator) { + if (generators.containsKey(name)) + throw new IllegalStateException("There is already a generator registered with the name " + name); + + generators.put(name, generator); + } + public static void main(String[] args) throws Exception { - if (args.length < 3) { - System.err.println("Missing arguments"); + register("insert", Generators.SIMPLE_INSERTER); + register("insert_prepared", Generators.SIMPLE_PREPARED_INSERTER); + + if (args.length < 2) { + System.err.println("Missing argument, you must at least provide the action to do"); System.exit(1); } - final int ITERATIONS = Integer.parseInt(args[1]); - final int THREADS = Integer.parseInt(args[2]); - - boolean async = false; + String action = args[1]; + if (!generators.containsKey(action)) { + System.err.println(String.format("Unknown generator '%s' (known generators: %s)", action, generators.keySet())); + System.exit(1); + } - QueryGenerator generator = new QueryGenerator() { + String[] opts = new String[args.length - 2]; + System.arraycopy(args, 2, opts, 0, opts.length); - private int i; + OptionParser parser = new OptionParser(); - public void createSchema(Session session) throws NoHostAvailableException { - try { session.execute("CREATE KEYSPACE stress_ks WITH replication = { 'class' : 'SimpleStrategy', 'replication_factor' : 1 }"); } catch (AlreadyExistsException e) { /* It's ok, ignore */ } - session.execute("USE stress_ks"); + parser.accepts("n", "Number of iterations for the query generator (default: 1,000,000)").withRequiredArg().ofType(Integer.class); + parser.accepts("t", "Number of threads to use (default: 30)").withRequiredArg().ofType(Integer.class); - try { - session.execute("CREATE TABLE stress_cf (k int, c int, v int, PRIMARY KEY (k, c))"); - } catch (AlreadyExistsException e) { /* It's ok, ignore */ } - } + OptionSet options = parser.parse(opts); - public boolean hasNext() { - return i < ITERATIONS; - } + int ITERATIONS = options.has("n") ? (Integer)options.valueOf("n") : 1000000; + int THREADS = options.has("t") ? 
(Integer)options.valueOf("t") : 30; - public QueryGenerator.Request next() { - String query = String.format("INSERT INTO stress_cf(k, c, v) VALUES (%d, %d, %d)", i, i, i); - ++i; - return new QueryGenerator.Request.SimpleQuery(query, new QueryOptions()); - } + QueryGenerator generator = generators.get(action).create(ITERATIONS); - public void remove() { - throw new UnsupportedOperationException(); - } - }; + boolean async = false; BlockingQueue workQueue = new SynchronousQueue(true); From a96c52a257c60166532a14e4c4fc5cec56b5da00 Mon Sep 17 00:00:00 2001 From: Sylvain Lebresne Date: Wed, 31 Oct 2012 18:36:57 +0100 Subject: [PATCH 059/719] Few minor improvements --- .../src/main/java/com/datastax/driver/core/Cluster.java | 8 ++++++-- .../java/com/datastax/driver/core/ControlConnection.java | 4 ++-- .../main/java/com/datastax/driver/core/ResultSet.java | 1 - .../src/main/java/com/datastax/driver/core/Session.java | 6 ++++-- .../main/java/com/datastax/driver/stress/Reporter.java | 9 +++++++-- .../src/main/java/com/datastax/driver/stress/Stress.java | 4 +++- 6 files changed, 22 insertions(+), 10 deletions(-) diff --git a/driver-core/src/main/java/com/datastax/driver/core/Cluster.java b/driver-core/src/main/java/com/datastax/driver/core/Cluster.java index c919c9c4e8d..91068504c41 100644 --- a/driver-core/src/main/java/com/datastax/driver/core/Cluster.java +++ b/driver-core/src/main/java/com/datastax/driver/core/Cluster.java @@ -509,8 +509,10 @@ public void onRemove(Host host) { public Host addHost(InetSocketAddress address, boolean signal) { Host newHost = metadata.add(address); - if (newHost != null && signal) + if (newHost != null && signal) { + logger.info(String.format("New Cassandra host %s added", newHost)); onAdd(newHost); + } return newHost; } @@ -518,8 +520,10 @@ public void removeHost(Host host) { if (host == null) return; - if (metadata.remove(host)) + if (metadata.remove(host)) { + logger.info(String.format("Cassandra host %s removed", host)); onRemove(host); + } } // Prepare a query on all nodes diff --git a/driver-core/src/main/java/com/datastax/driver/core/ControlConnection.java b/driver-core/src/main/java/com/datastax/driver/core/ControlConnection.java index 1d883b2bcab..5ad6e7db5e8 100644 --- a/driver-core/src/main/java/com/datastax/driver/core/ControlConnection.java +++ b/driver-core/src/main/java/com/datastax/driver/core/ControlConnection.java @@ -161,7 +161,7 @@ private void refreshSchema(Connection connection, String keyspace, String table) logger.debug(String.format("[Control connection] Connection error when refeshing schema (%s)", e.getMessage())); reconnect(); } catch (BusyConnectionException e) { - logger.info("[Control connection] Connection is busy, reconnecting"); + logger.debug("[Control connection] Connection is busy, reconnecting"); reconnect(); } catch (ExecutionException e) { logger.error("[Control connection] Unexpected error while refeshing schema", e); @@ -230,7 +230,7 @@ private void refreshNodeList(Connection connection) throws BusyConnectionExcepti logger.error("[Control connection] Unexpected error while refeshing hosts list", e); reconnect(); } catch (BusyConnectionException e) { - logger.info("[Control connection] Connection is busy, reconnecting"); + logger.debug("[Control connection] Connection is busy, reconnecting"); reconnect(); } catch (InterruptedException e) { // TODO: it's bad to do that but at the same time it's annoying to be interrupted diff --git a/driver-core/src/main/java/com/datastax/driver/core/ResultSet.java 
b/driver-core/src/main/java/com/datastax/driver/core/ResultSet.java index 75364306228..c340cc998a9 100644 --- a/driver-core/src/main/java/com/datastax/driver/core/ResultSet.java +++ b/driver-core/src/main/java/com/datastax/driver/core/ResultSet.java @@ -208,7 +208,6 @@ public void onSet(Connection connection, Message.Response response) { break; default: // TODO: handle errors (set the connection to defunct as this mean it is in a bad state) - logger.info("Got " + response); throw new RuntimeException(); } } catch (Exception e) { diff --git a/driver-core/src/main/java/com/datastax/driver/core/Session.java b/driver-core/src/main/java/com/datastax/driver/core/Session.java index f5c0d843382..30677913734 100644 --- a/driver-core/src/main/java/com/datastax/driver/core/Session.java +++ b/driver-core/src/main/java/com/datastax/driver/core/Session.java @@ -396,10 +396,12 @@ public Manager(Cluster cluster, Collection hosts) { private HostConnectionPool addHost(Host host) { try { HostDistance distance = loadBalancer.distance(host); - if (distance == HostDistance.IGNORED) + if (distance == HostDistance.IGNORED) { return pools.get(host); - else + } else { + logger.debug(String.format("Adding %s to list of queried hosts", host)); return pools.put(host, new HostConnectionPool(host, distance, this)); + } } catch (ConnectionException e) { logger.debug(String.format("Error creating pool to %s (%s)", host, e.getMessage())); host.getMonitor().signalConnectionFailure(e); diff --git a/driver-examples/stress/src/main/java/com/datastax/driver/stress/Reporter.java b/driver-examples/stress/src/main/java/com/datastax/driver/stress/Reporter.java index a934a1856a1..b9648a340ca 100644 --- a/driver-examples/stress/src/main/java/com/datastax/driver/stress/Reporter.java +++ b/driver-examples/stress/src/main/java/com/datastax/driver/stress/Reporter.java @@ -1,5 +1,6 @@ package com.datastax.driver.examples.stress; +import java.io.File; import java.util.concurrent.*; import com.yammer.metrics.Metrics; @@ -11,7 +12,11 @@ public class Reporter { public final Meter requests = Metrics.newMeter(Reporter.class, "requests", "requests", TimeUnit.SECONDS); public final Timer latencies = Metrics.newTimer(Reporter.class, "latencies", TimeUnit.MILLISECONDS, TimeUnit.SECONDS); - public Reporter() { - ConsoleReporter.enable(5, TimeUnit.SECONDS); + public Reporter(boolean useCsv) { + if (useCsv) { + CsvReporter.enable(new File("metrics"), 1, TimeUnit.SECONDS); + } else { + ConsoleReporter.enable(5, TimeUnit.SECONDS); + } } } diff --git a/driver-examples/stress/src/main/java/com/datastax/driver/stress/Stress.java b/driver-examples/stress/src/main/java/com/datastax/driver/stress/Stress.java index 6b6d98e7cf0..56fe0af9308 100644 --- a/driver-examples/stress/src/main/java/com/datastax/driver/stress/Stress.java +++ b/driver-examples/stress/src/main/java/com/datastax/driver/stress/Stress.java @@ -51,6 +51,7 @@ public static void main(String[] args) throws Exception { parser.accepts("n", "Number of iterations for the query generator (default: 1,000,000)").withRequiredArg().ofType(Integer.class); parser.accepts("t", "Number of threads to use (default: 30)").withRequiredArg().ofType(Integer.class); + parser.accepts("csv", "Save metrics into csv instead of displaying on stdout"); OptionSet options = parser.parse(opts); @@ -60,6 +61,7 @@ public static void main(String[] args) throws Exception { QueryGenerator generator = generators.get(action).create(ITERATIONS); boolean async = false; + boolean useCsv = options.has("csv"); BlockingQueue workQueue = 
new SynchronousQueue(true); @@ -79,7 +81,7 @@ public static void main(String[] args) throws Exception { System.out.println("Creating schema..."); generator.createSchema(session); - Reporter reporter = new Reporter(); + Reporter reporter = new Reporter(useCsv); Producer producer = new Producer(generator, workQueue); Consumer[] consumers = new Consumer[THREADS]; From 4e74e7771cb04fd014a08ca7810dad4a28fe569a Mon Sep 17 00:00:00 2001 From: Sylvain Lebresne Date: Thu, 1 Nov 2012 15:33:16 +0100 Subject: [PATCH 060/719] Add tracing support --- .../com/datastax/driver/core/Connection.java | 2 +- .../driver/core/ControlConnection.java | 2 +- .../datastax/driver/core/QueryOptions.java | 37 +++ .../com/datastax/driver/core/QueryTrace.java | 253 ++++++++++++++++++ .../com/datastax/driver/core/ResultSet.java | 37 ++- .../com/datastax/driver/core/Session.java | 9 +- .../com/datastax/driver/core/SessionTest.java | 76 ++++-- 7 files changed, 378 insertions(+), 38 deletions(-) create mode 100644 driver-core/src/main/java/com/datastax/driver/core/QueryTrace.java diff --git a/driver-core/src/main/java/com/datastax/driver/core/Connection.java b/driver-core/src/main/java/com/datastax/driver/core/Connection.java index 1108025b38a..363b1a517b2 100644 --- a/driver-core/src/main/java/com/datastax/driver/core/Connection.java +++ b/driver-core/src/main/java/com/datastax/driver/core/Connection.java @@ -165,7 +165,7 @@ public void setKeyspace(String keyspace) throws ConnectionException { try { logger.trace(String.format("[%s] Setting keyspace %s", name, keyspace)); - Message.Response response = write(new QueryMessage("USE " + keyspace, ConsistencyLevel.DEFAULT_CASSANDRA_CL)).get(); + Message.Response response = write(new QueryMessage("USE \"" + keyspace + "\"", ConsistencyLevel.DEFAULT_CASSANDRA_CL)).get(); switch (response.type) { case RESULT: this.keyspace = keyspace; diff --git a/driver-core/src/main/java/com/datastax/driver/core/ControlConnection.java b/driver-core/src/main/java/com/datastax/driver/core/ControlConnection.java index 5ad6e7db5e8..1978c0251e7 100644 --- a/driver-core/src/main/java/com/datastax/driver/core/ControlConnection.java +++ b/driver-core/src/main/java/com/datastax/driver/core/ControlConnection.java @@ -87,7 +87,7 @@ private void setNewConnection(Connection newConnection) { private Connection reconnectInternal() throws NoHostAvailableException { - Iterator iter = balancingPolicy.newQueryPlan(new QueryOptions()); + Iterator iter = balancingPolicy.newQueryPlan(QueryOptions.DEFAULT); Map errors = null; while (iter.hasNext()) { Host host = iter.next(); diff --git a/driver-core/src/main/java/com/datastax/driver/core/QueryOptions.java b/driver-core/src/main/java/com/datastax/driver/core/QueryOptions.java index 8429a6d5802..15824285947 100644 --- a/driver-core/src/main/java/com/datastax/driver/core/QueryOptions.java +++ b/driver-core/src/main/java/com/datastax/driver/core/QueryOptions.java @@ -2,7 +2,12 @@ public class QueryOptions { + // Don't expose that publicly as this would break if someone uses + // traceQuery (and don't use internally if traceQuery is set). + static final QueryOptions DEFAULT = new QueryOptions(); + protected final ConsistencyLevel consistency; + protected volatile boolean traceQuery; /** * Creates a new query options object with default consistency level @@ -32,4 +37,36 @@ public QueryOptions(ConsistencyLevel consistency) { public ConsistencyLevel getConsistencyLevel() { return consistency; } + + /** + * Enable tracing for the query using these options. 
+     *
+     * By default (i.e. unless you call this method), tracing is not enabled.
+     *
+     * @return this {@code QueryOptions} object.
+     */
+    public QueryOptions setTracing() {
+        traceQuery = true;
+        return this;
+    }
+
+    /**
+     * Disable tracing for the query using these options.
+     *
+     * @return this {@code QueryOptions} object.
+     */
+    public QueryOptions unsetTracing() {
+        traceQuery = false;
+        return this;
+    }
+
+    /**
+     * Whether to trace the query or not.
+     *
+     * @return {@code true} if this QueryOptions has query tracing enabled,
+     * {@code false} otherwise.
+     */
+    public boolean isTracing() {
+        return traceQuery;
+    }
 }
diff --git a/driver-core/src/main/java/com/datastax/driver/core/QueryTrace.java b/driver-core/src/main/java/com/datastax/driver/core/QueryTrace.java
new file mode 100644
index 00000000000..cb75159723d
--- /dev/null
+++ b/driver-core/src/main/java/com/datastax/driver/core/QueryTrace.java
@@ -0,0 +1,253 @@
+package com.datastax.driver.core;
+
+import java.net.InetAddress;
+import java.util.*;
+import java.util.concurrent.locks.Lock;
+import java.util.concurrent.locks.ReentrantLock;
+
+import org.apache.cassandra.transport.messages.QueryMessage;
+
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * The Cassandra trace for a query.
+ *
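+ * As a quick illustration (editor's sketch, not part of the original patch),
+ * a trace is obtained from a {@link ResultSet} once tracing has been enabled
+ * through the query options:
+ * <pre>
+ *   ResultSet rs = session.execute(query, new QueryOptions().setTracing());
+ *   QueryTrace trace = rs.getQueryTrace();
+ * </pre>
+ *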
+ * Such a trace is generated by Cassandra when query tracing is enabled for the
+ * query. The trace itself is stored in Cassandra, in the {@code sessions} and
+ * {@code events} tables of the {@code system_traces} keyspace, and can be
+ * retrieved manually using the trace identifier (the one returned by
+ * {@link #getTraceId}).
+ *
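+ * For reference, such a manual retrieval boils down to the two queries this
+ * class itself issues (editor's illustration; the layout of those tables is
+ * defined by Cassandra, not by the driver):
+ * <pre>
+ *   SELECT * FROM system_traces.sessions WHERE session_id = ...;
+ *   SELECT * FROM system_traces.events WHERE session_id = ...;
+ * </pre>
+ *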
+ * This class provides facilities to fetch the traces from Cassandra. Please
+ * note that the writing of the trace is done asynchronously in Cassandra. So
+ * accessing the trace too soon after the query may result in the trace being
+ * incomplete.
+ */
+public class QueryTrace {
+    private static final Logger logger = LoggerFactory.getLogger(QueryTrace.class);
+
+    private static final String SELECT_SESSIONS_FORMAT = "SELECT * FROM system_traces.sessions WHERE session_id = %s";
+    private static final String SELECT_EVENTS_FORMAT = "SELECT * FROM system_traces.events WHERE session_id = %s";
+
+    private final UUID traceId;
+
+    private String requestType;
+    // We use the duration to figure out if the trace is complete, because
+    // that's the last event that is written (and it is written asynchronously
+    // so it's possible that a fetch gets all the trace except the duration).
+    private int duration = Integer.MIN_VALUE;
+    private InetAddress coordinator;
+    private Map parameters;
+    private long startedAt;
+    private List events;
+
+    private final Session.Manager session;
+    private final Lock fetchLock = new ReentrantLock();
+
+    QueryTrace(UUID traceId, Session.Manager session) {
+        this.traceId = traceId;
+        this.session = session;
+    }
+
+    /**
+     * The identifier of this trace.
+     *
+     * @return the identifier of this trace.
+     */
+    public UUID getTraceId() {
+        return traceId;
+    }
+
+    /**
+     * The type of request.
+     *
+     * @return the type of request. This method returns {@code null} if the request
+     * type is not yet available.
+     */
+    public String getRequestType() {
+        maybeFetchTrace();
+        return requestType;
+    }
+
+    /**
+     * The (server side) duration of the query in microseconds.
+     *
+     * @return the (server side) duration of the query in microseconds. This
+     * method will return {@code Integer.MIN_VALUE} if the duration is not yet
+     * available.
+     */
+    public int getDurationMicros() {
+        maybeFetchTrace();
+        return duration;
+    }
+
+    /**
+     * The coordinator host of the query.
+     *
+     * @return the coordinator host of the query. This method returns {@code null}
+     * if the coordinator is not yet available.
+     */
+    public InetAddress getCoordinator() {
+        maybeFetchTrace();
+        return coordinator;
+    }
+
+    /**
+     * The parameters attached to this trace.
+     *
+     * @return the parameters attached to this trace. This method returns
+     * {@code null} if the parameters are not yet available.
+     */
+    public Map getParameters() {
+        maybeFetchTrace();
+        return parameters;
+    }
+
+    /**
+     * The server side timestamp of the start of this query.
+     *
+     * @return the server side timestamp of the start of this query. This
+     * method returns 0 if the start timestamp is not available.
+     */
+    public long getStartedAt() {
+        maybeFetchTrace();
+        return startedAt;
+    }
+
+    /**
+     * The events contained in this trace.
+     *
+     * @return the events contained in this trace.
+     */
+    public List getEvents() {
+        maybeFetchTrace();
+        return events;
+    }
+
+    @Override
+    public String toString() {
+        maybeFetchTrace();
+        return String.format("%s [%s] - %dµs", requestType, traceId, duration);
+    }
+
+    private void maybeFetchTrace() {
+        if (duration != Integer.MIN_VALUE)
+            return;
+
+        fetchLock.lock();
+        try {
+            // If by the time we grab the lock we've fetched the events, it's
+            // fine, move on. Otherwise, fetch them.
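+            // (Editor's note: this is a double-checked pattern; the unlocked
+            // test at the top of maybeFetchTrace() is only an optimization and
+            // the decision to fetch is re-made below while holding the lock.)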
+ if (duration == Integer.MIN_VALUE) + doFetchTrace(); + } finally { + fetchLock.unlock(); + } + } + + private void doFetchTrace() { + try { + ResultSet.Future sessionsFuture = session.executeQuery(new QueryMessage(String.format(SELECT_SESSIONS_FORMAT, traceId), ConsistencyLevel.DEFAULT_CASSANDRA_CL), QueryOptions.DEFAULT); + ResultSet.Future eventsFuture = session.executeQuery(new QueryMessage(String.format(SELECT_EVENTS_FORMAT, traceId), ConsistencyLevel.DEFAULT_CASSANDRA_CL), QueryOptions.DEFAULT); + + CQLRow sessRow = sessionsFuture.get().fetchOne(); + if (sessRow != null) { + requestType = sessRow.getString("request"); + if (!sessRow.isNull("duration")) + duration = sessRow.getInt("duration"); + coordinator = sessRow.getInet("coordinator"); + if (!sessRow.isNull("parameters")) + parameters = Collections.unmodifiableMap(sessRow.getMap("parameters", String.class, String.class)); + startedAt = sessRow.getLong("started_at"); + } + + events = new ArrayList(); + for (CQLRow evRow : eventsFuture.get()) { + events.add(new Event(evRow.getString("activity"), + evRow.getUUID("event_id").timestamp(), + evRow.getInet("source"), + evRow.getInt("source_elapsed"), + evRow.getString("thread"))); + } + events = Collections.unmodifiableList(events); + + } catch (Exception e) { + logger.error("Unexpected exception while fetching query trace", e); + } + } + + /** + * A trace event. + *
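+     * (Editor's note on the constructor below: event timestamps arrive as
+     * time-based UUIDs; the constant 0x01b21dd213814000L subtracted there is
+     * the number of 100ns intervals between the UUID epoch, 1582-10-15, and
+     * the Unix epoch, 1970-01-01, and the division by 10000 converts the
+     * result from 100ns units to milliseconds.)
+     *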
+ * A query trace is composed of a list of trace events. + */ + public static class Event { + private final String name; + private final long timestamp; + private final InetAddress source; + private final int sourceElapsed; + private final String threadName; + + private Event(String name, long timestamp, InetAddress source, int sourceElapsed, String threadName) { + this.name = name; + // Convert the UUID timestamp to an epoch timestamp; I stole this seemingly random value from cqlsh, hopefully it's correct. + this.timestamp = (timestamp - 0x01b21dd213814000L) / 10000; + this.source = source; + this.sourceElapsed = sourceElapsed; + this.threadName = threadName; + } + + /** + * The event description, i.e. which activity this event correspond to. + * + * @return the event description. + */ + public String getDescription() { + return name; + } + + /** + * The server side timestamp of the event. + * + * @return the server side timestamp of the event. + */ + public long getTimestamp() { + return timestamp; + } + + /** + * The address of the host having generated this event. + * + * @return the address of the host having generated this event. + */ + public InetAddress getSource() { + return source; + } + + /** + * The number of microseconds elapsed on the source when this event + * occurred since when the source started handling the query. + * + * @return the elapsed time on the source host when that event happened + * in microseconds. + */ + public int getSourceElapsedMicros() { + return sourceElapsed; + } + + /** + * The name of the thread on which this event occured. + * + * @return the name of the thread on which this event occured. + */ + public String getThreadName() { + return threadName; + } + + @Override + public String toString() { + return String.format("%s on %s[%s] at %s", name, source, threadName, new Date(timestamp)); + } + } +} diff --git a/driver-core/src/main/java/com/datastax/driver/core/ResultSet.java b/driver-core/src/main/java/com/datastax/driver/core/ResultSet.java index c340cc998a9..c51c9377ce6 100644 --- a/driver-core/src/main/java/com/datastax/driver/core/ResultSet.java +++ b/driver-core/src/main/java/com/datastax/driver/core/ResultSet.java @@ -29,39 +29,50 @@ public class ResultSet implements Iterable { private static final Logger logger = LoggerFactory.getLogger(ResultSet.class); - private static final ResultSet EMPTY = new ResultSet(ColumnDefinitions.EMPTY, new ArrayDeque(0)); + private static final Queue> EMPTY_QUEUE = new ArrayDeque(0); + private static final ResultSet EMPTY = new ResultSet(ColumnDefinitions.EMPTY, EMPTY_QUEUE, null); private final ColumnDefinitions metadata; private final Queue> rows; + private final QueryTrace trace; - private ResultSet(ColumnDefinitions metadata, Queue> rows) { + private ResultSet(ColumnDefinitions metadata, Queue> rows, QueryTrace trace) { this.metadata = metadata; this.rows = rows; + this.trace = trace; } - private static ResultSet fromMessage(ResultMessage msg) { + private static ResultSet fromMessage(ResultMessage msg, Session.Manager session) { + + UUID tracingId = msg.getTracingId(); + QueryTrace trace = tracingId == null ? 
null : new QueryTrace(tracingId, session); + switch (msg.kind) { case VOID: - return EMPTY; + return empty(trace); case ROWS: ResultMessage.Rows r = (ResultMessage.Rows)msg; ColumnDefinitions.Definition[] defs = new ColumnDefinitions.Definition[r.result.metadata.names.size()]; for (int i = 0; i < defs.length; i++) defs[i] = ColumnDefinitions.Definition.fromTransportSpecification(r.result.metadata.names.get(i)); - return new ResultSet(new ColumnDefinitions(defs), new ArrayDeque(r.result.rows)); + return new ResultSet(new ColumnDefinitions(defs), new ArrayDeque(r.result.rows), trace); case SET_KEYSPACE: case SCHEMA_CHANGE: - return EMPTY; + return empty(trace); case PREPARED: throw new RuntimeException("Prepared statement received when a ResultSet was expected"); default: logger.error(String.format("Received unknow result type '%s'; returning empty result set", msg.kind)); - return EMPTY; + return empty(trace); } } + private static ResultSet empty(QueryTrace trace) { + return trace == null ? EMPTY : new ResultSet(ColumnDefinitions.EMPTY, EMPTY_QUEUE, trace); + } + /** * The columns returned in this ResultSet. * @@ -135,6 +146,16 @@ public void remove() { }; } + /** + * The query trace if tracing was enabled on this query. + * + * @return the {@code QueryTrace} object for this query if tracing was + * enable for this query, or {@code null} otherwise. + */ + public QueryTrace getQueryTrace() { + return trace; + } + @Override public String toString() { StringBuilder sb = new StringBuilder(); @@ -201,7 +222,7 @@ public void onSet(Connection connection, Message.Response response) { } break; } - set(ResultSet.fromMessage(rm)); + set(ResultSet.fromMessage(rm, session)); break; case ERROR: setException(convertException(((ErrorMessage)response).error)); diff --git a/driver-core/src/main/java/com/datastax/driver/core/Session.java b/driver-core/src/main/java/com/datastax/driver/core/Session.java index 30677913734..842efab8987 100644 --- a/driver-core/src/main/java/com/datastax/driver/core/Session.java +++ b/driver-core/src/main/java/com/datastax/driver/core/Session.java @@ -54,7 +54,7 @@ public class Session { * unauthorized or any other validation problem). */ public ResultSet execute(String query) throws NoHostAvailableException { - return execute(query, new QueryOptions()); + return execute(query, QueryOptions.DEFAULT); } /** @@ -121,7 +121,7 @@ public ResultSet execute(String query, QueryOptions options) throws NoHostAvaila * be empty (and will be for any non SELECT query). 
*/ public ResultSet.Future executeAsync(String query) { - return executeAsync(query, new QueryOptions()); + return executeAsync(query, QueryOptions.DEFAULT); } /** @@ -176,7 +176,7 @@ public ResultSet.Future executeAsync(String query, QueryOptions options) { */ public PreparedStatement prepare(String query) throws NoHostAvailableException { Connection.Future future = new Connection.Future(new PrepareMessage(query)); - manager.execute(future, new QueryOptions()); + manager.execute(future, QueryOptions.DEFAULT); return toPreparedStatement(query, future); } @@ -519,6 +519,9 @@ public void prepare(String query, InetSocketAddress toExclude) { } public ResultSet.Future executeQuery(Message.Request msg, QueryOptions options) { + if (options.isTracing()) + msg.setTracingRequested(); + ResultSet.Future future = new ResultSet.Future(this, msg); execute(future.callback, options); return future; diff --git a/driver-core/src/test/java/com/datastax/driver/core/SessionTest.java b/driver-core/src/test/java/com/datastax/driver/core/SessionTest.java index 5291287873a..b393831e17b 100644 --- a/driver-core/src/test/java/com/datastax/driver/core/SessionTest.java +++ b/driver-core/src/test/java/com/datastax/driver/core/SessionTest.java @@ -157,31 +157,31 @@ public static void classSetUp() { // } //} - @Test - public void MultiNodeContinuousExecuteTest() throws Exception { - - Cluster cluster = new Cluster.Builder().addContactPoints("127.0.0.1").build(); - Session session = cluster.connect(); - - try { - session.execute("CREATE KEYSPACE test_ks WITH replication = { 'class' : 'SimpleStrategy', 'replication_factor' : 1 }"); - // We should deal with that sleep - try { Thread.sleep(2000); } catch (Exception e) {} - session.execute("USE test_ks"); - session.execute("CREATE TABLE test (k text PRIMARY KEY, i int, f float)"); - } catch (AlreadyExistsException e) { - // Skip if already exists - session.execute("USE test_ks"); - } - - //System.out.println("--- Schema ---\n" + cluster.getMetadata()); - - for (int i = 0; i < 10000; ++i) { - System.out.println(">> " + i); - session.execute(String.format("INSERT INTO test (k, i, f) VALUES ('k%d', %d, %d.2)", i, i, i)); - Thread.currentThread().sleep(1000); - } - } + //@Test + //public void MultiNodeContinuousExecuteTest() throws Exception { + + // Cluster cluster = new Cluster.Builder().addContactPoints("127.0.0.1").build(); + // Session session = cluster.connect(); + + // try { + // session.execute("CREATE KEYSPACE test_ks WITH replication = { 'class' : 'SimpleStrategy', 'replication_factor' : 1 }"); + // // We should deal with that sleep + // try { Thread.sleep(2000); } catch (Exception e) {} + // session.execute("USE test_ks"); + // session.execute("CREATE TABLE test (k text PRIMARY KEY, i int, f float)"); + // } catch (AlreadyExistsException e) { + // // Skip if already exists + // session.execute("USE test_ks"); + // } + + // //System.out.println("--- Schema ---\n" + cluster.getMetadata()); + + // for (int i = 0; i < 10000; ++i) { + // System.out.println(">> " + i); + // session.execute(String.format("INSERT INTO test (k, i, f) VALUES ('k%d', %d, %d.2)", i, i, i)); + // Thread.currentThread().sleep(1000); + // } + //} //@Test //public void SchemaTest() throws Exception { @@ -206,4 +206,30 @@ public void MultiNodeContinuousExecuteTest() throws Exception { // Thread.currentThread().sleep(4000); // } //} + + //@Test + //public void TracingTest() throws Exception { + + // Cluster cluster = new Cluster.Builder().addContactPoints("127.0.0.1").build(); + // Session session = 
cluster.connect(); + + // try { + // session.execute("CREATE KEYSPACE test_ks WITH replication = { 'class' : 'SimpleStrategy', 'replication_factor' : 2 }"); + // // We should deal with that sleep + // try { Thread.sleep(1000); } catch (Exception e) {} + // session.execute("USE test_ks"); + // session.execute("CREATE TABLE test (k text PRIMARY KEY, c int)"); + // } catch (Exception e) { + // // Skip if already created + // session.execute("USE test_ks"); + // } + + // ResultSet rs = session.execute("INSERT INTO test (k, c) VALUES ('k', 1)", new QueryOptions(ConsistencyLevel.QUORUM).setTracing()); + // QueryTrace qt = rs.getQueryTrace(); + + // Thread.sleep(100); + // System.out.println("Trace = " + rs.getQueryTrace()); + // for (QueryTrace.Event event : qt.getEvents()) + // System.out.println(" >> " + event); + //} } From db78f76152817f8b0d3c729fa50599fded8bf6d0 Mon Sep 17 00:00:00 2001 From: Sylvain Lebresne Date: Thu, 1 Nov 2012 15:52:40 +0100 Subject: [PATCH 061/719] Simplify API based on the fact we only support one port for the whole cluster --- .../com/datastax/driver/core/Cluster.java | 95 ++++++++----------- 1 file changed, 41 insertions(+), 54 deletions(-) diff --git a/driver-core/src/main/java/com/datastax/driver/core/Cluster.java b/driver-core/src/main/java/com/datastax/driver/core/Cluster.java index 91068504c41..a67ffe19369 100644 --- a/driver-core/src/main/java/com/datastax/driver/core/Cluster.java +++ b/driver-core/src/main/java/com/datastax/driver/core/Cluster.java @@ -2,6 +2,7 @@ import java.net.InetAddress; import java.net.InetSocketAddress; +import java.net.UnknownHostException; import java.util.*; import java.util.concurrent.*; @@ -26,7 +27,7 @@ * This is the main entry point of the driver. A simple example of access to a * Cassandra cluster would be: *

- *   Cluster cluster = new Cluster.Builder().addContactPoints("192.168.0.1").build();
+ *   Cluster cluster = new Cluster.Builder().addContactPoint("192.168.0.1").build();
  *   Session session = cluster.connect("db1");
  *
  *   for (CQLRow row : session.execute("SELECT * FROM table1"))
@@ -66,12 +67,17 @@ private Cluster(List contactPoints, Policies policies) throws
      * 

* Note that for building a cluster programmatically, Cluster.Builder * provides a slightly less verbose shortcut with {@link Builder#build}. + *
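+     * As a quick editor's illustration, the two forms below are equivalent:
+     * <pre>
+     *   Cluster c1 = Cluster.buildFrom(new Cluster.Builder().addContactPoint("192.168.0.1"));
+     *   Cluster c2 = new Cluster.Builder().addContactPoint("192.168.0.1").build();
+     * </pre>
+     *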
+     * Also note that all the contact points provided by {@code
+     * initializer} must share the same port.
      *
      * @param initializer the Cluster.Initializer to use
      * @return the newly created Cluster instance
      *
      * @throws NoHostAvailableException if no host amongst the contact points
      * can be reached.
+     * @throws IllegalArgumentException if not all the contact points provided
+     * by {@code initializer} have the same port.
      */
     public static Cluster buildFrom(Initializer initializer) throws NoHostAvailableException {
         return new Cluster(initializer.getContactPoints(), initializer.getPolicies());
     }

@@ -173,61 +179,63 @@ public interface Initializer {
      */
     public static class Builder implements Initializer {

-        private final List addresses = new ArrayList();
+        private final List addresses = new ArrayList();
+        private int port = DEFAULT_PORT;

         private LoadBalancingPolicy.Factory loadBalancingPolicyFactory;
         private ReconnectionPolicy.Factory reconnectionPolicyFactory;
         private RetryPolicy retryPolicy;

         public List getContactPoints() {
-            return addresses;
+            List cp = new ArrayList(addresses.size());
+            for (InetAddress address : addresses)
+                cp.add(new InetSocketAddress(address, port));
+            return cp;
         }

         /**
-         * Adds a contact point.
+         * The port to use to connect to the Cassandra host.
          *
-         * Contact points are addresses of Cassandra nodes that the driver uses
-         * to discover the cluster topology. Only one contact point is required
-         * (the driver will retrieve the address of the other nodes
-         * automatically), but it is usually a good idea to provide more than
-         * one contact point, as if that unique contact point is not available,
-         * the driver won't be able to initialize itself correctly.
+         * If not set through this method, the default port (9042) will be used
+         * instead.
          *
-         * @param address the address of the node to connect to
-         * @param port the port to connect to
+         * @param port the port to set.
          * @return this Builder
-         *
-         * @throws IllegalArgumentException if the port parameter is outside
-         * the range of valid port values, or if the hostname parameter is
-         * null.
-         * @throws SecurityException if a security manager is present and
-         * permission to resolve the host name is denied.
          */
-        public Builder addContactPoint(String address, int port) {
-            this.addresses.add(new InetSocketAddress(address, port));
+        public Builder withPort(int port) {
+            this.port = port;
             return this;
         }

         /**
-         * Add a contact point using the default Cassandra port.
+         * Adds a contact point.
          *
-         * See {@link Builder#addContactPoint} for more details on contact
-         * points.
+         * Contact points are addresses of Cassandra nodes that the driver uses
+         * to discover the cluster topology. Only one contact point is required
+         * (the driver will retrieve the address of the other nodes
+         * automatically), but it is usually a good idea to provide more than
+         * one contact point, as if that unique contact point is not available,
+         * the driver won't be able to initialize itself correctly.
          *
-         * @param address the address of the node to add as contact point
+         * @param address the address of the node to connect to
         * @return this Builder
          *
+         * @throws IllegalArgumentException if no IP address for {@code address}
+         * could be found
+         * @throws SecurityException if a security manager is present and
+         * permission to resolve the host name is denied.
- * - * @see Builder#addContactPoint */ public Builder addContactPoint(String address) { - return addContactPoint(address, DEFAULT_PORT); + try { + this.addresses.add(InetAddress.getByName(address)); + return this; + } catch (UnknownHostException e) { + throw new IllegalArgumentException(e.getMessage()); + } } /** - * Add contact points using the default Cassandra port. + * Add contact points. * * See {@link Builder#addContactPoint} for more details on contact * points. @@ -235,6 +243,8 @@ public Builder addContactPoint(String address) { * @param addresses addresses of the nodes to add as contact point * @return this Builder * + * @throws IllegalArgumentException if no IP address for at least one + * of {@code addresses} could be found * @throws SecurityException if a security manager is present and * permission to resolve the host name is denied. * @@ -242,12 +252,12 @@ public Builder addContactPoint(String address) { */ public Builder addContactPoints(String... addresses) { for (String address : addresses) - addContactPoint(address, DEFAULT_PORT); + addContactPoint(address); return this; } /** - * Add contact points using the default Cassandra port. + * Add contact points. * * See {@link Builder#addContactPoint} for more details on contact * points. @@ -255,34 +265,11 @@ public Builder addContactPoints(String... addresses) { * @param addresses addresses of the nodes to add as contact point * @return this Builder * - * @throws SecurityException if a security manager is present and - * permission to resolve the host name is denied. - * * @see Builder#addContactPoint */ public Builder addContactPoints(InetAddress... addresses) { for (InetAddress address : addresses) - this.addresses.add(new InetSocketAddress(address, DEFAULT_PORT)); - return this; - } - - /** - * Add contact points. - * - * See {@link Builder#addContactPoint} for more details on contact - * points. - * - * @param addresses the socket addresses of the nodes to add as contact - * point - * @return this Builder - * - * @throws SecurityException if a security manager is present and - * permission to resolve the host name is denied. - * - * @see Builder#addContactPoint - */ - public Builder addContactPoints(InetSocketAddress... 
addresses) {
-            this.addresses.addAll(Arrays.asList(addresses));
+            this.addresses.add(address);
             return this;
         }

From bf23ac556ac592f570128157ccd4f10502c9d248 Mon Sep 17 00:00:00 2001
From: Sylvain Lebresne
Date: Thu, 1 Nov 2012 16:20:57 +0100
Subject: [PATCH 062/719] Cleanups

---
 .../com/datastax/driver/core/Cluster.java     | 31 ++++++++++++++-----
 .../com/datastax/driver/core/Connection.java  |  7 +----
 .../driver/core/ControlConnection.java        |  3 +-
 .../com/datastax/driver/core/ResultSet.java   |  2 --
 .../driver/core/RetryingCallback.java         |  1 -
 .../exceptions/QueryValidationException.java  |  1 -
 .../core/exceptions/TruncateException.java    |  5 +--
 7 files changed, 27 insertions(+), 23 deletions(-)

diff --git a/driver-core/src/main/java/com/datastax/driver/core/Cluster.java b/driver-core/src/main/java/com/datastax/driver/core/Cluster.java
index a67ffe19369..8d55951215d 100644
--- a/driver-core/src/main/java/com/datastax/driver/core/Cluster.java
+++ b/driver-core/src/main/java/com/datastax/driver/core/Cluster.java
@@ -58,8 +58,8 @@ public class Cluster {

     final Manager manager;

-    private Cluster(List contactPoints, Policies policies) throws NoHostAvailableException {
-        this.manager = new Manager(contactPoints, policies);
+    private Cluster(List contactPoints, int port, Policies policies) throws NoHostAvailableException {
+        this.manager = new Manager(contactPoints, port, policies);
     }

     /**
@@ -76,11 +76,21 @@
      *
      * @throws NoHostAvailableException if no host amongst the contact points
      * can be reached.
-     * @throws IllegalArgumentException if not all the contact points provided
-     * by {@code initializer} have the same port.
+     * @throws IllegalArgumentException if the list of contact points provided
+     * by {@code initializer} is empty or if not all those contact points have the same port.
      */
     public static Cluster buildFrom(Initializer initializer) throws NoHostAvailableException {
-        return new Cluster(initializer.getContactPoints(), initializer.getPolicies());
+        List contactPoints = initializer.getContactPoints();
+        if (contactPoints.isEmpty())
+            throw new IllegalArgumentException("Cannot build a cluster without contact points");
+
+        int port = -1;
+        for (InetSocketAddress a : contactPoints) {
+            if (port != -1 && a.getPort() != port)
+                throw new IllegalArgumentException(String.format("Not all hosts have the same port, found port %d and %d", port, a.getPort()));
+            port = a.getPort();
+        }
+        return new Cluster(contactPoints, port, initializer.getPolicies());
     }

     /**
@@ -149,6 +159,11 @@ public ClusterMetadata getMetadata() {
         return manager.metadata;
     }

+    /**
+     * The cluster configuration.
+     *
+     * @return the cluster configuration.
+     */
     public Cluster.Configuration getConfiguration() {
         return manager.configuration;
     }
@@ -391,6 +406,7 @@ class Manager implements Host.StateListener, Connection.DefaultResponseHandler {

     // Initial contact points
     final List contactPoints;
+    final int port;

     private final Set sessions = new CopyOnWriteArraySet();

     final ClusterMetadata metadata;
@@ -413,7 +429,8 @@
     // less clear behavior.
final Map preparedQueries = new ConcurrentHashMap(); - private Manager(List contactPoints, Policies policies) throws NoHostAvailableException { + private Manager(List contactPoints, int port, Policies policies) throws NoHostAvailableException { + this.port = port; this.configuration = new Configuration(policies); this.metadata = new ClusterMetadata(this); this.contactPoints = contactPoints; @@ -561,7 +578,7 @@ public void run() { public void handle(Message.Response response) { if (!(response instanceof EventMessage)) { - // TODO: log some error + logger.error("Received an unexpected message from the server: " + response); return; } diff --git a/driver-core/src/main/java/com/datastax/driver/core/Connection.java b/driver-core/src/main/java/com/datastax/driver/core/Connection.java index 363b1a517b2..f397dd1e916 100644 --- a/driver-core/src/main/java/com/datastax/driver/core/Connection.java +++ b/driver-core/src/main/java/com/datastax/driver/core/Connection.java @@ -255,8 +255,7 @@ public void close() { if (isClosed) return; - // TODO: put that to trace - logger.debug(String.format("[%s] closing connection", name)); + logger.trace(String.format("[%s] closing connection", name)); // Make sure all new writes are rejected isClosed = true; @@ -364,9 +363,6 @@ public DefaultResponseHandler defaultHandler() { } } - // TODO: Having a map of Integer -> ResponseHandler might be overkill if we - // use the connection synchronously. See if we want to support lighter - // dispatcher that assume synchronous? private class Dispatcher extends SimpleChannelUpstreamHandler { public final StreamIdGenerator streamIdHandler = new StreamIdGenerator(); @@ -430,7 +426,6 @@ public void errorOutAllHandler(ConnectionException ce) { } } - // TODO: Do we really need that after all? 
static class Future extends SimpleFuture implements ResponseCallback { private final Message.Request request; diff --git a/driver-core/src/main/java/com/datastax/driver/core/ControlConnection.java b/driver-core/src/main/java/com/datastax/driver/core/ControlConnection.java index 1978c0251e7..271b61b6e0b 100644 --- a/driver-core/src/main/java/com/datastax/driver/core/ControlConnection.java +++ b/driver-core/src/main/java/com/datastax/driver/core/ControlConnection.java @@ -201,8 +201,7 @@ private void refreshNodeList(Connection connection) throws BusyConnectionExcepti for (CQLRow row : peersFuture.get()) { if (!row.isNull("peer")) { - // TODO: find what port people are using - foundHosts.add(new InetSocketAddress(row.getInet("peer"), Cluster.DEFAULT_PORT)); + foundHosts.add(new InetSocketAddress(row.getInet("peer"), cluster.port)); dcs.add(row.getString("data_center")); racks.add(row.getString("rack")); } diff --git a/driver-core/src/main/java/com/datastax/driver/core/ResultSet.java b/driver-core/src/main/java/com/datastax/driver/core/ResultSet.java index c51c9377ce6..e3fbfa478ff 100644 --- a/driver-core/src/main/java/com/datastax/driver/core/ResultSet.java +++ b/driver-core/src/main/java/com/datastax/driver/core/ResultSet.java @@ -347,10 +347,8 @@ static Exception convertException(org.apache.cassandra.exceptions.TransportExcep org.apache.cassandra.exceptions.UnavailableException ue = (org.apache.cassandra.exceptions.UnavailableException)te; return new UnavailableException(ConsistencyLevel.from(ue.consistency), ue.required, ue.alive); case OVERLOADED: - // TODO: Catch that so that we retry another node return new DriverInternalError("Queried host was overloaded; this shouldn't happen, another node should have been tried"); case IS_BOOTSTRAPPING: - // TODO: Catch that so that we retry another node return new DriverInternalError("Queried host was boostrapping; this shouldn't happen, another node should have been tried"); case TRUNCATE_ERROR: return new TruncateException(te.getMessage()); diff --git a/driver-core/src/main/java/com/datastax/driver/core/RetryingCallback.java b/driver-core/src/main/java/com/datastax/driver/core/RetryingCallback.java index dbad3528a0e..6ac8f42ff90 100644 --- a/driver-core/src/main/java/com/datastax/driver/core/RetryingCallback.java +++ b/driver-core/src/main/java/com/datastax/driver/core/RetryingCallback.java @@ -186,7 +186,6 @@ public void onSet(Connection connection, Message.Response response) { retry(false, null); return; case IS_BOOTSTRAPPING: - // TODO: log error as this shouldn't happen // Try another node logger.error("Query sent to %s but it is bootstrapping. 
This shouldn't happen but trying next host.", connection.address);
             retry(false, null);
diff --git a/driver-core/src/main/java/com/datastax/driver/core/exceptions/QueryValidationException.java b/driver-core/src/main/java/com/datastax/driver/core/exceptions/QueryValidationException.java
index 2d3322e6bbd..94aa8335a19 100644
--- a/driver-core/src/main/java/com/datastax/driver/core/exceptions/QueryValidationException.java
+++ b/driver-core/src/main/java/com/datastax/driver/core/exceptions/QueryValidationException.java
@@ -6,7 +6,6 @@
  */
 public class QueryValidationException extends DriverUncheckedException {

-    // TODO: return the query with such exception
     protected QueryValidationException(String msg) {
         super(msg);
     }
diff --git a/driver-core/src/main/java/com/datastax/driver/core/exceptions/TruncateException.java b/driver-core/src/main/java/com/datastax/driver/core/exceptions/TruncateException.java
index 9c153865989..79c1c2c2e94 100644
--- a/driver-core/src/main/java/com/datastax/driver/core/exceptions/TruncateException.java
+++ b/driver-core/src/main/java/com/datastax/driver/core/exceptions/TruncateException.java
@@ -3,10 +3,7 @@
 /**
  * Error during a truncation operation.
  */
-// TODO: should that extend QueryExecutionException. In theory yes, but that's
-// probably not part of what you want to deal with when you catch
-// QueryExecutionException?
-public class TruncateException extends DriverUncheckedException {
+public class TruncateException extends QueryExecutionException {

     public TruncateException(String msg) {
         super(msg);

From 349d7fc36d0355de5772b67c59ff07c6bfa9676f Mon Sep 17 00:00:00 2001
From: Sylvain Lebresne
Date: Thu, 1 Nov 2012 17:35:33 +0100
Subject: [PATCH 063/719] Add some niceish readme

---
 README             | 20 ++++++++++++++++---
 TODO               |  8 --------
 driver-core/README | 50 ++++++++++++++++++++++++++++++++++++++++++++++
 driver-core/TODO   |  5 +++++
 4 files changed, 72 insertions(+), 11 deletions(-)
 delete mode 100644 TODO
 create mode 100644 driver-core/README
 create mode 100644 driver-core/TODO

diff --git a/README b/README
index 5040263fd02..b67ecbca5ef 100644
--- a/README
+++ b/README
@@ -1,4 +1,18 @@
-Cassandra Java Driver
-=====================
+Datastax Java Driver for Apache Cassandra
+=========================================

-A work in progress java driver using the binary protocol and the full power of CQL3.
+A Java client driver for Apache Cassandra. This driver works exclusively with
+the Cassandra Query Language version 3 (CQL3) and Cassandra's binary protocol.
+
+The driver architecture is based on layers. At the bottom lies the driver core.
+This core handles everything related to the connections to the Cassandra
+cluster (connection pool, discovering new nodes, ...) and exposes a simple,
+relatively low-level, API on top of which higher level layers can build.
+
+The driver contains the following modules:
+ - driver-core: the core layer.
+ - driver-mapping: ...
+ - driver-examples: example applications using the other modules. Those are
+   only meant for demonstration purposes.
+
+Please refer to the README of each module for more information.
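(Editor's aside: as a concrete illustration of the driver-examples module
listed above, the stress example from earlier in this series documents its
invocation as, for instance:

    stress insert -n 100000 -t 50 --csv

where -n, -t and --csv are the jopt-simple options it registers; the stress
command itself is assumed here to be a wrapper around the assembled
jar-with-dependencies artifact.)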
diff --git a/TODO b/TODO
deleted file mode 100644
index 57a41362863..00000000000
--- a/TODO
+++ /dev/null
@@ -1,8 +0,0 @@
-TODO:
-=====
-
-Everything, but in particular:
-- Built system
-- Make it work
-- Tests
-- Documentation
diff --git a/driver-core/README b/driver-core/README
new file mode 100644
index 00000000000..18aa8280f30
--- /dev/null
+++ b/driver-core/README
@@ -0,0 +1,50 @@
+Driver Core
+===========
+
+The core module of the Datastax Java Driver for Apache Cassandra (C*). This
+module offers a simple (as in, not abstracted) but complete API to work with
+CQL3. The main goal of this module is to handle all the functionality related
+to managing connections to a Cassandra cluster (but leaving potentially higher
+level abstractions like object mapping to separate modules).
+
+Prerequisite
+------------
+
+This driver uses the binary protocol that will be introduced in C* 1.2. It
+will thus only work with a version of C* >= 1.2. Since at the time of this
+writing C* 1.2 hasn't been released yet, you will have to use at least the
+beta2 release. Furthermore, the binary protocol server is not started with the
+default configuration file coming with Cassandra 1.2, so in the cassandra.yaml
+file, you will have to set at least:
+    start_native_transport: true
+
+
+Installing
+----------
+
+At the time of this writing, the driver has not been released yet, so you will
+have to compile it manually. The build system is maven and should work like any
+other maven project.
+
+Getting Started
+---------------
+
+Suppose you have a Cassandra cluster running on 3 nodes whose hostnames are:
+cass1, cass2 and cass3. A simple example using this core driver could be:
+
+    Cluster cluster = new Cluster.Builder().addContactPoints("cass1", "cass2").build();
+    Session session = cluster.connect("db1");
+
+    for (CQLRow row : session.execute("SELECT * FROM table1"))
+        // do something ...
+
+
+Please note that when we build the Cluster object, we only provide the
+addresses of 2 Cassandra hosts. We could have provided only one host or all 3
+of them; this doesn't matter as long as the driver is able to contact one of
+the hosts provided as "contact points". Even if only one host is provided, the
+driver will use it to discover the other ones and use the whole cluster
+automatically. This is also true for new nodes joining the cluster.
+
+For now, please refer to the JavaDoc for more information; more documentation
+will come later.
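To make the "Getting Started" loop above concrete, here is a slightly fuller
sketch of the same program (same hypothetical hosts, keyspace and table; the
column k is likewise invented for illustration):

    import com.datastax.driver.core.*;

    public class GettingStarted {
        public static void main(String[] args) throws Exception {
            // Two contact points are enough: the driver discovers the rest of
            // the cluster (including cass3) from whichever one it reaches.
            Cluster cluster = new Cluster.Builder().addContactPoints("cass1", "cass2").build();
            Session session = cluster.connect("db1");

            // Iterate the result set; columns can be read by name or index.
            for (CQLRow row : session.execute("SELECT * FROM table1")) {
                System.out.println(row.getString("k")); // k assumed to be a text column
            }
        }
    }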
diff --git a/driver-core/TODO b/driver-core/TODO new file mode 100644 index 00000000000..13af3111e59 --- /dev/null +++ b/driver-core/TODO @@ -0,0 +1,5 @@ +TODO: + +- Add tests +- Add documentation +- Clean up dependencies (don't require the full Cassandra jar) From 0c9350081906132477792c237cef0eb9ad69e9ef Mon Sep 17 00:00:00 2001 From: Sylvain Lebresne Date: Wed, 7 Nov 2012 15:41:59 +0100 Subject: [PATCH 064/719] Add shutdown method and some real tests --- driver-core/pom.xml | 16 + .../datastax/driver/core/BoundStatement.java | 157 +++------- .../java/com/datastax/driver/core/CQLRow.java | 73 ++--- .../com/datastax/driver/core/Cluster.java | 36 ++- .../java/com/datastax/driver/core/Codec.java | 26 +- .../driver/core/ControlConnection.java | 16 +- .../com/datastax/driver/core/DataType.java | 2 +- .../com/datastax/driver/core/Session.java | 28 +- .../com/datastax/driver/core/CCMBridge.java | 196 +++++++++++++ .../driver/core/PreparedStatementTest.java | 165 +++++++++++ .../com/datastax/driver/core/SessionTest.java | 277 ++++-------------- .../com/datastax/driver/core/TestUtils.java | 228 ++++++++++++++ 12 files changed, 826 insertions(+), 394 deletions(-) create mode 100644 driver-core/src/test/java/com/datastax/driver/core/CCMBridge.java create mode 100644 driver-core/src/test/java/com/datastax/driver/core/PreparedStatementTest.java create mode 100644 driver-core/src/test/java/com/datastax/driver/core/TestUtils.java diff --git a/driver-core/pom.xml b/driver-core/pom.xml index 7a4d84d7b01..d93f915c40f 100644 --- a/driver-core/pom.xml +++ b/driver-core/pom.xml @@ -43,6 +43,19 @@ + + + default + + default + cassandra-1.2.0-beta2 + + + true + + + + @@ -51,6 +64,9 @@ 2.5 false + + ${cassandra.version} + diff --git a/driver-core/src/main/java/com/datastax/driver/core/BoundStatement.java b/driver-core/src/main/java/com/datastax/driver/core/BoundStatement.java index b7e9077b5e1..afd2321ce95 100644 --- a/driver-core/src/main/java/com/datastax/driver/core/BoundStatement.java +++ b/driver-core/src/main/java/com/datastax/driver/core/BoundStatement.java @@ -109,7 +109,7 @@ public BoundStatement bind(Object... values) { // Ugly? Yes Class klass = l.get(0).getClass(); DataType.Native eltType = (DataType.Native)((DataType.Collection.List)columnType).getElementsType(); - if (!Codec.isCompatible(eltType, klass)) + if (!Codec.isCompatibleSupertype(eltType, klass)) throw new InvalidTypeException(String.format("Invalid type for value %d, column type is %s but provided list value are %s", i, columnType, klass)); } break; @@ -123,7 +123,7 @@ public BoundStatement bind(Object... values) { // Ugly? Yes Class klass = s.iterator().next().getClass(); DataType.Native eltType = (DataType.Native)((DataType.Collection.List)columnType).getElementsType(); - if (!Codec.isCompatible(eltType, klass)) + if (!Codec.isCompatibleSupertype(eltType, klass)) throw new InvalidTypeException(String.format("Invalid type for value %d, column type is %s but provided set value are %s", i, columnType, klass)); } break; @@ -142,14 +142,14 @@ public BoundStatement bind(Object... 
values) { DataType.Collection.Map mapType = (DataType.Collection.Map)columnType; DataType.Native keysType = (DataType.Native)mapType.getKeysType(); DataType.Native valuesType = (DataType.Native)mapType.getValuesType(); - if (!Codec.isCompatible(keysType, keysClass) || !Codec.isCompatible(valuesType, valuesClass)) + if (!Codec.isCompatibleSupertype(keysType, keysClass) || !Codec.isCompatibleSupertype(valuesType, valuesClass)) throw new InvalidTypeException(String.format("Invalid type for value %d, column type %s conflicts with provided type %s", i, mapType, toSet.getClass())); } break; } } else { - if (!Codec.isCompatible(columnType.asNative(), toSet.getClass())) + if (!Codec.isCompatibleSupertype(columnType.asNative(), toSet.getClass())) throw new InvalidTypeException(String.format("Invalid type for value %d, column type is %s but %s provided", i, columnType, toSet.getClass())); } setValue(i, Codec.getCodec(columnType).decompose(toSet)); @@ -189,28 +189,12 @@ public BoundStatement setBool(String name, boolean v) { * @return this BoundStatement. * * @throws IndexOutOfBoundsException if {@code i < 0 || i >= this.preparedStatement().variables().size()}. - * @throws InvalidTypeException if column {@code i} is of neither of the - * following types: INT, TIMESTAMP, BIGINT, COUNTER or VARINT. + * @throws InvalidTypeException if column {@code i} is not of type INT. */ public BoundStatement setInt(int i, int v) { - DataType.Native type = metadata().checkType(i, DataType.Native.INT, - DataType.Native.TIMESTAMP, - DataType.Native.BIGINT, - DataType.Native.COUNTER, - DataType.Native.VARINT); + DataType.Native type = metadata().checkType(i, DataType.Native.INT); - switch (type) { - case INT: - return setValue(i, Int32Type.instance.decompose(v)); - case TIMESTAMP: - case BIGINT: - case COUNTER: - return setValue(i, LongType.instance.decompose((long)v)); - case VARINT: - return setValue(i, IntegerType.instance.decompose(BigInteger.valueOf((long)v))); - default: - throw new AssertionError(); - } + return setValue(i, Int32Type.instance.decompose(v)); } /** @@ -220,8 +204,7 @@ public BoundStatement setInt(int i, int v) { * * @throws IllegalArgumentException if {@code name} is not a prepared * variable, i.e. if {@code !this.preparedStatement().variables().names().contains(name)}. - * @throws InvalidTypeException if column {@code name} is of neither of the - * following types: INT, TIMESTAMP, BIGINT, COUNTER or VARINT. + * @throws InvalidTypeException if column {@code i} is not of type INT. */ public BoundStatement setInt(String name, int v) { return setInt(metadata().getIdx(name), v); @@ -233,25 +216,11 @@ public BoundStatement setInt(String name, int v) { * @return this BoundStatement. * * @throws IndexOutOfBoundsException if {@code i < 0 || i >= this.preparedStatement().variables().size()}. - * @throws InvalidTypeException if column {@code i} is of neither of the - * following types: BIGINT, TIMESTAMP, COUNTER or VARINT. + * @throws InvalidTypeException if column {@code i} is of type BIGINT or COUNTER. 
*/ public BoundStatement setLong(int i, long v) { - DataType.Native type = metadata().checkType(i, DataType.Native.BIGINT, - DataType.Native.TIMESTAMP, - DataType.Native.COUNTER, - DataType.Native.VARINT); - - switch (type) { - case TIMESTAMP: - case BIGINT: - case COUNTER: - return setValue(i, LongType.instance.decompose(v)); - case VARINT: - return setValue(i, IntegerType.instance.decompose(BigInteger.valueOf(v))); - default: - throw new AssertionError(); - } + DataType.Native type = metadata().checkType(i, DataType.Native.BIGINT, DataType.Native.COUNTER); + return setValue(i, LongType.instance.decompose(v)); } /** @@ -261,8 +230,7 @@ public BoundStatement setLong(int i, long v) { * * @throws IllegalArgumentException if {@code name} is not a prepared * variable, i.e. if {@code !this.preparedStatement().variables().names().contains(name)}. - * @throws InvalidTypeException if column {@code name} is of neither of the - * following types: BIGINT, TIMESTAMP, COUNTER or VARINT. + * @throws InvalidTypeException if column {@code i} is of type BIGINT or COUNTER. */ public BoundStatement setLong(String name, long v) { return setLong(metadata().getIdx(name), v); @@ -300,24 +268,11 @@ public BoundStatement setDate(String name, Date v) { * @return this BoundStatement. * * @throws IndexOutOfBoundsException if {@code i < 0 || i >= this.preparedStatement().variables().size()}. - * @throws InvalidTypeException if column {@code i} is of neither of the - * following types: FLOAT, DOUBLE or DECIMAL. + * @throws InvalidTypeException if column {@code i} is not of type FLOAT. */ public BoundStatement setFloat(int i, float v) { - DataType.Native type = metadata().checkType(i, DataType.Native.FLOAT, - DataType.Native.DOUBLE, - DataType.Native.DECIMAL); - - switch (type) { - case FLOAT: - return setValue(i, FloatType.instance.decompose(v)); - case DOUBLE: - return setValue(i, DoubleType.instance.decompose((double)v)); - case DECIMAL: - return setValue(i, DecimalType.instance.decompose(BigDecimal.valueOf((double)v))); - default: - throw new AssertionError(); - } + DataType.Native type = metadata().checkType(i, DataType.Native.FLOAT); + return setValue(i, FloatType.instance.decompose(v)); } /** @@ -327,8 +282,7 @@ public BoundStatement setFloat(int i, float v) { * * @throws IllegalArgumentException if {@code name} is not a prepared * variable, i.e. if {@code !this.preparedStatement().variables().names().contains(name)}. - * @throws InvalidTypeException if column {@code name} is of neither of the - * following types: FLOAT, DOUBLE or DECIMAL. + * @throws InvalidTypeException if column {@code i} is not of type FLOAT. */ public BoundStatement setFloat(String name, float v) { return setFloat(metadata().getIdx(name), v); @@ -340,20 +294,11 @@ public BoundStatement setFloat(String name, float v) { * @return this BoundStatement. * * @throws IndexOutOfBoundsException if {@code i < 0 || i >= this.preparedStatement().variables().size()}. - * @throws InvalidTypeException if column {@code i} is of neither of the - * following types: DOUBLE or DECIMAL. + * @throws InvalidTypeException if column {@code i} is not of type DOUBLE. 
*/ public BoundStatement setDouble(int i, double v) { - DataType.Native type = metadata().checkType(i, DataType.Native.DOUBLE, - DataType.Native.DECIMAL); - switch (type) { - case DOUBLE: - return setValue(i, DoubleType.instance.decompose(v)); - case DECIMAL: - return setValue(i, DecimalType.instance.decompose(BigDecimal.valueOf(v))); - default: - throw new AssertionError(); - } + DataType.Native type = metadata().checkType(i, DataType.Native.DOUBLE); + return setValue(i, DoubleType.instance.decompose(v)); } /** @@ -363,8 +308,7 @@ public BoundStatement setDouble(int i, double v) { * * @throws IllegalArgumentException if {@code name} is not a prepared * variable, i.e. if {@code !this.preparedStatement().variables().names().contains(name)}. - * @throws InvalidTypeException if column {@code name} is of neither of the - * following types: DOUBLE or DECIMAL. + * @throws InvalidTypeException if column {@code i} is not of type DOUBLE. */ public BoundStatement setDouble(String name, double v) { return setDouble(metadata().getIdx(name), v); @@ -413,16 +357,16 @@ public BoundStatement setString(String name, String v) { * * This method validate that the type of the column set is BLOB. If you * want to insert manually serialized data into columns of another type, - * use {@link #setByteBufferUnsafe} instead. + * use {@link #setBytesUnsafe} instead. * * @return this BoundStatement. * * @throws IndexOutOfBoundsException if {@code i < 0 || i >= this.preparedStatement().variables().size()}. * @throws InvalidTypeException if column {@code i} is not of type BLOB. */ - public BoundStatement setByteBuffer(int i, ByteBuffer v) { + public BoundStatement setBytes(int i, ByteBuffer v) { DataType.Native type = metadata().checkType(i, DataType.Native.BLOB); - return setByteBufferUnsafe(i, v); + return setBytesUnsafe(i, v); } /** @@ -430,7 +374,7 @@ public BoundStatement setByteBuffer(int i, ByteBuffer v) { * * This method validate that the type of the column set is BLOB. If you * want to insert manually serialized data into columns of another type, - * use {@link #setByteBufferUnsafe} instead. + * use {@link #setBytesUnsafe} instead. * * @return this BoundStatement. * @@ -438,14 +382,14 @@ public BoundStatement setByteBuffer(int i, ByteBuffer v) { * variable, i.e. if {@code !this.preparedStatement().variables().names().contains(name)}. * @throws InvalidTypeException if column {@code name} is not of type BLOB. */ - public BoundStatement setByteBuffer(String name, ByteBuffer v) { - return setByteBuffer(metadata().getIdx(name), v); + public BoundStatement setBytes(String name, ByteBuffer v) { + return setBytes(metadata().getIdx(name), v); } /** * Set the {@code i}th value to the provided byte buffer. * - * Contrarily to {@link #setByteBuffer}, this method does not check the + * Contrarily to {@link #setBytes}, this method does not check the * type of the column set. If you insert data that is not compatible with * the type of the column, you will get an {@code InvalidQueryException} at * execute time. @@ -454,14 +398,14 @@ public BoundStatement setByteBuffer(String name, ByteBuffer v) { * * @throws IndexOutOfBoundsException if {@code i < 0 || i >= this.preparedStatement().variables().size()}. */ - public BoundStatement setByteBufferUnsafe(int i, ByteBuffer v) { + public BoundStatement setBytesUnsafe(int i, ByteBuffer v) { return setValue(i, v.duplicate()); } /** * Set the value for column {@code name} to the provided byte buffer. 
* - * Contrarily to {@link #setByteBuffer}, this method does not check the + * Contrarily to {@link #setBytes}, this method does not check the * type of the column set. If you insert data that is not compatible with * the type of the column, you will get an {@code InvalidQueryException} at * execute time. @@ -471,41 +415,8 @@ public BoundStatement setByteBufferUnsafe(int i, ByteBuffer v) { * @throws IllegalArgumentException if {@code name} is not a prepared * variable, i.e. if {@code !this.preparedStatement().variables().names().contains(name)}. */ - public BoundStatement setByteBufferUnsafe(String name, ByteBuffer v) { - return setByteBufferUnsafe(metadata().getIdx(name), v); - } - - /** - * Set the {@code i}th value to the provided byte array. - * - * This method validate that the type of the column set is BLOB. If you - * want to insert manually serialized data into columns of another type, - * use {@link #setByteBufferUnsafe} instead. - * - * @return this BoundStatement. - * - * @throws IndexOutOfBoundsException if {@code i < 0 || i >= this.preparedStatement().variables().size()}. - * @throws InvalidTypeException if column {@code i} is not of type BLOB. - */ - public BoundStatement setBytes(int i, byte[] v) { - return setValue(i, ByteBuffer.wrap(v)); - } - - /** - * Set the value for column {@code name} to the provided byte array. - * - * This method validate that the type of the column set is BLOB. If you - * want to insert manually serialized data into columns of another type, - * use {@link #setByteBufferUnsafe} instead. - * - * @return this BoundStatement. - * - * @throws IllegalArgumentException if {@code name} is not a prepared - * variable, i.e. if {@code !this.preparedStatement().variables().names().contains(name)}. - * @throws InvalidTypeException if column {@code name} is not of type BLOB. 
- */ - public BoundStatement setBytes(String name, byte[] v) { - return setBytes(metadata().getIdx(name), v); + public BoundStatement setBytesUnsafe(String name, ByteBuffer v) { + return setBytesUnsafe(metadata().getIdx(name), v); } /** @@ -644,7 +555,7 @@ public BoundStatement setList(int i, List v) { Class klass = v.get(0).getClass(); DataType.Native eltType = (DataType.Native)((DataType.Collection.List)type).getElementsType(); - if (!Codec.isCompatible(eltType, klass)) + if (!Codec.isCompatibleSupertype(eltType, klass)) throw new InvalidTypeException(String.format("Column %s is a %s, cannot set to a list of %s", metadata().getName(i), type, klass)); } @@ -690,7 +601,7 @@ public BoundStatement setMap(int i, Map v) { DataType.Collection.Map mapType = (DataType.Collection.Map)type; DataType.Native keysType = (DataType.Native)mapType.getKeysType(); DataType.Native valuesType = (DataType.Native)mapType.getValuesType(); - if (!Codec.isCompatible(keysType, keysClass) || !Codec.isCompatible(valuesType, valuesClass)) + if (!Codec.isCompatibleSupertype(keysType, keysClass) || !Codec.isCompatibleSupertype(valuesType, valuesClass)) throw new InvalidTypeException(String.format("Column %s is a %s, cannot set to a map of %s -> %s", metadata().getName(i), type, keysType, valuesType)); } @@ -732,7 +643,7 @@ public BoundStatement setSet(int i, Set v) { Class klass = v.iterator().next().getClass(); DataType.Native eltType = (DataType.Native)((DataType.Collection.Set)type).getElementsType(); - if (!Codec.isCompatible(eltType, klass)) + if (!Codec.isCompatibleSupertype(eltType, klass)) throw new InvalidTypeException(String.format("Column %s is a %s, cannot set to a set of %s", metadata().getName(i), type, klass)); } diff --git a/driver-core/src/main/java/com/datastax/driver/core/CQLRow.java b/driver-core/src/main/java/com/datastax/driver/core/CQLRow.java index f9df27379a3..14756c5c87a 100644 --- a/driver-core/src/main/java/com/datastax/driver/core/CQLRow.java +++ b/driver-core/src/main/java/com/datastax/driver/core/CQLRow.java @@ -143,22 +143,16 @@ public int getInt(String name) { * value is NULL, {@code 0L} is returned. * * @throws IndexOutOfBoundsException if {@code i < 0 || i >= this.columns().size()}. - * @throws InvalidTypeException if column {@code i} type is not one of: BIGINT, TIMESTAMP, - * INT or COUNTER. + * @throws InvalidTypeException if column {@code i} is not of type BIGINT or COUNTER. */ public long getLong(int i) { - DataType type = metadata.checkType(i, DataType.Native.BIGINT, - DataType.Native.TIMESTAMP, - DataType.Native.INT, - DataType.Native.COUNTER); + DataType type = metadata.checkType(i, DataType.Native.BIGINT, DataType.Native.COUNTER); ByteBuffer value = data.get(i); if (value == null || value.remaining() == 0) return 0L; - return type == DataType.Native.INT - ? (long)Int32Type.instance.compose(value) - : LongType.instance.compose(value); + return LongType.instance.compose(value); } /** @@ -170,8 +164,7 @@ public long getLong(int i) { * * @throws IllegalArgumentException if {@code name} is not part of the * ResultSet this row is part of, i.e. if {@code !this.columns().names().contains(name)}. - * @throws InvalidTypeException if column {@code name} type is not one of: BIGINT, TIMESTAMP, - * INT or COUNTER. + * @throws InvalidTypeException if column {@code i} is not of type BIGINT or COUNTER. */ public long getLong(String name) { return getLong(metadata.getIdx(name)); @@ -255,20 +248,16 @@ public float getFloat(String name) { * value is NULL, {@code 0.0} is returned. 
* * @throws IndexOutOfBoundsException if {@code i < 0 || i >= this.columns().size()}. - * @throws InvalidTypeException if column {@code i} is not of type - * DOUBLE or FLOAT. + * @throws InvalidTypeException if column {@code i} is not of type DOUBLE. */ public double getDouble(int i) { - DataType type = metadata.checkType(i, DataType.Native.DOUBLE, - DataType.Native.FLOAT); + DataType type = metadata.checkType(i, DataType.Native.DOUBLE); ByteBuffer value = data.get(i); if (value == null || value.remaining() == 0) return 0.0; - return type == DataType.Native.FLOAT - ? (double)FloatType.instance.compose(value) - : DoubleType.instance.compose(value); + return DoubleType.instance.compose(value); } /** @@ -280,8 +269,7 @@ public double getDouble(int i) { * * @throws IllegalArgumentException if {@code name} is not part of the * ResultSet this row is part of, i.e. if {@code !this.columns().names().contains(name)}. - * @throws InvalidTypeException if column {@code name} is not of type - * DOUBLE or FLOAT. + * @throws InvalidTypeException if column {@code name} is not of type DOUBLE. */ public double getDouble(String name) { return getDouble(metadata.getIdx(name)); @@ -301,7 +289,7 @@ public double getDouble(String name) { * * @throws IndexOutOfBoundsException if {@code i < 0 || i >= this.columns().size()}. */ - public ByteBuffer getByteBuffer(int i) { + public ByteBuffer getBytesUnsafe(int i) { metadata.checkBounds(i); ByteBuffer value = data.get(i); @@ -326,38 +314,33 @@ public ByteBuffer getByteBuffer(int i) { * @throws IllegalArgumentException if {@code name} is not part of the * ResultSet this row is part of, i.e. if {@code !this.columns().names().contains(name)}. */ - public ByteBuffer getByteBuffer(String name) { - return getByteBuffer(metadata.getIdx(name)); + public ByteBuffer getBytesUnsafe(String name) { + return getBytesUnsafe(metadata.getIdx(name)); } /** * Returns the {@code i}th value of this row has a byte array. - * - * Note: this method always return the bytes composing the value, even if - * the column is not of type BLOB. That is, this method never throw an - * InvalidTypeException. However, if the type is not BLOB, it is up to the - * caller to handle the returned value correctly. + *
+     * Note that this method validates that the column is of type BLOB. If you want to retrieve
+     * the bytes for columns of any type, use {@link #getBytesUnsafe(int)} instead.
      *
      * @param i the index of the column to retrieve.
      * @return the value of the {@code i}th column in this row as a byte array. If the
      * value is NULL, {@code null} is returned.
      *
      * @throws IndexOutOfBoundsException if {@code i < 0 || i >= this.columns().size()}.
+     * @throws InvalidTypeException if column {@code i} is not of type BLOB.
      */
-    public byte[] getBytes(int i) {
-        ByteBuffer bb = getByteBuffer(i);
-        byte[] result = new byte[bb.remaining()];
-        bb.get(result);
-        return result;
+    public ByteBuffer getBytes(int i) {
+        DataType type = metadata.checkType(i, DataType.Native.BLOB);
+        return getBytesUnsafe(i);
     }

     /**
      * Returns the value of column {@code name} has a byte array.
-     *
-     * Note: this method always return the bytes composing the value, even if
-     * the column is not of type BLOB. That is, this method never throw an
-     * InvalidTypeException. However, if the type is not BLOB, it is up to the
-     * caller to handle the returned value correctly.
+     *
+     * Note that this method validates that the column is of type BLOB. If you want to retrieve
+     * the bytes for columns of any type, use {@link #getBytesUnsafe(String)} instead.
      *
      * @param name the name of the column to retrieve.
      * @return the value of column {@code name} as a byte array. If the value is NULL,
      * {@code null} is returned.
      *
      * @throws IllegalArgumentException if {@code name} is not part of the
      * ResultSet this row is part of, i.e. if {@code !this.columns().names().contains(name)}.
+     * @throws InvalidTypeException if column {@code name} is not of type BLOB.
      */
-    public byte[] getBytes(String name) {
+    public ByteBuffer getBytes(String name) {
         return getBytes(metadata.getIdx(name));
     }
@@ -571,17 +555,12 @@ public InetAddress getInet(String name) {
      * elements are not of class {@code elementsClass}.
      */
     public List getList(int i, Class elementsClass) {
-        // TODO: this is not as flexible as the methods above. For instance,
-        //       with a list, one cannot ask for getList(i, Long.class). We
-        //       might want to improve that, though that reach into the
-        //       ListType.compose() method.
-
         DataType type = metadata.getType(i);
         if (!(type instanceof DataType.Collection.List))
             throw new InvalidTypeException(String.format("Column %s is not of list type", metadata.getName(i)));

         DataType.Native eltType = (DataType.Native)((DataType.Collection.List)type).getElementsType();
-        if (!Codec.isCompatible(eltType, elementsClass))
+        if (!Codec.isCompatibleSubtype(eltType, elementsClass))
             throw new InvalidTypeException(String.format("Column %s is a %s, cannot be retrieve as a list of %s", metadata.getName(i), type, elementsClass));

         ByteBuffer value = data.get(i);
@@ -631,7 +610,7 @@ public Set getSet(int i, Class elementsClass) {
             throw new InvalidTypeException(String.format("Column %s is not of set type", metadata.getName(i)));

         DataType.Native eltType = (DataType.Native)((DataType.Collection.Set)type).getElementsType();
-        if (!Codec.isCompatible(eltType, elementsClass))
+        if (!Codec.isCompatibleSubtype(eltType, elementsClass))
             throw new InvalidTypeException(String.format("Column %s is a %s, cannot be retrieve as a set of %s", metadata.getName(i), type, elementsClass));

         ByteBuffer value = data.get(i);
@@ -684,7 +663,7 @@ public Map getMap(int i, Class keysClass, Class valuesClass)
         DataType.Collection.Map mapType = (DataType.Collection.Map)type;
         DataType.Native keysType = (DataType.Native)mapType.getKeysType();
         DataType.Native valuesType = (DataType.Native)mapType.getValuesType();
-        if (!Codec.isCompatible(keysType, keysClass) || !Codec.isCompatible(valuesType, valuesClass))
+        if (!Codec.isCompatibleSubtype(keysType, keysClass) || !Codec.isCompatibleSubtype(valuesType, valuesClass))
             throw new InvalidTypeException(String.format("Column %s is a %s, cannot be retrieve as a map of %s -> %s", metadata.getName(i), type, keysClass, valuesClass));

         ByteBuffer value = data.get(i);
diff --git a/driver-core/src/main/java/com/datastax/driver/core/Cluster.java b/driver-core/src/main/java/com/datastax/driver/core/Cluster.java
index 8d55951215d..7b9e8c22ebc 100644
--- a/driver-core/src/main/java/com/datastax/driver/core/Cluster.java
+++ b/driver-core/src/main/java/com/datastax/driver/core/Cluster.java
@@ -5,6 +5,7 @@
 import java.net.UnknownHostException;
 import java.util.*;
 import java.util.concurrent.*;
+import java.util.concurrent.atomic.AtomicBoolean;

 import org.apache.cassandra.utils.MD5Digest;
 import org.apache.cassandra.transport.Event;
@@ -168,6 +169,18 @@ public Cluster.Configuration getConfiguration() {
         return manager.configuration;
     }

+    /**
+     * Shutdown this cluster instance.
+     *
+     * This closes all connections from all the sessions of this {@code
+     * Cluster} instance and reclaims all resources used by it.
+     *
+     * This method has no effect if the cluster was already shutdown.
+     */
+    public void shutdown() {
+        manager.shutdown();
+    }
+
     /**
      * Initializer for {@link Cluster} instances.
      */
@@ -397,7 +410,7 @@ public ConnectionsConfiguration getConnectionsConfiguration() {
     /**
      * The sessions and hosts managed by this a Cluster instance.
-     *
+     * <p>
* Note: the reason we create a Manager object separate from Cluster is * that Manager is not publicly visible. For instance, we wouldn't want * user to be able to call the {@link #onUp} and {@link #onDown} methods. @@ -422,6 +435,8 @@ class Manager implements Host.StateListener, Connection.DefaultResponseHandler { final ExecutorService executor = Executors.newCachedThreadPool(new NamedThreadFactory("Cassandra Java Driver worker")); + final AtomicBoolean isShutdown = new AtomicBoolean(false); + // All the queries that have been prepared (we keep them so we can // re-prepared them when a node fail or a new one join the cluster). // Note: we could move this down to the session level, but since @@ -449,6 +464,22 @@ private Session newSession() { return session; } + private void shutdown() { + if (!isShutdown.compareAndSet(false, true)) + return; + + logger.debug("Shutting down"); + + controlConnection.shutdown(); + + for (Session session : sessions) + session.shutdown(); + + reconnectionExecutor.shutdownNow(); + scheduledTasksExecutor.shutdownNow(); + executor.shutdownNow(); + } + public void onUp(Host host) { logger.trace(String.format("Host %s is UP", host)); @@ -567,6 +598,7 @@ private void prepareAllQueries(Host host) { // we wait for that to be finished. And maybe avoid multiple refresh at // the same time. public void submitSchemaRefresh(final String keyspace, final String table) { + logger.trace("Submitting schema refresh"); executor.submit(new Runnable() { public void run() { controlConnection.refreshSchema(keyspace, table); @@ -584,6 +616,8 @@ public void handle(Message.Response response) { final Event event = ((EventMessage)response).event; + logger.trace(String.format("Received event %s, scheduling delivery", response)); + // When handle is called, the current thread is a network I/O thread, and we don't want to block // it (typically addHost() will create the connection pool to the new node, which can take time) // Besides, events are usually sent a bit too early (since they're diff --git a/driver-core/src/main/java/com/datastax/driver/core/Codec.java b/driver-core/src/main/java/com/datastax/driver/core/Codec.java index 5793e6db53d..870a6190ad9 100644 --- a/driver-core/src/main/java/com/datastax/driver/core/Codec.java +++ b/driver-core/src/main/java/com/datastax/driver/core/Codec.java @@ -112,7 +112,8 @@ public static DataType rawTypeToDataType(AbstractType rawType) { throw new DriverInternalError("Unknown type: " + rawType); } - public static boolean isCompatible(DataType.Native type, Class klass) { + // Returns whether type can be safely subtyped to klass + public static boolean isCompatibleSubtype(DataType.Native type, Class klass) { switch (type) { case ASCII: return klass.isAssignableFrom(String.class); case BIGINT: return klass.isAssignableFrom(Long.class); @@ -133,4 +134,27 @@ public static boolean isCompatible(DataType.Native type, Class klass) { default: throw new RuntimeException("Unknown native type"); } } + + // Returns whether klass can be safely subtyped to klass, i.e. 
if type is a supertype of klass + public static boolean isCompatibleSupertype(DataType.Native type, Class klass) { + switch (type) { + case ASCII: return String.class.isAssignableFrom(klass); + case BIGINT: return Long.class.isAssignableFrom(klass); + case BLOB: return ByteBuffer.class.isAssignableFrom(klass); + case BOOLEAN: return Boolean.class.isAssignableFrom(klass); + case COUNTER: return Long.class.isAssignableFrom(klass); + case DECIMAL: return BigDecimal.class.isAssignableFrom(klass); + case DOUBLE: return Double.class.isAssignableFrom(klass); + case FLOAT: return Float.class.isAssignableFrom(klass); + case INET: return InetAddress.class.isAssignableFrom(klass); + case INT: return Integer.class.isAssignableFrom(klass); + case TEXT: return String.class.isAssignableFrom(klass); + case TIMESTAMP: return Date.class.isAssignableFrom(klass); + case UUID: return UUID.class.isAssignableFrom(klass); + case VARCHAR: return String.class.isAssignableFrom(klass); + case VARINT: return BigInteger.class.isAssignableFrom(klass); + case TIMEUUID: return UUID.class.isAssignableFrom(klass); + default: throw new RuntimeException("Unknown native type"); + } + } } diff --git a/driver-core/src/main/java/com/datastax/driver/core/ControlConnection.java b/driver-core/src/main/java/com/datastax/driver/core/ControlConnection.java index 271b61b6e0b..fc14dc66821 100644 --- a/driver-core/src/main/java/com/datastax/driver/core/ControlConnection.java +++ b/driver-core/src/main/java/com/datastax/driver/core/ControlConnection.java @@ -37,6 +37,8 @@ class ControlConnection implements Host.StateListener { private final ReconnectionPolicy.Factory reconnectionPolicyFactory = ReconnectionPolicy.Exponential.makeFactory(2 * 1000, 5 * 60 * 1000); private final AtomicReference reconnectionAttempt = new AtomicReference(); + private volatile boolean isShutdown; + public ControlConnection(Cluster.Manager cluster) { this.cluster = cluster; this.balancingPolicy = LoadBalancingPolicy.RoundRobin.Factory.INSTANCE.create(cluster.metadata.allHosts()); @@ -44,10 +46,22 @@ public ControlConnection(Cluster.Manager cluster) { // Only for the initial connection. Does not schedule retries if it fails public void connect() throws NoHostAvailableException { + if (isShutdown) + return; setNewConnection(reconnectInternal()); } + public void shutdown() { + isShutdown = true; + Connection connection = connectionRef.get(); + if (connection != null) + connection.close(); + } + private void reconnect() { + if (isShutdown) + return; + try { setNewConnection(reconnectInternal()); } catch (NoHostAvailableException e) { @@ -105,7 +119,7 @@ private Connection reconnectInternal() throws NoHostAvailableException { } } } - throw new NoHostAvailableException(errors == null ? Collections.emptyMap(): errors); + throw new NoHostAvailableException(errors == null ? Collections.emptyMap() : errors); } private Connection tryConnect(Host host) throws ConnectionException { diff --git a/driver-core/src/main/java/com/datastax/driver/core/DataType.java b/driver-core/src/main/java/com/datastax/driver/core/DataType.java index 2d9c7163ee5..545e3501efe 100644 --- a/driver-core/src/main/java/com/datastax/driver/core/DataType.java +++ b/driver-core/src/main/java/com/datastax/driver/core/DataType.java @@ -17,7 +17,7 @@ public interface DataType { * * The {@code COLLECTION} types are the maps, lists and sets. */ - public enum Kind { NATIVE, COLLECTION, CUSTOM } + public enum Kind { NATIVE, COLLECTION } /** * Returns whether this type is a collection type. 
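The direction of the two compatibility checks above is easy to get backwards,
so it is worth spelling out. isCompatibleSupertype is what the BoundStatement
setters use when binding: any value handed in must be an instance of the
column's Java type, so that type must be a supertype of the value's class.
isCompatibleSubtype is what the CQLRow getters use when reading: the class the
caller asks for must be able to hold the column's Java type. A small
illustration, assuming Codec is reachable from the calling code (it looks like
an internal class, so this is for exposition only):

    // TIMESTAMP maps to java.util.Date in the switch statements above.

    // Binding: a java.sql.Timestamp IS a java.util.Date, so it can be bound
    // to a TIMESTAMP column.
    Codec.isCompatibleSupertype(DataType.Native.TIMESTAMP, java.sql.Timestamp.class); // true

    // Reading: a Date value always fits in an Object reference...
    Codec.isCompatibleSubtype(DataType.Native.TIMESTAMP, Object.class);               // true
    // ...but not in a java.sql.Timestamp reference, since the driver only
    // promises to hand back a plain Date.
    Codec.isCompatibleSubtype(DataType.Native.TIMESTAMP, java.sql.Timestamp.class);   // false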
diff --git a/driver-core/src/main/java/com/datastax/driver/core/Session.java b/driver-core/src/main/java/com/datastax/driver/core/Session.java
index 842efab8987..9af0bea4994 100644
--- a/driver-core/src/main/java/com/datastax/driver/core/Session.java
+++ b/driver-core/src/main/java/com/datastax/driver/core/Session.java
@@ -3,6 +3,7 @@
 import java.net.InetSocketAddress;
 import java.util.*;
 import java.util.concurrent.*;
+import java.util.concurrent.atomic.AtomicBoolean;

 import com.datastax.driver.core.exceptions.*;
 import com.datastax.driver.core.configuration.*;
@@ -298,7 +299,7 @@ public ResultSet.Future executePreparedAsync(BoundStatement stmt, ConsistencyLev
     /**
      * Execute a prepared statement that had values provided for its bound
      * variables asynchronously.
-     *
+     * <p>
      * This method performs like {@link #executeAsync} but for prepared
      * statements. It return as soon as the query has been successfully sent to
      * the database.
@@ -318,6 +319,20 @@ public ResultSet.Future executePreparedAsync(BoundStatement stmt, QueryOptions q
         return manager.executeQuery(new ExecuteMessage(stmt.statement.id, Arrays.asList(stmt.values), ConsistencyLevel.toCassandraCL(queryOptions.getConsistencyLevel())), queryOptions);
     }

+    /**
+     * Shutdown this session instance.
+     *
+     * This closes all connections used by this session. Note that if you want
+     * to shutdown the full {@code Cluster} instance this session is part of,
+     * you should use {@link Cluster#shutdown} instead (which will call this
+     * method for all sessions but also release some additional resources).
+     *
+ * This method has no effect if the session was already shutdown. + */ + public void shutdown() { + manager.shutdown(); + } + private PreparedStatement toPreparedStatement(String query, Connection.Future future) throws NoHostAvailableException { try { @@ -369,6 +384,8 @@ static class Manager implements Host.StateListener { final HostConnectionPool.PoolState poolsState; + final AtomicBoolean isShutdown = new AtomicBoolean(false); + public Connection.Factory connectionFactory() { return cluster.manager.connectionFactory; } @@ -393,6 +410,15 @@ public Manager(Cluster cluster, Collection hosts) { addHost(host); } + private void shutdown() { + + if (!isShutdown.compareAndSet(false, true)) + return; + + for (HostConnectionPool pool : pools.values()) + pool.shutdown(); + } + private HostConnectionPool addHost(Host host) { try { HostDistance distance = loadBalancer.distance(host); diff --git a/driver-core/src/test/java/com/datastax/driver/core/CCMBridge.java b/driver-core/src/test/java/com/datastax/driver/core/CCMBridge.java new file mode 100644 index 00000000000..a91be0a42ac --- /dev/null +++ b/driver-core/src/test/java/com/datastax/driver/core/CCMBridge.java @@ -0,0 +1,196 @@ +package com.datastax.driver.core; + +import java.io.*; +import java.util.*; + +import org.junit.AfterClass; +import org.junit.Before; +import org.junit.BeforeClass; + +import com.datastax.driver.core.exceptions.*; +import static com.datastax.driver.core.TestUtils.*; + +import com.google.common.io.Files; + +import org.apache.log4j.ConsoleAppender; +import org.apache.log4j.Level; +import org.apache.log4j.Logger; +import org.apache.log4j.PatternLayout; + +public class CCMBridge { + + static { + Logger rootLogger = Logger.getRootLogger(); + rootLogger.setLevel(Level.INFO); + rootLogger.addAppender(new ConsoleAppender(new PatternLayout("%-5p [%t]: %m%n"))); + } + + private static final Logger logger = Logger.getLogger(CCMBridge.class); + + private static final String CASSANDRA_VERSION_REGEXP = "cassandra-\\d\\.\\d\\.\\d(-\\w+)?"; + + private static final File CASSANDRA_DIR; + private static final String CASSANDRA_VERSION; + static { + String version = System.getProperty("cassandra.version"); + if (version.matches(CASSANDRA_VERSION_REGEXP)) { + CASSANDRA_DIR = null; + CASSANDRA_VERSION = "-v " + version; + } else { + CASSANDRA_DIR = new File(version); + CASSANDRA_VERSION = ""; + } + } + + private final Runtime runtime = Runtime.getRuntime(); + private final File ccmDir; + + private CCMBridge() + { + this.ccmDir = Files.createTempDir(); + } + + public static CCMBridge create(String name) { + CCMBridge bridge = new CCMBridge(); + bridge.execute("ccm create %s -b %s", name, CASSANDRA_VERSION); + // Small sleep, otherwise the cluster is not always available + try { Thread.sleep(200); } catch (InterruptedException e) {} + return bridge; + } + + public static CCMBridge create(String name, int nbNodes) { + CCMBridge bridge = new CCMBridge(); + bridge.execute("ccm create %s -n %d -s -b %s", name, nbNodes, CASSANDRA_VERSION); + // Small sleep, otherwise the cluster is not always available + try { Thread.sleep(200); } catch (InterruptedException e) {} + return bridge; + } + + public void start() { + execute("ccm start"); + } + + public void stop() { + execute("ccm stop"); + } + + public void start(int n) { + execute("ccm node%d start", n); + } + + public void stop(int n) { + execute("ccm node%d stop", n); + } + + public void remove() { + stop(); + execute("ccm remove"); + } + + public void bootstrapNode(int n) { + execute("ccm add node%d 
-i 127.0.0.%d -s; ccm start", n, n); + } + + private void execute(String command, Object... args) { + + try { + Process p = runtime.exec(String.format(command, args) + " --config-dir=" + ccmDir, null, CASSANDRA_DIR); + int retValue = p.waitFor(); + + if (retValue != 0) { + // TODO: try to gather what the problem was + BufferedReader outReader = new BufferedReader(new InputStreamReader(p.getInputStream())); + BufferedReader errReader = new BufferedReader(new InputStreamReader(p.getErrorStream())); + + String line = outReader.readLine(); + while (line != null) { + logger.info("out> " + line); + line = outReader.readLine(); + } + line = errReader.readLine(); + while (line != null) { + logger.error("err> " + line); + line = errReader.readLine(); + } + throw new RuntimeException(); + } + } catch (IOException e) { + throw new RuntimeException(e); + } catch (InterruptedException e) { + throw new RuntimeException(e); + } + } + + public static abstract class PerClassSingleNodeCluster { + + protected static CCMBridge cassandraCluster; + private static boolean erroredOut; + private static boolean schemaCreated; + + protected static Cluster cluster; + protected static Session session; + + protected abstract Collection getTableDefinitions(); + + @BeforeClass + public static void createCluster() { + erroredOut = false; + schemaCreated = false; + cassandraCluster = CCMBridge.create("test", 1); + try { + cluster = new Cluster.Builder().addContactPoints("127.0.0.1").build(); + session = cluster.connect(); + } catch (NoHostAvailableException e) { + erroredOut = true; + throw new RuntimeException(e); + } + } + + @AfterClass + public static void discardCluster() { + if (cluster != null) + cluster.shutdown(); + + if (cassandraCluster == null) { + logger.error("No cluster to discard"); + } else if (erroredOut) { + cassandraCluster.stop(); + logger.info("Error during tests, kept C* logs in " + cassandraCluster.ccmDir); + } else { + cassandraCluster.remove(); + cassandraCluster.ccmDir.delete(); + } + } + + + @Before + public void maybeCreateSchema() throws NoHostAvailableException { + + try { + if (schemaCreated) + return; + + try { + session.execute(String.format(CREATE_KEYSPACE_SIMPLE_FORMAT, SIMPLE_KEYSPACE, 1)); + } catch (AlreadyExistsException e) { + // It's ok, ignore + } + + session.execute("USE " + SIMPLE_KEYSPACE); + + for (String tableDef : getTableDefinitions()) { + try { + session.execute(tableDef); + } catch (AlreadyExistsException e) { + // It's ok, ignore + } + } + + schemaCreated = true; + } catch (NoHostAvailableException e) { + erroredOut = true; + throw e; + } + } + } +} diff --git a/driver-core/src/test/java/com/datastax/driver/core/PreparedStatementTest.java b/driver-core/src/test/java/com/datastax/driver/core/PreparedStatementTest.java new file mode 100644 index 00000000000..1fad5dc8069 --- /dev/null +++ b/driver-core/src/test/java/com/datastax/driver/core/PreparedStatementTest.java @@ -0,0 +1,165 @@ +package com.datastax.driver.core; + +import java.util.*; + +import org.junit.Before; +import org.junit.BeforeClass; +import org.junit.Test; +import static org.junit.Assert.*; + +import com.datastax.driver.core.exceptions.*; +import static com.datastax.driver.core.TestUtils.*; + +/** + * Prepared statement tests. + * + * Note: this class also happens to test all the get methods from CQLRow. 
+ */ +public class PreparedStatementTest extends CCMBridge.PerClassSingleNodeCluster { + + private static final String ALL_NATIVE_TABLE = "all_native"; + private static final String ALL_LIST_TABLE = "all_list"; + private static final String ALL_SET_TABLE = "all_set"; + private static final String ALL_MAP_TABLE = "all_map"; + + protected Collection getTableDefinitions() { + + List defs = new ArrayList(4); + + StringBuilder sb = new StringBuilder(); + sb.append("CREATE TABLE ").append(ALL_NATIVE_TABLE).append(" (k text PRIMARY KEY"); + for (DataType.Native type : DataType.Native.values()) { + // This must be handled separatly + if (type == DataType.Native.COUNTER) + continue; + sb.append(", c_").append(type).append(" ").append(type); + } + sb.append(")"); + defs.add(sb.toString()); + + sb = new StringBuilder(); + sb.append("CREATE TABLE ").append(ALL_LIST_TABLE).append(" (k text PRIMARY KEY"); + for (DataType.Native type : DataType.Native.values()) { + // This must be handled separatly + if (type == DataType.Native.COUNTER) + continue; + sb.append(", c_list_").append(type).append(" list<").append(type).append(">"); + } + sb.append(")"); + defs.add(sb.toString()); + + sb = new StringBuilder(); + sb.append("CREATE TABLE ").append(ALL_SET_TABLE).append(" (k text PRIMARY KEY"); + for (DataType.Native type : DataType.Native.values()) { + // This must be handled separatly + if (type == DataType.Native.COUNTER) + continue; + sb.append(", c_set_").append(type).append(" set<").append(type).append(">"); + } + sb.append(")"); + defs.add(sb.toString()); + + sb = new StringBuilder(); + sb.append("CREATE TABLE ").append(ALL_MAP_TABLE).append(" (k text PRIMARY KEY"); + for (DataType.Native keyType : DataType.Native.values()) { + // This must be handled separatly + if (keyType == DataType.Native.COUNTER) + continue; + + for (DataType.Native valueType : DataType.Native.values()) { + // This must be handled separatly + if (valueType == DataType.Native.COUNTER) + continue; + sb.append(", c_map_").append(keyType).append("_").append(valueType).append(" map<").append(keyType).append(",").append(valueType).append(">"); + } + } + sb.append(")"); + defs.add(sb.toString()); + + return defs; + } + + @Test + public void preparedNativeTest() throws NoHostAvailableException { + // Test preparing/bounding for all native types + for (DataType.Native type : DataType.Native.values()) { + // This must be handled separatly + if (type == DataType.Native.COUNTER) + continue; + + String name = "c_" + type; + PreparedStatement ps = session.prepare(String.format("INSERT INTO %s(k, %s) VALUES ('prepared_native', ?)", ALL_NATIVE_TABLE, name)); + BoundStatement bs = ps.newBoundStatement(); + session.executePrepared(setBoundValue(bs, name, type, getFixedValue(type))); + + CQLRow row = session.execute(String.format("SELECT %s FROM %s WHERE k='prepared_native'", name, ALL_NATIVE_TABLE)).fetchOne(); + assertEquals("For type " + type, getFixedValue(type), getValue(row, name, type)); + } + } + + @Test + public void prepareListTest() throws NoHostAvailableException { + // Test preparing/bounding for all possible list types + for (DataType.Native rawType : DataType.Native.values()) { + // This must be handled separatly + if (rawType == DataType.Native.COUNTER) + continue; + + String name = "c_list_" + rawType; + DataType type = new DataType.Collection.List(rawType); + List value = (List)getFixedValue(type);; + PreparedStatement ps = session.prepare(String.format("INSERT INTO %s(k, %s) VALUES ('prepared_list', ?)", ALL_LIST_TABLE, name)); + 
BoundStatement bs = ps.newBoundStatement(); + session.executePrepared(setBoundValue(bs, name, type, value)); + + CQLRow row = session.execute(String.format("SELECT %s FROM %s WHERE k='prepared_list'", name, ALL_LIST_TABLE)).fetchOne(); + assertEquals("For type " + type, value, getValue(row, name, type)); + } + } + + @Test + public void prepareSetTest() throws NoHostAvailableException { + // Test preparing/bounding for all possible set types + for (DataType.Native rawType : DataType.Native.values()) { + // This must be handled separatly + if (rawType == DataType.Native.COUNTER) + continue; + + String name = "c_set_" + rawType; + DataType type = new DataType.Collection.Set(rawType); + Set value = (Set)getFixedValue(type);; + PreparedStatement ps = session.prepare(String.format("INSERT INTO %s(k, %s) VALUES ('prepared_set', ?)", ALL_SET_TABLE, name)); + BoundStatement bs = ps.newBoundStatement(); + session.executePrepared(setBoundValue(bs, name, type, value)); + + CQLRow row = session.execute(String.format("SELECT %s FROM %s WHERE k='prepared_set'", name, ALL_SET_TABLE)).fetchOne(); + assertEquals("For type " + type, value, getValue(row, name, type)); + } + } + + @Test + public void prepareMapTest() throws NoHostAvailableException { + // Test preparing/bounding for all possible map types + for (DataType.Native rawKeyType : DataType.Native.values()) { + // This must be handled separatly + if (rawKeyType == DataType.Native.COUNTER) + continue; + + for (DataType.Native rawValueType : DataType.Native.values()) { + // This must be handled separatly + if (rawValueType == DataType.Native.COUNTER) + continue; + + String name = "c_map_" + rawKeyType + "_" + rawValueType; + DataType type = new DataType.Collection.Map(rawKeyType, rawValueType); + Map value = (Map)getFixedValue(type);; + PreparedStatement ps = session.prepare(String.format("INSERT INTO %s(k, %s) VALUES ('prepared_map', ?)", ALL_MAP_TABLE, name)); + BoundStatement bs = ps.newBoundStatement(); + session.executePrepared(setBoundValue(bs, name, type, value)); + + CQLRow row = session.execute(String.format("SELECT %s FROM %s WHERE k='prepared_map'", name, ALL_MAP_TABLE)).fetchOne(); + assertEquals("For type " + type, value, getValue(row, name, type)); + } + } + } +} diff --git a/driver-core/src/test/java/com/datastax/driver/core/SessionTest.java b/driver-core/src/test/java/com/datastax/driver/core/SessionTest.java index b393831e17b..b029151c578 100644 --- a/driver-core/src/test/java/com/datastax/driver/core/SessionTest.java +++ b/driver-core/src/test/java/com/datastax/driver/core/SessionTest.java @@ -4,232 +4,71 @@ import org.junit.BeforeClass; import org.junit.Test; -import static junit.framework.Assert.*; - -import org.apache.log4j.ConsoleAppender; -import org.apache.log4j.Level; -import org.apache.log4j.Logger; -import org.apache.log4j.PatternLayout; +import static org.junit.Assert.*; import com.datastax.driver.core.exceptions.*; +import static com.datastax.driver.core.TestUtils.*; -public class SessionTest { - - @BeforeClass - public static void classSetUp() { - Logger rootLogger = Logger.getRootLogger(); - rootLogger.setLevel(Level.TRACE); - rootLogger.addAppender(new ConsoleAppender(new PatternLayout("%-5p [%t]: %m%n"))); - } - - //@Test - //public void MultiNodeExecuteTest() throws Exception { - - // Cluster cluster = new Cluster.Builder().addContactPoints("127.0.0.1", "127.0.0.2").build(); - // Session session = cluster.connect(); - - // session.execute("CREATE KEYSPACE test_ks WITH strategy_class = SimpleStrategy AND 
strategy_options:replication_factor = 1"); - // session.use("test_ks"); - // session.execute("CREATE TABLE test (k text PRIMARY KEY, i int, f float)"); - - // ResultSet rs; - - // rs = session.execute("INSERT INTO test (k, i, f) VALUES ('foo', 0, 0.2)"); - // assertTrue(rs.isExhausted()); - - // rs = session.execute("INSERT INTO test (k, i, f) VALUES ('bar', 1, 3.4)"); - // assertTrue(rs.isExhausted()); - - // rs = session.execute("SELECT * FROM test"); - // List l = rs.fetchAll(); - // assertEquals(2, l.size()); - - // CQLRow r; - // r = l.get(0); - // assertEquals("bar", r.getString(0)); - // assertEquals("bar", r.getString("k")); - // assertEquals(1, r.getInt("i")); - // assertEquals(3.4, r.getFloat("f"), 0.01); - - // r = l.get(1); - // assertEquals("foo", r.getString("k")); - // assertEquals(0, r.getInt("i")); - // assertEquals(0.2, r.getFloat("f"), 0.01); - //} - - //@Test - //public void PreparedStatementTest() throws Exception { - - // Cluster cluster = new Cluster.Builder().addContactPoint("localhost").build(); - // Session session = cluster.connect(); - - // try - // { - // session.execute("CREATE KEYSPACE test_ks WITH replication = { 'class' : 'SimpleStrategy', 'replication_factor' : 1 }"); - // session.execute("USE test_ks"); - // session.execute("CREATE TABLE test_2 (k text, i int, f float, PRIMARY KEY(k, i))"); - // } catch (Exception e) { - // // Skip if already created - // session.execute("USE test_ks"); - // } - - // PreparedStatement insertStmt = session.prepare("INSERT INTO test_2 (k, i, f) VALUES (?, ?, ?)"); - // PreparedStatement selectStmt = session.prepare("SELECT * FROM test_2 WHERE k = ?"); - - // ResultSet rs; - // BoundStatement bs; - - // bs = insertStmt.newBoundStatement().setString(0, "prep").setInt("i", 1).setFloat(2, 0.1f); - // rs = session.executePrepared(bs); - - // bs = insertStmt.newBoundStatement().setString(0, "prep").setFloat("f", 0.2f).setInt(1, 2); - // rs = session.executePrepared(bs); - - // session.executePrepared(insertStmt.bind("prep", 3, 42.0f)); - - // bs = selectStmt.newBoundStatement().setString("k", "prep"); - // rs = session.executePrepared(bs); - - // List l = rs.fetchAll(); - // assertEquals(3, l.size()); - - // CQLRow r; - // r = l.get(0); - // assertEquals("prep", r.getString(0)); - // assertEquals(1, r.getInt("i")); - // assertEquals(0.1, r.getFloat("f"), 0.01); +/** + * Simple test of the Sessions methods against a one node cluster. 
+ */ +public class SessionTest extends CCMBridge.PerClassSingleNodeCluster { - // r = l.get(1); - // assertEquals("prep", r.getString("k")); - // assertEquals(2, r.getInt("i")); - // assertEquals(0.2, r.getFloat("f"), 0.01); + private static final String TABLE = "test"; - // r = l.get(2); - // assertEquals("prep", r.getString("k")); - // assertEquals(3, r.getInt("i")); - // assertEquals(42.0f, r.getFloat("f"), 0.01); - //} + private static final String INSERT_FORMAT = "INSERT INTO %s (k, t, i, f) VALUES ('%s', '%s', %d, %f)"; + private static final String SELECT_ALL_FORMAT = "SELECT * FROM %s"; - //@Test - //public void CollectionsTest() throws Exception { - - // Cluster cluster = new Cluster.Builder().addContactPoints("127.0.0.1").build(); - // Session session = cluster.connect(); - - // try { - // session.execute("CREATE KEYSPACE test_ks WITH replication = { 'class' : 'SimpleStrategy', 'replication_factor' : 1 }"); - // // We should deal with that sleep - // try { Thread.sleep(1000); } catch (Exception e) {} - // session.execute("USE test_ks"); - // session.execute("CREATE TABLE test (k text PRIMARY KEY, l list, s set, m map)"); - // } catch (Exception e) { - // // Skip if already created - // session.execute("USE test_ks"); - // } - - // session.execute("INSERT INTO test (k, l, s, m) VALUES ('k', [3, 2, 1], { 3, 2, 1}, { 1349286846012 : 2 })"); - // for (CQLRow row : session.execute("SELECT * FROM test")) { - // List l = row.getList("l", Integer.class); - // Set s = row.getSet("s", String.class); - // Map m = row.getMap("m", Date.class, Integer.class); - - // System.out.println("l = " + l); - // System.out.println("s = " + s); - // System.out.println("m = " + m); - // } - - // System.out.println("-------"); - - // BoundStatement stmt = session.prepare("INSERT INTO test (k, l, s, m) VALUES ('k2', ?, ?, ?)").newBoundStatement(); - // stmt.setList(0, Arrays.asList(new Integer[]{ 5, 4, 3, 2, 1 })); - // stmt.setSet(1, new HashSet(Arrays.asList(new String[]{ "5", "4", "3", "2", "1" }))); - // stmt.setMap(2, new HashMap(){{ put(new Date(1349286846012L), 4); }}); - // session.executePrepared(stmt); - - // for (CQLRow row : session.execute("SELECT * FROM test WHERE k = 'k2'")) { - // List l = row.getList("l", Integer.class); - // Set s = row.getSet("s", String.class); - // Map m = row.getMap("m", Date.class, Integer.class); - - // System.out.println("l = " + l); - // System.out.println("s = " + s); - // System.out.println("m = " + m); - // } - //} - - //@Test - //public void MultiNodeContinuousExecuteTest() throws Exception { - - // Cluster cluster = new Cluster.Builder().addContactPoints("127.0.0.1").build(); - // Session session = cluster.connect(); - - // try { - // session.execute("CREATE KEYSPACE test_ks WITH replication = { 'class' : 'SimpleStrategy', 'replication_factor' : 1 }"); - // // We should deal with that sleep - // try { Thread.sleep(2000); } catch (Exception e) {} - // session.execute("USE test_ks"); - // session.execute("CREATE TABLE test (k text PRIMARY KEY, i int, f float)"); - // } catch (AlreadyExistsException e) { - // // Skip if already exists - // session.execute("USE test_ks"); - // } - - // //System.out.println("--- Schema ---\n" + cluster.getMetadata()); - - // for (int i = 0; i < 10000; ++i) { - // System.out.println(">> " + i); - // session.execute(String.format("INSERT INTO test (k, i, f) VALUES ('k%d', %d, %d.2)", i, i, i)); - // Thread.currentThread().sleep(1000); - // } - //} - - //@Test - //public void SchemaTest() throws Exception { - - // Cluster cluster = 
new Cluster.Builder().addContactPoints("127.0.0.1").build(); - // Session session = cluster.connect(); - - // try { - // session.execute("CREATE KEYSPACE test_ks WITH replication = { 'class' : 'SimpleStrategy', 'replication_factor' : 1 }"); - // // We should deal with that sleep - // try { Thread.sleep(2000); } catch (Exception e) {} - // session.execute("USE test_ks"); - // session.execute("CREATE TABLE test (k text PRIMARY KEY, i int, f float)"); - // } catch (AlreadyExistsException e) { - // // Skip if already exists - // session.execute("USE test_ks"); - // } - - // for (int i = 0; i < 10000; ++i) { - // System.out.println("--- Schema " + i + " ---"); - // System.out.println(cluster.getMetadata().getKeyspace("test_ks").exportAsString()); - // Thread.currentThread().sleep(4000); - // } - //} - - //@Test - //public void TracingTest() throws Exception { - - // Cluster cluster = new Cluster.Builder().addContactPoints("127.0.0.1").build(); - // Session session = cluster.connect(); + protected Collection getTableDefinitions() { + return Collections.singleton(String.format("CREATE TABLE %s (k text PRIMARY KEY, t text, i int, f float)", TABLE)); + } - // try { - // session.execute("CREATE KEYSPACE test_ks WITH replication = { 'class' : 'SimpleStrategy', 'replication_factor' : 2 }"); - // // We should deal with that sleep - // try { Thread.sleep(1000); } catch (Exception e) {} - // session.execute("USE test_ks"); - // session.execute("CREATE TABLE test (k text PRIMARY KEY, c int)"); - // } catch (Exception e) { - // // Skip if already created - // session.execute("USE test_ks"); - // } + @Test + public void executeTest() throws Exception { + // Simple calls to all versions of the execute/executeAsync methods + String key = "execute_test"; + ResultSet rs = session.execute(String.format(INSERT_FORMAT, TABLE, key, "foo", 42, 24.03f)); + assertTrue(rs.isExhausted()); + + // execute + checkExecuteResultSet(session.execute(String.format(SELECT_ALL_FORMAT, TABLE)), key); + checkExecuteResultSet(session.execute(String.format(SELECT_ALL_FORMAT, TABLE), ConsistencyLevel.ONE), key); + checkExecuteResultSet(session.execute(String.format(SELECT_ALL_FORMAT, TABLE), new QueryOptions(ConsistencyLevel.ONE)), key); + + // executeAsync + checkExecuteResultSet(session.executeAsync(String.format(SELECT_ALL_FORMAT, TABLE)).getUninterruptibly(), key); + checkExecuteResultSet(session.executeAsync(String.format(SELECT_ALL_FORMAT, TABLE), ConsistencyLevel.ONE).getUninterruptibly(), key); + checkExecuteResultSet(session.executeAsync(String.format(SELECT_ALL_FORMAT, TABLE), new QueryOptions(ConsistencyLevel.ONE)).getUninterruptibly(), key); + } - // ResultSet rs = session.execute("INSERT INTO test (k, c) VALUES ('k', 1)", new QueryOptions(ConsistencyLevel.QUORUM).setTracing()); - // QueryTrace qt = rs.getQueryTrace(); + @Test + public void executePreparedTest() throws Exception { + // Simple calls to all versions of the executePrepared/executePreparedAsync methods + String key = "execute_prepared_test"; + ResultSet rs = session.execute(String.format(INSERT_FORMAT, TABLE, key, "foo", 42, 24.03f)); + assertTrue(rs.isExhausted()); + + PreparedStatement p = session.prepare(String.format(SELECT_ALL_FORMAT + " WHERE k = ?", TABLE)); + BoundStatement bs = p.bind(key); + + // executePrepared + checkExecuteResultSet(session.executePrepared(bs), key); + checkExecuteResultSet(session.executePrepared(bs, ConsistencyLevel.ONE), key); + checkExecuteResultSet(session.executePrepared(bs, new QueryOptions(ConsistencyLevel.ONE)), key); + + 
// executePreparedAsync + checkExecuteResultSet(session.executePreparedAsync(bs).getUninterruptibly(), key); + checkExecuteResultSet(session.executePreparedAsync(bs, ConsistencyLevel.ONE).getUninterruptibly(), key); + checkExecuteResultSet(session.executePreparedAsync(bs, new QueryOptions(ConsistencyLevel.ONE)).getUninterruptibly(), key); + } - // Thread.sleep(100); - // System.out.println("Trace = " + rs.getQueryTrace()); - // for (QueryTrace.Event event : qt.getEvents()) - // System.out.println(" >> " + event); - //} + private static void checkExecuteResultSet(ResultSet rs, String key) { + assertTrue(!rs.isExhausted()); + CQLRow row = rs.fetchOne(); + assertTrue(rs.isExhausted()); + assertEquals(key, row.getString("k")); + assertEquals("foo", row.getString("t")); + assertEquals(42, row.getInt("i")); + assertEquals(24.03f, row.getFloat("f"), 0.1f); + } } diff --git a/driver-core/src/test/java/com/datastax/driver/core/TestUtils.java b/driver-core/src/test/java/com/datastax/driver/core/TestUtils.java new file mode 100644 index 00000000000..ab87195f414 --- /dev/null +++ b/driver-core/src/test/java/com/datastax/driver/core/TestUtils.java @@ -0,0 +1,228 @@ +package com.datastax.driver.core; + +import java.math.*; +import java.net.InetAddress; +import java.nio.ByteBuffer; +import java.util.*; + +/** + * A number of static fields/methods handy for tests. + */ +public abstract class TestUtils { + + public static final String CREATE_KEYSPACE_SIMPLE_FORMAT = "CREATE KEYSPACE %s WITH replication = { 'class' : 'SimpleStrategy', 'replication_factor' : %d }"; + + public static final String SIMPLE_KEYSPACE = "ks"; + + public static BoundStatement setBoundValue(BoundStatement bs, String name, DataType type, Object value) { + if (type.isCollection()) { + switch (type.asCollection().getKind()) { + case LIST: + bs.setList(name, (List)value); + break; + case SET: + bs.setSet(name, (Set)value); + break; + case MAP: + bs.setMap(name, (Map)value); + break; + } + + } else { + switch (type.asNative()) { + case ASCII: + bs.setString(name, (String)value); + break; + case BIGINT: + bs.setLong(name, (Long)value); + break; + case BLOB: + bs.setBytes(name, (ByteBuffer)value); + break; + case BOOLEAN: + bs.setBool(name, (Boolean)value); + break; + case COUNTER: + // Just a no-op, we shouldn't handle counters the same way than other types + break; + case DECIMAL: + bs.setDecimal(name, (BigDecimal)value); + break; + case DOUBLE: + bs.setDouble(name, (Double)value); + break; + case FLOAT: + bs.setFloat(name, (Float)value); + break; + case INET: + bs.setInet(name, (InetAddress)value); + break; + case INT: + bs.setInt(name, (Integer)value); + break; + case TEXT: + bs.setString(name, (String)value); + break; + case TIMESTAMP: + bs.setDate(name, (Date)value); + break; + case UUID: + bs.setUUID(name, (UUID)value); + break; + case VARCHAR: + bs.setString(name, (String)value); + break; + case VARINT: + bs.setVarint(name, (BigInteger)value); + break; + case TIMEUUID: + bs.setUUID(name, (UUID)value); + break; + default: + throw new RuntimeException("Missing handling of " + type); + } + } + return bs; + } + + public static Object getValue(CQLRow row, String name, DataType type) { + if (type.isCollection()) { + switch (type.asCollection().getKind()) { + case LIST: + return row.getList(name, classOf(((DataType.Collection.List)type).getElementsType())); + case SET: + return row.getSet(name, classOf(((DataType.Collection.Set)type).getElementsType())); + case MAP: + DataType.Collection.Map mt = (DataType.Collection.Map)type; + return 
row.getMap(name, classOf(mt.getKeysType()), classOf(mt.getValuesType())); + } + } else { + switch (type.asNative()) { + case ASCII: + return row.getString(name); + case BIGINT: + return row.getLong(name); + case BLOB: + return row.getBytes(name); + case BOOLEAN: + return row.getBool(name); + case COUNTER: + return row.getLong(name); + case DECIMAL: + return row.getDecimal(name); + case DOUBLE: + return row.getDouble(name); + case FLOAT: + return row.getFloat(name); + case INET: + return row.getInet(name); + case INT: + return row.getInt(name); + case TEXT: + return row.getString(name); + case TIMESTAMP: + return row.getDate(name); + case UUID: + return row.getUUID(name); + case VARCHAR: + return row.getString(name); + case VARINT: + return row.getVarint(name); + case TIMEUUID: + return row.getUUID(name); + } + } + throw new RuntimeException("Missing handling of " + type); + } + + private static Class classOf(DataType type) { + assert !type.isCollection(); + + switch (type.asNative()) { + case ASCII: + case TEXT: + case VARCHAR: + return String.class; + case BIGINT: + case COUNTER: + return Long.class; + case BLOB: + return ByteBuffer.class; + case BOOLEAN: + return Boolean.class; + case DECIMAL: + return BigDecimal.class; + case DOUBLE: + return Double.class; + case FLOAT: + return Float.class; + case INET: + return InetAddress.class; + case INT: + return Integer.class; + case TIMESTAMP: + return Date.class; + case UUID: + case TIMEUUID: + return UUID.class; + case VARINT: + return BigInteger.class; + } + throw new RuntimeException("Missing handling of " + type); + } + + // Always return the "same" value for each type + public static Object getFixedValue(final DataType type) { + try { + if (type.isCollection()) { + switch (type.asCollection().getKind()) { + case LIST: + return new ArrayList(){{ add(getFixedValue(((DataType.Collection.List)type).getElementsType())); }}; + case SET: + return new HashSet(){{ add(getFixedValue(((DataType.Collection.Set)type).getElementsType())); }}; + case MAP: + final DataType.Collection.Map mt = (DataType.Collection.Map)type; + return new HashMap(){{ put(getFixedValue(mt.getKeysType()), getFixedValue(mt.getValuesType())); }}; + } + } else { + switch (type.asNative()) { + case ASCII: + return "An ascii string"; + case BIGINT: + return 42L; + case BLOB: + return ByteBuffer.wrap(new byte[]{ (byte)4, (byte)12, (byte)1 }); + case BOOLEAN: + return true; + case COUNTER: + throw new UnsupportedOperationException("Cannot 'getSomeValue' for counters"); + case DECIMAL: + return new BigDecimal("3.1415926535897932384626433832795028841971693993751058209749445923078164062862089986280348253421170679"); + case DOUBLE: + return 3.142519; + case FLOAT: + return 3.142519f; + case INET: + return InetAddress.getByAddress(new byte[]{(byte)127, (byte)0, (byte)0, (byte)1}); + case INT: + return 24; + case TEXT: + return "A text string"; + case TIMESTAMP: + return new Date(1352288289L); + case UUID: + return UUID.fromString("087E9967-CCDC-4A9B-9036-05930140A41B"); + case VARCHAR: + return "A varchar string"; + case VARINT: + return new BigInteger("123456789012345678901234567890"); + case TIMEUUID: + return UUID.fromString("FE2B4360-28C6-11E2-81C1-0800200C9A66"); + } + } + } catch (Exception e) { + throw new RuntimeException(e); + } + throw new RuntimeException("Missing handling of " + type); + } +} From 1ca18c64233d2192f2cc3d8fbc6a2d2b33ebcfdc Mon Sep 17 00:00:00 2001 From: Sylvain Lebresne Date: Thu, 8 Nov 2012 18:58:50 +0100 Subject: [PATCH 065/719] Add authentication --- 
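For illustration, a minimal sketch of how the pieces introduced below fit together. AuthInfoProvider.Simple and Builder.withAuthInfoProvider are added by this patch; the "username"/"password" property names are an assumption that depends on the authenticator configured on the nodes (they match what Cassandra's SimpleAuthenticator expects):

    AuthInfoProvider auth = new AuthInfoProvider.Simple()
        .add("username", "cassandra")
        .add("password", "cassandra");

    Cluster cluster = new Cluster.Builder()
        .addContactPoints("127.0.0.1")
        .withAuthInfoProvider(auth)
        .build();
    Session session = cluster.connect();

AuthInfoProvider.NONE, the Builder's default, is the no-authentication case.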
.../core/AbstractReconnectionHandler.java | 11 +++ .../driver/core/AuthInfoProvider.java | 97 +++++++++++++++++++ .../com/datastax/driver/core/Cluster.java | 77 ++++++++------- .../com/datastax/driver/core/Connection.java | 23 ++++- .../driver/core/HostConnectionPool.java | 7 ++ .../com/datastax/driver/core/Session.java | 4 + .../exceptions/AuthenticationException.java | 25 +++++ 7 files changed, 207 insertions(+), 37 deletions(-) create mode 100644 driver-core/src/main/java/com/datastax/driver/core/AuthInfoProvider.java create mode 100644 driver-core/src/main/java/com/datastax/driver/core/exceptions/AuthenticationException.java diff --git a/driver-core/src/main/java/com/datastax/driver/core/AbstractReconnectionHandler.java b/driver-core/src/main/java/com/datastax/driver/core/AbstractReconnectionHandler.java index c24e61bfeac..9a58d578f63 100644 --- a/driver-core/src/main/java/com/datastax/driver/core/AbstractReconnectionHandler.java +++ b/driver-core/src/main/java/com/datastax/driver/core/AbstractReconnectionHandler.java @@ -7,6 +7,7 @@ import org.slf4j.LoggerFactory; import com.datastax.driver.core.configuration.ReconnectionPolicy; +import com.datastax.driver.core.exceptions.AuthenticationException; abstract class AbstractReconnectionHandler implements Runnable { @@ -31,6 +32,9 @@ public AbstractReconnectionHandler(ScheduledExecutorService executor, Reconnecti protected boolean onConnectionException(ConnectionException e, long nextDelayMs) { return true; } protected boolean onUnknownException(Exception e, long nextDelayMs) { return true; } + // TODO: maybe we shouldn't retry on authentication exception? + protected boolean onAuthenticationException(AuthenticationException e, long nextDelayMs) { return true; } + public void start() { executor.schedule(this, policy.nextDelayMs(), TimeUnit.MILLISECONDS); @@ -67,6 +71,13 @@ public void run() { reschedule(nextDelay); else currentAttempt.compareAndSet(localFuture, null); + } catch (AuthenticationException e) { + logger.error(e.getMessage()); + long nextDelay = policy.nextDelayMs(); + if (onAuthenticationException(e, nextDelay)) + reschedule(nextDelay); + else + currentAttempt.compareAndSet(localFuture, null); } catch (Exception e) { long nextDelay = policy.nextDelayMs(); if (onUnknownException(e, nextDelay)) diff --git a/driver-core/src/main/java/com/datastax/driver/core/AuthInfoProvider.java b/driver-core/src/main/java/com/datastax/driver/core/AuthInfoProvider.java new file mode 100644 index 00000000000..9cc2bca8165 --- /dev/null +++ b/driver-core/src/main/java/com/datastax/driver/core/AuthInfoProvider.java @@ -0,0 +1,97 @@ +package com.datastax.driver.core; + +import java.net.InetAddress; +import java.util.*; + +/** + * Authentication information provider to connect to Cassandra nodes. + *
+ * The authentication information itself is just a set of key-value pairs. + * Exactly which key-value pairs are required depends on the authenticator + * configured on the Cassandra nodes. + */ +public interface AuthInfoProvider { + + /** + * A provider that provides no authentication information. + *
+ * This is only useful when no authentication is to be used. + */ + public static final AuthInfoProvider NONE = new AuthInfoProvider() { + public Map<String, String> getAuthInfos(InetAddress host) { + return Collections.emptyMap(); + } + }; + + /** + * The authentication information to use to connect to {@code host}. + * + * Please note that if authentication is required, this method will be + * called to initialize each new connection created by the driver. It is + * thus a good idea to make sure this method returns relatively quickly. + * + * @param host the Cassandra host for which authentication information + * is requested. + * @return the authentication information to use. + */ + public Map<String, String> getAuthInfos(InetAddress host); + + /** + * A simple {@code AuthInfoProvider} implementation. + *
+ * This provider allows you to programmatically define authentication + * information that will then apply to all hosts. + *
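+ * For example (an illustrative sketch; the property names are an assumption
+ * that depends on the authenticator configured on the nodes, "username" and
+ * "password" being what Cassandra's SimpleAuthenticator expects):
+ * <pre>
+ * AuthInfoProvider auth = new AuthInfoProvider.Simple()
+ *     .add("username", "cassandra")
+ *     .add("password", "cassandra");
+ * </pre>
+ *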
+ * Note that it is not safe to add new info to this provider once a + * Cluster instance has been created using this provider. + */ + public static class Simple implements AuthInfoProvider { + + private final Map<String, String> credentials = new HashMap<String, String>(); + + /** + * Creates a new, empty, simple authentication info provider. + */ + public Simple() {} + + /** + * Creates a new simple authentication info provider with the + * information contained in {@code properties}. + * + * @param properties a map of authentication information to use. + */ + public Simple(Map<String, String> properties) { + this(); + addAll(properties); + } + + public Map<String, String> getAuthInfos(InetAddress host) { + return credentials; + } + + /** + * Adds a new property to the authentication info returned by this + * provider. + * + * @param property the name of the property to add. + * @param value the value to add for {@code property}. + * @return {@code this} object. + */ + public Simple add(String property, String value) { + credentials.put(property, value); + return this; + } + + /** + * Adds all the key-value pairs provided as new authentication + * information returned by this provider. + * + * @param properties a map of authentication information to add. + * @return {@code this} object. + */ + public Simple addAll(Map<String, String> properties) { + credentials.putAll(properties); + return this; + } + } +} diff --git a/driver-core/src/main/java/com/datastax/driver/core/Cluster.java b/driver-core/src/main/java/com/datastax/driver/core/Cluster.java index 7b9e8c22ebc..db33b4a8f46 100644 --- a/driver-core/src/main/java/com/datastax/driver/core/Cluster.java +++ b/driver-core/src/main/java/com/datastax/driver/core/Cluster.java @@ -59,8 +59,8 @@ public class Cluster { final Manager manager; - private Cluster(List<InetSocketAddress> contactPoints, int port, Policies policies) throws NoHostAvailableException { - this.manager = new Manager(contactPoints, port, policies); + private Cluster(List<InetSocketAddress> contactPoints, int port, Policies policies, AuthInfoProvider authProvider) throws NoHostAvailableException { + this.manager = new Manager(contactPoints, port, policies, authProvider); } /** @@ -79,6 +79,8 @@ private Cluster(List contactPoints, int port, Policies polici * can be reached. * @throws IllegalArgumentException if the list of contact points provided * by {@code initializer} is empty or if not all those contact points have the same port. + * @throws AuthenticationException if while contacting the initial + * contact points an authentication error occurs. */ public static Cluster buildFrom(Initializer initializer) throws NoHostAvailableException { List<InetSocketAddress> contactPoints = initializer.getContactPoints(); @@ -91,7 +93,7 @@ public static Cluster buildFrom(Initializer initializer) throws NoHostAvailableE throw new IllegalArgumentException(String.format("Not all hosts have the same port, found port %d and %d", port, a.getPort())); port = a.getPort(); } - return new Cluster(contactPoints, port, initializer.getPolicies()); + return new Cluster(contactPoints, port, initializer.getPolicies(), initializer.getAuthInfoProvider()); } /** @@ -103,18 +105,6 @@ public Session connect() { return manager.newSession(); } - /** - * Creates a new session on this cluster. - * - * @param authInfo The authorisation credentials to use to connect to - * Cassandra nodes. - * @return a new session on this cluster sets to no keyspace. - */ - // TODO - //public Session connect(AuthInfo authInfo) { - // return null; - //} - - /** * Creates a new session on this cluster and sets a keyspace to use.
* @@ -132,22 +122,6 @@ public Session connect(String keyspace) throws NoHostAvailableException { return session; } - ///** - // * Creates a new session on this cluster and sets a keyspace to use. - // * - // * @param authInfo The authorisation credentials to use to connect to - // * Cassandra nodes. - // * @return a new session on this cluster sets to keyspace - // * {@code keyspaceName}. - // * - // * @throws NoHostAvailableException if no host can be contacted to set the - // * {@code keyspace}. - // */ - // Session session = connect(authInfo); - // session.manager.setKeyspace(keyspace); - // return session; - //} - /** * Returns read-only metadata on the connected cluster. *
@@ -200,6 +174,14 @@ public interface Initializer { * @return the policies to use for this cluster. */ public Policies getPolicies(); + + /** + * The authentication provider to use to connect to the Cassandra cluster. + * + * @return the authentication provider to use. Use + * AuthInfoProvider.NONE if authentication is not to be used. + */ + public AuthInfoProvider getAuthInfoProvider(); } /** @@ -209,6 +191,7 @@ public static class Builder implements Initializer { private final List addresses = new ArrayList(); private int port = DEFAULT_PORT; + private AuthInfoProvider authProvider = AuthInfoProvider.NONE; private LoadBalancingPolicy.Factory loadBalancingPolicyFactory; private ReconnectionPolicy.Factory reconnectionPolicyFactory; @@ -360,6 +343,30 @@ public Policies getPolicies() { ); } + /** + * Use the provided {@code AuthInfoProvider} to connect to Cassandra hosts. + *
+ * This is optional if the Cassandra cluster has been configured to not + * require authentication (the default). + * + * @param authInfoProvider the authentication info provider to use + * @return this Builder + */ + public Builder withAuthInfoProvider(AuthInfoProvider authInfoProvider) { + this.authProvider = authInfoProvider; + return this; + } + + /** + * The authentication provider to use to connect to the Cassandra cluster. + * + * @return the authentication provider set through {@link #withAuthInfoProvider} + * or AuthInfoProvider.NONE if nothing was set. + */ + public AuthInfoProvider getAuthInfoProvider() { + return this.authProvider; + } + /** * Build the cluster with the configured set of initial contact points * and policies. @@ -370,6 +377,8 @@ public Policies getPolicies() { * * @throws NoHostAvailableException if none of the contact points * provided can be reached. + * @throws AuthenticationException if while contacting the initial + * contact points an authentication error occurs. */ public Cluster build() throws NoHostAvailableException { return Cluster.buildFrom(this); @@ -444,12 +453,12 @@ class Manager implements Host.StateListener, Connection.DefaultResponseHandler { // less clear behavior. final Map preparedQueries = new ConcurrentHashMap(); - private Manager(List<InetSocketAddress> contactPoints, int port, Policies policies) throws NoHostAvailableException { + private Manager(List<InetSocketAddress> contactPoints, int port, Policies policies, AuthInfoProvider authProvider) throws NoHostAvailableException { this.port = port; this.configuration = new Configuration(policies); this.metadata = new ClusterMetadata(this); this.contactPoints = contactPoints; - this.connectionFactory = new Connection.Factory(this); + this.connectionFactory = new Connection.Factory(this, authProvider); for (InetSocketAddress address : contactPoints) addHost(address, false); @@ -589,6 +598,8 @@ private void prepareAllQueries(Host host) { } } catch (ConnectionException e) { // Ignore, not a big deal + } catch (AuthenticationException e) { + // That's bad news, but ignore it at this point } catch (BusyConnectionException e) { // Ignore, not a big deal } diff --git a/driver-core/src/main/java/com/datastax/driver/core/Connection.java b/driver-core/src/main/java/com/datastax/driver/core/Connection.java index f397dd1e916..18513b6eeaa 100644 --- a/driver-core/src/main/java/com/datastax/driver/core/Connection.java +++ b/driver-core/src/main/java/com/datastax/driver/core/Connection.java @@ -9,6 +9,7 @@ import java.util.concurrent.atomic.AtomicReference; import com.datastax.driver.core.configuration.*; +import com.datastax.driver.core.exceptions.AuthenticationException; import com.datastax.driver.core.exceptions.DriverInternalError; import com.datastax.driver.core.utils.SimpleFuture; @@ -124,7 +125,18 @@ private void initializeTransport() throws ConnectionException { case ERROR: throw defunct(new TransportException(address, String.format("Error initializing connection", ((ErrorMessage)response).error))); case AUTHENTICATE: - throw new TransportException(address, "Authentication required but not yet supported"); + CredentialsMessage creds = new CredentialsMessage(); + creds.credentials.putAll(factory.authProvider.getAuthInfos(address.getAddress())); + Message.Response authResponse = write(creds).get(); + switch (authResponse.type) { + case READY: + break; + case ERROR: + throw new AuthenticationException(address, (((ErrorMessage)authResponse).error).getMessage()); + default: + throw defunct(new TransportException(address, String.format("Unexpected
%s response message from server to a CREDENTIALS message", authResponse.type))); + } + break; default: throw defunct(new TransportException(address, String.format("Unexpected %s response message from server to a STARTUP message", response.type))); } @@ -297,13 +309,16 @@ public static class Factory { private final DefaultResponseHandler defaultHandler; private final ConnectionsConfiguration configuration; - public Factory(Cluster.Manager manager) { - this(manager, manager.configuration.getConnectionsConfiguration()); + private final AuthInfoProvider authProvider; + + public Factory(Cluster.Manager manager, AuthInfoProvider authProvider) { + this(manager, manager.configuration.getConnectionsConfiguration(), authProvider); } - private Factory(DefaultResponseHandler defaultHandler, ConnectionsConfiguration configuration) { + private Factory(DefaultResponseHandler defaultHandler, ConnectionsConfiguration configuration, AuthInfoProvider authProvider) { this.defaultHandler = defaultHandler; this.configuration = configuration; + this.authProvider = authProvider; } /** diff --git a/driver-core/src/main/java/com/datastax/driver/core/HostConnectionPool.java b/driver-core/src/main/java/com/datastax/driver/core/HostConnectionPool.java index 8a257250c35..7c1f9c25734 100644 --- a/driver-core/src/main/java/com/datastax/driver/core/HostConnectionPool.java +++ b/driver-core/src/main/java/com/datastax/driver/core/HostConnectionPool.java @@ -10,6 +10,7 @@ import org.slf4j.LoggerFactory; import com.datastax.driver.core.configuration.*; +import com.datastax.driver.core.exceptions.AuthenticationException; // TODO: We should allow changing the core pool size (i.e. have a method that // adds new connection or trash existing one) @@ -238,6 +239,12 @@ private boolean addConnectionIfUnderMaximum() { if (host.getMonitor().signalConnectionFailure(e)) shutdown(); return false; + } catch (AuthenticationException e) { + // This shouldn't really happen in theory + open.decrementAndGet(); + logger.error("Authentication error while creating additional connection (error is: {})", e.getMessage()); + shutdown(); + return false; } } diff --git a/driver-core/src/main/java/com/datastax/driver/core/Session.java b/driver-core/src/main/java/com/datastax/driver/core/Session.java index 9af0bea4994..7d32b26bf9b 100644 --- a/driver-core/src/main/java/com/datastax/driver/core/Session.java +++ b/driver-core/src/main/java/com/datastax/driver/core/Session.java @@ -428,6 +428,10 @@ private HostConnectionPool addHost(Host host) { logger.debug(String.format("Adding %s to list of queried hosts", host)); return pools.put(host, new HostConnectionPool(host, distance, this)); } + } catch (AuthenticationException e) { + logger.error(String.format("Error creating pool to %s (%s)", host, e.getMessage())); + host.getMonitor().signalConnectionFailure(new ConnectionException(e.getHost(), e.getMessage())); + return pools.get(host); } catch (ConnectionException e) { logger.debug(String.format("Error creating pool to %s (%s)", host, e.getMessage())); host.getMonitor().signalConnectionFailure(e); diff --git a/driver-core/src/main/java/com/datastax/driver/core/exceptions/AuthenticationException.java b/driver-core/src/main/java/com/datastax/driver/core/exceptions/AuthenticationException.java new file mode 100644 index 00000000000..19c4343fdd9 --- /dev/null +++ b/driver-core/src/main/java/com/datastax/driver/core/exceptions/AuthenticationException.java @@ -0,0 +1,25 @@ +package com.datastax.driver.core.exceptions; + +import java.net.InetSocketAddress; + +/** + 
* Indicates an error during the authentication phase while connecting to a node. + */ +public class AuthenticationException extends DriverUncheckedException { + + private final InetSocketAddress host; + + public AuthenticationException(InetSocketAddress host, String message) { + super(String.format("Authentication error on host %s: %s", host, message)); + this.host = host; + } + + /** + * The host for which the authentication failed. + * + * @return the host for which the authentication failed. + */ + public InetSocketAddress getHost() { + return host; + } +} From c7eb2a5cfca424eb7f94296b0d1ef0b4c02f496d Mon Sep 17 00:00:00 2001 From: Sylvain Lebresne Date: Fri, 9 Nov 2012 13:46:01 +0100 Subject: [PATCH 066/719] Fix (and somewhat test) schema handling --- .../com/datastax/driver/core/Cluster.java | 29 ++++- .../datastax/driver/core/ClusterMetadata.java | 4 +- .../java/com/datastax/driver/core/Codec.java | 1 - .../com/datastax/driver/core/Connection.java | 1 - .../driver/core/ControlConnection.java | 58 +++++----- .../com/datastax/driver/core/ResultSet.java | 19 +-- .../driver/core/RetryingCallback.java | 1 - .../com/datastax/driver/core/Session.java | 9 ++ .../driver/core/{utils => }/SimpleFuture.java | 7 +- .../datastax/driver/core/TableMetadata.java | 108 ++++++++++++------ .../com/datastax/driver/core/CCMBridge.java | 8 +- .../com/datastax/driver/core/SchemaTest.java | 100 ++++++++++++++++ .../com/datastax/driver/core/SessionTest.java | 1 - 13 files changed, 252 insertions(+), 94 deletions(-) rename driver-core/src/main/java/com/datastax/driver/core/{utils => }/SimpleFuture.java (90%) create mode 100644 driver-core/src/test/java/com/datastax/driver/core/SchemaTest.java diff --git a/driver-core/src/main/java/com/datastax/driver/core/Cluster.java b/driver-core/src/main/java/com/datastax/driver/core/Cluster.java index db33b4a8f46..d0415577857 100644 --- a/driver-core/src/main/java/com/datastax/driver/core/Cluster.java +++ b/driver-core/src/main/java/com/datastax/driver/core/Cluster.java @@ -17,9 +17,10 @@ import com.datastax.driver.core.exceptions.*; import com.datastax.driver.core.configuration.*; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; import org.apache.log4j.ConsoleAppender; import org.apache.log4j.Level; -import org.apache.log4j.Logger; import org.apache.log4j.PatternLayout; /** @@ -42,10 +43,10 @@ */ public class Cluster { - private static final Logger logger = Logger.getLogger(Cluster.class); + private static final Logger logger = LoggerFactory.getLogger(Cluster.class); static { - Logger rootLogger = Logger.getRootLogger(); + org.apache.log4j.Logger rootLogger = org.apache.log4j.Logger.getRootLogger(); if (!rootLogger.getAllAppenders().hasMoreElements()) { rootLogger.setLevel(Level.DEBUG); rootLogger.addAppender(new ConsoleAppender(new PatternLayout("%-5p [%t]: %m%n"))); @@ -605,9 +606,6 @@ private void prepareAllQueries(Host host) { } } - // TODO: take a lock or something so that if a a getSchema() is called, - // we wait for that to be finished. And maybe avoid multiple refresh at - // the same time. 
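+ // Schema refreshes run on the executor. The refreshSchema variant below also
+ // completes the provided future once the refresh is done, so the ResultSet of
+ // a schema-altering query is only delivered once metadata is up to date.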
public void submitSchemaRefresh(final String keyspace, final String table) { logger.trace("Submitting schema refresh"); executor.submit(new Runnable() { @@ -617,6 +615,25 @@ public void run() { }); } + // refresh the schema using the provided connection, and notice the future with the provided resultset once done + public void refreshSchema(final Connection connection, final SimpleFuture future, final ResultSet rs, final String keyspace, final String table) { + // TODO: figure out why this doesn't work + //logger.debug("Refreshing schema for {}{}", keyspace == null ? "" : keyspace, table == null ? "" : "." + table); + executor.submit(new Runnable() { + public void run() { + try { + ControlConnection.refreshSchema(connection, keyspace, table, Cluster.Manager.this); + } catch (Exception e) { + logger.error("Error during schema refresh ({}). The schema from Cluster.getMetadata() migth appear stale. Asynchronously submitting job to fix.", e.getMessage()); + submitSchemaRefresh(keyspace, table); + } finally { + // Always sets the result + future.set(rs); + } + } + }); + } + // Called when some message has been received but has been initiated from the server (streamId < 0). public void handle(Message.Response response) { diff --git a/driver-core/src/main/java/com/datastax/driver/core/ClusterMetadata.java b/driver-core/src/main/java/com/datastax/driver/core/ClusterMetadata.java index 51d55c6141a..4f077827524 100644 --- a/driver-core/src/main/java/com/datastax/driver/core/ClusterMetadata.java +++ b/driver-core/src/main/java/com/datastax/driver/core/ClusterMetadata.java @@ -4,6 +4,8 @@ import java.util.*; import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.ConcurrentMap; +import java.util.concurrent.locks.ReadWriteLock; +import java.util.concurrent.locks.ReentrantReadWriteLock; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -101,7 +103,7 @@ synchronized void rebuildSchema(String keyspace, String table, ResultSet ks, Res private static void buildTableMetadata(KeyspaceMetadata ksm, List cfRows, Map> colsDefs) { for (CQLRow cfRow : cfRows) { String cfName = cfRow.getString(TableMetadata.CF_NAME); - TableMetadata tm = TableMetadata.build(ksm, cfRow); + TableMetadata tm = TableMetadata.build(ksm, cfRow, !colsDefs.isEmpty()); if (colsDefs == null || colsDefs.get(cfName) == null) continue; diff --git a/driver-core/src/main/java/com/datastax/driver/core/Codec.java b/driver-core/src/main/java/com/datastax/driver/core/Codec.java index 870a6190ad9..b66f33e27ab 100644 --- a/driver-core/src/main/java/com/datastax/driver/core/Codec.java +++ b/driver-core/src/main/java/com/datastax/driver/core/Codec.java @@ -31,7 +31,6 @@ class Codec { put(UTF8Type.instance, DataType.Native.TEXT); put(DateType.instance, DataType.Native.TIMESTAMP); put(UUIDType.instance, DataType.Native.UUID); - put(UTF8Type.instance, DataType.Native.VARCHAR); put(IntegerType.instance, DataType.Native.VARINT); put(TimeUUIDType.instance, DataType.Native.TIMEUUID); }}; diff --git a/driver-core/src/main/java/com/datastax/driver/core/Connection.java b/driver-core/src/main/java/com/datastax/driver/core/Connection.java index 18513b6eeaa..e45016448be 100644 --- a/driver-core/src/main/java/com/datastax/driver/core/Connection.java +++ b/driver-core/src/main/java/com/datastax/driver/core/Connection.java @@ -11,7 +11,6 @@ import com.datastax.driver.core.configuration.*; import com.datastax.driver.core.exceptions.AuthenticationException; import com.datastax.driver.core.exceptions.DriverInternalError; -import 
com.datastax.driver.core.utils.SimpleFuture; import org.apache.cassandra.service.ClientState; import org.apache.cassandra.transport.*; diff --git a/driver-core/src/main/java/com/datastax/driver/core/ControlConnection.java b/driver-core/src/main/java/com/datastax/driver/core/ControlConnection.java index fc14dc66821..a08de219765 100644 --- a/driver-core/src/main/java/com/datastax/driver/core/ControlConnection.java +++ b/driver-core/src/main/java/com/datastax/driver/core/ControlConnection.java @@ -146,31 +146,10 @@ public void refreshSchema(String keyspace, String table) { refreshSchema(connectionRef.get(), keyspace, table); } - private void refreshSchema(Connection connection, String keyspace, String table) { - // Make sure we're up to date on schema + public void refreshSchema(Connection connection, String keyspace, String table) { + logger.debug(String.format("[Control connection] Refreshing schema for %s.%s", keyspace, table)); try { - logger.trace(String.format("[Control connection] Refreshing schema for %s.%s", keyspace, table)); - - String whereClause = ""; - if (keyspace != null) { - whereClause = " WHERE keyspace_name = '" + keyspace + "'"; - if (table != null) - whereClause += " AND columnfamily_name = '" + table + "'"; - } - - ResultSet.Future ksFuture = table == null - ? new ResultSet.Future(null, new QueryMessage(SELECT_KEYSPACES + whereClause, ConsistencyLevel.DEFAULT_CASSANDRA_CL)) - : null; - ResultSet.Future cfFuture = new ResultSet.Future(null, new QueryMessage(SELECT_COLUMN_FAMILIES + whereClause, ConsistencyLevel.DEFAULT_CASSANDRA_CL)); - ResultSet.Future colsFuture = new ResultSet.Future(null, new QueryMessage(SELECT_COLUMNS + whereClause, ConsistencyLevel.DEFAULT_CASSANDRA_CL)); - - if (ksFuture != null) - connection.write(ksFuture.callback); - connection.write(cfFuture.callback); - connection.write(colsFuture.callback); - - // TODO: we should probably do something more fancy, like check if the schema changed and notify whoever wants to be notified - cluster.metadata.rebuildSchema(keyspace, table, ksFuture == null ? null : ksFuture.get(), cfFuture.get(), colsFuture.get()); + refreshSchema(connection, keyspace, table, cluster); } catch (ConnectionException e) { logger.debug(String.format("[Control connection] Connection error when refeshing schema (%s)", e.getMessage())); reconnect(); @@ -181,13 +160,36 @@ private void refreshSchema(Connection connection, String keyspace, String table) logger.error("[Control connection] Unexpected error while refeshing schema", e); reconnect(); } catch (InterruptedException e) { - // TODO: it's bad to do that but at the same time it's annoying to be interrupted - throw new RuntimeException(e); + // If we're interrupted, just move on + } + } + + static void refreshSchema(Connection connection, String keyspace, String table, Cluster.Manager cluster) throws ConnectionException, BusyConnectionException, ExecutionException, InterruptedException { + // Make sure we're up to date on schema + String whereClause = ""; + if (keyspace != null) { + whereClause = " WHERE keyspace_name = '" + keyspace + "'"; + if (table != null) + whereClause += " AND columnfamily_name = '" + table + "'"; } + + ResultSet.Future ksFuture = table == null + ? 
new ResultSet.Future(null, new QueryMessage(SELECT_KEYSPACES + whereClause, ConsistencyLevel.DEFAULT_CASSANDRA_CL)) + : null; + ResultSet.Future cfFuture = new ResultSet.Future(null, new QueryMessage(SELECT_COLUMN_FAMILIES + whereClause, ConsistencyLevel.DEFAULT_CASSANDRA_CL)); + ResultSet.Future colsFuture = new ResultSet.Future(null, new QueryMessage(SELECT_COLUMNS + whereClause, ConsistencyLevel.DEFAULT_CASSANDRA_CL)); + + if (ksFuture != null) + connection.write(ksFuture.callback); + connection.write(cfFuture.callback); + connection.write(colsFuture.callback); + + cluster.metadata.rebuildSchema(keyspace, table, ksFuture == null ? null : ksFuture.get(), cfFuture.get(), colsFuture.get()); } private void refreshNodeList(Connection connection) throws BusyConnectionException { // Make sure we're up to date on node list + logger.debug(String.format("[Control connection] Refreshing node list")); try { ResultSet.Future peersFuture = new ResultSet.Future(null, new QueryMessage(SELECT_PEERS, ConsistencyLevel.DEFAULT_CASSANDRA_CL)); ResultSet.Future localFuture = new ResultSet.Future(null, new QueryMessage(SELECT_LOCAL, ConsistencyLevel.DEFAULT_CASSANDRA_CL)); @@ -208,7 +210,6 @@ private void refreshNodeList(Connection connection) throws BusyConnectionExcepti host.setLocationInfo(localRow.getString("data_center"), localRow.getString("rack")); } - List foundHosts = new ArrayList(); List dcs = new ArrayList(); List racks = new ArrayList(); @@ -246,8 +247,7 @@ private void refreshNodeList(Connection connection) throws BusyConnectionExcepti logger.debug("[Control connection] Connection is busy, reconnecting"); reconnect(); } catch (InterruptedException e) { - // TODO: it's bad to do that but at the same time it's annoying to be interrupted - throw new RuntimeException(e); + // Interrupted? Then moving on. 
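+ // Note that the interrupt is simply swallowed here; the thread's interrupted
+ // status is not restored before moving on.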
} } diff --git a/driver-core/src/main/java/com/datastax/driver/core/ResultSet.java b/driver-core/src/main/java/com/datastax/driver/core/ResultSet.java index e3fbfa478ff..147d43f8183 100644 --- a/driver-core/src/main/java/com/datastax/driver/core/ResultSet.java +++ b/driver-core/src/main/java/com/datastax/driver/core/ResultSet.java @@ -15,7 +15,6 @@ import org.apache.cassandra.transport.messages.ResultMessage; import com.datastax.driver.core.exceptions.*; -import com.datastax.driver.core.utils.SimpleFuture; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -197,32 +196,36 @@ public void onSet(Connection connection, Message.Response response) { // propagate the keyspace change to other connections session.poolsState.setKeyspace(((ResultMessage.SetKeyspace)rm).keyspace); + set(ResultSet.fromMessage(rm, session)); break; case SCHEMA_CHANGE: ResultMessage.SchemaChange scc = (ResultMessage.SchemaChange)rm; + ResultSet rs = ResultSet.fromMessage(rm, session); switch (scc.change) { case CREATED: if (scc.columnFamily.isEmpty()) - session.cluster.manager.submitSchemaRefresh(null, null); + session.cluster.manager.refreshSchema(connection, Future.this, rs, null, null); else - session.cluster.manager.submitSchemaRefresh(scc.keyspace, null); + session.cluster.manager.refreshSchema(connection, Future.this, rs, scc.keyspace, null); break; case DROPPED: if (scc.columnFamily.isEmpty()) - session.cluster.manager.submitSchemaRefresh(null, null); + session.cluster.manager.refreshSchema(connection, Future.this, rs, null, null); else - session.cluster.manager.submitSchemaRefresh(scc.keyspace, null); + session.cluster.manager.refreshSchema(connection, Future.this, rs, scc.keyspace, null); break; case UPDATED: if (scc.columnFamily.isEmpty()) - session.cluster.manager.submitSchemaRefresh(scc.keyspace, null); + session.cluster.manager.refreshSchema(connection, Future.this, rs, scc.keyspace, null); else - session.cluster.manager.submitSchemaRefresh(scc.keyspace, scc.columnFamily); + session.cluster.manager.refreshSchema(connection, Future.this, rs, scc.keyspace, scc.columnFamily); break; } break; + default: + set(ResultSet.fromMessage(rm, session)); + break; } - set(ResultSet.fromMessage(rm, session)); break; case ERROR: setException(convertException(((ErrorMessage)response).error)); diff --git a/driver-core/src/main/java/com/datastax/driver/core/RetryingCallback.java b/driver-core/src/main/java/com/datastax/driver/core/RetryingCallback.java index 6ac8f42ff90..fba9a89287d 100644 --- a/driver-core/src/main/java/com/datastax/driver/core/RetryingCallback.java +++ b/driver-core/src/main/java/com/datastax/driver/core/RetryingCallback.java @@ -11,7 +11,6 @@ import com.datastax.driver.core.configuration.RetryPolicy; import com.datastax.driver.core.exceptions.*; -import com.datastax.driver.core.utils.SimpleFuture; import org.apache.cassandra.transport.Message; import org.apache.cassandra.transport.messages.ErrorMessage; diff --git a/driver-core/src/main/java/com/datastax/driver/core/Session.java b/driver-core/src/main/java/com/datastax/driver/core/Session.java index 7d32b26bf9b..aba98e00ce1 100644 --- a/driver-core/src/main/java/com/datastax/driver/core/Session.java +++ b/driver-core/src/main/java/com/datastax/driver/core/Session.java @@ -333,6 +333,15 @@ public void shutdown() { manager.shutdown(); } + /** + * The {@code Cluster} object this session is part of. + * + * @return the {@code Cluster} object this session is part of. 
+ */ + public Cluster getCluster() { + return manager.cluster; + } + private PreparedStatement toPreparedStatement(String query, Connection.Future future) throws NoHostAvailableException { try { diff --git a/driver-core/src/main/java/com/datastax/driver/core/utils/SimpleFuture.java b/driver-core/src/main/java/com/datastax/driver/core/SimpleFuture.java similarity index 90% rename from driver-core/src/main/java/com/datastax/driver/core/utils/SimpleFuture.java rename to driver-core/src/main/java/com/datastax/driver/core/SimpleFuture.java index 8ce8f02d24c..f68741f9467 100644 --- a/driver-core/src/main/java/com/datastax/driver/core/utils/SimpleFuture.java +++ b/driver-core/src/main/java/com/datastax/driver/core/SimpleFuture.java @@ -1,14 +1,13 @@ -package com.datastax.driver.core.utils; +package com.datastax.driver.core; import com.google.common.util.concurrent.AbstractFuture; /** * A simple future that can be set to a value. * - * Note: this is equivalent to guava SettableFuture, but the latter is a final - * class which is dumb. + * Note: this is equivalent to guava SettableFuture, but the latter is a final class. */ -public class SimpleFuture extends AbstractFuture { +class SimpleFuture extends AbstractFuture { /** * Creates a new {@code SimpleFuture}. diff --git a/driver-core/src/main/java/com/datastax/driver/core/TableMetadata.java b/driver-core/src/main/java/com/datastax/driver/core/TableMetadata.java index fbd568ef482..4aee92232e0 100644 --- a/driver-core/src/main/java/com/datastax/driver/core/TableMetadata.java +++ b/driver-core/src/main/java/com/datastax/driver/core/TableMetadata.java @@ -29,41 +29,36 @@ public class TableMetadata { private final KeyspaceMetadata keyspace; private final String name; - // We use a linked hashmap because we will keep this in the order of a 'SELECT * FROM ...'. - private final Map columns = new LinkedHashMap(); - private final List partitionKey = new ArrayList(); - private final List clusteringKey = new ArrayList(); + private final List partitionKey; + private final List clusteringKey; + private final Map columns; private final Options options; - private TableMetadata(KeyspaceMetadata keyspace, String name, Options options) { + private TableMetadata(KeyspaceMetadata keyspace, + String name, + List partitionKey, + List clusteringKey, + LinkedHashMap columns, + Options options) { this.keyspace = keyspace; this.name = name; + this.partitionKey = partitionKey; + this.clusteringKey = clusteringKey; + this.columns = columns; this.options = options; } - static TableMetadata build(KeyspaceMetadata ksm, CQLRow row) { + static TableMetadata build(KeyspaceMetadata ksm, CQLRow row, boolean hasColumnMetadata) { try { String name = row.getString(CF_NAME); - TableMetadata tm = new TableMetadata(ksm, name, new Options(row)); - // Partition key - AbstractType kt = TypeParser.parse(row.getString(KEY_VALIDATOR)); - List> keyTypes = kt instanceof CompositeType - ? ((CompositeType)kt).types - : Collections.>singletonList(kt); - List keyAliases = fromJsonList(row.getString(KEY_ALIASES)); - for (int i = 0; i < keyTypes.size(); i++) { - String cn = keyAliases.size() > i - ? keyAliases.get(i) - : (i == 0 ? 
DEFAULT_KEY_ALIAS : DEFAULT_KEY_ALIAS + (i + 1)); + DataType dt = Codec.rawTypeToDataType(keyTypes.get(i)); + ColumnMetadata colMeta = new ColumnMetadata(tm, cn, dt, null); + columns.put(cn, colMeta); + partitionKey.add(colMeta); + } + + // Clustering key for (int i = 0; i < clusteringSize; i++) { String cn = columnAliases.size() > i ? columnAliases.get(i) : DEFAULT_COLUMN_ALIAS + (i + 1); DataType dt = Codec.rawTypeToDataType(columnTypes.get(i)); ColumnMetadata colMeta = new ColumnMetadata(tm, cn, dt, null); - tm.columns.put(cn, colMeta); - tm.clusteringKey.add(colMeta); + columns.put(cn, colMeta); + clusteringKey.add(colMeta); } // Value alias (if present) @@ -106,7 +121,7 @@ static TableMetadata build(KeyspaceMetadata ksm, CQLRow row) { AbstractType vt = TypeParser.parse(row.getString(VALIDATOR)); String valueAlias = row.isNull(VALUE_ALIAS) ?
DEFAULT_VALUE_ALIAS : row.getString(VALUE_ALIAS); ColumnMetadata vm = new ColumnMetadata(tm, valueAlias, Codec.rawTypeToDataType(vt), null); - tm.columns.put(valueAlias, vm); + columns.put(valueAlias, vm); } ksm.add(tm); @@ -282,7 +297,7 @@ private String asCQLQuery(boolean formatted) { sb.append("CREATE TABLE ").append(name).append(" ("); newLine(sb, formatted); for (ColumnMetadata cm : columns.values()) - newLine(sb.append(spaces(4, formatted)).append(cm), formatted); + newLine(sb.append(spaces(4, formatted)).append(cm).append(","), formatted); // PK sb.append(spaces(4, formatted)).append("PRIMARY KEY ("); @@ -304,7 +319,12 @@ private String asCQLQuery(boolean formatted) { // end PK // Options - sb.append(") WITH read_repair_chance = ").append(options.readRepair); + if (options.isCompactStorage) { + sb.append(") WITH COMPACT STORAGE"); + and(sb, formatted).append("read_repair_chance = ").append(options.readRepair); + } else { + sb.append(") WITH read_repair_chance = ").append(options.readRepair); + } and(sb, formatted).append("local_read_repair_chance = ").append(options.localReadRepair); and(sb, formatted).append("replicate_on_write = ").append(options.replicateOnWrite); and(sb, formatted).append("gc_grace_seconds = ").append(options.gcGrace); @@ -357,6 +377,8 @@ public static class Options { private static final double DEFAULT_BF_FP_CHANCE = 0.01; + private final boolean isCompactStorage; + private final String comment; private final double readRepair; private final double localReadRepair; @@ -367,7 +389,8 @@ public static class Options { private final Map compaction = new HashMap(); private final Map compression = new HashMap(); - Options(CQLRow row) { + Options(CQLRow row, boolean isCompactStorage) { + this.isCompactStorage = isCompactStorage; this.comment = row.isNull(COMMENT) ? "" : row.getString(COMMENT); this.readRepair = row.getDouble(READ_REPAIR); this.localReadRepair = row.getDouble(LOCAL_READ_REPAIR); @@ -384,6 +407,15 @@ public static class Options { compression.put("params", row.getString(COMPRESSION_PARAMS)); } + /** + * Whether the table uses the {@code COMPACT STORAGE} option. + * + * @return whether the table uses the {@code COMPACT STORAGE} option. + */ + public boolean isCompactStorage() { + return isCompactStorage; + } + /** * The commentary set for this table. 
* diff --git a/driver-core/src/test/java/com/datastax/driver/core/CCMBridge.java b/driver-core/src/test/java/com/datastax/driver/core/CCMBridge.java index a91be0a42ac..2ca84511f30 100644 --- a/driver-core/src/test/java/com/datastax/driver/core/CCMBridge.java +++ b/driver-core/src/test/java/com/datastax/driver/core/CCMBridge.java @@ -53,16 +53,16 @@ private CCMBridge() public static CCMBridge create(String name) { CCMBridge bridge = new CCMBridge(); bridge.execute("ccm create %s -b %s", name, CASSANDRA_VERSION); - // Small sleep, otherwise the cluster is not always available - try { Thread.sleep(200); } catch (InterruptedException e) {} + // Small sleep, otherwise the cluster is not always available because ccm create don't wait for the client server to be up + //try { Thread.sleep(500); } catch (InterruptedException e) {} return bridge; } public static CCMBridge create(String name, int nbNodes) { CCMBridge bridge = new CCMBridge(); bridge.execute("ccm create %s -n %d -s -b %s", name, nbNodes, CASSANDRA_VERSION); - // Small sleep, otherwise the cluster is not always available - try { Thread.sleep(200); } catch (InterruptedException e) {} + // See above + //try { Thread.sleep(500); } catch (InterruptedException e) {} return bridge; } diff --git a/driver-core/src/test/java/com/datastax/driver/core/SchemaTest.java b/driver-core/src/test/java/com/datastax/driver/core/SchemaTest.java new file mode 100644 index 00000000000..dea3c109b02 --- /dev/null +++ b/driver-core/src/test/java/com/datastax/driver/core/SchemaTest.java @@ -0,0 +1,100 @@ +package com.datastax.driver.core; + +import java.util.*; + +import org.junit.Test; +import static org.junit.Assert.*; + +/** + * Test we correctly process and print schema. + */ +public class SchemaTest extends CCMBridge.PerClassSingleNodeCluster { + + private static final Map cql3 = new HashMap(); + private static final Map compact = new HashMap(); + + protected Collection getTableDefinitions() { + + String sparse = "CREATE TABLE sparse (\n" + + " k text,\n" + + " c1 int,\n" + + " c2 float,\n" + + " l list,\n" + + " v int,\n" + + " PRIMARY KEY (k, c1, c2)\n" + + ")"; + + String st = "CREATE TABLE static (\n" + + " k text,\n" + + " i int,\n" + + " m map,\n" + + " v int,\n" + + " PRIMARY KEY (k)\n" + + ")"; + + String compactStatic = "CREATE TABLE compact_static (\n" + + " k text,\n" + + " i int,\n" + + " t timeuuid,\n" + + " v int,\n" + + " PRIMARY KEY (k)\n" + + ") WITH COMPACT STORAGE"; + + String compactDynamic = "CREATE TABLE compact_dynamic (\n" + + " k text,\n" + + " c int,\n" + + " v timeuuid,\n" + + " PRIMARY KEY (k, c)\n" + + ") WITH COMPACT STORAGE"; + + String compactComposite = "CREATE TABLE compact_composite (\n" + + " k text,\n" + + " c1 int,\n" + + " c2 float,\n" + + " c3 double,\n" + + " v timeuuid,\n" + + " PRIMARY KEY (k, c1, c2, c3)\n" + + ") WITH COMPACT STORAGE"; + + cql3.put("sparse", sparse); + cql3.put("static", st); + compact.put("compact_static", compactStatic); + compact.put("compact_dynamic", compactDynamic); + compact.put("compact_composite", compactComposite); + + List allDefs = new ArrayList(); + allDefs.addAll(cql3.values()); + allDefs.addAll(compact.values()); + return allDefs; + } + + private static String stripOptions(String def, boolean keepFirst) { + if (keepFirst) + return def.split("\n AND ")[0]; + else + return def.split(" WITH ")[0]; + } + + // Note: this test is a bit fragile in the sense that it rely on the exact + // string formatting of exportAsString, but it's a very simple/convenient + // way to check we correctly 
handle schemas so it's probably not so bad. + // In particular, exportAsString *does not* guarantee that you'll get + // exactly the same string than the one used to create the table. + @Test + public void schemaExportTest() { + + KeyspaceMetadata metadata = cluster.getMetadata().getKeyspace(TestUtils.SIMPLE_KEYSPACE); + + for (Map.Entry tableEntry : cql3.entrySet()) { + String table = tableEntry.getKey(); + String def = tableEntry.getValue(); + assertEquals(def, stripOptions(metadata.getTable(table).exportAsString(), false)); + } + + for (Map.Entry tableEntry : compact.entrySet()) { + String table = tableEntry.getKey(); + String def = tableEntry.getValue(); + assertEquals(def, stripOptions(metadata.getTable(table).exportAsString(), true)); + } + } +} diff --git a/driver-core/src/test/java/com/datastax/driver/core/SessionTest.java b/driver-core/src/test/java/com/datastax/driver/core/SessionTest.java index b029151c578..f18d2a27266 100644 --- a/driver-core/src/test/java/com/datastax/driver/core/SessionTest.java +++ b/driver-core/src/test/java/com/datastax/driver/core/SessionTest.java @@ -2,7 +2,6 @@ import java.util.*; -import org.junit.BeforeClass; import org.junit.Test; import static org.junit.Assert.*; From 7fad0f4f2905ccd3667d11fa6063fa4fb7537edc Mon Sep 17 00:00:00 2001 From: Sylvain Lebresne Date: Fri, 9 Nov 2012 14:36:55 +0100 Subject: [PATCH 067/719] Avoid use of String.format for logging to save array allocation --- driver-core/pom.xml | 6 +++++ .../com/datastax/driver/core/Cluster.java | 27 ++++++++++--------- .../com/datastax/driver/core/Connection.java | 24 +++++++++-------- .../driver/core/ControlConnection.java | 25 +++++++++-------- .../driver/core/HostConnectionPool.java | 6 ++--- .../com/datastax/driver/core/ResultSet.java | 2 +- .../driver/core/RetryingCallback.java | 4 +-- .../com/datastax/driver/core/Session.java | 6 ++--- 8 files changed, 56 insertions(+), 44 deletions(-) diff --git a/driver-core/pom.xml b/driver-core/pom.xml index d93f915c40f..8a54a050a87 100644 --- a/driver-core/pom.xml +++ b/driver-core/pom.xml @@ -34,6 +34,12 @@ org.apache.thrift libthrift 0.7.0 + + + org.slf4j + slf4j-api + + diff --git a/driver-core/src/main/java/com/datastax/driver/core/Cluster.java b/driver-core/src/main/java/com/datastax/driver/core/Cluster.java index d0415577857..fabc494ed59 100644 --- a/driver-core/src/main/java/com/datastax/driver/core/Cluster.java +++ b/driver-core/src/main/java/com/datastax/driver/core/Cluster.java @@ -491,7 +491,7 @@ private void shutdown() { } public void onUp(Host host) { - logger.trace(String.format("Host %s is UP", host)); + logger.trace("Host {} is UP", host); // If there is a reconnection attempt scheduled for that node, cancel it ScheduledFuture scheduledAttempt = host.reconnectionAttempt.getAndSet(null); @@ -506,13 +506,13 @@ public void onUp(Host host) { } public void onDown(final Host host) { - logger.trace(String.format("Host %s is DOWN", host)); + logger.trace("Host {} is DOWN", host); controlConnection.onDown(host); for (Session s : sessions) s.manager.onDown(host); // Note: we basically waste the first successful reconnection, but it's probably not a big deal - logger.debug(String.format("%s is down, scheduling connection retries", host)); + logger.debug("{} is down, scheduling connection retries", host); new AbstractReconnectionHandler(reconnectionExecutor, configuration.getPolicies().getReconnectionPolicyFactory().create(), host.reconnectionAttempt) { protected Connection tryReconnect() throws ConnectionException { @@ -520,12 +520,13 @@ 
protected Connection tryReconnect() throws ConnectionException { } protected void onReconnection(Connection connection) { - logger.debug(String.format("Successful reconnection to %s, setting host UP", host)); + logger.debug("Successful reconnection to {}, setting host UP", host); host.getMonitor().reset(); } protected boolean onConnectionException(ConnectionException e, long nextDelayMs) { - logger.debug(String.format("Failed reconnection to %s (%s), scheduling retry in %d milliseconds", host, e.getMessage(), nextDelayMs)); + if (logger.isDebugEnabled()) + logger.debug("Failed reconnection to {} ({}), scheduling retry in {} milliseconds", new Object[]{ host, e.getMessage(), nextDelayMs}); return true; } @@ -538,7 +539,7 @@ protected boolean onUnknownException(Exception e, long nextDelayMs) { } public void onAdd(Host host) { - logger.trace(String.format("Adding new host %s", host)); + logger.trace("Adding new host {}", host); prepareAllQueries(host); controlConnection.onAdd(host); for (Session s : sessions) @@ -546,7 +547,7 @@ public void onAdd(Host host) { } public void onRemove(Host host) { - logger.trace(String.format("Removing host %s", host)); + logger.trace("Removing host {}", host); controlConnection.onRemove(host); for (Session s : sessions) s.manager.onRemove(host); @@ -555,7 +556,7 @@ public void onRemove(Host host) { public Host addHost(InetSocketAddress address, boolean signal) { Host newHost = metadata.add(address); if (newHost != null && signal) { - logger.info(String.format("New Cassandra host %s added", newHost)); + logger.info("New Cassandra host {} added", newHost); onAdd(newHost); } return newHost; @@ -566,7 +567,7 @@ public void removeHost(Host host) { return; if (metadata.remove(host)) { - logger.info(String.format("Cassandra host %s removed", host)); + logger.info("Cassandra host {} removed", host); onRemove(host); } } @@ -617,8 +618,8 @@ public void run() { // refresh the schema using the provided connection, and notice the future with the provided resultset once done public void refreshSchema(final Connection connection, final SimpleFuture future, final ResultSet rs, final String keyspace, final String table) { - // TODO: figure out why this doesn't work - //logger.debug("Refreshing schema for {}{}", keyspace == null ? "" : keyspace, table == null ? "" : "." + table); + if (logger.isDebugEnabled()) + logger.debug("Refreshing schema for {}{}", keyspace == null ? "" : keyspace, table == null ? "" : "." 
+ table); executor.submit(new Runnable() { public void run() { try { @@ -638,13 +639,13 @@ public void run() { public void handle(Message.Response response) { if (!(response instanceof EventMessage)) { - logger.error("Received an unexpected message from the server: " + response); + logger.error("Received an unexpected message from the server: {}", response); return; } final Event event = ((EventMessage)response).event; - logger.trace(String.format("Received event %s, scheduling delivery", response)); + logger.trace("Received event {}, scheduling delivery", response); // When handle is called, the current thread is a network I/O thread, and we don't want to block // it (typically addHost() will create the connection pool to the new node, which can take time) diff --git a/driver-core/src/main/java/com/datastax/driver/core/Connection.java b/driver-core/src/main/java/com/datastax/driver/core/Connection.java index e45016448be..e1f9ffce068 100644 --- a/driver-core/src/main/java/com/datastax/driver/core/Connection.java +++ b/driver-core/src/main/java/com/datastax/driver/core/Connection.java @@ -87,16 +87,17 @@ private Connection(String name, InetSocketAddress address, Factory factory) thro this.channel = future.awaitUninterruptibly().getChannel(); if (!future.isSuccess()) { - logger.debug(String.format("[%s] Error connecting to %s%s", name, address, extractMessage(future.getCause()))); + if (logger.isDebugEnabled()) + logger.debug(String.format("[%s] Error connecting to %s%s", name, address, extractMessage(future.getCause()))); throw new TransportException(address, "Cannot connect", future.getCause()); } } finally { writer.decrementAndGet(); } - logger.trace(String.format("[%s] Connection opened successfully", name)); + logger.trace("[{}] Connection opened successfully", name); initializeTransport(); - logger.trace(String.format("[%s] Transport initialized and ready", name)); + logger.trace("[{}] Transport initialized and ready", name); } private static String extractMessage(Throwable t) { @@ -175,7 +176,7 @@ public void setKeyspace(String keyspace) throws ConnectionException { return; try { - logger.trace(String.format("[%s] Setting keyspace %s", name, keyspace)); + logger.trace("[{}] Setting keyspace {}", name, keyspace); Message.Response response = write(new QueryMessage("USE \"" + keyspace + "\"", ConsistencyLevel.DEFAULT_CASSANDRA_CL)).get(); switch (response.type) { case RESULT: @@ -236,12 +237,12 @@ public void write(ResponseCallback callback) throws ConnectionException, BusyCon dispatcher.add(handler); request.setStreamId(handler.streamId); - logger.trace(String.format("[%s] writting request %s", name, request)); + logger.trace("[{}] writting request {}", name, request); ChannelFuture writeFuture = channel.write(request); writeFuture.awaitUninterruptibly(); if (!writeFuture.isSuccess()) { - logger.debug(String.format("[%s] Error writting request %s", name, request)); + logger.debug("[{}] Error writting request {}", name, request); // Remove this handler from the dispatcher so it don't get notified of the error // twice (we will fail that method already) dispatcher.removeHandler(handler.streamId); @@ -255,7 +256,7 @@ public void write(ResponseCallback callback) throws ConnectionException, BusyCon throw defunct(ce); } - logger.trace(String.format("[%s] request sent successfully", name)); + logger.trace("[{}] request sent successfully", name); } finally { writer.decrementAndGet(); @@ -266,7 +267,7 @@ public void close() { if (isClosed) return; - logger.trace(String.format("[%s] closing 
connection", name)); + logger.trace("[{}] closing connection", name); // Make sure all new writes are rejected isClosed = true; @@ -395,14 +396,14 @@ public void removeHandler(int streamId) { @Override public void messageReceived(ChannelHandlerContext ctx, MessageEvent e) { if (!(e.getMessage() instanceof Message.Response)) { - logger.debug(String.format("[%s] Received unexpected message: %s", name, e.getMessage())); + logger.debug("[{}] Received unexpected message: {}", name, e.getMessage()); defunct(new TransportException(address, "Unexpected message received: " + e.getMessage())); // TODO: we should allow calling some handler for such error } else { Message.Response response = (Message.Response)e.getMessage(); int streamId = response.getStreamId(); - logger.trace(String.format("[%s] received: %s", name, e.getMessage())); + logger.trace("[{}] received: {}", name, e.getMessage()); if (streamId < 0) { factory.defaultHandler().handle(response); @@ -421,7 +422,8 @@ public void messageReceived(ChannelHandlerContext ctx, MessageEvent e) { @Override public void exceptionCaught(ChannelHandlerContext ctx, ExceptionEvent e) { - logger.trace(String.format("[%s] connection error", name), e.getCause()); + if (logger.isTraceEnabled()) + logger.trace(String.format("[%s] connection error", name), e.getCause()); // Ignore exception while writting, this will be handled by write() directly if (writer.get() > 0) diff --git a/driver-core/src/main/java/com/datastax/driver/core/ControlConnection.java b/driver-core/src/main/java/com/datastax/driver/core/ControlConnection.java index a08de219765..6c31a21d710 100644 --- a/driver-core/src/main/java/com/datastax/driver/core/ControlConnection.java +++ b/driver-core/src/main/java/com/datastax/driver/core/ControlConnection.java @@ -80,12 +80,12 @@ protected void onReconnection(Connection connection) { } protected boolean onConnectionException(ConnectionException e, long nextDelayMs) { - logger.error(String.format("[Control connection] Cannot connect to any host, scheduling retry in %d milliseconds", nextDelayMs)); + logger.error("[Control connection] Cannot connect to any host, scheduling retry in {} milliseconds", nextDelayMs); return true; } protected boolean onUnknownException(Exception e, long nextDelayMs) { - logger.error(String.format("[Control connection ]Unknown error during reconnection, scheduling retry in %d milliseconds", nextDelayMs), e); + logger.error(String.format("[Control connection] Unknown error during reconnection, scheduling retry in %d milliseconds", nextDelayMs), e); return true; } }.start(); @@ -93,7 +93,7 @@ protected boolean onUnknownException(Exception e, long nextDelayMs) { } private void setNewConnection(Connection newConnection) { - logger.debug(String.format("[Control connection] Successfully connected to %s", newConnection.address)); + logger.debug("[Control connection] Successfully connected to {}", newConnection.address); Connection old = connectionRef.getAndSet(newConnection); if (old != null && !old.isClosed()) old.close(); @@ -112,10 +112,12 @@ private Connection reconnectInternal() throws NoHostAvailableException { errors = new HashMap(); errors.put(e.address, e.getMessage()); - if (iter.hasNext()) { - logger.debug(String.format("[Control connection] Failed connecting to %s, trying next host", host)); - } else { - logger.debug(String.format("[Control connection] Failed connecting to %s, no more host to try", host)); + if (logger.isDebugEnabled()) { + if (iter.hasNext()) { + logger.debug("[Control connection] Failed connecting to 
{}, trying next host", host);
+                } else {
+                    logger.debug("[Control connection] Failed connecting to {}, no more host to try", host);
+                }
             }
         }
     }
@@ -147,11 +149,11 @@ public void refreshSchema(String keyspace, String table) {
     }
     public void refreshSchema(Connection connection, String keyspace, String table) {
-        logger.debug(String.format("[Control connection] Refreshing schema for %s.%s", keyspace, table));
+        logger.debug("[Control connection] Refreshing schema for {}.{}", keyspace, table);
         try {
             refreshSchema(connection, keyspace, table, cluster);
         } catch (ConnectionException e) {
-            logger.debug(String.format("[Control connection] Connection error when refeshing schema (%s)", e.getMessage()));
+            logger.debug("[Control connection] Connection error when refreshing schema ({})", e.getMessage());
             reconnect();
         } catch (BusyConnectionException e) {
             logger.debug("[Control connection] Connection is busy, reconnecting");
@@ -238,7 +240,7 @@ private void refreshNodeList(Connection connection) throws BusyConnectionExcepti
                 cluster.removeHost(host);
         } catch (ConnectionException e) {
-            logger.debug(String.format("[Control connection] Connection error when refeshing hosts list (%s)", e.getMessage()));
+            logger.debug("[Control connection] Connection error when refreshing hosts list ({})", e.getMessage());
             reconnect();
         } catch (ExecutionException e) {
             logger.error("[Control connection] Unexpected error while refeshing hosts list", e);
@@ -260,7 +262,8 @@ public void onDown(Host host) {
         // If that's the host we're connected to, and we haven't yet schedul a reconnection, pre-emptively start one
         Connection current = connectionRef.get();
-        logger.trace(String.format("[Control connection] %s is down, currently connected to %s", host, current == null ? "nobody" : current.address));
+        if (logger.isTraceEnabled())
+            logger.trace("[Control connection] {} is down, currently connected to {}", host, current == null ? "nobody" : current.address);
         if (current != null && current.address.equals(host.getAddress()) && reconnectionAttempt.get() == null)
             reconnect();
     }
diff --git a/driver-core/src/main/java/com/datastax/driver/core/HostConnectionPool.java b/driver-core/src/main/java/com/datastax/driver/core/HostConnectionPool.java
index 7c1f9c25734..429fe96086e 100644
--- a/driver-core/src/main/java/com/datastax/driver/core/HostConnectionPool.java
+++ b/driver-core/src/main/java/com/datastax/driver/core/HostConnectionPool.java
@@ -55,7 +55,7 @@ public void run() {
         this.connections = new CopyOnWriteArrayList(l);
         this.open = new AtomicInteger(connections.size());
-        logger.trace(String.format("Created connection pool to host %s", host));
+        logger.trace("Created connection pool to host {}", host);
     }
     private PoolingOptions options() {
@@ -235,7 +235,7 @@ private boolean addConnectionIfUnderMaximum() {
             return true;
         } catch (ConnectionException e) {
             open.decrementAndGet();
-            logger.debug("Connection error to " + host + " while creating additional connection");
+            logger.debug("Connection error to {} while creating additional connection", host);
             if (host.getMonitor().signalConnectionFailure(e))
                 shutdown();
             return false;
@@ -257,7 +257,7 @@ private void maybeSpawnNewConnection() {
                 break;
         }
-        logger.debug("Creating new connection on busy pool to " + host);
+        logger.debug("Creating new connection on busy pool to {}", host);
         manager.executor().submit(newConnectionTask);
     }
diff --git a/driver-core/src/main/java/com/datastax/driver/core/ResultSet.java b/driver-core/src/main/java/com/datastax/driver/core/ResultSet.java
index 147d43f8183..0b39a74abd1 100644
--- a/driver-core/src/main/java/com/datastax/driver/core/ResultSet.java
+++ b/driver-core/src/main/java/com/datastax/driver/core/ResultSet.java
@@ -63,7 +63,7 @@ private static ResultSet fromMessage(ResultMessage msg, Session.Manager session)
             case PREPARED:
                 throw new RuntimeException("Prepared statement received when a ResultSet was expected");
             default:
-                logger.error(String.format("Received unknow result type '%s'; returning empty result set", msg.kind));
+                logger.error("Received unknown result type '{}'; returning empty result set", msg.kind);
                 return empty(trace);
         }
     }
diff --git a/driver-core/src/main/java/com/datastax/driver/core/RetryingCallback.java b/driver-core/src/main/java/com/datastax/driver/core/RetryingCallback.java
index fba9a89287d..5d33dd194fe 100644
--- a/driver-core/src/main/java/com/datastax/driver/core/RetryingCallback.java
+++ b/driver-core/src/main/java/com/datastax/driver/core/RetryingCallback.java
@@ -102,7 +102,7 @@ private boolean query(Host host) {
     }
     private void logError(InetSocketAddress address, String msg) {
-        logger.debug(String.format("Error querying %s, trying next host (error is: %s)", address, msg));
+        logger.debug("Error querying {}, trying next host (error is: {})", address, msg);
         if (errors == null)
             errors = new HashMap();
         errors.put(address, msg);
@@ -186,7 +186,7 @@ public void onSet(Connection connection, Message.Response response) {
                 return;
             case IS_BOOTSTRAPPING:
                 // Try another node
-                logger.error("Query sent to %s but it is bootstrapping. This shouldn't happen but trying next host.", connection.address);
+                logger.error("Query sent to {} but it is bootstrapping. 
This shouldn't happen but trying next host.", connection.address); retry(false, null); return; case UNPREPARED: diff --git a/driver-core/src/main/java/com/datastax/driver/core/Session.java b/driver-core/src/main/java/com/datastax/driver/core/Session.java index aba98e00ce1..623abeb6ee5 100644 --- a/driver-core/src/main/java/com/datastax/driver/core/Session.java +++ b/driver-core/src/main/java/com/datastax/driver/core/Session.java @@ -434,15 +434,15 @@ private HostConnectionPool addHost(Host host) { if (distance == HostDistance.IGNORED) { return pools.get(host); } else { - logger.debug(String.format("Adding %s to list of queried hosts", host)); + logger.debug("Adding {} to list of queried hosts", host); return pools.put(host, new HostConnectionPool(host, distance, this)); } } catch (AuthenticationException e) { - logger.error(String.format("Error creating pool to %s (%s)", host, e.getMessage())); + logger.error("Error creating pool to {} ({})", host, e.getMessage()); host.getMonitor().signalConnectionFailure(new ConnectionException(e.getHost(), e.getMessage())); return pools.get(host); } catch (ConnectionException e) { - logger.debug(String.format("Error creating pool to %s (%s)", host, e.getMessage())); + logger.debug("Error creating pool to {} ({})", host, e.getMessage()); host.getMonitor().signalConnectionFailure(e); return pools.get(host); } From 1cbf6ca1456277a5c89398d32b43336f0acdd0c4 Mon Sep 17 00:00:00 2001 From: Sylvain Lebresne Date: Fri, 9 Nov 2012 15:17:31 +0100 Subject: [PATCH 068/719] Use InetAddress everywhere (instead of InetSocketAddress, since we don't support per-host port) --- .../core/AbstractReconnectionHandler.java | 2 +- .../com/datastax/driver/core/Cluster.java | 69 +++++++++++-------- .../datastax/driver/core/ClusterMetadata.java | 8 +-- .../com/datastax/driver/core/Connection.java | 35 +++++----- .../driver/core/ConnectionException.java | 8 +-- .../ConnectionsConfiguration.java | 14 ++-- .../driver/core/ControlConnection.java | 16 ++--- .../java/com/datastax/driver/core/Host.java | 10 +-- .../driver/core/HostConnectionPool.java | 2 +- .../{configuration => }/PoolingOptions.java | 4 +- .../{configuration => }/ProtocolOptions.java | 20 +++++- .../driver/core/RetryingCallback.java | 12 ++-- .../com/datastax/driver/core/Session.java | 6 +- .../{configuration => }/SocketOptions.java | 2 +- .../driver/core/TransportException.java | 6 +- .../exceptions/AuthenticationException.java | 8 +-- .../exceptions/NoHostAvailableException.java | 12 ++-- .../LoadBalancingPolicy.java | 2 +- .../{configuration => policies}/Policies.java | 2 +- .../ReconnectionPolicy.java | 2 +- .../RetryPolicy.java | 2 +- 21 files changed, 137 insertions(+), 105 deletions(-) rename driver-core/src/main/java/com/datastax/driver/core/{configuration => }/ConnectionsConfiguration.java (68%) rename driver-core/src/main/java/com/datastax/driver/core/{configuration => }/PoolingOptions.java (99%) rename driver-core/src/main/java/com/datastax/driver/core/{configuration => }/ProtocolOptions.java (72%) rename driver-core/src/main/java/com/datastax/driver/core/{configuration => }/SocketOptions.java (97%) rename driver-core/src/main/java/com/datastax/driver/core/{configuration => policies}/LoadBalancingPolicy.java (99%) rename driver-core/src/main/java/com/datastax/driver/core/{configuration => policies}/Policies.java (96%) rename driver-core/src/main/java/com/datastax/driver/core/{configuration => policies}/ReconnectionPolicy.java (99%) rename driver-core/src/main/java/com/datastax/driver/core/{configuration => 
policies}/RetryPolicy.java (99%)
diff --git a/driver-core/src/main/java/com/datastax/driver/core/AbstractReconnectionHandler.java b/driver-core/src/main/java/com/datastax/driver/core/AbstractReconnectionHandler.java
index 9a58d578f63..5a13ed741fc 100644
--- a/driver-core/src/main/java/com/datastax/driver/core/AbstractReconnectionHandler.java
+++ b/driver-core/src/main/java/com/datastax/driver/core/AbstractReconnectionHandler.java
@@ -6,7 +6,7 @@
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
-import com.datastax.driver.core.configuration.ReconnectionPolicy;
+import com.datastax.driver.core.policies.ReconnectionPolicy;
 import com.datastax.driver.core.exceptions.AuthenticationException;
 abstract class AbstractReconnectionHandler implements Runnable {
diff --git a/driver-core/src/main/java/com/datastax/driver/core/Cluster.java b/driver-core/src/main/java/com/datastax/driver/core/Cluster.java
index fabc494ed59..2def2c6f646 100644
--- a/driver-core/src/main/java/com/datastax/driver/core/Cluster.java
+++ b/driver-core/src/main/java/com/datastax/driver/core/Cluster.java
@@ -1,7 +1,6 @@
 package com.datastax.driver.core;
 import java.net.InetAddress;
-import java.net.InetSocketAddress;
 import java.net.UnknownHostException;
 import java.util.*;
 import java.util.concurrent.*;
@@ -15,7 +14,7 @@
 import org.apache.cassandra.transport.messages.QueryMessage;
 import com.datastax.driver.core.exceptions.*;
-import com.datastax.driver.core.configuration.*;
+import com.datastax.driver.core.policies.*;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
@@ -60,7 +59,7 @@ public class Cluster {
     final Manager manager;
-    private Cluster(List<InetSocketAddress> contactPoints, int port, Policies policies, AuthInfoProvider authProvider) throws NoHostAvailableException {
+    private Cluster(List<InetAddress> contactPoints, int port, Policies policies, AuthInfoProvider authProvider) throws NoHostAvailableException {
         this.manager = new Manager(contactPoints, port, policies, authProvider);
     }
@@ -84,17 +83,11 @@ private Cluster(List contactPoints, int port, Policies polici
      * contact points an authencation error occurs.
      */
     public static Cluster buildFrom(Initializer initializer) throws NoHostAvailableException {
-        List<InetSocketAddress> contactPoints = initializer.getContactPoints();
+        List<InetAddress> contactPoints = initializer.getContactPoints();
         if (contactPoints.isEmpty())
             throw new IllegalArgumentException("Cannot build a cluster without contact points");
-        int port = -1;
-        for (InetSocketAddress a : contactPoints) {
-            if (port != -1 && a.getPort() != port)
-                throw new IllegalArgumentException(String.format("Not all hosts have the same port, found port %d and %d", port, a.getPort()));
-            port = a.getPort();
-        }
-        return new Cluster(contactPoints, port, initializer.getPolicies(), initializer.getAuthInfoProvider());
+        return new Cluster(contactPoints, initializer.getPort(), initializer.getPolicies(), initializer.getAuthInfoProvider());
     }
     /**
@@ -167,7 +160,18 @@ public interface Initializer {
      * @return the initial Cassandra contact points. See {@link Builder#addContactPoint}
      * for more details on contact points.
      */
-    public List<InetSocketAddress> getContactPoints();
+    public List<InetAddress> getContactPoints();
+
+    /**
+     * The port to use to connect to Cassandra hosts.
+     *

+     * This port will be used to connect to all of the Cassandra cluster
+     * hosts, not only the contact points. This means that all Cassandra
+     * hosts must be configured to listen on the same port.
+     *
+     * @return the port to use to connect to Cassandra hosts.
+     */
+    public int getPort();
     /**
      * Returns the policies to use for this cluster.
@@ -198,11 +202,8 @@ public static class Builder implements Initializer {
         private ReconnectionPolicy.Factory reconnectionPolicyFactory;
         private RetryPolicy retryPolicy;
-        public List<InetSocketAddress> getContactPoints() {
-            List<InetSocketAddress> cp = new ArrayList<InetSocketAddress>(addresses.size());
-            for (InetAddress address : addresses)
-                cp.add(new InetSocketAddress(address, port));
-            return cp;
+        public List<InetAddress> getContactPoints() {
+            return addresses;
         }
         /**
@@ -219,6 +220,15 @@ public Builder withPort(int port) {
             return this;
         }
+        /**
+         * The port to use to connect to Cassandra hosts.
+         *
+         * @return the port to use to connect to Cassandra hosts.
+         */
+        public int getPort() {
+            return port;
+        }
+
         /**
          * Adds a contact point.
         *
@@ -392,10 +402,11 @@ public Cluster build() throws NoHostAvailableException {
     public static class Configuration {
         private final Policies policies;
-        private final ConnectionsConfiguration connections = new ConnectionsConfiguration();
+        private final ConnectionsConfiguration connections;
-        private Configuration(Policies policies) {
+        private Configuration(Cluster.Manager manager, Policies policies) {
             this.policies = policies;
+            this.connections = new ConnectionsConfiguration(manager);
         }
         /**
@@ -428,7 +439,7 @@ public ConnectionsConfiguration getConnectionsConfiguration() {
     class Manager implements Host.StateListener, Connection.DefaultResponseHandler {
         // Initial contacts point
-        final List<InetSocketAddress> contactPoints;
+        final List<InetAddress> contactPoints;
         final int port;
         private final Set sessions = new CopyOnWriteArraySet();
@@ -454,14 +465,14 @@ class Manager implements Host.StateListener, Connection.DefaultResponseHandler {
         // less clear behavior.
         final Map preparedQueries = new ConcurrentHashMap();
-        private Manager(List<InetSocketAddress> contactPoints, int port, Policies policies, AuthInfoProvider authProvider) throws NoHostAvailableException {
+        private Manager(List<InetAddress> contactPoints, int port, Policies policies, AuthInfoProvider authProvider) throws NoHostAvailableException {
             this.port = port;
-            this.configuration = new Configuration(policies);
+            this.configuration = new Configuration(this, policies);
             this.metadata = new ClusterMetadata(this);
             this.contactPoints = contactPoints;
             this.connectionFactory = new Connection.Factory(this, authProvider);
-            for (InetSocketAddress address : contactPoints)
+            for (InetAddress address : contactPoints)
                 addHost(address, false);
             this.controlConnection = new ControlConnection(this);
@@ -553,7 +564,7 @@ public void onRemove(Host host) {
                 s.manager.onRemove(host);
         }
-        public Host addHost(InetSocketAddress address, boolean signal) {
+        public Host addHost(InetAddress address, boolean signal) {
             Host newHost = metadata.add(address);
             if (newHost != null && signal) {
                 logger.info("New Cassandra host {} added", newHost);
@@ -573,7 +584,7 @@ public void removeHost(Host host) {
         }
         // Prepare a query on all nodes
-        public void prepare(MD5Digest digest, String query, InetSocketAddress toExclude) {
+        public void prepare(MD5Digest digest, String query, InetAddress toExclude) {
             preparedQueries.put(digest, query);
             for (Session s : sessions)
                 s.manager.prepare(query, toExclude);
@@ -659,10 +670,10 @@ public void run() {
                     Event.TopologyChange tpc = (Event.TopologyChange)event;
                     switch (tpc.change) {
                         case NEW_NODE:
-                            addHost(tpc.node, true);
+                            addHost(tpc.node.getAddress(), true);
                             break;
                         case REMOVED_NODE:
-                            removeHost(metadata.getHost(tpc.node));
+                            removeHost(metadata.getHost(tpc.node.getAddress()));
                             break;
                     }
                     break;
@@ -670,10 +681,10 @@ public void run() {
                     Event.StatusChange stc = (Event.StatusChange)event;
                    switch (stc.status) {
                        case UP:
-                            Host host = metadata.getHost(stc.node);
+                            Host host = metadata.getHost(stc.node.getAddress());
                            if (host == null) {
                                // first time we heard about that node apparently, add it
-                                addHost(stc.node, true);
+                                addHost(stc.node.getAddress(), true);
                            } else {
                                onUp(host);
                            }
diff --git a/driver-core/src/main/java/com/datastax/driver/core/ClusterMetadata.java b/driver-core/src/main/java/com/datastax/driver/core/ClusterMetadata.java
index 4f077827524..5333d7f97d5 100644
--- a/driver-core/src/main/java/com/datastax/driver/core/ClusterMetadata.java
+++ b/driver-core/src/main/java/com/datastax/driver/core/ClusterMetadata.java
@@ -1,6 +1,6 @@
 package com.datastax.driver.core;
-import java.net.InetSocketAddress;
+import java.net.InetAddress;
 import java.util.*;
 import java.util.concurrent.ConcurrentHashMap;
 import java.util.concurrent.ConcurrentMap;
@@ -19,7 +19,7 @@ public class ClusterMetadata {
     private final Cluster.Manager cluster;
     volatile String clusterName;
-    private final ConcurrentMap<InetSocketAddress, Host> hosts = new ConcurrentHashMap<InetSocketAddress, Host>();
+    private final ConcurrentMap<InetAddress, Host> hosts = new ConcurrentHashMap<InetAddress, Host>();
     private final ConcurrentMap keyspaces = new ConcurrentHashMap();
     ClusterMetadata(Cluster.Manager cluster) {
@@ -114,7 +114,7 @@ private static void buildTableMetadata(KeyspaceMetadata ksm, List cfRows
         }
     }
-    Host add(InetSocketAddress address) {
+    Host add(InetAddress address) {
         Host newHost = new Host(address, cluster.convictionPolicyFactory);
         Host previous = hosts.putIfAbsent(address, newHost);
         if (previous == null)
@@ -132,7 +132,7 @@ boolean remove(Host host) {
         return hosts.remove(host.getAddress()) != null;
     }
-    Host 
getHost(InetSocketAddress address) { + Host getHost(InetAddress address) { return hosts.get(address); } diff --git a/driver-core/src/main/java/com/datastax/driver/core/Connection.java b/driver-core/src/main/java/com/datastax/driver/core/Connection.java index e1f9ffce068..17209ad5c4f 100644 --- a/driver-core/src/main/java/com/datastax/driver/core/Connection.java +++ b/driver-core/src/main/java/com/datastax/driver/core/Connection.java @@ -1,5 +1,6 @@ package com.datastax.driver.core; +import java.net.InetAddress; import java.net.InetSocketAddress; import java.util.Iterator; import java.util.HashMap; @@ -8,7 +9,7 @@ import java.util.concurrent.atomic.AtomicInteger; import java.util.concurrent.atomic.AtomicReference; -import com.datastax.driver.core.configuration.*; +import com.datastax.driver.core.policies.*; import com.datastax.driver.core.exceptions.AuthenticationException; import com.datastax.driver.core.exceptions.DriverInternalError; @@ -43,7 +44,7 @@ public void addConnection(Channel ch, org.apache.cassandra.transport.Connection public void closeAll() {} }; - public final InetSocketAddress address; + public final InetAddress address; private final String name; private final ClientBootstrap bootstrap; @@ -69,7 +70,7 @@ public void closeAll() {} * @throws ConnectionException if the connection attempts fails or is * refused by the server. */ - private Connection(String name, InetSocketAddress address, Factory factory) throws ConnectionException { + private Connection(String name, InetAddress address, Factory factory) throws ConnectionException { super(EMPTY_TRACKER); this.address = address; @@ -79,7 +80,7 @@ private Connection(String name, InetSocketAddress address, Factory factory) thro bootstrap.setPipelineFactory(new PipelineFactory(this)); - ChannelFuture future = bootstrap.connect(address); + ChannelFuture future = bootstrap.connect(new InetSocketAddress(address, factory.port)); writer.incrementAndGet(); try { @@ -126,7 +127,7 @@ private void initializeTransport() throws ConnectionException { throw defunct(new TransportException(address, String.format("Error initializing connection", ((ErrorMessage)response).error))); case AUTHENTICATE: CredentialsMessage creds = new CredentialsMessage(); - creds.credentials.putAll(factory.authProvider.getAuthInfos(address.getAddress())); + creds.credentials.putAll(factory.authProvider.getAuthInfos(address)); Message.Response authResponse = write(creds).get(); switch (authResponse.type) { case READY: @@ -302,20 +303,22 @@ public String toString() { public static class Factory { + public final int port; private final ExecutorService bossExecutor = Executors.newCachedThreadPool(); private final ExecutorService workerExecutor = Executors.newCachedThreadPool(); private final ConcurrentMap idGenerators = new ConcurrentHashMap(); - private final DefaultResponseHandler defaultHandler; - private final ConnectionsConfiguration configuration; + public final DefaultResponseHandler defaultHandler; + public final ConnectionsConfiguration configuration; - private final AuthInfoProvider authProvider; + public final AuthInfoProvider authProvider; public Factory(Cluster.Manager manager, AuthInfoProvider authProvider) { - this(manager, manager.configuration.getConnectionsConfiguration(), authProvider); + this(manager.port, manager, manager.configuration.getConnectionsConfiguration(), authProvider); } - private Factory(DefaultResponseHandler defaultHandler, ConnectionsConfiguration configuration, AuthInfoProvider authProvider) { + private Factory(int port, 
DefaultResponseHandler defaultHandler, ConnectionsConfiguration configuration, AuthInfoProvider authProvider) { + this.port = port; this.defaultHandler = defaultHandler; this.configuration = configuration; this.authProvider = authProvider; @@ -329,7 +332,7 @@ private Factory(DefaultResponseHandler defaultHandler, ConnectionsConfiguration * @throws ConnectionException if connection attempt fails. */ public Connection open(Host host) throws ConnectionException { - InetSocketAddress address = host.getAddress(); + InetAddress address = host.getAddress(); String name = address.toString() + "-" + getIdGenerator(host).getAndIncrement(); return new Connection(name, address, this); } @@ -372,10 +375,6 @@ private ClientBootstrap bootstrap() { return b; } - - public DefaultResponseHandler defaultHandler() { - return defaultHandler; - } } private class Dispatcher extends SimpleChannelUpstreamHandler { @@ -406,7 +405,7 @@ public void messageReceived(ChannelHandlerContext ctx, MessageEvent e) { logger.trace("[{}] received: {}", name, e.getMessage()); if (streamId < 0) { - factory.defaultHandler().handle(response); + factory.defaultHandler.handle(response); return; } @@ -445,7 +444,7 @@ public void errorOutAllHandler(ConnectionException ce) { static class Future extends SimpleFuture implements ResponseCallback { private final Message.Request request; - private volatile InetSocketAddress address; + private volatile InetAddress address; public Future(Message.Request request) { this.request = request; @@ -464,7 +463,7 @@ public void onException(Connection connection, Exception exception) { super.setException(exception); } - public InetSocketAddress getAddress() { + public InetAddress getAddress() { return address; } } diff --git a/driver-core/src/main/java/com/datastax/driver/core/ConnectionException.java b/driver-core/src/main/java/com/datastax/driver/core/ConnectionException.java index 5c02388fbd2..0d44b5980f2 100644 --- a/driver-core/src/main/java/com/datastax/driver/core/ConnectionException.java +++ b/driver-core/src/main/java/com/datastax/driver/core/ConnectionException.java @@ -1,18 +1,18 @@ package com.datastax.driver.core; -import java.net.InetSocketAddress; +import java.net.InetAddress; class ConnectionException extends Exception { - public final InetSocketAddress address; + public final InetAddress address; - public ConnectionException(InetSocketAddress address, String msg, Throwable cause) + public ConnectionException(InetAddress address, String msg, Throwable cause) { super(msg, cause); this.address = address; } - public ConnectionException(InetSocketAddress address, String msg) + public ConnectionException(InetAddress address, String msg) { super(msg); this.address = address; diff --git a/driver-core/src/main/java/com/datastax/driver/core/configuration/ConnectionsConfiguration.java b/driver-core/src/main/java/com/datastax/driver/core/ConnectionsConfiguration.java similarity index 68% rename from driver-core/src/main/java/com/datastax/driver/core/configuration/ConnectionsConfiguration.java rename to driver-core/src/main/java/com/datastax/driver/core/ConnectionsConfiguration.java index f9874c45c0a..eb4469c087c 100644 --- a/driver-core/src/main/java/com/datastax/driver/core/configuration/ConnectionsConfiguration.java +++ b/driver-core/src/main/java/com/datastax/driver/core/ConnectionsConfiguration.java @@ -1,4 +1,4 @@ -package com.datastax.driver.core.configuration; +package com.datastax.driver.core; /** * Handle all configuration related of the connections to the Cassandra hosts. 
@@ -12,9 +12,15 @@
  */
 public class ConnectionsConfiguration {
-    private final SocketOptions socketOptions = new SocketOptions();
-    private final ProtocolOptions protocolOptions = new ProtocolOptions();
-    private final PoolingOptions poolingOptions = new PoolingOptions();
+    private final SocketOptions socketOptions;
+    private final ProtocolOptions protocolOptions;
+    private final PoolingOptions poolingOptions;
+
+    ConnectionsConfiguration(Cluster.Manager manager) {
+        this.socketOptions = new SocketOptions();
+        this.protocolOptions = new ProtocolOptions(manager);
+        this.poolingOptions = new PoolingOptions();
+    }
     /**
      * The socket options.
diff --git a/driver-core/src/main/java/com/datastax/driver/core/ControlConnection.java b/driver-core/src/main/java/com/datastax/driver/core/ControlConnection.java
index 6c31a21d710..76811b10d0e 100644
--- a/driver-core/src/main/java/com/datastax/driver/core/ControlConnection.java
+++ b/driver-core/src/main/java/com/datastax/driver/core/ControlConnection.java
@@ -1,6 +1,6 @@
 package com.datastax.driver.core;
-import java.net.InetSocketAddress;
+import java.net.InetAddress;
 import java.util.*;
 import java.util.concurrent.atomic.AtomicReference;
 import java.util.concurrent.*;
@@ -14,7 +14,7 @@
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
-import com.datastax.driver.core.configuration.*;
+import com.datastax.driver.core.policies.*;
 import com.datastax.driver.core.exceptions.DriverInternalError;
 import com.datastax.driver.core.exceptions.NoHostAvailableException;
@@ -102,14 +102,14 @@ private void setNewConnection(Connection newConnection) {
     private Connection reconnectInternal() throws NoHostAvailableException {
         Iterator iter = balancingPolicy.newQueryPlan(QueryOptions.DEFAULT);
-        Map<InetSocketAddress, String> errors = null;
+        Map<InetAddress, String> errors = null;
         while (iter.hasNext()) {
             Host host = iter.next();
             try {
                 return tryConnect(host);
             } catch (ConnectionException e) {
                 if (errors == null)
-                    errors = new HashMap<InetSocketAddress, String>();
+                    errors = new HashMap<InetAddress, String>();
                 errors.put(e.address, e.getMessage());
                 if (logger.isDebugEnabled()) {
@@ -121,7 +121,7 @@ private Connection reconnectInternal() throws NoHostAvailableException {
                 }
             }
         }
-        throw new NoHostAvailableException(errors == null ? Collections.<InetSocketAddress, String>emptyMap() : errors);
+        throw new NoHostAvailableException(errors == null ? Collections.<InetAddress, String>emptyMap() : errors);
     }
     private Connection tryConnect(Host host) throws ConnectionException {
@@ -212,13 +212,13 @@ private void refreshNodeList(Connection connection) throws BusyConnectionExcepti
             host.setLocationInfo(localRow.getString("data_center"), localRow.getString("rack"));
         }
-        List<InetSocketAddress> foundHosts = new ArrayList<InetSocketAddress>();
+        List<InetAddress> foundHosts = new ArrayList<InetAddress>();
         List dcs = new ArrayList();
         List racks = new ArrayList();
         for (CQLRow row : peersFuture.get()) {
             if (!row.isNull("peer")) {
-                foundHosts.add(new InetSocketAddress(row.getInet("peer"), cluster.port));
+                foundHosts.add(row.getInet("peer"));
                 dcs.add(row.getString("data_center"));
                 racks.add(row.getString("rack"));
             }
@@ -234,7 +234,7 @@ private void refreshNodeList(Connection connection) throws BusyConnectionExcepti
         }
         // Removes all those that seems to have been removed (since we lost the control connection)
-        Set<InetSocketAddress> foundHostsSet = new HashSet<InetSocketAddress>(foundHosts);
+        Set<InetAddress> foundHostsSet = new HashSet<InetAddress>(foundHosts);
         for (Host host : cluster.metadata.allHosts())
             if (!host.getAddress().equals(connection.address) && !foundHostsSet.contains(host.getAddress()))
                 cluster.removeHost(host);
diff --git a/driver-core/src/main/java/com/datastax/driver/core/Host.java b/driver-core/src/main/java/com/datastax/driver/core/Host.java
index 105475fb9c4..8212f5dc244 100644
--- a/driver-core/src/main/java/com/datastax/driver/core/Host.java
+++ b/driver-core/src/main/java/com/datastax/driver/core/Host.java
@@ -1,6 +1,6 @@
 package com.datastax.driver.core;
-import java.net.InetSocketAddress;
+import java.net.InetAddress;
 import java.util.Set;
 import java.util.concurrent.atomic.AtomicReference;
 import java.util.concurrent.CopyOnWriteArraySet;
@@ -13,7 +13,7 @@
  */
 public class Host {
-    private final InetSocketAddress address;
+    private final InetAddress address;
     private final HealthMonitor monitor;
     private volatile String datacenter;
@@ -24,7 +24,7 @@ public class Host {
     // ClusterMetadata keeps one Host object per inet address, so don't use
     // that constructor unless you know what you do (use ClusterMetadata.getHost typically).
-    Host(InetSocketAddress address, ConvictionPolicy.Factory policy) {
+    Host(InetAddress address, ConvictionPolicy.Factory policy) {
         if (address == null || policy == null)
             throw new NullPointerException();
@@ -40,9 +40,9 @@ void setLocationInfo(String datacenter, String rack) {
     /**
      * Returns the node address.
      *
-     * @return the node {@link InetSocketAddress}.
+     * @return the node {@link InetAddress}.
      */
-    public InetSocketAddress getAddress() {
+    public InetAddress getAddress() {
         return address;
     }
diff --git a/driver-core/src/main/java/com/datastax/driver/core/HostConnectionPool.java b/driver-core/src/main/java/com/datastax/driver/core/HostConnectionPool.java
index 429fe96086e..1d979120259 100644
--- a/driver-core/src/main/java/com/datastax/driver/core/HostConnectionPool.java
+++ b/driver-core/src/main/java/com/datastax/driver/core/HostConnectionPool.java
@@ -9,7 +9,7 @@
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
-import com.datastax.driver.core.configuration.*;
+import com.datastax.driver.core.policies.*;
 import com.datastax.driver.core.exceptions.AuthenticationException;
 // TODO: We should allow changing the core pool size (i.e. 
have a method that diff --git a/driver-core/src/main/java/com/datastax/driver/core/configuration/PoolingOptions.java b/driver-core/src/main/java/com/datastax/driver/core/PoolingOptions.java similarity index 99% rename from driver-core/src/main/java/com/datastax/driver/core/configuration/PoolingOptions.java rename to driver-core/src/main/java/com/datastax/driver/core/PoolingOptions.java index d4367a0ede4..563344a8718 100644 --- a/driver-core/src/main/java/com/datastax/driver/core/configuration/PoolingOptions.java +++ b/driver-core/src/main/java/com/datastax/driver/core/PoolingOptions.java @@ -1,6 +1,4 @@ -package com.datastax.driver.core.configuration; - -import com.datastax.driver.core.HostDistance; +package com.datastax.driver.core; /** * Options related to connection pooling. diff --git a/driver-core/src/main/java/com/datastax/driver/core/configuration/ProtocolOptions.java b/driver-core/src/main/java/com/datastax/driver/core/ProtocolOptions.java similarity index 72% rename from driver-core/src/main/java/com/datastax/driver/core/configuration/ProtocolOptions.java rename to driver-core/src/main/java/com/datastax/driver/core/ProtocolOptions.java index 977cf15c8a8..f00861446d5 100644 --- a/driver-core/src/main/java/com/datastax/driver/core/configuration/ProtocolOptions.java +++ b/driver-core/src/main/java/com/datastax/driver/core/ProtocolOptions.java @@ -1,10 +1,16 @@ -package com.datastax.driver.core.configuration; +package com.datastax.driver.core; /** * Options of the Cassandra native binary protocol. */ public class ProtocolOptions { + private final Cluster.Manager manager; + + ProtocolOptions(Cluster.Manager manager) { + this.manager = manager; + } + /** * Compression supported by the Cassandra binary protocol. */ @@ -28,6 +34,18 @@ public String toString() { private volatile Compression compression = Compression.NONE; + /** + * The port to use to connect to the Cassandra hosts. + *

+ * The port must be set at cluster creation time (using {@link Cluster.Builder#withPort} + * for instance) and cannot be changed afterwards. + * + * @return the port to use to connect to the Cassandra hosts. + */ + public int getPort() { + return manager.port; + } + /** * Returns the compression used by the protocol. *

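To make the port handling introduced by this patch concrete: with per-host ports gone, a single port is supplied when the Cluster is built and is then exposed, read-only, through ProtocolOptions#getPort(). A rough usage sketch follows; the String-based addContactPoint overload is an assumption (only its javadoc appears above), while withPort, build and getPort are taken from the patch:

    // Hedged sketch, not part of the patch itself: building a cluster with
    // an explicit shared port. build() declares NoHostAvailableException.
    static Cluster buildCluster() throws NoHostAvailableException {
        return new Cluster.Builder()
            .addContactPoint("127.0.0.1") // assumed String-based overload
            .withPort(9042)               // one port for every Cassandra host
            .build();
    }
    // ProtocolOptions#getPort() returns manager.port, i.e. the value given to
    // withPort(); per this patch it cannot be changed once the cluster is built.
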
diff --git a/driver-core/src/main/java/com/datastax/driver/core/RetryingCallback.java b/driver-core/src/main/java/com/datastax/driver/core/RetryingCallback.java
index 5d33dd194fe..5427ec32a5e 100644
--- a/driver-core/src/main/java/com/datastax/driver/core/RetryingCallback.java
+++ b/driver-core/src/main/java/com/datastax/driver/core/RetryingCallback.java
@@ -1,6 +1,6 @@
 package com.datastax.driver.core;
-import java.net.InetSocketAddress;
+import java.net.InetAddress;
 import java.util.Collections;
 import java.util.Iterator;
 import java.util.HashMap;
@@ -9,7 +9,7 @@
 import java.util.concurrent.TimeUnit;
 import java.util.concurrent.ExecutionException;
-import com.datastax.driver.core.configuration.RetryPolicy;
+import com.datastax.driver.core.policies.RetryPolicy;
 import com.datastax.driver.core.exceptions.*;
 import org.apache.cassandra.transport.Message;
@@ -45,7 +45,7 @@ class RetryingCallback implements Connection.ResponseCallback {
     private volatile int queryRetries;
     private volatile ConsistencyLevel retryConsistencyLevel;
-    private volatile Map<InetSocketAddress, String> errors;
+    private volatile Map<InetAddress, String> errors;
     public RetryingCallback(Session.Manager manager, Connection.ResponseCallback callback, QueryOptions queryOptions) {
         this.manager = manager;
@@ -61,7 +61,7 @@ public void sendRequest() {
             if (query(host))
                 return;
         }
-        callback.onException(null, new NoHostAvailableException(errors == null ? Collections.<InetSocketAddress, String>emptyMap() : errors));
+        callback.onException(null, new NoHostAvailableException(errors == null ? Collections.<InetAddress, String>emptyMap() : errors));
     }
     private boolean query(Host host) {
@@ -101,10 +101,10 @@ private boolean query(Host host) {
         }
     }
-    private void logError(InetSocketAddress address, String msg) {
+    private void logError(InetAddress address, String msg) {
         logger.debug("Error querying {}, trying next host (error is: {})", address, msg);
         if (errors == null)
-            errors = new HashMap<InetSocketAddress, String>();
+            errors = new HashMap<InetAddress, String>();
         errors.put(address, msg);
     }
diff --git a/driver-core/src/main/java/com/datastax/driver/core/Session.java b/driver-core/src/main/java/com/datastax/driver/core/Session.java
index 623abeb6ee5..0f920ae0a8c 100644
--- a/driver-core/src/main/java/com/datastax/driver/core/Session.java
+++ b/driver-core/src/main/java/com/datastax/driver/core/Session.java
@@ -1,12 +1,12 @@
 package com.datastax.driver.core;
-import java.net.InetSocketAddress;
+import java.net.InetAddress;
 import java.util.*;
 import java.util.concurrent.*;
 import java.util.concurrent.atomic.AtomicBoolean;
 import com.datastax.driver.core.exceptions.*;
-import com.datastax.driver.core.configuration.*;
+import com.datastax.driver.core.policies.*;
 import org.apache.cassandra.transport.Message;
 import org.apache.cassandra.transport.messages.*;
@@ -527,7 +527,7 @@ public void execute(Connection.ResponseCallback callback, QueryOptions options)
             new RetryingCallback(this, callback, options).sendRequest();
         }
-        public void prepare(String query, InetSocketAddress toExclude) {
+        public void prepare(String query, InetAddress toExclude) {
             for (Map.Entry entry : pools.entrySet()) {
                 if (entry.getKey().getAddress().equals(toExclude))
                     continue;
diff --git a/driver-core/src/main/java/com/datastax/driver/core/configuration/SocketOptions.java b/driver-core/src/main/java/com/datastax/driver/core/SocketOptions.java
similarity index 97%
rename from driver-core/src/main/java/com/datastax/driver/core/configuration/SocketOptions.java
rename to driver-core/src/main/java/com/datastax/driver/core/SocketOptions.java
index e5d49b395d2..de98b952377 100644
--- 
a/driver-core/src/main/java/com/datastax/driver/core/configuration/SocketOptions.java +++ b/driver-core/src/main/java/com/datastax/driver/core/SocketOptions.java @@ -1,4 +1,4 @@ -package com.datastax.driver.core.configuration; +package com.datastax.driver.core; /** * Options to configure low-level socket options for the connections kept diff --git a/driver-core/src/main/java/com/datastax/driver/core/TransportException.java b/driver-core/src/main/java/com/datastax/driver/core/TransportException.java index 39747ce486e..d395726741b 100644 --- a/driver-core/src/main/java/com/datastax/driver/core/TransportException.java +++ b/driver-core/src/main/java/com/datastax/driver/core/TransportException.java @@ -1,6 +1,6 @@ package com.datastax.driver.core; -import java.net.InetSocketAddress; +import java.net.InetAddress; /** * A connection exception that has to do with the transport itself, i.e. that @@ -8,12 +8,12 @@ */ class TransportException extends ConnectionException { - public TransportException(InetSocketAddress address, String msg, Throwable cause) + public TransportException(InetAddress address, String msg, Throwable cause) { super(address, msg, cause); } - public TransportException(InetSocketAddress address, String msg) + public TransportException(InetAddress address, String msg) { super(address, msg); } diff --git a/driver-core/src/main/java/com/datastax/driver/core/exceptions/AuthenticationException.java b/driver-core/src/main/java/com/datastax/driver/core/exceptions/AuthenticationException.java index 19c4343fdd9..f7c8400e6c3 100644 --- a/driver-core/src/main/java/com/datastax/driver/core/exceptions/AuthenticationException.java +++ b/driver-core/src/main/java/com/datastax/driver/core/exceptions/AuthenticationException.java @@ -1,15 +1,15 @@ package com.datastax.driver.core.exceptions; -import java.net.InetSocketAddress; +import java.net.InetAddress; /** * Indicates an error during the authentication phase while connecting to a node. */ public class AuthenticationException extends DriverUncheckedException { - private final InetSocketAddress host; + private final InetAddress host; - public AuthenticationException(InetSocketAddress host, String message) { + public AuthenticationException(InetAddress host, String message) { super(String.format("Authentication error on host %s: %s", host, message)); this.host = host; } @@ -19,7 +19,7 @@ public AuthenticationException(InetSocketAddress host, String message) { * * @return the host for which the authentication failed. 
*/
-    public InetSocketAddress getHost() {
+    public InetAddress getHost() {
         return host;
     }
 }
diff --git a/driver-core/src/main/java/com/datastax/driver/core/exceptions/NoHostAvailableException.java b/driver-core/src/main/java/com/datastax/driver/core/exceptions/NoHostAvailableException.java
index eb22dd5bde6..a18af555e35 100644
--- a/driver-core/src/main/java/com/datastax/driver/core/exceptions/NoHostAvailableException.java
+++ b/driver-core/src/main/java/com/datastax/driver/core/exceptions/NoHostAvailableException.java
@@ -1,6 +1,6 @@
 package com.datastax.driver.core.exceptions;
-import java.net.InetSocketAddress;
+import java.net.InetAddress;
 import java.util.HashMap;
 import java.util.Map;
@@ -19,9 +19,9 @@
  */
 public class NoHostAvailableException extends DriverException {
-    private final Map<InetSocketAddress, String> errors;
+    private final Map<InetAddress, String> errors;
-    public NoHostAvailableException(Map<InetSocketAddress, String> errors) {
+    public NoHostAvailableException(Map<InetAddress, String> errors) {
         super(makeMessage(errors));
         this.errors = errors;
     }
@@ -33,11 +33,11 @@ public NoHostAvailableException(Map errors) {
     * @return a map containing for each tried host a description of the error
     * triggered when trying it.
     */
-    public Map<InetSocketAddress, String> getErrors() {
-        return new HashMap<InetSocketAddress, String>(errors);
+    public Map<InetAddress, String> getErrors() {
+        return new HashMap<InetAddress, String>(errors);
     }
-    private static String makeMessage(Map<InetSocketAddress, String> errors) {
+    private static String makeMessage(Map<InetAddress, String> errors) {
         return String.format("All host tried for query are in error (tried: %s)", errors.keySet());
     }
 }
diff --git a/driver-core/src/main/java/com/datastax/driver/core/configuration/LoadBalancingPolicy.java b/driver-core/src/main/java/com/datastax/driver/core/policies/LoadBalancingPolicy.java
similarity index 99%
rename from driver-core/src/main/java/com/datastax/driver/core/configuration/LoadBalancingPolicy.java
rename to driver-core/src/main/java/com/datastax/driver/core/policies/LoadBalancingPolicy.java
index c319ef04115..053f4dae2a8 100644
--- a/driver-core/src/main/java/com/datastax/driver/core/configuration/LoadBalancingPolicy.java
+++ b/driver-core/src/main/java/com/datastax/driver/core/policies/LoadBalancingPolicy.java
@@ -1,4 +1,4 @@
-package com.datastax.driver.core.configuration;
+package com.datastax.driver.core.policies;
 import java.util.*;
 import java.util.concurrent.*;
diff --git a/driver-core/src/main/java/com/datastax/driver/core/configuration/Policies.java b/driver-core/src/main/java/com/datastax/driver/core/policies/Policies.java
similarity index 96%
rename from driver-core/src/main/java/com/datastax/driver/core/configuration/Policies.java
rename to driver-core/src/main/java/com/datastax/driver/core/policies/Policies.java
index 4324cded6e4..3372ee0403a 100644
--- a/driver-core/src/main/java/com/datastax/driver/core/configuration/Policies.java
+++ b/driver-core/src/main/java/com/datastax/driver/core/policies/Policies.java
@@ -1,4 +1,4 @@
-package com.datastax.driver.core.configuration;
+package com.datastax.driver.core.policies;
 public class Policies {
diff --git a/driver-core/src/main/java/com/datastax/driver/core/configuration/ReconnectionPolicy.java b/driver-core/src/main/java/com/datastax/driver/core/policies/ReconnectionPolicy.java
similarity index 99%
rename from driver-core/src/main/java/com/datastax/driver/core/configuration/ReconnectionPolicy.java
rename to driver-core/src/main/java/com/datastax/driver/core/policies/ReconnectionPolicy.java
index 66479f42b6a..0e291d09c14 100644
--- a/driver-core/src/main/java/com/datastax/driver/core/configuration/ReconnectionPolicy.java
+++ 
b/driver-core/src/main/java/com/datastax/driver/core/policies/ReconnectionPolicy.java @@ -1,4 +1,4 @@ -package com.datastax.driver.core.configuration; +package com.datastax.driver.core.policies; /** * Policy that decides how often the reconnection to a dead node is attempted. diff --git a/driver-core/src/main/java/com/datastax/driver/core/configuration/RetryPolicy.java b/driver-core/src/main/java/com/datastax/driver/core/policies/RetryPolicy.java similarity index 99% rename from driver-core/src/main/java/com/datastax/driver/core/configuration/RetryPolicy.java rename to driver-core/src/main/java/com/datastax/driver/core/policies/RetryPolicy.java index d5bbae2f7d1..4ff9f43e643 100644 --- a/driver-core/src/main/java/com/datastax/driver/core/configuration/RetryPolicy.java +++ b/driver-core/src/main/java/com/datastax/driver/core/policies/RetryPolicy.java @@ -1,4 +1,4 @@ -package com.datastax.driver.core.configuration; +package com.datastax.driver.core.policies; import org.slf4j.Logger; import org.slf4j.LoggerFactory; From 131594052eb70ee7696ca09cf469859297e555d8 Mon Sep 17 00:00:00 2001 From: Sylvain Lebresne Date: Fri, 9 Nov 2012 18:44:57 +0100 Subject: [PATCH 069/719] Fix a number of outstanding todos --- .../core/AbstractReconnectionHandler.java | 10 +++-- .../com/datastax/driver/core/Cluster.java | 7 ++++ .../datastax/driver/core/ClusterMetadata.java | 3 -- .../com/datastax/driver/core/Connection.java | 11 ++--- .../driver/core/ConnectionsConfiguration.java | 2 +- .../java/com/datastax/driver/core/Host.java | 6 ++- .../driver/core/HostConnectionPool.java | 42 +++++++++++++++---- .../datastax/driver/core/HostDistance.java | 3 +- .../driver/core/KeyspaceMetadata.java | 2 +- .../datastax/driver/core/PoolingOptions.java | 14 ++++++- .../com/datastax/driver/core/ResultSet.java | 37 ++++++++-------- .../com/datastax/driver/core/Session.java | 15 ++++--- .../datastax/driver/core/TableMetadata.java | 37 +++++++++++----- .../InvalidConfigurationInQueryException.java | 15 +++++++ .../driver/core/exceptions/package-info.java | 4 ++ .../datastax/driver/core/package-info.java | 6 +++ .../core/policies/LoadBalancingPolicy.java | 6 +-- .../driver/core/policies/package-info.java | 4 ++ .../com/datastax/driver/core/CCMBridge.java | 2 - .../com/datastax/driver/core/SchemaTest.java | 38 +++++++++++++---- .../com/datastax/driver/core/SessionTest.java | 13 +++++- 21 files changed, 201 insertions(+), 76 deletions(-) create mode 100644 driver-core/src/main/java/com/datastax/driver/core/exceptions/InvalidConfigurationInQueryException.java create mode 100644 driver-core/src/main/java/com/datastax/driver/core/exceptions/package-info.java create mode 100644 driver-core/src/main/java/com/datastax/driver/core/package-info.java create mode 100644 driver-core/src/main/java/com/datastax/driver/core/policies/package-info.java diff --git a/driver-core/src/main/java/com/datastax/driver/core/AbstractReconnectionHandler.java b/driver-core/src/main/java/com/datastax/driver/core/AbstractReconnectionHandler.java index 5a13ed741fc..8cc6b1aac5d 100644 --- a/driver-core/src/main/java/com/datastax/driver/core/AbstractReconnectionHandler.java +++ b/driver-core/src/main/java/com/datastax/driver/core/AbstractReconnectionHandler.java @@ -32,8 +32,8 @@ public AbstractReconnectionHandler(ScheduledExecutorService executor, Reconnecti protected boolean onConnectionException(ConnectionException e, long nextDelayMs) { return true; } protected boolean onUnknownException(Exception e, long nextDelayMs) { return true; } - // TODO: maybe be 
shouldn't retry on authentication exception?
-    protected boolean onAuthenticationException(AuthenticationException e, long nextDelayMs) { return true; }
+    // Retrying on authentication error is unlikely to work
+    protected boolean onAuthenticationException(AuthenticationException e, long nextDelayMs) { return false; }
     public void start() {
         executor.schedule(this, policy.nextDelayMs(), TimeUnit.MILLISECONDS);
@@ -74,10 +74,12 @@ public void run() {
             } catch (AuthenticationException e) {
                 logger.error(e.getMessage());
                 long nextDelay = policy.nextDelayMs();
-                if (onAuthenticationException(e, nextDelay))
+                if (onAuthenticationException(e, nextDelay)) {
                     reschedule(nextDelay);
-                else
+                } else {
+                    logger.error("Retry against {} has been suspended. It won't be retried unless the node is restarted.", e.getHost());
                     currentAttempt.compareAndSet(localFuture, null);
+                }
             } catch (Exception e) {
                 long nextDelay = policy.nextDelayMs();
                 if (onUnknownException(e, nextDelay))
diff --git a/driver-core/src/main/java/com/datastax/driver/core/Cluster.java b/driver-core/src/main/java/com/datastax/driver/core/Cluster.java
index 2def2c6f646..c0323f84961 100644
--- a/driver-core/src/main/java/com/datastax/driver/core/Cluster.java
+++ b/driver-core/src/main/java/com/datastax/driver/core/Cluster.java
@@ -583,6 +583,13 @@ public void removeHost(Host host) {
             }
         }
+        public void ensurePoolsSizing() {
+            for (Session session : sessions) {
+                for (HostConnectionPool pool : session.manager.pools.values())
+                    pool.ensureCoreConnections();
+            }
+        }
+
         // Prepare a query on all nodes
         public void prepare(MD5Digest digest, String query, InetAddress toExclude) {
             preparedQueries.put(digest, query);
             for (Session s : sessions)
                 s.manager.prepare(query, toExclude);
diff --git a/driver-core/src/main/java/com/datastax/driver/core/ClusterMetadata.java b/driver-core/src/main/java/com/datastax/driver/core/ClusterMetadata.java
index 5333d7f97d5..c11b5708e8f 100644
--- a/driver-core/src/main/java/com/datastax/driver/core/ClusterMetadata.java
+++ b/driver-core/src/main/java/com/datastax/driver/core/ClusterMetadata.java
@@ -193,9 +193,6 @@ public List getKeyspaces() {
     * @return the CQL queries representing this cluster schema as a {code
     * String}.
     */
-    // TODO: add some boolean arg to deal with thift defs that can't be fully
-    // represented by CQL queries (like either throw an exception or
-    // do-our-best). Or some other way to deal with that.
     public String exportSchemaAsString() {
         StringBuilder sb = new StringBuilder();
diff --git a/driver-core/src/main/java/com/datastax/driver/core/Connection.java b/driver-core/src/main/java/com/datastax/driver/core/Connection.java
index 17209ad5c4f..942fe1683f0 100644
--- a/driver-core/src/main/java/com/datastax/driver/core/Connection.java
+++ b/driver-core/src/main/java/com/datastax/driver/core/Connection.java
@@ -158,7 +158,7 @@ public ConnectionException lastException() {
         return exception;
     }
-    private ConnectionException defunct(ConnectionException e) {
+    ConnectionException defunct(ConnectionException e) {
         exception = e;
         isDefunct = true;
         dispatcher.errorOutAllHandler(e);
@@ -395,9 +395,8 @@ public void removeHandler(int streamId) {
         @Override
         public void messageReceived(ChannelHandlerContext ctx, MessageEvent e) {
             if (!(e.getMessage() instanceof Message.Response)) {
-                logger.debug("[{}] Received unexpected message: {}", name, e.getMessage());
+                logger.error("[{}] Received unexpected message: {}", name, e.getMessage());
                 defunct(new TransportException(address, "Unexpected message received: " + e.getMessage()));
-                // TODO: we should allow calling some handler for such error
             } else {
                 Message.Response response = (Message.Response)e.getMessage();
                 int streamId = response.getStreamId();
@@ -412,8 +411,10 @@ public void messageReceived(ChannelHandlerContext ctx, MessageEvent e) {
                 ResponseHandler handler = pending.remove(streamId);
                 streamIdHandler.release(streamId);
                 if (handler == null) {
-                    // TODO: we should handle those with a default handler
-                    throw new RuntimeException("No handler set for " + streamId + ", handlers = " + pending);
+                    // Note: this is a bug, either ours or Cassandra's. So log it, but I'm not sure it's worth breaking
+                    // the connection for that.
+                    logger.error("[{}] No handler set for stream {} (this is a bug, either in this driver or in Cassandra, you should report it)", name, streamId);
+                    return;
                 }
                 handler.callback.onSet(Connection.this, response);
             }
diff --git a/driver-core/src/main/java/com/datastax/driver/core/ConnectionsConfiguration.java b/driver-core/src/main/java/com/datastax/driver/core/ConnectionsConfiguration.java
index eb4469c087c..4d0f74db098 100644
--- a/driver-core/src/main/java/com/datastax/driver/core/ConnectionsConfiguration.java
+++ b/driver-core/src/main/java/com/datastax/driver/core/ConnectionsConfiguration.java
@@ -19,7 +19,7 @@ public class ConnectionsConfiguration {
     ConnectionsConfiguration(Cluster.Manager manager) {
         this.socketOptions = new SocketOptions();
         this.protocolOptions = new ProtocolOptions(manager);
-        this.poolingOptions = new PoolingOptions();
+        this.poolingOptions = new PoolingOptions(manager);
     }
     /**
diff --git a/driver-core/src/main/java/com/datastax/driver/core/Host.java b/driver-core/src/main/java/com/datastax/driver/core/Host.java
index 8212f5dc244..0ccdf9dbb29 100644
--- a/driver-core/src/main/java/com/datastax/driver/core/Host.java
+++ b/driver-core/src/main/java/com/datastax/driver/core/Host.java
@@ -152,7 +152,6 @@ public boolean isUp() {
         return isUp;
     }
-    // TODO: Should we bother making sure that multiple calls to this don't inform the listeners twice?
     private void setDown() {
         isUp = false;
         for (Host.StateListener listener : listeners)
@@ -181,6 +180,11 @@ boolean signalConnectionFailure(ConnectionException exception) {
     /**
      * Interface for listener that are interested in hosts add, up, down and
      * remove events.
+     *

+ * Note that particularly for up and down events, it is possible that the + * same event be delivered multiple times. Listeners should thus be + * resilient and ignore a down (resp. up) event if the node has already + * been signaled down (resp. up). */ public interface StateListener { diff --git a/driver-core/src/main/java/com/datastax/driver/core/HostConnectionPool.java b/driver-core/src/main/java/com/datastax/driver/core/HostConnectionPool.java index 1d979120259..eab1b23d20d 100644 --- a/driver-core/src/main/java/com/datastax/driver/core/HostConnectionPool.java +++ b/driver-core/src/main/java/com/datastax/driver/core/HostConnectionPool.java @@ -12,8 +12,6 @@ import com.datastax.driver.core.policies.*; import com.datastax.driver.core.exceptions.AuthenticationException; -// TODO: We should allow changing the core pool size (i.e. have a method that -// adds new connection or trash existing one) class HostConnectionPool { private static final Logger logger = LoggerFactory.getLogger(HostConnectionPool.class); @@ -64,12 +62,17 @@ private PoolingOptions options() { public Connection borrowConnection(long timeout, TimeUnit unit) throws ConnectionException, TimeoutException { if (isShutdown.get()) - // TODO: have a specific exception + // Note: throwing a ConnectionException is probably fine in practice as it will trigger the creation of a new host. + // That being said, maybe having a specific exception could be cleaner. throw new ConnectionException(host.getAddress(), "Pool is shutdown"); if (connections.isEmpty()) { - for (int i = 0; i < options().getCoreConnectionsPerHost(hostDistance); i++) + for (int i = 0; i < options().getCoreConnectionsPerHost(hostDistance); i++) { + // We don't respect MAX_SIMULTANEOUS_CREATION here because it's only to + // protect against creating connection in excess of core too quickly + scheduledForCreation.incrementAndGet(); manager.executor().submit(newConnectionTask); + } return waitForConnection(timeout, unit); } @@ -105,12 +108,10 @@ private static long elapsed(long start, TimeUnit unit) { return unit.convert(System.currentTimeMillis() - start, TimeUnit.MILLISECONDS); } - private void awaitAvailableConnection(long timeout, TimeUnit unit) { + private void awaitAvailableConnection(long timeout, TimeUnit unit) throws InterruptedException { waitLock.lock(); try { hasAvailableConnection.await(timeout, unit); - } catch (InterruptedException e) { - // TODO: Do we want to stop ignoring that? } finally { waitLock.unlock(); } @@ -138,7 +139,12 @@ private Connection waitForConnection(long timeout, TimeUnit unit) throws Connect long start = System.currentTimeMillis(); long remaining = timeout; do { - awaitAvailableConnection(remaining, unit); + try { + awaitAvailableConnection(remaining, unit); + } catch (InterruptedException e) { + // If we're interrupted fine, check if there is a connection available but stop waiting otherwise + timeout = 0; // this will make us stop the loop if we don't get a connection right away + } if (isShutdown()) throw new ConnectionException(host.getAddress(), "Pool is shutdown"); @@ -302,9 +308,27 @@ private void discardAvailableConnections() { } } + // This creates connections if we have less than core connections (if we + // have more than core, connection will just get trash when we can). 
+ // This creates connections if we have fewer than core connections (if we + // have more than core, connections will just get trashed when we can). + public void ensureCoreConnections() { + if (isShutdown()) + return; + + // Note: this process is a bit racy, but it doesn't matter since we're still guaranteed not to create + // more connections than the maximum (and if a race makes us create more than core connections without + // the load justifying it, the connections in excess will be quickly trashed anyway) + int opened = open.get(); + for (int i = opened; i < options().getCoreConnectionsPerHost(hostDistance); i++) { + // We don't respect MAX_SIMULTANEOUS_CREATION here because it's only to + // protect against creating connections in excess of core too quickly + scheduledForCreation.incrementAndGet(); + manager.executor().submit(newConnectionTask); + } + } + static class PoolState { - private volatile String keyspace; + volatile String keyspace; public void setKeyspace(String keyspace) { this.keyspace = keyspace; diff --git a/driver-core/src/main/java/com/datastax/driver/core/HostDistance.java b/driver-core/src/main/java/com/datastax/driver/core/HostDistance.java index 4644650435a..c7883580760 100644 --- a/driver-core/src/main/java/com/datastax/driver/core/HostDistance.java +++ b/driver-core/src/main/java/com/datastax/driver/core/HostDistance.java @@ -2,7 +2,8 @@ /** * The distance to a Cassandra node as assigned by a - * {@link LoadBalancingPolicy} (through its {@code distance} method). + * {@link com.datastax.driver.core.policies.LoadBalancingPolicy} (through its {@code + * distance} method). * * The distance assigned to a host influences how many connections the driver * maintains towards this host. If for a given host the assigned {@code HostDistance} diff --git a/driver-core/src/main/java/com/datastax/driver/core/KeyspaceMetadata.java b/driver-core/src/main/java/com/datastax/driver/core/KeyspaceMetadata.java index 2d8338e8808..e3ab11d493d 100644 --- a/driver-core/src/main/java/com/datastax/driver/core/KeyspaceMetadata.java +++ b/driver-core/src/main/java/com/datastax/driver/core/KeyspaceMetadata.java @@ -102,7 +102,7 @@ public String exportAsString() { sb.append(asCQLQuery()).append("\n"); for (TableMetadata tm : tables.values()) - sb.append("\n").append(tm.exportAsString()); + sb.append("\n").append(tm.exportAsString()).append("\n"); return sb.toString(); } diff --git a/driver-core/src/main/java/com/datastax/driver/core/PoolingOptions.java b/driver-core/src/main/java/com/datastax/driver/core/PoolingOptions.java index 563344a8718..7f774e55b99 100644 --- a/driver-core/src/main/java/com/datastax/driver/core/PoolingOptions.java +++ b/driver-core/src/main/java/com/datastax/driver/core/PoolingOptions.java @@ -36,6 +36,8 @@ public class PoolingOptions { private static final int DEFAULT_MAX_POOL_LOCAL = 8; private static final int DEFAULT_MAX_POOL_REMOTE = 2; + private final Cluster.Manager manager; + private volatile int minSimultaneousRequestsForLocal = DEFAULT_MIN_REQUESTS; private volatile int minSimultaneousRequestsForRemote = DEFAULT_MIN_REQUESTS; @@ -48,6 +50,10 @@ public class PoolingOptions { private volatile int maxConnectionsForLocal = DEFAULT_MAX_POOL_LOCAL; private volatile int maxConnectionsForRemote = DEFAULT_MAX_POOL_REMOTE; + PoolingOptions(Cluster.Manager manager) { + this.manager = manager; + } + /** * Number of simultaneous requests on a connection below which * connections in excess are reclaimed. @@ -183,13 +189,18 @@ public int getCoreConnectionsPerHost(HostDistance distance) { * @throws IllegalArgumentException if {@code distance == HostDistance.IGNORED}.
*/ public PoolingOptions setCoreConnectionsPerHost(HostDistance distance, int coreConnections) { - // TODO: make sure the pools are updated accordingly switch (distance) { case LOCAL: + int oldLocalCore = coreConnectionsForLocal; coreConnectionsForLocal = coreConnections; + if (oldLocalCore < coreConnectionsForLocal) + manager.ensurePoolsSizing(); break; case REMOTE: + int oldRemoteCore = coreConnectionsForRemote; coreConnectionsForRemote = coreConnections; + if (oldRemoteCore < coreConnectionsForRemote) + manager.ensurePoolsSizing(); break; default: throw new IllegalArgumentException("Cannot set core connections per host for " + distance + " hosts"); @@ -227,7 +238,6 @@ public int getMaxConnectionPerHost(HostDistance distance) { * @throws IllegalArgumentException if {@code distance == HostDistance.IGNORED}. */ public PoolingOptions setMaxConnectionsPerHost(HostDistance distance, int maxConnections) { - // TODO: make sure the pools are updated accordingly switch (distance) { case LOCAL: maxConnectionsForLocal = maxConnections; diff --git a/driver-core/src/main/java/com/datastax/driver/core/ResultSet.java b/driver-core/src/main/java/com/datastax/driver/core/ResultSet.java index 0b39a74abd1..a0065f11442 100644 --- a/driver-core/src/main/java/com/datastax/driver/core/ResultSet.java +++ b/driver-core/src/main/java/com/datastax/driver/core/ResultSet.java @@ -190,10 +190,6 @@ public void onSet(Connection connection, Message.Response response) { ResultMessage rm = (ResultMessage)response; switch (rm.kind) { case SET_KEYSPACE: - // TODO: I think there is a problem if someone set - // a keyspace, then drop it. But that basically - // means we should reset the keyspace to null in that case. - // propagate the keyspace change to other connections session.poolsState.setKeyspace(((ResultMessage.SetKeyspace)rm).keyspace); set(ResultSet.fromMessage(rm, session)); @@ -203,22 +199,28 @@ public void onSet(Connection connection, Message.Response response) { ResultSet rs = ResultSet.fromMessage(rm, session); switch (scc.change) { case CREATED: - if (scc.columnFamily.isEmpty()) + if (scc.columnFamily.isEmpty()) { session.cluster.manager.refreshSchema(connection, Future.this, rs, null, null); - else + } else { session.cluster.manager.refreshSchema(connection, Future.this, rs, scc.keyspace, null); + } break; case DROPPED: - if (scc.columnFamily.isEmpty()) + if (scc.columnFamily.isEmpty()) { + // If that's the keyspace we are logged into, reset it to null (it shouldn't really happen, but ...)
+ if (scc.keyspace.equals(session.poolsState.keyspace)) + session.poolsState.setKeyspace(null); session.cluster.manager.refreshSchema(connection, Future.this, rs, null, null); - else + } else { session.cluster.manager.refreshSchema(connection, Future.this, rs, scc.keyspace, null); + } break; case UPDATED: - if (scc.columnFamily.isEmpty()) + if (scc.columnFamily.isEmpty()) { session.cluster.manager.refreshSchema(connection, Future.this, rs, scc.keyspace, null); - else + } else { session.cluster.manager.refreshSchema(connection, Future.this, rs, scc.keyspace, scc.columnFamily); + } break; } break; @@ -231,12 +233,14 @@ public void onSet(Connection connection, Message.Response response) { setException(convertException(((ErrorMessage)response).error)); break; default: - // TODO: handle errors (set the connection to defunct as this mean it is in a bad state) - throw new RuntimeException(); + // This means we probably have a bad node, so defunct the connection + connection.defunct(new ConnectionException(connection.address, String.format("Got unexpected %s response", response.type))); + setException(new DriverInternalError(String.format("Got unexpected %s response from %s", response.type, connection.address))); + break; } - } catch (Exception e) { - // TODO: do better - throw new RuntimeException(e); + } catch (RuntimeException e) { + // If we hit a bug here, the client will not see it, so it's better to forward the error + setException(new DriverInternalError("Unexpected error while processing response from " + connection.address, e)); } } @@ -368,8 +372,7 @@ static Exception convertException(org.apache.cassandra.exceptions.TransportExcep case INVALID: return new InvalidQueryException(te.getMessage()); case CONFIG_ERROR: - // TODO: I don't know if it's worth having a specific exception for that - return new InvalidQueryException(te.getMessage()); + return new InvalidConfigurationInQueryException(te.getMessage()); case ALREADY_EXISTS: org.apache.cassandra.exceptions.AlreadyExistsException aee = (org.apache.cassandra.exceptions.AlreadyExistsException)te; return new AlreadyExistsException(aee.ksName, aee.cfName); diff --git a/driver-core/src/main/java/com/datastax/driver/core/Session.java b/driver-core/src/main/java/com/datastax/driver/core/Session.java index 0f920ae0a8c..a5a7f806db2 100644 --- a/driver-core/src/main/java/com/datastax/driver/core/Session.java +++ b/driver-core/src/main/java/com/datastax/driver/core/Session.java @@ -95,7 +95,7 @@ public ResultSet execute(String query, ConsistencyLevel consistency) throws NoHo * by this method. * * @param query the CQL query to execute. - * @param queryOptions the options to use for this query. This includes at + * @param options the options to use for this query. This includes at * least the consistency level for the operation. * @return the result of the query. That result will never be null but can * be empty (and will be for any non SELECT query).
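With the CONFIG_ERROR mapping above, schema statements carrying bad options now surface as a dedicated InvalidQueryException subtype. A hedged usage sketch (the keyspace name and the bogus replication options are purely illustrative):

    // InvalidConfigurationInQueryException extends InvalidQueryException,
    // so it must be caught before its parent.
    try {
        session.execute("CREATE KEYSPACE bad_ks WITH replication = { 'class' : 'NoSuchStrategy' }");
    } catch (InvalidConfigurationInQueryException e) {
        System.err.println("Schema options rejected: " + e.getMessage());
    } catch (InvalidQueryException e) {
        System.err.println("Query invalid for another reason: " + e.getMessage());
    } catch (NoHostAvailableException e) {
        System.err.println("No node could be contacted: " + e.getMessage());
    }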
*/ @@ -241,8 +241,8 @@ public ResultSet executePrepared(BoundStatement stmt, ConsistencyLevel consisten * database. * * @param stmt the prepared statement with values for its bound variables. - * @param queryOptions the options to use for this query. This includes at - * least the consistency level for the operation. + * @param options the options to use for this query. This includes at least + * the consistency level for the operation. * @return the result of the query. That result will never be null but can * be empty (and will be for any non SELECT query). * @@ -352,6 +352,7 @@ private PreparedStatement toPreparedStatement(String query, Connection.Future fu response = future.get(); } catch (InterruptedException e) { // TODO: decide wether we want to expose Interrupted exceptions or not + throw new RuntimeException(e); } } } catch (ExecutionException e) { @@ -410,7 +411,6 @@ public ExecutorService executor() { public Manager(Cluster cluster, Collection hosts) { this.cluster = cluster; - // TODO: consider the use of NonBlockingHashMap this.pools = new ConcurrentHashMap(hosts.size()); this.loadBalancer = cluster.manager.configuration.getPolicies().getLoadBalancingPolicyFactory().create(hosts); this.poolsState = new HostConnectionPool.PoolState(); @@ -503,8 +503,7 @@ public void setKeyspace(String keyspace) throws NoHostAvailableException { try { executeQuery(new QueryMessage("use " + keyspace, ConsistencyLevel.DEFAULT_CASSANDRA_CL), new QueryOptions()).get(); } catch (InterruptedException e) { - // TODO: do we want to handle interrupted exception in a better way? - throw new DriverInternalError("Hey! I was waiting!", e); + // If we're interrupted, then fine, we stop waiting, but the user shouldn't complain if the keyspace is not set. } catch (ExecutionException e) { Throwable cause = e.getCause(); // A USE query should never fail unless we cannot contact a node diff --git a/driver-core/src/main/java/com/datastax/driver/core/TableMetadata.java b/driver-core/src/main/java/com/datastax/driver/core/TableMetadata.java index 4aee92232e0..bbc3ce6a12b 100644 --- a/driver-core/src/main/java/com/datastax/driver/core/TableMetadata.java +++ b/driver-core/src/main/java/com/datastax/driver/core/TableMetadata.java @@ -263,14 +263,14 @@ void add(ColumnMetadata column) { public String exportAsString() { StringBuilder sb = new StringBuilder(); - sb.append(asCQLQuery(true)).append("\n"); + sb.append(asCQLQuery(true)); for (ColumnMetadata column : columns.values()) { ColumnMetadata.IndexMetadata index = column.getIndex(); if (index == null) continue; - sb.append(index.asCQLQuery()).append("\n"); + sb.append("\n").append(index.asCQLQuery()); } return sb.toString(); } @@ -325,17 +325,34 @@ private String asCQLQuery(boolean formatted) { } else { sb.append(") WITH read_repair_chance = ").append(options.readRepair); } - and(sb, formatted).append("local_read_repair_chance = ").append(options.localReadRepair); + and(sb, formatted).append("dclocal_read_repair_chance = ").append(options.localReadRepair); and(sb, formatted).append("replicate_on_write = ").append(options.replicateOnWrite); and(sb, formatted).append("gc_grace_seconds = ").append(options.gcGrace); and(sb, formatted).append("bloom_filter_fp_chance = ").append(options.bfFpChance); and(sb, formatted).append("caching = ").append(options.caching); if (options.comment != null) and(sb, formatted).append("comment = '").append(options.comment).append("'"); + and(sb, formatted).append("compaction = ").append(formatOptionMap(options.compaction)); + and(sb, 
formatted).append("compression = ").append(formatOptionMap(options.compression)); + sb.append(";"); + return sb.toString(); + } - // TODO: finish (compaction and compression) - newLine(sb, formatted); - + private static String formatOptionMap(Map m) { + StringBuilder sb = new StringBuilder(); + sb.append("{ "); + boolean first = true; + for (Map.Entry entry : m.entrySet()) { + if (first) first = false; else sb.append(", "); + sb.append("'").append(entry.getKey()).append("'"); + sb.append(" : "); + try { + sb.append(Integer.parseInt(entry.getValue())); + } catch (NumberFormatException e) { + sb.append("'").append(entry.getValue()).append("'"); + } + } + sb.append(" }"); return sb.toString(); } @@ -399,12 +416,10 @@ public static class Options { this.bfFpChance = row.isNull(BF_FP_CHANCE) ? DEFAULT_BF_FP_CHANCE : row.getDouble(BF_FP_CHANCE); this.caching = row.getString(CACHING); - // TODO: this should change (split options and handle min/max threshold in particular) - compaction.put("class", row.getString(COMPACTION_CLASS)); - compaction.put("options", row.getString(COMPACTION_OPTIONS)); + this.compaction.put("class", row.getString(COMPACTION_CLASS)); + this.compaction.putAll(fromJsonMap(row.getString(COMPACTION_OPTIONS))); - // TODO: this should split the parameters - compression.put("params", row.getString(COMPRESSION_PARAMS)); + this.compression.putAll(fromJsonMap(row.getString(COMPRESSION_PARAMS))); } /** diff --git a/driver-core/src/main/java/com/datastax/driver/core/exceptions/InvalidConfigurationInQueryException.java b/driver-core/src/main/java/com/datastax/driver/core/exceptions/InvalidConfigurationInQueryException.java new file mode 100644 index 00000000000..a1513bfd5f5 --- /dev/null +++ b/driver-core/src/main/java/com/datastax/driver/core/exceptions/InvalidConfigurationInQueryException.java @@ -0,0 +1,15 @@ +package com.datastax.driver.core.exceptions; + +/** + * A specific invalid query exception that indicates that the query is invalid + * because of some configuration problem. + *
+ * This is generally thrown by queries that manipulate the schema (CREATE and + * ALTER) when the required configuration options are invalid. */ +public class InvalidConfigurationInQueryException extends InvalidQueryException { + + public InvalidConfigurationInQueryException(String msg) { + super(msg); + } +} diff --git a/driver-core/src/main/java/com/datastax/driver/core/exceptions/package-info.java b/driver-core/src/main/java/com/datastax/driver/core/exceptions/package-info.java new file mode 100644 index 00000000000..b9f04c5b4e2 --- /dev/null +++ b/driver-core/src/main/java/com/datastax/driver/core/exceptions/package-info.java @@ -0,0 +1,4 @@ +/** + * Exceptions thrown by the DataStax Java driver for Cassandra. + */ +package com.datastax.driver.core.exceptions; diff --git a/driver-core/src/main/java/com/datastax/driver/core/package-info.java b/driver-core/src/main/java/com/datastax/driver/core/package-info.java new file mode 100644 index 00000000000..d5cbc9d6c2a --- /dev/null +++ b/driver-core/src/main/java/com/datastax/driver/core/package-info.java @@ -0,0 +1,6 @@ +/** + * The main package for the DataStax Java driver for Cassandra. *
+ * The main entry for this package is the {@code Cluster} class. + */ +package com.datastax.driver.core; diff --git a/driver-core/src/main/java/com/datastax/driver/core/policies/LoadBalancingPolicy.java b/driver-core/src/main/java/com/datastax/driver/core/policies/LoadBalancingPolicy.java index 053f4dae2a8..8b47763b214 100644 --- a/driver-core/src/main/java/com/datastax/driver/core/policies/LoadBalancingPolicy.java +++ b/driver-core/src/main/java/com/datastax/driver/core/policies/LoadBalancingPolicy.java @@ -19,9 +19,9 @@ * find which host to query first, and which hosts to use as failover. * *
- * The {@code LoadBalancingPolicy} is a {@link Host.StateListener} and is thus - * informed of hosts up/down events. For efficiency purposes, the policy is - * expected to exclude down hosts from query plans. + * The {@code LoadBalancingPolicy} is a {@link com.datastax.driver.core.Host.StateListener} + * and is thus informed of hosts up/down events. For efficiency purposes, the + * policy is expected to exclude down hosts from query plans. */ public interface LoadBalancingPolicy extends Host.StateListener { diff --git a/driver-core/src/main/java/com/datastax/driver/core/policies/package-info.java b/driver-core/src/main/java/com/datastax/driver/core/policies/package-info.java new file mode 100644 index 00000000000..ad7bd35d5aa --- /dev/null +++ b/driver-core/src/main/java/com/datastax/driver/core/policies/package-info.java @@ -0,0 +1,4 @@ +/** + * Policies that allow to control some of the behavior of the DataStax Java driver for Cassandra. + */ +package com.datastax.driver.core.policies; diff --git a/driver-core/src/test/java/com/datastax/driver/core/CCMBridge.java b/driver-core/src/test/java/com/datastax/driver/core/CCMBridge.java index 2ca84511f30..22917aa6fc4 100644 --- a/driver-core/src/test/java/com/datastax/driver/core/CCMBridge.java +++ b/driver-core/src/test/java/com/datastax/driver/core/CCMBridge.java @@ -98,7 +98,6 @@ private void execute(String command, Object... args) { int retValue = p.waitFor(); if (retValue != 0) { - // TODO: try to gather what the problem was BufferedReader outReader = new BufferedReader(new InputStreamReader(p.getInputStream())); BufferedReader errReader = new BufferedReader(new InputStreamReader(p.getErrorStream())); @@ -162,7 +161,6 @@ public static void discardCluster() { } } - @Before public void maybeCreateSchema() throws NoHostAvailableException { diff --git a/driver-core/src/test/java/com/datastax/driver/core/SchemaTest.java b/driver-core/src/test/java/com/datastax/driver/core/SchemaTest.java index dea3c109b02..aa24909b8a8 100644 --- a/driver-core/src/test/java/com/datastax/driver/core/SchemaTest.java +++ b/driver-core/src/test/java/com/datastax/driver/core/SchemaTest.java @@ -13,6 +13,8 @@ public class SchemaTest extends CCMBridge.PerClassSingleNodeCluster { private static final Map cql3 = new HashMap(); private static final Map compact = new HashMap(); + private static String withOptions; + protected Collection getTableDefinitions() { String sparse = "CREATE TABLE sparse (\n" @@ -22,7 +24,7 @@ protected Collection getTableDefinitions() { + " l list,\n" + " v int,\n" + " PRIMARY KEY (k, c1, c2)\n" - + ")"; + + ");"; String st = "CREATE TABLE static (\n" + " k text,\n" @@ -30,7 +32,7 @@ protected Collection getTableDefinitions() { + " m map,\n" + " v int,\n" + " PRIMARY KEY (k)\n" - + ")"; + + ");"; String compactStatic = "CREATE TABLE compact_static (\n" + " k text,\n" @@ -38,14 +40,14 @@ protected Collection getTableDefinitions() { + " t timeuuid,\n" + " v int,\n" + " PRIMARY KEY (k)\n" - + ") WITH COMPACT STORAGE"; + + ") WITH COMPACT STORAGE;"; String compactDynamic = "CREATE TABLE compact_dynamic (\n" + " k text,\n" + " c int,\n" + " v timeuuid,\n" + " PRIMARY KEY (k, c)\n" - + ") WITH COMPACT STORAGE"; + + ") WITH COMPACT STORAGE;"; String compactComposite = "CREATE TABLE compact_composite (\n" + " k text,\n" @@ -54,7 +56,7 @@ protected Collection getTableDefinitions() { + " c3 double,\n" + " v timeuuid,\n" + " PRIMARY KEY (k, c1, c2, c3)\n" - + ") WITH COMPACT STORAGE"; + + ") WITH COMPACT STORAGE;"; cql3.put("sparse", sparse); 
cql3.put("static", st); @@ -62,17 +64,32 @@ compact.put("compact_dynamic", compactDynamic); compact.put("compact_composite", compactComposite); + withOptions = "CREATE TABLE with_options (\n" + + " k text,\n" + + " i int,\n" + + " PRIMARY KEY (k)\n" + + ") WITH read_repair_chance = 0.5\n" + + " AND dclocal_read_repair_chance = 0.6\n" + + " AND replicate_on_write = true\n" + + " AND gc_grace_seconds = 42\n" + + " AND bloom_filter_fp_chance = 0.01\n" + + " AND caching = ALL\n" + + " AND comment = 'My awesome table'\n" + + " AND compaction = { 'class' : 'org.apache.cassandra.db.compaction.LeveledCompactionStrategy', 'sstable_size_in_mb' : 15 }\n" + + " AND compression = { 'sstable_compression' : 'org.apache.cassandra.io.compress.SnappyCompressor', 'chunk_length_kb' : 128 };"; + List allDefs = new ArrayList(); allDefs.addAll(cql3.values()); allDefs.addAll(compact.values()); + allDefs.add(withOptions); return allDefs; } private static String stripOptions(String def, boolean keepFirst) { if (keepFirst) - return def.split("\n AND ")[0]; + return def.split("\n AND ")[0] + ";"; else - return def.split(" WITH ")[0]; + return def.split(" WITH ")[0] + ";"; } // Note: this test is a bit fragile in the sense that it relies on the exact @@ -97,4 +114,11 @@ public void schemaExportTest() { assertEquals(def, stripOptions(metadata.getTable(table).exportAsString(), true)); } } + + // Same remark as the preceding test + @Test + public void schemaExportOptionsTest() { + TableMetadata metadata = cluster.getMetadata().getKeyspace(TestUtils.SIMPLE_KEYSPACE).getTable("with_options"); + assertEquals(withOptions, metadata.exportAsString()); + } } diff --git a/driver-core/src/test/java/com/datastax/driver/core/SessionTest.java b/driver-core/src/test/java/com/datastax/driver/core/SessionTest.java index f18d2a27266..61aad733199 100644 --- a/driver-core/src/test/java/com/datastax/driver/core/SessionTest.java +++ b/driver-core/src/test/java/com/datastax/driver/core/SessionTest.java @@ -6,7 +6,6 @@ import static org.junit.Assert.*; import com.datastax.driver.core.exceptions.*; -import static com.datastax.driver.core.TestUtils.*; /** * Simple test of the Session methods against a one node cluster.
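The export tests above pin down the exact output format; as a usage sketch, the same metadata API can drive a simple schema dump (the keyspace and table names are illustrative, and cluster is assumed to be an already connected Cluster instance):

    // Print the CQL needed to recreate a table, options and indexes included,
    // using the TableMetadata.exportAsString() method exercised above.
    TableMetadata table = cluster.getMetadata()
                                 .getKeyspace("ks")
                                 .getTable("with_options");
    System.out.println(table.exportAsString());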
@@ -70,4 +69,16 @@ private static void checkExecuteResultSet(ResultSet rs, String key) { assertEquals(42, row.getInt("i")); assertEquals(24.03f, row.getFloat("f"), 0.1f); } + + @Test + public void setAndDropKeyspace() throws Exception { + // Check that if someone set a keyspace and then drop it, we recognize + // that fact and don't assume he is still set to this keyspace + + session.execute(String.format(TestUtils.CREATE_KEYSPACE_SIMPLE_FORMAT, "to_drop", 1)); + session.execute("USE to_drop"); + session.execute("DROP KEYSPACE to_drop"); + + assertEquals(null, session.manager.poolsState.keyspace); + } } From 23c84d6f13f3b0d07a7117055060c0ce5eb9d954 Mon Sep 17 00:00:00 2001 From: Sylvain Lebresne Date: Tue, 13 Nov 2012 11:30:24 +0100 Subject: [PATCH 070/719] Rework API --- driver-core/pom.xml | 4 +- .../datastax/driver/core/BoundStatement.java | 4 +- .../datastax/driver/core/CQLStatement.java | 18 ++ .../driver/core/ControlConnection.java | 2 +- .../driver/core/PreparedStatement.java | 4 +- .../java/com/datastax/driver/core/Query.java | 78 ++++++ .../datastax/driver/core/QueryOptions.java | 72 ------ .../com/datastax/driver/core/QueryTrace.java | 4 +- .../driver/core/RetryingCallback.java | 4 +- .../com/datastax/driver/core/Session.java | 242 +++--------------- .../datastax/driver/core/SimpleStatement.java | 18 ++ .../core/policies/LoadBalancingPolicy.java | 8 +- .../com/datastax/driver/core/CCMBridge.java | 2 +- .../driver/core/PreparedStatementTest.java | 8 +- .../com/datastax/driver/core/SessionTest.java | 19 +- 15 files changed, 181 insertions(+), 306 deletions(-) create mode 100644 driver-core/src/main/java/com/datastax/driver/core/CQLStatement.java create mode 100644 driver-core/src/main/java/com/datastax/driver/core/Query.java delete mode 100644 driver-core/src/main/java/com/datastax/driver/core/QueryOptions.java create mode 100644 driver-core/src/main/java/com/datastax/driver/core/SimpleStatement.java diff --git a/driver-core/pom.xml b/driver-core/pom.xml index 8a54a050a87..6307f7a21dc 100644 --- a/driver-core/pom.xml +++ b/driver-core/pom.xml @@ -27,7 +27,7 @@ org.apache.cassandra cassandra-thrift - 1.2.0-SNAPSHOT + 1.2.0-beta2 @@ -54,7 +54,7 @@ default default - cassandra-1.2.0-beta2 + 1.2.0-beta2 true diff --git a/driver-core/src/main/java/com/datastax/driver/core/BoundStatement.java b/driver-core/src/main/java/com/datastax/driver/core/BoundStatement.java index afd2321ce95..482cc6d108c 100644 --- a/driver-core/src/main/java/com/datastax/driver/core/BoundStatement.java +++ b/driver-core/src/main/java/com/datastax/driver/core/BoundStatement.java @@ -14,9 +14,9 @@ * A prepared statement with values bound to the bind variables. *
* Once a BoundStatement has values for all the variables of the {@link PreparedStatement} - * it has been created from, it can executed through {@link Session#executePrepared}. + * it has been created from, it can be executed (through {@link Session#execute}). */ -public class BoundStatement { +public class BoundStatement extends Query { final PreparedStatement statement; final ByteBuffer[] values; diff --git a/driver-core/src/main/java/com/datastax/driver/core/CQLStatement.java b/driver-core/src/main/java/com/datastax/driver/core/CQLStatement.java new file mode 100644 index 00000000000..38083d78b71 --- /dev/null +++ b/driver-core/src/main/java/com/datastax/driver/core/CQLStatement.java @@ -0,0 +1,18 @@ +package com.datastax.driver.core; + +/** + * A non-prepared CQL statement. *
+ * This class represents a query string along with query options. This class + * can be extended but {@link SimpleStatement} is provided to build a {@code + * CQLStatement} directly from its query string. + */ +public abstract class CQLStatement extends Query { + + /** + * The query string for this statement. + * + * @return a valid CQL query string. + */ + public abstract String getQueryString(); +} diff --git a/driver-core/src/main/java/com/datastax/driver/core/ControlConnection.java b/driver-core/src/main/java/com/datastax/driver/core/ControlConnection.java index 76811b10d0e..de24f76c62e 100644 --- a/driver-core/src/main/java/com/datastax/driver/core/ControlConnection.java +++ b/driver-core/src/main/java/com/datastax/driver/core/ControlConnection.java @@ -101,7 +101,7 @@ private void setNewConnection(Connection newConnection) { private Connection reconnectInternal() throws NoHostAvailableException { - Iterator iter = balancingPolicy.newQueryPlan(QueryOptions.DEFAULT); + Iterator iter = balancingPolicy.newQueryPlan(Query.DEFAULT); Map errors = null; while (iter.hasNext()) { Host host = iter.next(); diff --git a/driver-core/src/main/java/com/datastax/driver/core/PreparedStatement.java b/driver-core/src/main/java/com/datastax/driver/core/PreparedStatement.java index a8856f88cd7..17ac49a36f2 100644 --- a/driver-core/src/main/java/com/datastax/driver/core/PreparedStatement.java +++ b/driver-core/src/main/java/com/datastax/driver/core/PreparedStatement.java @@ -11,8 +11,8 @@ *
* A prepared statement can be executed once concrete values have been provided * for the bound variables. The pair of a prepared statement and values for its - * bound variables is a BoundStatement and can be executed by - * {@link Session#executePrepared}. + * bound variables is a BoundStatement and can be executed (by + * {@link Session#execute}). */ public class PreparedStatement { diff --git a/driver-core/src/main/java/com/datastax/driver/core/Query.java b/driver-core/src/main/java/com/datastax/driver/core/Query.java new file mode 100644 index 00000000000..6bca927dc97 --- /dev/null +++ b/driver-core/src/main/java/com/datastax/driver/core/Query.java @@ -0,0 +1,78 @@ +package com.datastax.driver.core; + +/** + * An executable query. *
+ * This represents either a {@link CQLStatement} or a {@link BoundStatement} + * along with the query options (consistency level, whether to trace the query, ...). */ +public abstract class Query { + + // An exception to the CQLStatement or BoundStatement rule above. This is + // used when preparing a statement and for other internal queries. Do not expose publicly. + static final Query DEFAULT = new Query() {}; + + private volatile ConsistencyLevel consistency; + private volatile boolean traceQuery; + + // We don't want to expose the constructor, because the code relies on this being only subclassed by CQLStatement and BoundStatement + Query() { + this.consistency = ConsistencyLevel.ONE; + } + + /** + * Sets the consistency level for the query. *
+ * The default consistency level, if this method is not called, is ConsistencyLevel.ONE. + * + * @param consistency the consistency level to set. + * @return this {@code Query} object. + */ + public Query setConsistencyLevel(ConsistencyLevel consistency) { + this.consistency = consistency; + return this; + } + + /** + * The consistency level. + * + * @return the consistency level. Returns {@code ConsistencyLevel.ONE} if no + * consistency level has been specified. + */ + public ConsistencyLevel getConsistencyLevel() { + return consistency; + } + + /** + * Enable tracing for this query. + * + * By default (i.e. unless you call this method), tracing is not enabled. + * + * @return this {@code Query} object. + */ + public Query setTracing() { + this.traceQuery = true; + return this; + } + + /** + * Disable tracing for this query. + * + * @return this {@code Query} object. + */ + public Query unsetTracing() { + this.traceQuery = false; + return this; + } + + /** + * Whether tracing is enabled for this query or not. + * + * @return {@code true} if this query has tracing enabled, {@code false} + * otherwise. + */ + public boolean isTracing() { + return traceQuery; + } + +} diff --git a/driver-core/src/main/java/com/datastax/driver/core/QueryOptions.java b/driver-core/src/main/java/com/datastax/driver/core/QueryOptions.java deleted file mode 100644 index 15824285947..00000000000 --- a/driver-core/src/main/java/com/datastax/driver/core/QueryOptions.java +++ /dev/null @@ -1,72 +0,0 @@ -package com.datastax.driver.core; - -public class QueryOptions { - - // Don't expose that publicly as this would break if someone uses - // traceQuery (and don't use internally if traceQuery is set). - static final QueryOptions DEFAULT = new QueryOptions(); - - protected final ConsistencyLevel consistency; - protected volatile boolean traceQuery; - - /** - * Creates a new query options object with default consistency level - * (ConsistencyLevel.ONE). - */ - public QueryOptions() { - this(null); - } - - /** - * Creates a new query options ojbect using the provided consistency. - * - * @param consistency the consistency level to use for the query. If {@code - * null} is provided and the request requires a consistency level, - * ConsistencyLevel.ONE is used. - */ - public QueryOptions(ConsistencyLevel consistency) { - this.consistency = consistency; - } - - /** - * The consistency level. - * - * @return the consistency level. Returns {@code null} if no consistency - * level has been specified. - */ - public ConsistencyLevel getConsistencyLevel() { - return consistency; - } - - /** - * Enable tracing for the query using these options. - * - * By default (i.e. unless you call this method), tracing is not enabled. - * - * @return this {@code QueryOptions} object. - */ - public QueryOptions setTracing() { - traceQuery = true; - return this; - } - - /** - * Disable tracing for the query using these options. - * - * @return this {@code QueryOptions} object. - */ - public QueryOptions unsetTracing() { - traceQuery = false; - return this; - } - - /** - * Whether to trace the query or not. - * - * @return {@code true} if this QueryOptions has query tracing enable, - * {@code false} otherwise.
- */ - public boolean isTracing() { - return traceQuery; - } -} diff --git a/driver-core/src/main/java/com/datastax/driver/core/QueryTrace.java b/driver-core/src/main/java/com/datastax/driver/core/QueryTrace.java index cb75159723d..5044b07ea69 100644 --- a/driver-core/src/main/java/com/datastax/driver/core/QueryTrace.java +++ b/driver-core/src/main/java/com/datastax/driver/core/QueryTrace.java @@ -148,8 +148,8 @@ private void maybeFetchTrace() { private void doFetchTrace() { try { - ResultSet.Future sessionsFuture = session.executeQuery(new QueryMessage(String.format(SELECT_SESSIONS_FORMAT, traceId), ConsistencyLevel.DEFAULT_CASSANDRA_CL), QueryOptions.DEFAULT); - ResultSet.Future eventsFuture = session.executeQuery(new QueryMessage(String.format(SELECT_EVENTS_FORMAT, traceId), ConsistencyLevel.DEFAULT_CASSANDRA_CL), QueryOptions.DEFAULT); + ResultSet.Future sessionsFuture = session.executeQuery(new QueryMessage(String.format(SELECT_SESSIONS_FORMAT, traceId), ConsistencyLevel.DEFAULT_CASSANDRA_CL), Query.DEFAULT); + ResultSet.Future eventsFuture = session.executeQuery(new QueryMessage(String.format(SELECT_EVENTS_FORMAT, traceId), ConsistencyLevel.DEFAULT_CASSANDRA_CL), Query.DEFAULT); CQLRow sessRow = sessionsFuture.get().fetchOne(); if (sessRow != null) { diff --git a/driver-core/src/main/java/com/datastax/driver/core/RetryingCallback.java b/driver-core/src/main/java/com/datastax/driver/core/RetryingCallback.java index 5427ec32a5e..9515e594a94 100644 --- a/driver-core/src/main/java/com/datastax/driver/core/RetryingCallback.java +++ b/driver-core/src/main/java/com/datastax/driver/core/RetryingCallback.java @@ -47,11 +47,11 @@ class RetryingCallback implements Connection.ResponseCallback { private volatile Map errors; - public RetryingCallback(Session.Manager manager, Connection.ResponseCallback callback, QueryOptions queryOptions) { + public RetryingCallback(Session.Manager manager, Connection.ResponseCallback callback, Query query) { this.manager = manager; this.callback = callback; - this.queryPlan = manager.loadBalancer.newQueryPlan(queryOptions); + this.queryPlan = manager.loadBalancer.newQueryPlan(query); } public void sendRequest() { diff --git a/driver-core/src/main/java/com/datastax/driver/core/Session.java b/driver-core/src/main/java/com/datastax/driver/core/Session.java index a5a7f806db2..f21b4b8982b 100644 --- a/driver-core/src/main/java/com/datastax/driver/core/Session.java +++ b/driver-core/src/main/java/com/datastax/driver/core/Session.java @@ -40,7 +40,7 @@ public class Session { /** * Execute the provided query. * - * This method is a shortcut for {@code execute(query, null)}. + * This method is a shortcut for {@code execute(new SimpleStatement(query))}. * * @param query the CQL query to execute. * @return the result of the query. That result will never be null but can @@ -55,33 +55,7 @@ public class Session { * unauthorized or any other validation problem). */ public ResultSet execute(String query) throws NoHostAvailableException { - return execute(query, QueryOptions.DEFAULT); - } - - /** - * Execute the provided query. - * - * This method is a shortcut for {@code execute(query, new QueryOptions(consistency))}. - * - * @param query the CQL query to execute. - * @param consistency the consistency level for the operation. If the query - * doesn't need a consistency level (USE, CREATE, ALTER, DROP and TRUNCATE - * queries for instance), this argument is ignored and null can be - * provided. 
However, if null is provided while the query requires a - * consistency level, the default consistency level of ONE is used. - * @return the result of the query. That result will never be null but can - * be empty (and will be for any non SELECT query). - * - * @throws NoHostAvailableException if no host in the cluster can be - * contacted successfully to execute this query. - * @throws QueryExecutionException if the query triggered an execution - * exception, i.e. an exception thrown by Cassandra when it cannot execute - * the query with the requested consistency level successfully. - * @throws QueryValidationException if the query if invalid (syntax error, - * unauthorized or any other validation problem). - */ - public ResultSet execute(String query, ConsistencyLevel consistency) throws NoHostAvailableException { - return execute(query, new QueryOptions(consistency)); + return execute(new SimpleStatement(query)); } /** @@ -94,9 +68,10 @@ public ResultSet execute(String query, ConsistencyLevel consistency) throws NoHo * guarantee that if the request is invalid, an exception will be thrown * by this method. * - * @param query the CQL query to execute. - * @param options the options to use for this query. This includes at - * least the consistency level for the operation. + * @param query the CQL query to execute (that can be either a {@code + * CQLStatement} or a {@code BoundStatement}). If it is a {@code + * BoundStatement}, all variables must have been bound (the statement must + * be ready). * @return the result of the query. That result will never be null but can * be empty (and will be for any non SELECT query). * @@ -107,40 +82,24 @@ public ResultSet execute(String query, ConsistencyLevel consistency) throws NoHo * the query with the requested consistency level successfully. * @throws QueryValidationException if the query if invalid (syntax error, * unauthorized or any other validation problem). + * @throws IllegalStateException if {@code query} is a {@code BoundStatement} + * but {@code !query.isReady()}. */ - public ResultSet execute(String query, QueryOptions options) throws NoHostAvailableException { - return executeAsync(query, options).getUninterruptibly(); + public ResultSet execute(Query query) throws NoHostAvailableException { + return executeAsync(query).getUninterruptibly(); } /** * Execute the provided query asynchronously. * - * This method is a shortcut for {@code executeAsync(query, null)}. + * This method is a shortcut for {@code executeAsync(new SimpleStatement(query))}. * * @param query the CQL query to execute. * @return the result of the query. That result will never be null but can * be empty (and will be for any non SELECT query). */ public ResultSet.Future executeAsync(String query) { - return executeAsync(query, QueryOptions.DEFAULT); - } - - /** - * Execute the provided query asynchronously. - * - * This method is a shortcut for {@code executeAsync(query, new QueryOptions(consistency))}. - * - * @param query the CQL query to execute. - * @param consistency the consistency level for the operation. If the query - * doesn't need a consistency level (USE, CREATE, ALTER, DROP and TRUNCATE - * queries for instance), this argument is ignored and null can be - * provided. However, if null is provided while the query requires a - * consistency level, the default consistency level of ONE is used. - * @return the result of the query. That result will never be null but can - * be empty (and will be for any non SELECT query). 
- */ - public ResultSet.Future executeAsync(String query, ConsistencyLevel consistency) { - return executeAsync(query, new QueryOptions(consistency)); + return executeAsync(new SimpleStatement(query)); } /** @@ -156,14 +115,29 @@ public ResultSet.Future executeAsync(String query, ConsistencyLevel consistency) * DELETE), you will need to access the ResultSet (i.e. call any of its * method) to make sure the query was successful. * - * @param query the CQL query to execute. - * @param options the options to use for this query. This includes at least - * the consistency level for the operation. + * @param query the CQL query to execute (that can be either a {@code + * CQLStatement} or a {@code BoundStatement}). If it is a {@code + * BoundStatement}, all variables must have been bound (the statement must + * be ready). * @return the result of the query. That result will never be null but can * be empty (and will be for any non SELECT query). + * + * @throws IllegalStateException if {@code query} is a {@code BoundStatement} + * but {@code !query.isReady()}. */ - public ResultSet.Future executeAsync(String query, QueryOptions options) { - return manager.executeQuery(new QueryMessage(query, ConsistencyLevel.toCassandraCL(options.getConsistencyLevel())), options); + public ResultSet.Future executeAsync(Query query) { + + if (query instanceof CQLStatement) { + return manager.executeQuery(new QueryMessage(((CQLStatement)query).getQueryString(), ConsistencyLevel.toCassandraCL(query.getConsistencyLevel())), query); + } else { + assert query instanceof BoundStatement : query; + + BoundStatement bs = (BoundStatement)query; + if (!bs.isReady()) + throw new IllegalStateException("Some bind variables haven't been bound in the provided statement"); + + return manager.executeQuery(new ExecuteMessage(bs.statement.id, Arrays.asList(bs.values), ConsistencyLevel.toCassandraCL(query.getConsistencyLevel())), query); + } } /** @@ -177,148 +151,10 @@ public ResultSet.Future executeAsync(String query, QueryOptions options) { */ public PreparedStatement prepare(String query) throws NoHostAvailableException { Connection.Future future = new Connection.Future(new PrepareMessage(query)); - manager.execute(future, QueryOptions.DEFAULT); + manager.execute(future, Query.DEFAULT); return toPreparedStatement(query, future); } - /** - * Execute a prepared statement that had values provided for its bound - * variables. - * - * This method is a shortcut for {@code executePrepared(stmt, null)}. - * - * @param stmt the prepared statement with values for its bound variables. - * @return the result of the query. That result will never be null but can - * be empty (and will be for any non SELECT query). - * - * @throws IllegalStateException if {@code !stmt.ready()}. - * @throws NoHostAvailableException if no host in the cluster can be - * contacted successfully to execute this query. - * @throws QueryExecutionException if the query triggered an execution - * exception, i.e. an exception thrown by Cassandra when it cannot execute - * the query with the requested consistency level successfully. - * @throws QueryValidationException if the query if invalid (syntax error, - * unauthorized or any other validation problem). - */ - public ResultSet executePrepared(BoundStatement stmt) throws NoHostAvailableException { - return executePrepared(stmt, new QueryOptions()); - } - - /** - * Execute a prepared statement that had values provided for its bound - * variables. 
- * - * This method is a shortcut for {@code executePrepared(stmt, new QueryOptions(consistency))}. - * - * @param stmt the prepared statement with values for its bound variables. - * @param consistency the consistency level for the operation. If the query - * doesn't need a consistency level (USE, CREATE, ALTER, DROP and TRUNCATE - * queries for instance), this argument is ignored and null can be - * provided. However, if null is provided while the query requires a - * consistency level, the default consistency level of ONE is used. - * @return the result of the query. That result will never be null but can - * be empty (and will be for any non SELECT query). - * - * @throws IllegalStateException if {@code !stmt.ready()}. - * @throws NoHostAvailableException if no host in the cluster can be - * contacted successfully to execute this query. - * @throws QueryExecutionException if the query triggered an execution - * exception, i.e. an exception thrown by Cassandra when it cannot execute - * the query with the requested consistency level successfully. - * @throws QueryValidationException if the query if invalid (syntax error, - * unauthorized or any other validation problem). - */ - public ResultSet executePrepared(BoundStatement stmt, ConsistencyLevel consistency) throws NoHostAvailableException { - return executePrepared(stmt, new QueryOptions(consistency)); - } - - /** - * Execute a prepared statement that had values provided for its bound - * variables. - * - * This method performs like {@link #execute} but for prepared statements. - * It blocks until at least some result has been received from the - * database. - * - * @param stmt the prepared statement with values for its bound variables. - * @param options the options to use for this query. This includes at least - * the consistency level for the operation. - * @return the result of the query. That result will never be null but can - * be empty (and will be for any non SELECT query). - * - * @throws IllegalStateException if {@code !stmt.ready()}. - * @throws NoHostAvailableException if no host in the cluster can be - * contacted successfully to execute this query. - * @throws QueryExecutionException if the query triggered an execution - * exception, i.e. an exception thrown by Cassandra when it cannot execute - * the query with the requested consistency level successfully. - * @throws QueryValidationException if the query if invalid (syntax error, - * unauthorized or any other validation problem). - */ - public ResultSet executePrepared(BoundStatement stmt, QueryOptions options) throws NoHostAvailableException { - return executePreparedAsync(stmt, options).getUninterruptibly(); - } - - /** - * Execute a prepared statement that had values provided for its bound - * variables asynchronously. - * - * This method is a shortcut for {@code executePreparedAsync(stmt, null)}. - * - * @param stmt the prepared statement with values for its bound variables. - * @return the result of the query. That result will never be null but can - * be empty (and will be for any non SELECT query). - * - * @throws IllegalStateException if {@code !stmt.ready()}. - */ - public ResultSet.Future executePreparedAsync(BoundStatement stmt) { - return executePreparedAsync(stmt, new QueryOptions()); - } - - /** - * Execute a prepared statement that had values provided for its bound - * variables asynchronously. - * - * This method is a shortcut for {@code executePreparedAsync(stmt, new QueryOptions(consistency))}. 
- * - * @param stmt the prepared statement with values for its bound variables. - * @param consistency the consistency level for the operation. If the query - * doesn't need a consistency level (USE, CREATE, ALTER, DROP and TRUNCATE - * queries for instance), this argument is ignored and null can be - * provided. However, if null is provided while the query requires a - * consistency level, the default consistency level of ONE is used. - * @return the result of the query. That result will never be null but can - * be empty (and will be for any non SELECT query). - * - * @throws IllegalStateException if {@code !stmt.ready()}. - */ - public ResultSet.Future executePreparedAsync(BoundStatement stmt, ConsistencyLevel consistency) { - return executePreparedAsync(stmt, new QueryOptions(consistency)); - } - - /** - * Execute a prepared statement that had values provided for its bound - * variables asynchronously. - *
- * This method performs like {@link #executeAsync} but for prepared - * statements. It return as soon as the query has been successfully sent to - * the database. - * - * @param stmt the prepared statement with values for its bound variables. - * @param queryOptions the options to use for this query. This includes at - * least the consistency level for the operation. - * @return the result of the query. That result will never be null but can - * be empty (and will be for any non SELECT query). - * - * @throws IllegalStateException if {@code !stmt.ready()}. - */ - public ResultSet.Future executePreparedAsync(BoundStatement stmt, QueryOptions queryOptions) { - if (!stmt.isReady()) - throw new IllegalStateException("Some bind variables haven't been bound in the provided statement"); - - return manager.executeQuery(new ExecuteMessage(stmt.statement.id, Arrays.asList(stmt.values), ConsistencyLevel.toCassandraCL(queryOptions.getConsistencyLevel())), queryOptions); - } - /** * Shutdown this session instance. *
@@ -501,7 +337,7 @@ public void onRemove(Host host) { public void setKeyspace(String keyspace) throws NoHostAvailableException { try { - executeQuery(new QueryMessage("use " + keyspace, ConsistencyLevel.DEFAULT_CASSANDRA_CL), new QueryOptions()).get(); + executeQuery(new QueryMessage("use " + keyspace, ConsistencyLevel.DEFAULT_CASSANDRA_CL), Query.DEFAULT).get(); } catch (InterruptedException e) { // If we're interrupted, then fine, we stop waiting, but the user shouldn't complain if the keyspace is not set. } catch (ExecutionException e) { @@ -522,8 +358,8 @@ else if (cause instanceof DriverUncheckedException) * This method will find a suitable node to connect to using the * {@link LoadBalancingPolicy} and handle host failover. */ - public void execute(Connection.ResponseCallback callback, QueryOptions options) { - new RetryingCallback(this, callback, options).sendRequest(); + public void execute(Connection.ResponseCallback callback, Query query) { + new RetryingCallback(this, callback, query).sendRequest(); } public void prepare(String query, InetAddress toExclude) { @@ -556,12 +392,12 @@ public void prepare(String query, InetAddress toExclude) { } } - public ResultSet.Future executeQuery(Message.Request msg, QueryOptions options) { - if (options.isTracing()) + public ResultSet.Future executeQuery(Message.Request msg, Query query) { + if (query.isTracing()) msg.setTracingRequested(); ResultSet.Future future = new ResultSet.Future(this, msg); - execute(future.callback, options); + execute(future.callback, query); return future; } } diff --git a/driver-core/src/main/java/com/datastax/driver/core/SimpleStatement.java b/driver-core/src/main/java/com/datastax/driver/core/SimpleStatement.java new file mode 100644 index 00000000000..4ff25909d76 --- /dev/null +++ b/driver-core/src/main/java/com/datastax/driver/core/SimpleStatement.java @@ -0,0 +1,18 @@ +package com.datastax.driver.core; + +/** + * A simple {@code CQLStatement} implementation built directly from a query + * string. + */ +public class SimpleStatement extends CQLStatement { + + private final String query; + + public SimpleStatement(String query) { + this.query = query; + } + + public String getQueryString() { + return query; + } +} diff --git a/driver-core/src/main/java/com/datastax/driver/core/policies/LoadBalancingPolicy.java b/driver-core/src/main/java/com/datastax/driver/core/policies/LoadBalancingPolicy.java index 8b47763b214..e0abc4de127 100644 --- a/driver-core/src/main/java/com/datastax/driver/core/policies/LoadBalancingPolicy.java +++ b/driver-core/src/main/java/com/datastax/driver/core/policies/LoadBalancingPolicy.java @@ -52,12 +52,12 @@ public interface LoadBalancingPolicy extends Host.StateListener { * used. If all hosts of the returned {@code Iterator} are down, the query * will fail. * - * @param queryOptions the options used for the query. + * @param query the query for which to build a plan. * @return an iterator of Host. The query is tried against the hosts * returned by this iterator in order, until the query has been sent * successfully to one of the host. */ - public Iterator newQueryPlan(QueryOptions queryOptions); + public Iterator newQueryPlan(Query query); /** * Simple factory interface to allow creating {@link LoadBalancingPolicy} instances. @@ -119,7 +119,7 @@ public HostDistance distance(Host host) { * @return a new query plan, i.e. an iterator indicating which host to * try first for querying, which one to use as failover, etc... 
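Taken together, this commit folds the old execute(query, options) and executePrepared(stmt, options) overloads into a single execute(Query) entry point. A short sketch of the reworked API, using only calls visible in this patch (the table, column and key names are illustrative):

    // Options now travel on the Query object itself: build a SimpleStatement,
    // tune it fluently, and hand it to the unified execute method.
    Query query = new SimpleStatement("SELECT v FROM test WHERE k = 'k1'")
                      .setConsistencyLevel(ConsistencyLevel.ONE)
                      .setTracing();
    ResultSet rs = session.execute(query); // may throw NoHostAvailableException

    // Prepared statements flow through the very same entry point.
    PreparedStatement ps = session.prepare("SELECT v FROM test WHERE k = ?");
    ResultSet rs2 = session.execute(ps.bind("k1"));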
*/ - public Iterator newQueryPlan(QueryOptions queryOptions) { + public Iterator newQueryPlan(Query query) { // We clone liveHosts because we want a version of the list that // cannot change concurrently of the query plan iterator (this @@ -262,7 +262,7 @@ public HostDistance distance(Host host) { * @return a new query plan, i.e. an iterator indicating which host to * try first for querying, which one to use as failover, etc... */ - public Iterator newQueryPlan(QueryOptions queryOptions) { + public Iterator newQueryPlan(Query query) { CopyOnWriteArrayList localLiveHosts = perDcLiveHosts.get(localDc); final List hosts = localLiveHosts == null ? Collections.emptyList() : (List)localLiveHosts.clone(); diff --git a/driver-core/src/test/java/com/datastax/driver/core/CCMBridge.java b/driver-core/src/test/java/com/datastax/driver/core/CCMBridge.java index 22917aa6fc4..d5558134f2d 100644 --- a/driver-core/src/test/java/com/datastax/driver/core/CCMBridge.java +++ b/driver-core/src/test/java/com/datastax/driver/core/CCMBridge.java @@ -27,7 +27,7 @@ public class CCMBridge { private static final Logger logger = Logger.getLogger(CCMBridge.class); - private static final String CASSANDRA_VERSION_REGEXP = "cassandra-\\d\\.\\d\\.\\d(-\\w+)?"; + private static final String CASSANDRA_VERSION_REGEXP = "\\d\\.\\d\\.\\d(-\\w+)?"; private static final File CASSANDRA_DIR; private static final String CASSANDRA_VERSION; diff --git a/driver-core/src/test/java/com/datastax/driver/core/PreparedStatementTest.java b/driver-core/src/test/java/com/datastax/driver/core/PreparedStatementTest.java index 1fad5dc8069..b79e0f13b62 100644 --- a/driver-core/src/test/java/com/datastax/driver/core/PreparedStatementTest.java +++ b/driver-core/src/test/java/com/datastax/driver/core/PreparedStatementTest.java @@ -90,7 +90,7 @@ public void preparedNativeTest() throws NoHostAvailableException { String name = "c_" + type; PreparedStatement ps = session.prepare(String.format("INSERT INTO %s(k, %s) VALUES ('prepared_native', ?)", ALL_NATIVE_TABLE, name)); BoundStatement bs = ps.newBoundStatement(); - session.executePrepared(setBoundValue(bs, name, type, getFixedValue(type))); + session.execute(setBoundValue(bs, name, type, getFixedValue(type))); CQLRow row = session.execute(String.format("SELECT %s FROM %s WHERE k='prepared_native'", name, ALL_NATIVE_TABLE)).fetchOne(); assertEquals("For type " + type, getFixedValue(type), getValue(row, name, type)); @@ -110,7 +110,7 @@ public void prepareListTest() throws NoHostAvailableException { List value = (List)getFixedValue(type);; PreparedStatement ps = session.prepare(String.format("INSERT INTO %s(k, %s) VALUES ('prepared_list', ?)", ALL_LIST_TABLE, name)); BoundStatement bs = ps.newBoundStatement(); - session.executePrepared(setBoundValue(bs, name, type, value)); + session.execute(setBoundValue(bs, name, type, value)); CQLRow row = session.execute(String.format("SELECT %s FROM %s WHERE k='prepared_list'", name, ALL_LIST_TABLE)).fetchOne(); assertEquals("For type " + type, value, getValue(row, name, type)); @@ -130,7 +130,7 @@ public void prepareSetTest() throws NoHostAvailableException { Set value = (Set)getFixedValue(type);; PreparedStatement ps = session.prepare(String.format("INSERT INTO %s(k, %s) VALUES ('prepared_set', ?)", ALL_SET_TABLE, name)); BoundStatement bs = ps.newBoundStatement(); - session.executePrepared(setBoundValue(bs, name, type, value)); + session.execute(setBoundValue(bs, name, type, value)); CQLRow row = session.execute(String.format("SELECT %s FROM %s WHERE 
k='prepared_set'", name, ALL_SET_TABLE)).fetchOne(); assertEquals("For type " + type, value, getValue(row, name, type)); @@ -155,7 +155,7 @@ public void prepareMapTest() throws NoHostAvailableException { Map value = (Map)getFixedValue(type);; PreparedStatement ps = session.prepare(String.format("INSERT INTO %s(k, %s) VALUES ('prepared_map', ?)", ALL_MAP_TABLE, name)); BoundStatement bs = ps.newBoundStatement(); - session.executePrepared(setBoundValue(bs, name, type, value)); + session.execute(setBoundValue(bs, name, type, value)); CQLRow row = session.execute(String.format("SELECT %s FROM %s WHERE k='prepared_map'", name, ALL_MAP_TABLE)).fetchOne(); assertEquals("For type " + type, value, getValue(row, name, type)); diff --git a/driver-core/src/test/java/com/datastax/driver/core/SessionTest.java b/driver-core/src/test/java/com/datastax/driver/core/SessionTest.java index 61aad733199..4b5d2cb34b2 100644 --- a/driver-core/src/test/java/com/datastax/driver/core/SessionTest.java +++ b/driver-core/src/test/java/com/datastax/driver/core/SessionTest.java @@ -30,18 +30,17 @@ public void executeTest() throws Exception { // execute checkExecuteResultSet(session.execute(String.format(SELECT_ALL_FORMAT, TABLE)), key); - checkExecuteResultSet(session.execute(String.format(SELECT_ALL_FORMAT, TABLE), ConsistencyLevel.ONE), key); - checkExecuteResultSet(session.execute(String.format(SELECT_ALL_FORMAT, TABLE), new QueryOptions(ConsistencyLevel.ONE)), key); + checkExecuteResultSet(session.execute(new SimpleStatement(String.format(SELECT_ALL_FORMAT, TABLE)).setConsistencyLevel(ConsistencyLevel.ONE)), key); // executeAsync checkExecuteResultSet(session.executeAsync(String.format(SELECT_ALL_FORMAT, TABLE)).getUninterruptibly(), key); - checkExecuteResultSet(session.executeAsync(String.format(SELECT_ALL_FORMAT, TABLE), ConsistencyLevel.ONE).getUninterruptibly(), key); - checkExecuteResultSet(session.executeAsync(String.format(SELECT_ALL_FORMAT, TABLE), new QueryOptions(ConsistencyLevel.ONE)).getUninterruptibly(), key); + checkExecuteResultSet(session.executeAsync(new SimpleStatement(String.format(SELECT_ALL_FORMAT, TABLE)).setConsistencyLevel(ConsistencyLevel.ONE)).getUninterruptibly(), key); } @Test public void executePreparedTest() throws Exception { - // Simple calls to all versions of the executePrepared/executePreparedAsync methods + // Simple calls to all versions of the execute/executeAsync methods for prepared statements + // Note: the goal is only to exercice the Session methods, PreparedStatementTest have better prepared statement tests. 
String key = "execute_prepared_test"; ResultSet rs = session.execute(String.format(INSERT_FORMAT, TABLE, key, "foo", 42, 24.03f)); assertTrue(rs.isExhausted()); @@ -50,14 +49,12 @@ public void executePreparedTest() throws Exception { BoundStatement bs = p.bind(key); // executePrepared - checkExecuteResultSet(session.executePrepared(bs), key); - checkExecuteResultSet(session.executePrepared(bs, ConsistencyLevel.ONE), key); - checkExecuteResultSet(session.executePrepared(bs, new QueryOptions(ConsistencyLevel.ONE)), key); + checkExecuteResultSet(session.execute(bs), key); + checkExecuteResultSet(session.execute(bs.setConsistencyLevel(ConsistencyLevel.ONE)), key); // executePreparedAsync - checkExecuteResultSet(session.executePreparedAsync(bs).getUninterruptibly(), key); - checkExecuteResultSet(session.executePreparedAsync(bs, ConsistencyLevel.ONE).getUninterruptibly(), key); - checkExecuteResultSet(session.executePreparedAsync(bs, new QueryOptions(ConsistencyLevel.ONE)).getUninterruptibly(), key); + checkExecuteResultSet(session.executeAsync(bs).getUninterruptibly(), key); + checkExecuteResultSet(session.executeAsync(bs.setConsistencyLevel(ConsistencyLevel.ONE)).getUninterruptibly(), key); } private static void checkExecuteResultSet(ResultSet rs, String key) { From bb9f63a2e5984b8236561a2f2d052c051d20b984 Mon Sep 17 00:00:00 2001 From: Sylvain Lebresne Date: Tue, 13 Nov 2012 09:27:12 +0100 Subject: [PATCH 071/719] Add optional token awareness --- .../datastax/driver/core/BoundStatement.java | 35 ++++ .../com/datastax/driver/core/Cluster.java | 6 +- .../datastax/driver/core/ClusterMetadata.java | 88 +++++++++ .../driver/core/ControlConnection.java | 184 ++++++++++-------- .../driver/core/PreparedStatement.java | 98 +++++++++- .../java/com/datastax/driver/core/Query.java | 18 +- .../com/datastax/driver/core/Session.java | 4 +- .../datastax/driver/core/SimpleStatement.java | 91 +++++++++ .../java/com/datastax/driver/core/Token.java | 148 ++++++++++++++ .../core/policies/LoadBalancingPolicy.java | 129 +++++++++++- .../com/datastax/driver/core/CCMBridge.java | 3 + .../driver/core/PreparedStatementTest.java | 7 + 12 files changed, 712 insertions(+), 99 deletions(-) create mode 100644 driver-core/src/main/java/com/datastax/driver/core/Token.java diff --git a/driver-core/src/main/java/com/datastax/driver/core/BoundStatement.java b/driver-core/src/main/java/com/datastax/driver/core/BoundStatement.java index 482cc6d108c..b7b17882f00 100644 --- a/driver-core/src/main/java/com/datastax/driver/core/BoundStatement.java +++ b/driver-core/src/main/java/com/datastax/driver/core/BoundStatement.java @@ -157,6 +157,41 @@ public BoundStatement bind(Object... values) { return this; } + /** + * The routing key for this bound query. + *
+ * This method will return a non-{@code null} value if:
+ * <ul>
+ *   <li>either all the columns composing the partition key are bound
+ *   variables of this {@code BoundStatement}. The routing key will then be
+ *   built using the values provided for these partition key columns.</li>
+ *   <li>or the routing key has been set through {@link PreparedStatement#setRoutingKey}
+ *   for the {@code PreparedStatement} this statement has been built from.</li>
+ * </ul>
+ * Otherwise, {@code null} is returned.
+ * <p>
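In practice the first case is the common one: bind every partition key column and the driver derives the key itself. A compact sketch (table and column names are hypothetical):

    // `users` is assumed to have the single partition key column `k`.
    PreparedStatement ps = session.prepare("INSERT INTO users(k, v) VALUES (?, ?)");
    BoundStatement bs = ps.bind("key1", 42);

    // All partition key columns are bound, so a routing key can be derived.
    ByteBuffer routingKey = bs.getRoutingKey();   // non-null: the serialized value of `k`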
+ * Note that if the routing key has been set through {@link PreparedStatement#setRoutingKey}, + * that value takes precedence even if the partition key is part of the bound variables. + * + * @return the routing key for this statement or {@code null}. + */ + public ByteBuffer getRoutingKey() { + if (statement.routingKey != null) + return statement.routingKey; + + if (statement.routingKeyIndexes != null) { + if (statement.routingKeyIndexes.length == 1) { + return values[statement.routingKeyIndexes[0]]; + } else { + ByteBuffer[] components = new ByteBuffer[statement.routingKeyIndexes.length]; + for (int i = 0; i < components.length; ++i) + components[i] = values[statement.routingKeyIndexes[i]]; + return SimpleStatement.compose(components); + } + } + return null; + } + /** * Set the {@code i}th value to the provided boolean. * diff --git a/driver-core/src/main/java/com/datastax/driver/core/Cluster.java b/driver-core/src/main/java/com/datastax/driver/core/Cluster.java index c0323f84961..a30460fe6e2 100644 --- a/driver-core/src/main/java/com/datastax/driver/core/Cluster.java +++ b/driver-core/src/main/java/com/datastax/driver/core/Cluster.java @@ -475,10 +475,14 @@ private Manager(List contactPoints, int port, Policies policies, Au for (InetAddress address : contactPoints) addHost(address, false); - this.controlConnection = new ControlConnection(this); + this.controlConnection = new ControlConnection(this, metadata); controlConnection.connect(); } + Cluster getCluster() { + return Cluster.this; + } + private Session newSession() { Session session = new Session(Cluster.this, metadata.allHosts()); sessions.add(session); diff --git a/driver-core/src/main/java/com/datastax/driver/core/ClusterMetadata.java b/driver-core/src/main/java/com/datastax/driver/core/ClusterMetadata.java index c11b5708e8f..72f9878b44c 100644 --- a/driver-core/src/main/java/com/datastax/driver/core/ClusterMetadata.java +++ b/driver-core/src/main/java/com/datastax/driver/core/ClusterMetadata.java @@ -1,6 +1,7 @@ package com.datastax.driver.core; import java.net.InetAddress; +import java.nio.ByteBuffer; import java.util.*; import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.ConcurrentMap; @@ -22,6 +23,8 @@ public class ClusterMetadata { private final ConcurrentMap hosts = new ConcurrentHashMap(); private final ConcurrentMap keyspaces = new ConcurrentHashMap(); + private volatile TokenMap tokenMap; + ClusterMetadata(Cluster.Manager cluster) { this.cluster = cluster; } @@ -114,6 +117,10 @@ private static void buildTableMetadata(KeyspaceMetadata ksm, List cfRows } } + synchronized void rebuildTokenMap(String partitioner, Map> allTokens) { + this.tokenMap = TokenMap.build(partitioner, allTokens); + } + Host add(InetAddress address) { Host newHost = new Host(address, cluster.convictionPolicyFactory); Host previous = hosts.putIfAbsent(address, newHost); @@ -141,6 +148,28 @@ Collection allHosts() { return hosts.values(); } + /** + * The set of hosts that are replica for a given partition key. + *
+ * Note that this is a best effort method. Consumers should not rely
+ * too heavily on the result of this method not being stale (or even empty).
+ *
+ * @param partitionKey the partition key for which to find the set of
+ * replicas.
+ * @return the (immutable) set of replicas for {@code partitionKey} as known
+ * by the driver. No strong guarantee is provided on the freshness of
+ * this information. It is also not guaranteed that the returned set won't
+ * be empty (which is then some form of staleness).
+ */
+ public Set<Host> getReplicas(ByteBuffer partitionKey) {
+ TokenMap current = tokenMap;
+ if (current == null) {
+ return Collections.emptySet();
+ } else {
+ return current.getReplicas(current.factory.hash(partitionKey));
+ }
+ }
+
 /**
 * The Cassandra name for the cluster connected to.
 *
@@ -201,4 +230,63 @@ public String exportSchemaAsString() { return sb.toString(); }
+
+ static class TokenMap<T extends Token<T>> {
+
+ private final Token.Factory<T> factory;
+ private final Map<Token<T>, Set<Host>> tokenToHosts;
+ private final List<Token<T>> ring;
+
+ private TokenMap(Token.Factory<T> factory, Map<Token<T>, Set<Host>> tokenToHosts, List<Token<T>> ring) {
+ this.factory = factory;
+ this.tokenToHosts = tokenToHosts;
+ this.ring = ring;
+ }
+
+ public static <T extends Token<T>> TokenMap<T> build(String partitioner, Map<Host, Collection<String>> allTokens) {
+
+ Token.Factory<T> factory = (Token.Factory<T>)Token.getFactory(partitioner);
+ if (factory == null)
+ return null;
+
+ Map<Token<T>, Set<Host>> tokenToHosts = new HashMap<Token<T>, Set<Host>>();
+ Set<Token<T>> allSorted = new TreeSet<Token<T>>();
+
+ for (Map.Entry<Host, Collection<String>> entry : allTokens.entrySet()) {
+ Host host = entry.getKey();
+ for (String tokenStr : entry.getValue()) {
+ try {
+ Token<T> t = factory.fromString(tokenStr);
+ allSorted.add(t);
+ Set<Host> hosts = tokenToHosts.get(t);
+ if (hosts == null) {
+ hosts = new HashSet<Host>();
+ tokenToHosts.put(t, hosts);
+ }
+ hosts.add(host);
+ } catch (IllegalArgumentException e) {
+ // If we failed parsing that token, skip it
+ }
+ }
+ }
+ // Make all the host sets immutable so we can safely share them publicly
+ for (Map.Entry<Token<T>, Set<Host>> entry : tokenToHosts.entrySet()) {
+ entry.setValue(Collections.unmodifiableSet(entry.getValue()));
+ }
+ return new TokenMap<T>(factory, tokenToHosts, new ArrayList<Token<T>>(allSorted));
+ }
+
+ private Set<Host> getReplicas(T token) {
+
+ // Find the primary replica
+ int i = Collections.binarySearch(ring, token);
+ if (i < 0) {
+ i = (i + 1) * (-1);
+ if (i >= ring.size())
+ i = 0;
+ }
+
+ return tokenToHosts.get(ring.get(i));
+ }
+ }
 }
diff --git a/driver-core/src/main/java/com/datastax/driver/core/ControlConnection.java b/driver-core/src/main/java/com/datastax/driver/core/ControlConnection.java
index de24f76c62e..a984ebfcf07 100644
--- a/driver-core/src/main/java/com/datastax/driver/core/ControlConnection.java
+++ b/driver-core/src/main/java/com/datastax/driver/core/ControlConnection.java
@@ -26,8 +26,10 @@ class ControlConnection implements Host.StateListener {
 private static final String SELECT_COLUMN_FAMILIES = "SELECT * FROM system.schema_columnfamilies";
 private static final String SELECT_COLUMNS = "SELECT * FROM system.schema_columns";
- private static final String SELECT_PEERS = "SELECT peer, data_center, rack FROM system.peers";
- private static final String SELECT_LOCAL = "SELECT cluster_name, data_center, rack FROM system.local WHERE key='local'";
+ private static final String SELECT_PEERS = "SELECT peer, data_center, rack, tokens FROM system.peers";
+ // TODO: fix once we have rc1
+ //private static final String SELECT_LOCAL = "SELECT cluster_name, data_center, rack, tokens, partitioner FROM system.local WHERE key='local'";
+
private static final String SELECT_LOCAL = "SELECT cluster_name, data_center, rack, tokens FROM system.local WHERE key='local'"; private final AtomicReference connectionRef = new AtomicReference(); @@ -39,9 +41,9 @@ class ControlConnection implements Host.StateListener { private volatile boolean isShutdown; - public ControlConnection(Cluster.Manager cluster) { - this.cluster = cluster; - this.balancingPolicy = LoadBalancingPolicy.RoundRobin.Factory.INSTANCE.create(cluster.metadata.allHosts()); + public ControlConnection(Cluster.Manager manager, ClusterMetadata metadata) { + this.cluster = manager; + this.balancingPolicy = LoadBalancingPolicy.RoundRobin.Factory.INSTANCE.create(manager.getCluster(), metadata.allHosts()); } // Only for the initial connection. Does not schedule retries if it fails @@ -108,23 +110,32 @@ private Connection reconnectInternal() throws NoHostAvailableException { try { return tryConnect(host); } catch (ConnectionException e) { - if (errors == null) - errors = new HashMap(); - errors.put(e.address, e.getMessage()); - - if (logger.isDebugEnabled()) { - if (iter.hasNext()) { - logger.debug("[Control connection] Failed connecting to {}, trying next host", host); - } else { - logger.debug("[Control connection] Failed connecting to {}, no more host to try", host); - } - } + errors = logError(host, e.getMessage(), errors, iter); + } catch (ExecutionException e) { + errors = logError(host, e.getMessage(), errors, iter); + } catch (InterruptedException e) { + // If we're interrupted, just move on } } throw new NoHostAvailableException(errors == null ? Collections.emptyMap() : errors); } - private Connection tryConnect(Host host) throws ConnectionException { + private static Map logError(Host host, String msg, Map errors, Iterator iter) { + if (errors == null) + errors = new HashMap(); + errors.put(host.getAddress(), msg); + + if (logger.isDebugEnabled()) { + if (iter.hasNext()) { + logger.debug("[Control connection] error on {} connection ({}), trying next host", host, msg); + } else { + logger.debug("[Control connection] error on {} connection ({}), no more host to try", host, msg); + } + } + return errors; + } + + private Connection tryConnect(Host host) throws ConnectionException, ExecutionException, InterruptedException { Connection connection = cluster.connectionFactory.open(host); try { @@ -136,8 +147,11 @@ private Connection tryConnect(Host host) throws ConnectionException { }); connection.write(new RegisterMessage(evs)); - refreshNodeList(connection); - refreshSchema(connection, null, null); + logger.debug(String.format("[Control connection] Refreshing node list and token map")); + refreshNodeListAndTokenMap(connection); + + logger.debug("[Control connection] Refreshing schema"); + refreshSchema(connection, null, null, cluster); return connection; } catch (BusyConnectionException e) { throw new DriverInternalError("Newly created connection should not be busy"); @@ -145,24 +159,20 @@ private Connection tryConnect(Host host) throws ConnectionException { } public void refreshSchema(String keyspace, String table) { - refreshSchema(connectionRef.get(), keyspace, table); - } - - public void refreshSchema(Connection connection, String keyspace, String table) { logger.debug("[Control connection] Refreshing schema for {}.{}", keyspace, table); try { - refreshSchema(connection, keyspace, table, cluster); + refreshSchema(connectionRef.get(), keyspace, table, cluster); } catch (ConnectionException e) { - logger.debug("[Control connection] Connection error when refeshing schema 
({})", e.getMessage()); - reconnect(); - } catch (BusyConnectionException e) { - logger.debug("[Control connection] Connection is busy, reconnecting"); + logger.debug("[Control connection] Connection error while refeshing schema ({})", e.getMessage()); reconnect(); } catch (ExecutionException e) { logger.error("[Control connection] Unexpected error while refeshing schema", e); reconnect(); + } catch (BusyConnectionException e) { + logger.debug("[Control connection] Connection is busy, reconnecting"); + reconnect(); } catch (InterruptedException e) { - // If we're interrupted, just move on + // Interrupted? Then moving on. } } @@ -189,68 +199,72 @@ static void refreshSchema(Connection connection, String keyspace, String table, cluster.metadata.rebuildSchema(keyspace, table, ksFuture == null ? null : ksFuture.get(), cfFuture.get(), colsFuture.get()); } - private void refreshNodeList(Connection connection) throws BusyConnectionException { - // Make sure we're up to date on node list - logger.debug(String.format("[Control connection] Refreshing node list")); - try { - ResultSet.Future peersFuture = new ResultSet.Future(null, new QueryMessage(SELECT_PEERS, ConsistencyLevel.DEFAULT_CASSANDRA_CL)); - ResultSet.Future localFuture = new ResultSet.Future(null, new QueryMessage(SELECT_LOCAL, ConsistencyLevel.DEFAULT_CASSANDRA_CL)); - connection.write(peersFuture.callback); - connection.write(localFuture.callback); - - // Update cluster name, DC and rack for the one node we are connected to - CQLRow localRow = localFuture.get().fetchOne(); - if (localRow != null) { - String clusterName = localRow.getString("cluster_name"); - if (clusterName != null) - cluster.metadata.clusterName = clusterName; - - Host host = cluster.metadata.getHost(connection.address); - // In theory host can't be null. However there is no point in risking a NPE in case we - // have a race between a node removal and this. - if (host != null) - host.setLocationInfo(localRow.getString("data_center"), localRow.getString("rack")); - } - - List foundHosts = new ArrayList(); - List dcs = new ArrayList(); - List racks = new ArrayList(); + private void refreshNodeListAndTokenMap(Connection connection) throws ConnectionException, BusyConnectionException, ExecutionException, InterruptedException { + // Make sure we're up to date on nodes and tokens + + ResultSet.Future peersFuture = new ResultSet.Future(null, new QueryMessage(SELECT_PEERS, ConsistencyLevel.DEFAULT_CASSANDRA_CL)); + ResultSet.Future localFuture = new ResultSet.Future(null, new QueryMessage(SELECT_LOCAL, ConsistencyLevel.DEFAULT_CASSANDRA_CL)); + connection.write(peersFuture.callback); + connection.write(localFuture.callback); + + String partitioner = null; + Map> tokenMap = new HashMap>(); + + // Update cluster name, DC and rack for the one node we are connected to + CQLRow localRow = localFuture.get().fetchOne(); + if (localRow != null) { + String clusterName = localRow.getString("cluster_name"); + if (clusterName != null) + cluster.metadata.clusterName = clusterName; + + Host host = cluster.metadata.getHost(connection.address); + // In theory host can't be null. However there is no point in risking a NPE in case we + // have a race between a node removal and this. 
+ if (host != null) + host.setLocationInfo(localRow.getString("data_center"), localRow.getString("rack")); + + // TODO:Fix once we have rc1 + //partitioner = localRow.getString("partitioner"); + partitioner = "org.apache.cassandra.dht.Murmur3Partitioner"; + Set tokens = localRow.getSet("tokens", String.class); + if (partitioner != null && !tokens.isEmpty()) + tokenMap.put(host, tokens); + } - for (CQLRow row : peersFuture.get()) { - if (!row.isNull("peer")) { - foundHosts.add(row.getInet("peer")); - dcs.add(row.getString("data_center")); - racks.add(row.getString("rack")); - } + List foundHosts = new ArrayList(); + List dcs = new ArrayList(); + List racks = new ArrayList(); + List> allTokens = new ArrayList>(); + + for (CQLRow row : peersFuture.get()) { + if (!row.isNull("peer")) { + foundHosts.add(row.getInet("peer")); + dcs.add(row.getString("data_center")); + racks.add(row.getString("rack")); + allTokens.add(row.getSet("tokens", String.class)); } + } - for (int i = 0; i < foundHosts.size(); i++) { - Host host = cluster.metadata.getHost(foundHosts.get(i)); - if (host == null) { - // We don't know that node, add it. - host = cluster.addHost(foundHosts.get(i), true); - } - host.setLocationInfo(dcs.get(i), racks.get(i)); + for (int i = 0; i < foundHosts.size(); i++) { + Host host = cluster.metadata.getHost(foundHosts.get(i)); + if (host == null) { + // We don't know that node, add it. + host = cluster.addHost(foundHosts.get(i), true); } + host.setLocationInfo(dcs.get(i), racks.get(i)); - // Removes all those that seems to have been removed (since we lost the control connection) - Set foundHostsSet = new HashSet(foundHosts); - for (Host host : cluster.metadata.allHosts()) - if (!host.getAddress().equals(connection.address) && !foundHostsSet.contains(host.getAddress())) - cluster.removeHost(host); - - } catch (ConnectionException e) { - logger.debug("[Control connection] Connection error when refeshing hosts list ({})", e.getMessage()); - reconnect(); - } catch (ExecutionException e) { - logger.error("[Control connection] Unexpected error while refeshing hosts list", e); - reconnect(); - } catch (BusyConnectionException e) { - logger.debug("[Control connection] Connection is busy, reconnecting"); - reconnect(); - } catch (InterruptedException e) { - // Interrupted? Then moving on. 
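The per-host token sets gathered here are what ClusterMetadata.rebuildTokenMap turns into the sorted ring that getReplicas searches: Collections.binarySearch either hits a token exactly or yields an insertion point, which is converted to the first token greater than or equal to the key's token, wrapping past the end of the ring. A self-contained sketch of that successor computation, with illustrative token values:

    import java.util.*;

    class RingLookupSketch {
        public static void main(String[] args) {
            // An illustrative Murmur3-style ring of three tokens, kept sorted.
            List<Long> ring = Arrays.asList(-100L, 0L, 100L);

            for (long keyToken : new long[]{ 42L, 150L }) {
                int i = Collections.binarySearch(ring, keyToken);
                if (i < 0) {
                    i = (i + 1) * (-1);     // recover the insertion point from -(point) - 1
                    if (i >= ring.size())
                        i = 0;              // past the last token: wrap to the start of the ring
                }
                // 42 lands on the owner of token 100; 150 wraps around to the owner of -100.
                System.out.println(keyToken + " -> token " + ring.get(i));
            }
        }
    }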
+ if (partitioner != null && !allTokens.get(i).isEmpty()) + tokenMap.put(host, allTokens.get(i)); } + + // Removes all those that seems to have been removed (since we lost the control connection) + Set foundHostsSet = new HashSet(foundHosts); + for (Host host : cluster.metadata.allHosts()) + if (!host.getAddress().equals(connection.address) && !foundHostsSet.contains(host.getAddress())) + cluster.removeHost(host); + + if (partitioner != null) + cluster.metadata.rebuildTokenMap(partitioner, tokenMap); } public void onUp(Host host) { diff --git a/driver-core/src/main/java/com/datastax/driver/core/PreparedStatement.java b/driver-core/src/main/java/com/datastax/driver/core/PreparedStatement.java index 17ac49a36f2..62aab1fbf20 100644 --- a/driver-core/src/main/java/com/datastax/driver/core/PreparedStatement.java +++ b/driver-core/src/main/java/com/datastax/driver/core/PreparedStatement.java @@ -1,5 +1,8 @@ package com.datastax.driver.core; +import java.nio.ByteBuffer; +import java.util.List; + import org.apache.cassandra.utils.MD5Digest; import org.apache.cassandra.transport.messages.ResultMessage; @@ -19,25 +22,72 @@ public class PreparedStatement { final ColumnDefinitions metadata; final MD5Digest id; - private PreparedStatement(ColumnDefinitions metadata, MD5Digest id) { + volatile ByteBuffer routingKey; + final int[] routingKeyIndexes; + + private PreparedStatement(ColumnDefinitions metadata, MD5Digest id, int[] routingKeyIndexes) { this.metadata = metadata; this.id = id; + this.routingKeyIndexes = routingKeyIndexes; } - static PreparedStatement fromMessage(ResultMessage.Prepared msg) { + static PreparedStatement fromMessage(ResultMessage.Prepared msg, ClusterMetadata clusterMetadata) { switch (msg.kind) { case PREPARED: ResultMessage.Prepared pmsg = (ResultMessage.Prepared)msg; ColumnDefinitions.Definition[] defs = new ColumnDefinitions.Definition[pmsg.metadata.names.size()]; - for (int i = 0; i < defs.length; i++) + if (defs.length == 0) + return new PreparedStatement(new ColumnDefinitions(defs), pmsg.statementId, null); + + List partitionKeyColumns = null; + int[] pkIndexes = null; + KeyspaceMetadata km = clusterMetadata.getKeyspace(pmsg.metadata.names.get(0).ksName); + if (km != null) { + TableMetadata tm = km.getTable(pmsg.metadata.names.get(0).cfName); + if (tm != null) { + partitionKeyColumns = tm.getPartitionKey(); + pkIndexes = new int[partitionKeyColumns.size()]; + for (int i = 0; i < pkIndexes.length; ++i) + pkIndexes[i] = -1; + } + } + + // Note: we rely on the fact CQL queries cannot span multiple tables. If that change, we'll have to get smarter. + for (int i = 0; i < defs.length; i++) { defs[i] = ColumnDefinitions.Definition.fromTransportSpecification(pmsg.metadata.names.get(i)); + maybeGetIndex(defs[i].getName(), i, partitionKeyColumns, pkIndexes); + } - return new PreparedStatement(new ColumnDefinitions(defs), pmsg.statementId); + return new PreparedStatement(new ColumnDefinitions(defs), pmsg.statementId, allSet(pkIndexes) ? 
pkIndexes : null); default: throw new DriverInternalError(String.format("%s response received when prepared statement received was expected", msg.kind)); } } + private static void maybeGetIndex(String name, int j, List pkColumns, int[] pkIndexes) { + if (pkColumns == null) + return; + + for (int i = 0; i < pkColumns.size(); ++i) { + if (name.equals(pkColumns.get(i).getName())) { + // We may have the same column prepared multiple times, but only pick the first value + pkIndexes[i] = j; + return; + } + } + } + + private static boolean allSet(int[] pkColumns) { + if (pkColumns == null) + return false; + + for (int i = 0; i < pkColumns.length; ++i) + if (pkColumns[i] < 0) + return false; + + return true; + } + /** * Returns metadata on the bounded variables of this prepared statement. * @@ -78,4 +128,44 @@ public BoundStatement bind(Object... values) { public BoundStatement newBoundStatement() { return new BoundStatement(this); } + + /** + * Set the routing key for this prepared statement. + *
+ * This method allows you to manually provide a fixed routing key for all
+ * executions of this prepared statement. It is never mandatory to provide
+ * a routing key through this method, and this method should only be used
+ * if the partition key of the prepared query is not part of the prepared
+ * variables (i.e. if the partition key is fixed).
+ * <p>
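As an illustration (keyspace, table, and key are hypothetical), a prepared query whose partition key is a literal can still be routed by providing the key once:

    // The partition key `k` is fixed in the query string, so it is never bound;
    // without help the driver would have no routing key for this statement.
    PreparedStatement ps = session.prepare("UPDATE counters SET c = ? WHERE k = 'global'");
    ps.setRoutingKey(ByteBuffer.wrap("global".getBytes(java.nio.charset.Charset.forName("UTF-8"))));

    // Every execution of a statement bound from `ps` now carries that routing key.
    session.execute(ps.bind(42));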
+ * Note that if the partition key is part of the prepared variables, the + * routing key will be automatically computed once those variables are bound. + * + * @param routingKey the raw (binary) value to use as routing key. + * @return this {@code PreparedStatement} object. + * + * @see Query#getRoutingKey + */ + public PreparedStatement setRoutingKey(ByteBuffer routingKey) { + this.routingKey = routingKey; + return this; + } + + /** + * Set the routing key for this query. + *
+ * See {@link #setRoutingKey(ByteBuffer)} for more information. This + * method is a variant for when the query partition key is composite and + * thus the routing key must be built from multiple values. + * + * @param routingKeyComponents the raw (binary) values to compose to obtain + * the routing key. + * @return this {@code PreparedStatement} object. + * + * @see Query#getRoutingKey + */ + public PreparedStatement setRoutingKey(ByteBuffer... routingKeyComponents) { + this.routingKey = SimpleStatement.compose(routingKeyComponents); + return this; + } } diff --git a/driver-core/src/main/java/com/datastax/driver/core/Query.java b/driver-core/src/main/java/com/datastax/driver/core/Query.java index 6bca927dc97..181c0970ee9 100644 --- a/driver-core/src/main/java/com/datastax/driver/core/Query.java +++ b/driver-core/src/main/java/com/datastax/driver/core/Query.java @@ -1,5 +1,7 @@ package com.datastax.driver.core; +import java.nio.ByteBuffer; + /** * An executable query. *
@@ -10,7 +12,7 @@ public abstract class Query { // An exception to the CQLStatement or BoundStatement rule above. This is // used when preparing a statement and for other internal queries. Do not expose publicly. - static final Query DEFAULT = new Query() {}; + static final Query DEFAULT = new Query() { public ByteBuffer getRoutingKey() { return null; } }; private volatile ConsistencyLevel consistency; private volatile boolean traceQuery; @@ -75,4 +77,18 @@ public boolean isTracing() { return traceQuery; } + /** + * The routing key (in binary raw form) to use for token aware routing of this query. + *
+ * The routing key is optional in the sense that implementers are free to + * return {@code null}. The routing key is an hint used for token aware routing (see + * {@link LoadBalancingPolicy.TokenAware}), and if provided should + * correspond to the binary value for the query partition key. However, not + * providing a routing key never causes a query to fail and if the load + * balancing policy used is not token aware, then the routing key can be + * safely ignored. + * + * @return the routing key for this query or {@code null}. + */ + public abstract ByteBuffer getRoutingKey(); } diff --git a/driver-core/src/main/java/com/datastax/driver/core/Session.java b/driver-core/src/main/java/com/datastax/driver/core/Session.java index f21b4b8982b..4d35d1a7574 100644 --- a/driver-core/src/main/java/com/datastax/driver/core/Session.java +++ b/driver-core/src/main/java/com/datastax/driver/core/Session.java @@ -204,7 +204,7 @@ private PreparedStatement toPreparedStatement(String query, Connection.Future fu case PREPARED: ResultMessage.Prepared pmsg = (ResultMessage.Prepared)rm; manager.cluster.manager.prepare(pmsg.statementId, query, future.getAddress()); - return PreparedStatement.fromMessage(pmsg); + return PreparedStatement.fromMessage(pmsg, manager.cluster.getMetadata()); default: throw new DriverInternalError(String.format("%s response received when prepared statement was expected", rm.kind)); } @@ -248,7 +248,7 @@ public Manager(Cluster cluster, Collection hosts) { this.cluster = cluster; this.pools = new ConcurrentHashMap(hosts.size()); - this.loadBalancer = cluster.manager.configuration.getPolicies().getLoadBalancingPolicyFactory().create(hosts); + this.loadBalancer = cluster.manager.configuration.getPolicies().getLoadBalancingPolicyFactory().create(cluster, hosts); this.poolsState = new HostConnectionPool.PoolState(); for (Host host : hosts) diff --git a/driver-core/src/main/java/com/datastax/driver/core/SimpleStatement.java b/driver-core/src/main/java/com/datastax/driver/core/SimpleStatement.java index 4ff25909d76..7dd378762da 100644 --- a/driver-core/src/main/java/com/datastax/driver/core/SimpleStatement.java +++ b/driver-core/src/main/java/com/datastax/driver/core/SimpleStatement.java @@ -1,5 +1,7 @@ package com.datastax.driver.core; +import java.nio.ByteBuffer; + /** * A simple {@code CQLStatement} implementation built directly from a query * string. @@ -7,12 +9,101 @@ public class SimpleStatement extends CQLStatement { private final String query; + private volatile ByteBuffer routingKey; + /** + * Creates a new {@code SimpleStatement} with the provided query string. + * + * @param query the query string. + */ public SimpleStatement(String query) { this.query = query; } + /** + * The query string. + * + * @return the query string; + */ public String getQueryString() { return query; } + + /** + * The routing key for the query. + *
+ * Note that unless the routing key has been explicitly set through
+ * {@link #setRoutingKey}, this method will return {@code null} (to
+ * avoid having to parse the query string to retrieve the partition key).
+ *
+ * @return the routing key set through {@link #setRoutingKey} if such a key
+ * was set, {@code null} otherwise.
+ *
+ * @see Query#getRoutingKey
+ */
+ public ByteBuffer getRoutingKey() {
+ return routingKey;
+ }
+
+ /**
+ * Set the routing key for this query.
+ * <p>
+ * This method allows you to manually provide a routing key for this query. It
+ * is thus optional since the routing key is only a hint for token aware
+ * load balancing policies and is never mandatory.
+ * <p>
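For instance, an int partition key can be serialized in the four-byte big-endian form Cassandra uses and attached directly, much as the load balancing test later in this series does:

    // Serialize the int partition key 0 and attach it as the routing key.
    ByteBuffer routingKey = ByteBuffer.allocate(4);
    routingKey.putInt(0, 0);

    session.execute(new SimpleStatement("SELECT * FROM test WHERE k = 0")
                        .setRoutingKey(routingKey));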
+ * If the partition key for the query is composite, use the
+ * {@link #setRoutingKey(ByteBuffer...)} method instead to build the
+ * routing key.
+ *
+ * @param routingKey the raw (binary) value to use as routing key.
+ * @return this {@code SimpleStatement} object.
+ *
+ * @see Query#getRoutingKey
+ */
+ public SimpleStatement setRoutingKey(ByteBuffer routingKey) {
+ this.routingKey = routingKey;
+ return this;
+ }
+
+ /**
+ * Set the routing key for this query.
+ * <p>
+ * See {@link #setRoutingKey(ByteBuffer)} for more information. This + * method is a variant for when the query partition key is composite and + * thus the routing key must be built from multiple values. + * + * @param routingKeyComponents the raw (binary) values to compose to obtain + * the routing key. + * @return this {@code SimpleStatement} object. + * + * @see Query#getRoutingKey + */ + public SimpleStatement setRoutingKey(ByteBuffer... routingKeyComponents) { + this.routingKey = compose(routingKeyComponents); + return this; + } + + // TODO: we could find that a better place (but it's not expose so it doesn't matter too much) + static ByteBuffer compose(ByteBuffer... buffers) { + int totalLength = 0; + for (ByteBuffer bb : buffers) + totalLength += 2 + bb.remaining() + 1; + + ByteBuffer out = ByteBuffer.allocate(totalLength); + for (ByteBuffer bb : buffers) + { + putShortLength(out, bb.remaining()); + out.put(bb); + out.put((byte) 0); + } + out.flip(); + return out; + } + + private static void putShortLength(ByteBuffer bb, int length) + { + bb.put((byte) ((length >> 8) & 0xFF)); + bb.put((byte) (length & 0xFF)); + } } diff --git a/driver-core/src/main/java/com/datastax/driver/core/Token.java b/driver-core/src/main/java/com/datastax/driver/core/Token.java new file mode 100644 index 00000000000..1b4e253941b --- /dev/null +++ b/driver-core/src/main/java/com/datastax/driver/core/Token.java @@ -0,0 +1,148 @@ +package com.datastax.driver.core; + +import java.math.BigInteger; +import java.nio.ByteBuffer; + +import org.apache.cassandra.utils.ByteBufferUtil; +import org.apache.cassandra.utils.FBUtilities; +import org.apache.cassandra.utils.MurmurHash; + +// We really only use the generic for type safety and it's not an interface because we don't want to expose +// Note: we may want to expose this later if people use custom partitioner and want to be able to extend that. This is way premature however. +abstract class Token> implements Comparable { + + public static Token.Factory getFactory(String partitionerName) { + if (partitionerName.endsWith("Murmur3Partitioner")) + return M3PToken.FACTORY; + else if (partitionerName.endsWith("RandomPartitioner")) + return RPToken.FACTORY; + else if (partitionerName.endsWith("OrderedPartitioner")) + return OPPToken.FACTORY; + else + return null; + } + + public interface Factory> { + public T fromString(String tokenStr); + public T hash(ByteBuffer partitionKey); + } + + // Murmur3Partitioner tokens + static class M3PToken extends Token { + private final long value; + + public static final Factory FACTORY = new Factory() { + public M3PToken fromString(String tokenStr) { + return new M3PToken(Long.parseLong(tokenStr)); + } + + public M3PToken hash(ByteBuffer partitionKey) { + long v = MurmurHash.hash3_x64_128(partitionKey, partitionKey.position(), partitionKey.remaining(), 0)[0]; + return new M3PToken(v == Long.MIN_VALUE ? Long.MAX_VALUE : v); + } + }; + + private M3PToken(long value) { + this.value = value; + } + + @Override + public int compareTo(M3PToken other) { + long otherValue = other.value; + return value < otherValue ? -1 : (value == otherValue) ? 
0 : 1; + } + + @Override + public boolean equals(Object obj) { + if (this == obj) + return true; + if (obj == null || this.getClass() != obj.getClass()) + return false; + + return value == ((M3PToken)obj).value; + } + + @Override + public int hashCode() { + return (int)(value^(value>>>32)); + } + } + + // OPPartitioner tokens + static class OPPToken extends Token { + private final ByteBuffer value; + + public static final Factory FACTORY = new Factory() { + public OPPToken fromString(String tokenStr) { + return new OPPToken(ByteBufferUtil.bytes(tokenStr)); + } + + public OPPToken hash(ByteBuffer partitionKey) { + return new OPPToken(partitionKey); + } + }; + + private OPPToken(ByteBuffer value) { + this.value = value; + } + + @Override + public int compareTo(OPPToken other) { + return value.compareTo(other.value); + } + + @Override + public boolean equals(Object obj) { + if (this == obj) + return true; + if (obj == null || this.getClass() != obj.getClass()) + return false; + + return value.equals(((OPPToken)obj).value); + } + + @Override + public int hashCode() { + return value.hashCode(); + } + } + + // RandomPartitioner tokens + static class RPToken extends Token { + private final BigInteger value; + + public static final Factory FACTORY = new Factory() { + public RPToken fromString(String tokenStr) { + return new RPToken(new BigInteger(tokenStr)); + } + + public RPToken hash(ByteBuffer partitionKey) { + return new RPToken(FBUtilities.hashToBigInteger(partitionKey)); + } + }; + + private RPToken(BigInteger value) { + this.value = value; + } + + @Override + public int compareTo(RPToken other) { + return value.compareTo(other.value); + } + + @Override + public boolean equals(Object obj) { + if (this == obj) + return true; + if (obj == null || this.getClass() != obj.getClass()) + return false; + + return value.equals(((RPToken)obj).value); + } + + @Override + public int hashCode() { + return value.hashCode(); + } + } +} diff --git a/driver-core/src/main/java/com/datastax/driver/core/policies/LoadBalancingPolicy.java b/driver-core/src/main/java/com/datastax/driver/core/policies/LoadBalancingPolicy.java index e0abc4de127..5b9709822c5 100644 --- a/driver-core/src/main/java/com/datastax/driver/core/policies/LoadBalancingPolicy.java +++ b/driver-core/src/main/java/com/datastax/driver/core/policies/LoadBalancingPolicy.java @@ -1,5 +1,6 @@ package com.datastax.driver.core.policies; +import java.nio.ByteBuffer; import java.util.*; import java.util.concurrent.*; import java.util.concurrent.atomic.AtomicInteger; @@ -67,10 +68,11 @@ public interface Factory { /** * Creates a new LoadBalancingPolicy instance over the provided (initial) {@code hosts}. * + * @param cluster the {@code Cluster} instance for which the policy is created. * @param hosts the initial hosts to use. * @return the newly created {@link LoadBalancingPolicy} instance. */ - public LoadBalancingPolicy create(Collection hosts); + public LoadBalancingPolicy create(Cluster cluster, Collection hosts); } /** @@ -173,7 +175,7 @@ public static class Factory implements LoadBalancingPolicy.Factory { private Factory() {} - public LoadBalancingPolicy create(Collection hosts) { + public LoadBalancingPolicy create(Cluster cluster, Collection hosts) { return new RoundRobin(hosts); } } @@ -360,10 +362,10 @@ private Factory(String localDc, int usedHostsPerRemoteDc) { /** * Creates a new DCAwareRoundRobin policy factory given the name of * the local datacenter. - * + *
* The name of the local datacenter provided must be the local * datacenter name as known by Cassandra. - * + *
* The policy created by the returned factory will ignore all * remote hosts. In other words, this is equivalent to * {@code create(localDc, 0)}. @@ -380,7 +382,7 @@ public static Factory create(String localDc) { * Creates a new DCAwareRoundRobin policy factory given the name of * the local datacenter that use the provided number of host per * remote datacenter as failover for the local hosts. - * + *
* The name of the local datacenter provided must be the local
* datacenter name as known by Cassandra.
*
@@ -399,9 +401,124 @@ public static Factory create(String localDc, int usedHostsPerRemoteDc) { return new Factory(localDc, usedHostsPerRemoteDc); }
- public LoadBalancingPolicy create(Collection hosts) {
+ public LoadBalancingPolicy create(Cluster cluster, Collection hosts) {
 return new DCAwareRoundRobin(hosts, localDc, usedHostsPerRemoteDc);
 }
 }
 }
+
+ /**
+ * A wrapper load balancing policy that adds token awareness to a child policy.
+ * <p>
+ * This policy encapsulates another policy. The resulting policy works in
+ * the following way:
+ * <ul>
+ *   <li>the {@code distance} method is inherited from the child policy.</li>
+ *   <li>the iterator returned by the {@code newQueryPlan} method will first
+ *   return the {@code LOCAL} replicas for the query (based on {@link Query#getRoutingKey})
+ *   if possible (i.e. if the query {@code getRoutingKey} method
+ *   doesn't return {@code null} and if {@link ClusterMetadata#getReplicas}
+ *   returns a non-empty set of replicas for that partition key). If no
+ *   local replica can be either found or successfully contacted, the rest
+ *   of the query plan will fall back to the child policy's plan.</li>
+ * </ul>
+ * <p>
+ * Do note that only replica for which the child policy {@code distance} + * method returns {@code HostDistance.LOCAL} will be considered having + * priority. For example, if you wrap {@link DCAwareRoundRobin} with this + * token aware policy, replicas from remote data centers may only be + * returned after all the host of the local data center. + */ + public static class TokenAware implements LoadBalancingPolicy { + + private final ClusterMetadata clusterMetadata; + private final LoadBalancingPolicy childPolicy; + + private TokenAware(Cluster cluster, LoadBalancingPolicy childPolicy) { + this.clusterMetadata = cluster.getMetadata(); + this.childPolicy = childPolicy; + } + + public HostDistance distance(Host host) { + return childPolicy.distance(host); + } + + public Iterator newQueryPlan(final Query query) { + + ByteBuffer partitionKey = query.getRoutingKey(); + if (partitionKey == null) + return childPolicy.newQueryPlan(query); + + final Set replicas = clusterMetadata.getReplicas(partitionKey); + if (replicas.isEmpty()) + return childPolicy.newQueryPlan(query); + + return new AbstractIterator() { + + private final Iterator iter = replicas.iterator(); + private Iterator childIterator; + + protected Host computeNext() { + while (iter.hasNext()) { + Host host = iter.next(); + if (host.getMonitor().isUp() && childPolicy.distance(host) == HostDistance.LOCAL) + return host; + } + + if (childIterator == null) + childIterator = childPolicy.newQueryPlan(query); + + while (childIterator.hasNext()) { + Host host = childIterator.next(); + // Skip it if it was already a local replica + if (!replicas.contains(host) || childPolicy.distance(host) != HostDistance.LOCAL) + return host; + } + return endOfData(); + } + }; + } + + public void onUp(Host host) { + childPolicy.onUp(host); + } + + public void onDown(Host host) { + childPolicy.onDown(host); + } + + public void onAdd(Host host) { + childPolicy.onAdd(host); + } + + public void onRemove(Host host) { + childPolicy.onRemove(host); + } + + public static class Factory implements LoadBalancingPolicy.Factory { + + private final LoadBalancingPolicy.Factory childFactory; + + private Factory(LoadBalancingPolicy.Factory childFactory) { + this.childFactory = childFactory; + } + + /** + * Creates a new {@code TokenAware} policy factory that wraps + * policies build by the provided child load balancing policy + * factory. + * + * @param childFactory the factory for the load balancing policy to + * wrap with token awareness. + * @return the newly created factory. 
+ */ + public static Factory create(LoadBalancingPolicy.Factory childFactory) { + return new Factory(childFactory); + } + + public LoadBalancingPolicy create(Cluster cluster, Collection hosts) { + return new TokenAware(cluster, childFactory.create(cluster, hosts)); + } + } + } } diff --git a/driver-core/src/test/java/com/datastax/driver/core/CCMBridge.java b/driver-core/src/test/java/com/datastax/driver/core/CCMBridge.java index d5558134f2d..f3cee040b0d 100644 --- a/driver-core/src/test/java/com/datastax/driver/core/CCMBridge.java +++ b/driver-core/src/test/java/com/datastax/driver/core/CCMBridge.java @@ -1,6 +1,7 @@ package com.datastax.driver.core; import java.io.*; +import java.net.InetAddress; import java.util.*; import org.junit.AfterClass; @@ -141,6 +142,8 @@ public static void createCluster() { session = cluster.connect(); } catch (NoHostAvailableException e) { erroredOut = true; + for (Map.Entry entry : e.getErrors().entrySet()) + logger.info("Error connecting to " + entry.getKey() + ": " + entry.getValue()); throw new RuntimeException(e); } } diff --git a/driver-core/src/test/java/com/datastax/driver/core/PreparedStatementTest.java b/driver-core/src/test/java/com/datastax/driver/core/PreparedStatementTest.java index b79e0f13b62..8bf9ff48a52 100644 --- a/driver-core/src/test/java/com/datastax/driver/core/PreparedStatementTest.java +++ b/driver-core/src/test/java/com/datastax/driver/core/PreparedStatementTest.java @@ -162,4 +162,11 @@ public void prepareMapTest() throws NoHostAvailableException { } } } + + //@Test + //public void prepareAppendListTest() throws NoHostAvailableException { + // PreparedStatement ps = session.prepare(String.format("UPDATE %s SET c_list_int = c_list_int + ? WHERE k = 'prepare_append_list'", ALL_LIST_TABLE)); + // BoundStatement bs = ps.newBoundStatement(); + // session.execute(bs.setList("c_list_int", Arrays.asList(1, 2, 3))); + //} } From 6f18f4c0e558848f0f9f806223d70c588143922b Mon Sep 17 00:00:00 2001 From: Sylvain Lebresne Date: Wed, 14 Nov 2012 08:40:03 +0100 Subject: [PATCH 072/719] Add basic load balancing test --- .../core/AbstractReconnectionHandler.java | 6 +- .../com/datastax/driver/core/Cluster.java | 8 +- .../driver/core/ControlConnection.java | 24 +++ .../com/datastax/driver/core/QueryTrace.java | 5 +- .../com/datastax/driver/core/ResultSet.java | 35 ++-- .../com/datastax/driver/core/CCMBridge.java | 80 ++++++++- .../driver/core/LoadBalancingPolicyTest.java | 163 ++++++++++++++++++ .../driver/core/PreparedStatementTest.java | 2 - .../com/datastax/driver/core/TestUtils.java | 33 ++++ pom.xml | 5 +- 10 files changed, 328 insertions(+), 33 deletions(-) create mode 100644 driver-core/src/test/java/com/datastax/driver/core/LoadBalancingPolicyTest.java diff --git a/driver-core/src/main/java/com/datastax/driver/core/AbstractReconnectionHandler.java b/driver-core/src/main/java/com/datastax/driver/core/AbstractReconnectionHandler.java index 8cc6b1aac5d..d43cc8ae636 100644 --- a/driver-core/src/main/java/com/datastax/driver/core/AbstractReconnectionHandler.java +++ b/driver-core/src/main/java/com/datastax/driver/core/AbstractReconnectionHandler.java @@ -36,9 +36,9 @@ public AbstractReconnectionHandler(ScheduledExecutorService executor, Reconnecti protected boolean onAuthenticationException(AuthenticationException e, long nextDelayMs) { return false; } public void start() { - executor.schedule(this, policy.nextDelayMs(), TimeUnit.MILLISECONDS); - - localFuture = executor.schedule(this, policy.nextDelayMs(), TimeUnit.MILLISECONDS); + long firstDelay = 
policy.nextDelayMs(); + logger.debug("First reconnection scheduled in {}ms", firstDelay); + localFuture = executor.schedule(this, firstDelay, TimeUnit.MILLISECONDS); // If there a previous task, cancel it, so only one reconnection handler runs. while (true) { diff --git a/driver-core/src/main/java/com/datastax/driver/core/Cluster.java b/driver-core/src/main/java/com/datastax/driver/core/Cluster.java index a30460fe6e2..6a6837c3de5 100644 --- a/driver-core/src/main/java/com/datastax/driver/core/Cluster.java +++ b/driver-core/src/main/java/com/datastax/driver/core/Cluster.java @@ -667,13 +667,13 @@ public void handle(Message.Response response) { final Event event = ((EventMessage)response).event; - logger.trace("Received event {}, scheduling delivery", response); + logger.debug("Received event {}, scheduling delivery", response); // When handle is called, the current thread is a network I/O thread, and we don't want to block // it (typically addHost() will create the connection pool to the new node, which can take time) - // Besides, events are usually sent a bit too early (since they're - // triggered once gossip is up, but that before the client-side - // server is up) so adds a second delay. + // Besides, events are usually sent a bit too early (since they're triggered once gossip is up, + // but that before the client-side server is up) so adds a 1 second delay. + // TODO: this delay is honestly quite random. We should do something on the C* side to fix that. scheduledTasksExecutor.schedule(new Runnable() { public void run() { switch (event.type) { diff --git a/driver-core/src/main/java/com/datastax/driver/core/ControlConnection.java b/driver-core/src/main/java/com/datastax/driver/core/ControlConnection.java index a984ebfcf07..3320200ae32 100644 --- a/driver-core/src/main/java/com/datastax/driver/core/ControlConnection.java +++ b/driver-core/src/main/java/com/datastax/driver/core/ControlConnection.java @@ -199,6 +199,29 @@ static void refreshSchema(Connection connection, String keyspace, String table, cluster.metadata.rebuildSchema(keyspace, table, ksFuture == null ? null : ksFuture.get(), cfFuture.get(), colsFuture.get()); } + private void refreshNodeListAndTokenMap() { + Connection c = connectionRef.get(); + // At startup, when we add the initial nodes, this will be null, which is ok + if (c == null) + return; + + logger.debug(String.format("[Control connection] Refreshing node list and token map")); + try { + refreshNodeListAndTokenMap(connectionRef.get()); + } catch (ConnectionException e) { + logger.debug("[Control connection] Connection error while refeshing node list and token map ({})", e.getMessage()); + reconnect(); + } catch (ExecutionException e) { + logger.error("[Control connection] Unexpected error while refeshing node list and token map", e); + reconnect(); + } catch (BusyConnectionException e) { + logger.debug("[Control connection] Connection is busy, reconnecting"); + reconnect(); + } catch (InterruptedException e) { + // Interrupted? Then moving on. 
+ } + } + private void refreshNodeListAndTokenMap(Connection connection) throws ConnectionException, BusyConnectionException, ExecutionException, InterruptedException { // Make sure we're up to date on nodes and tokens @@ -284,6 +307,7 @@ public void onDown(Host host) { public void onAdd(Host host) { balancingPolicy.onAdd(host); + refreshNodeListAndTokenMap(); } public void onRemove(Host host) { diff --git a/driver-core/src/main/java/com/datastax/driver/core/QueryTrace.java b/driver-core/src/main/java/com/datastax/driver/core/QueryTrace.java index 5044b07ea69..6ea831fa694 100644 --- a/driver-core/src/main/java/com/datastax/driver/core/QueryTrace.java +++ b/driver-core/src/main/java/com/datastax/driver/core/QueryTrace.java @@ -139,8 +139,9 @@ private void maybeFetchTrace() { try { // If by the time we grab the lock we've fetch the events, it's // fine, move on. Otherwise, fetch them. - if (duration == Integer.MIN_VALUE) + if (duration == Integer.MIN_VALUE) { doFetchTrace(); + } } finally { fetchLock.unlock(); } @@ -159,7 +160,7 @@ private void doFetchTrace() { coordinator = sessRow.getInet("coordinator"); if (!sessRow.isNull("parameters")) parameters = Collections.unmodifiableMap(sessRow.getMap("parameters", String.class, String.class)); - startedAt = sessRow.getLong("started_at"); + startedAt = sessRow.getDate("started_at").getTime(); } events = new ArrayList(); diff --git a/driver-core/src/main/java/com/datastax/driver/core/ResultSet.java b/driver-core/src/main/java/com/datastax/driver/core/ResultSet.java index a0065f11442..158b99eb014 100644 --- a/driver-core/src/main/java/com/datastax/driver/core/ResultSet.java +++ b/driver-core/src/main/java/com/datastax/driver/core/ResultSet.java @@ -1,6 +1,6 @@ package com.datastax.driver.core; -import java.net.InetSocketAddress; +import java.net.InetAddress; import java.nio.ByteBuffer; import java.util.*; import java.util.concurrent.ExecutionException; @@ -29,47 +29,56 @@ public class ResultSet implements Iterable { private static final Logger logger = LoggerFactory.getLogger(ResultSet.class); private static final Queue> EMPTY_QUEUE = new ArrayDeque(0); - private static final ResultSet EMPTY = new ResultSet(ColumnDefinitions.EMPTY, EMPTY_QUEUE, null); + private static final ResultSet EMPTY = new ResultSet(ColumnDefinitions.EMPTY, EMPTY_QUEUE, null, null); private final ColumnDefinitions metadata; private final Queue> rows; private final QueryTrace trace; - private ResultSet(ColumnDefinitions metadata, Queue> rows, QueryTrace trace) { + private final InetAddress queriedHost; + + private ResultSet(ColumnDefinitions metadata, Queue> rows, QueryTrace trace, InetAddress queriedHost) { this.metadata = metadata; this.rows = rows; this.trace = trace; + this.queriedHost = queriedHost; } - private static ResultSet fromMessage(ResultMessage msg, Session.Manager session) { + private static ResultSet fromMessage(ResultMessage msg, Session.Manager session, InetAddress queriedHost) { UUID tracingId = msg.getTracingId(); QueryTrace trace = tracingId == null ? 
null : new QueryTrace(tracingId, session); switch (msg.kind) { case VOID: - return empty(trace); + return empty(trace, queriedHost); case ROWS: ResultMessage.Rows r = (ResultMessage.Rows)msg; ColumnDefinitions.Definition[] defs = new ColumnDefinitions.Definition[r.result.metadata.names.size()]; for (int i = 0; i < defs.length; i++) defs[i] = ColumnDefinitions.Definition.fromTransportSpecification(r.result.metadata.names.get(i)); - return new ResultSet(new ColumnDefinitions(defs), new ArrayDeque(r.result.rows), trace); + return new ResultSet(new ColumnDefinitions(defs), new ArrayDeque(r.result.rows), trace, queriedHost); case SET_KEYSPACE: case SCHEMA_CHANGE: - return empty(trace); + return empty(trace, queriedHost); case PREPARED: throw new RuntimeException("Prepared statement received when a ResultSet was expected"); default: logger.error("Received unknow result type '{}'; returning empty result set", msg.kind); - return empty(trace); + return empty(trace, queriedHost); } } - private static ResultSet empty(QueryTrace trace) { - return trace == null ? EMPTY : new ResultSet(ColumnDefinitions.EMPTY, EMPTY_QUEUE, trace); + private static ResultSet empty(QueryTrace trace, InetAddress queriedHost) { + return trace == null ? EMPTY : new ResultSet(ColumnDefinitions.EMPTY, EMPTY_QUEUE, trace, queriedHost); + } + + // Note: we don't really want to expose this publicly, partly because we don't return it with empty result set. + // But for now this is convenient for tests. We'll see later if we want another solution. + InetAddress getQueriedHost() { + return queriedHost; } /** @@ -192,11 +201,11 @@ public void onSet(Connection connection, Message.Response response) { case SET_KEYSPACE: // propagate the keyspace change to other connections session.poolsState.setKeyspace(((ResultMessage.SetKeyspace)rm).keyspace); - set(ResultSet.fromMessage(rm, session)); + set(ResultSet.fromMessage(rm, session, connection.address)); break; case SCHEMA_CHANGE: ResultMessage.SchemaChange scc = (ResultMessage.SchemaChange)rm; - ResultSet rs = ResultSet.fromMessage(rm, session); + ResultSet rs = ResultSet.fromMessage(rm, session, connection.address); switch (scc.change) { case CREATED: if (scc.columnFamily.isEmpty()) { @@ -225,7 +234,7 @@ public void onSet(Connection connection, Message.Response response) { } break; default: - set(ResultSet.fromMessage(rm, session)); + set(ResultSet.fromMessage(rm, session, connection.address)); break; } break; diff --git a/driver-core/src/test/java/com/datastax/driver/core/CCMBridge.java b/driver-core/src/test/java/com/datastax/driver/core/CCMBridge.java index f3cee040b0d..05fbc863329 100644 --- a/driver-core/src/test/java/com/datastax/driver/core/CCMBridge.java +++ b/driver-core/src/test/java/com/datastax/driver/core/CCMBridge.java @@ -23,7 +23,7 @@ public class CCMBridge { static { Logger rootLogger = Logger.getRootLogger(); rootLogger.setLevel(Level.INFO); - rootLogger.addAppender(new ConsoleAppender(new PatternLayout("%-5p [%t]: %m%n"))); + rootLogger.addAppender(new ConsoleAppender(new PatternLayout("%d{HH:mm:ss,SSS} %-5p [%t]: %m%n"))); } private static final Logger logger = Logger.getLogger(CCMBridge.class); @@ -54,19 +54,29 @@ private CCMBridge() public static CCMBridge create(String name) { CCMBridge bridge = new CCMBridge(); bridge.execute("ccm create %s -b %s", name, CASSANDRA_VERSION); - // Small sleep, otherwise the cluster is not always available because ccm create don't wait for the client server to be up - //try { Thread.sleep(500); } catch (InterruptedException e) {} 
return bridge; } public static CCMBridge create(String name, int nbNodes) { CCMBridge bridge = new CCMBridge(); bridge.execute("ccm create %s -n %d -s -b %s", name, nbNodes, CASSANDRA_VERSION); - // See above - //try { Thread.sleep(500); } catch (InterruptedException e) {} return bridge; } + public static CCMBridge create(String name, int nbNodesDC1, int nbNodesDC2) { + CCMBridge bridge = new CCMBridge(); + bridge.execute("ccm create %s -n %d:%d -s -b %s", name, nbNodesDC1, nbNodesDC2, CASSANDRA_VERSION); + return bridge; + } + + public static CCMBridge.CCMCluster buildCluster(int nbNodes, Cluster.Builder builder) { + return CCMCluster.create(nbNodes, builder); + } + + public static CCMBridge.CCMCluster buildCluster(int nbNodesDC1, int nbNodesDC2, Cluster.Builder builder) { + return CCMCluster.create(nbNodesDC1, nbNodesDC2, builder); + } + public void start() { execute("ccm start"); } @@ -89,7 +99,8 @@ public void remove() { } public void bootstrapNode(int n) { - execute("ccm add node%d -i 127.0.0.%d -s; ccm start", n, n); + execute("ccm add node%d -i 127.0.0.%d -b", n, n); + execute("ccm node%d start", n); } private void execute(String command, Object... args) { @@ -121,6 +132,7 @@ private void execute(String command, Object... args) { } } + // One cluster for the whole test class public static abstract class PerClassSingleNodeCluster { protected static CCMBridge cassandraCluster; @@ -194,4 +206,60 @@ public void maybeCreateSchema() throws NoHostAvailableException { } } } + + public static class CCMCluster { + + public final Cluster cluster; + public final Session session; + + public final CCMBridge bridge; + + private boolean erroredOut; + + public static CCMCluster create(int nbNodes, Cluster.Builder builder) { + if (nbNodes == 0) + throw new IllegalArgumentException(); + + return new CCMCluster(CCMBridge.create("test", nbNodes), builder); + } + + public static CCMCluster create(int nbNodesDC1, int nbNodesDC2, Cluster.Builder builder) { + if (nbNodesDC1 == 0) + throw new IllegalArgumentException(); + + return new CCMCluster(CCMBridge.create("test", nbNodesDC1, nbNodesDC2), builder); + } + + private CCMCluster(CCMBridge bridge, Cluster.Builder builder) { + this.bridge = bridge; + try { + this.cluster = builder.addContactPoints("127.0.0.1").build(); + this.session = cluster.connect(); + + } catch (NoHostAvailableException e) { + for (Map.Entry entry : e.getErrors().entrySet()) + logger.info("Error connecting to " + entry.getKey() + ": " + entry.getValue()); + throw new RuntimeException(e); + } + } + + public void errorOut() { + erroredOut = true; + } + + public void discard() { + if (cluster != null) + cluster.shutdown(); + + if (bridge == null) { + logger.error("No cluster to discard"); + } else if (erroredOut) { + bridge.stop(); + logger.info("Error during tests, kept C* logs in " + bridge.ccmDir); + } else { + bridge.remove(); + bridge.ccmDir.delete(); + } + } + } } diff --git a/driver-core/src/test/java/com/datastax/driver/core/LoadBalancingPolicyTest.java b/driver-core/src/test/java/com/datastax/driver/core/LoadBalancingPolicyTest.java new file mode 100644 index 00000000000..b4d31784d99 --- /dev/null +++ b/driver-core/src/test/java/com/datastax/driver/core/LoadBalancingPolicyTest.java @@ -0,0 +1,163 @@ +package com.datastax.driver.core; + +import java.net.InetAddress; +import java.nio.ByteBuffer; +import java.util.*; + +import org.junit.Test; +import static org.junit.Assert.*; + +import com.datastax.driver.core.policies.*; +import com.datastax.driver.core.exceptions.*; +import static 
com.datastax.driver.core.TestUtils.*; + +public class LoadBalancingPolicyTest { + + private static final String TABLE = "test"; + + private Map coordinators = new HashMap(); + + private void createSchema(Session session) throws NoHostAvailableException { + session.execute(String.format(CREATE_KEYSPACE_SIMPLE_FORMAT, SIMPLE_KEYSPACE, 1)); + session.execute("USE " + SIMPLE_KEYSPACE); + session.execute(String.format("CREATE TABLE %s (k int PRIMARY KEY, i int)", TABLE)); + + // Let the schema propagate (TODO: add a schema agreement check) + try { Thread.sleep(300); } catch (Exception e) {} + } + + private void createMultiDCSchema(Session session) throws NoHostAvailableException { + + session.execute(String.format(CREATE_KEYSPACE_GENERIC_FORMAT, SIMPLE_KEYSPACE, "NetworkTopologyStrategy", "'dc1' : 1, 'dc2' : 1")); + session.execute("USE " + SIMPLE_KEYSPACE); + session.execute(String.format("CREATE TABLE %s (k int PRIMARY KEY, i int)", TABLE)); + + // Let the schema propagate (TODO: add a schema agreement check) + try { Thread.sleep(300); } catch (Exception e) {} + } + + private void addCoordinator(ResultSet rs) { + InetAddress coordinator = rs.getQueriedHost(); + Integer n = coordinators.get(coordinator); + coordinators.put(coordinator, n == null ? 1 : n + 1); + } + + private void assertQueried(String host, int n) { + try { + Integer queried = coordinators.get(InetAddress.getByName(host)); + assertEquals("For " + host, n, queried == null ? 0 : queried); + } catch (Exception e) { + throw new RuntimeException(e); + } + } + + private void resetCoordinators() { + coordinators = new HashMap(); + } + + private void init(CCMBridge.CCMCluster c, int n) throws NoHostAvailableException { + // We don't use insert for our test because the ResultSet doesn't ship the queried host + // Also note that we don't use tracing because this would trigger requests that screw up the test + for (int i = 0; i < n; ++i) + c.session.execute(String.format("INSERT INTO %s(k, i) VALUES (0, 0)", TABLE)); + } + + private void query(CCMBridge.CCMCluster c, int n) throws NoHostAvailableException { + ByteBuffer routingKey = ByteBuffer.allocate(4); + routingKey.putInt(0, 0); + for (int i = 0; i < n; ++i) + addCoordinator(c.session.execute(new SimpleStatement(String.format("SELECT * FROM %s WHERE k = 0", TABLE)).setRoutingKey(routingKey))); + } + + @Test + public void roundRobinTest() throws Throwable { + + Cluster.Builder builder = new Cluster.Builder().withLoadBalancingPolicyFactory(LoadBalancingPolicy.RoundRobin.Factory.INSTANCE); + CCMBridge.CCMCluster c = CCMBridge.buildCluster(2, builder); + createSchema(c.session); + try { + + init(c, 12); + query(c, 12); + + assertQueried("127.0.0.1", 6); + assertQueried("127.0.0.2", 6); + + resetCoordinators(); + c.bridge.bootstrapNode(3); + waitFor("127.0.0.3", c.cluster, 10); + + query(c, 12); + + assertQueried("127.0.0.1", 4); + assertQueried("127.0.0.2", 4); + assertQueried("127.0.0.3", 4); + + } catch (Throwable e) { + c.errorOut(); + throw e; + } finally { + c.discard(); + } + } + + @Test + public void DCAwareRoundRobinTest() throws Throwable { + + Cluster.Builder builder = new Cluster.Builder().withLoadBalancingPolicyFactory(LoadBalancingPolicy.DCAwareRoundRobin.Factory.create("dc2")); + CCMBridge.CCMCluster c = CCMBridge.buildCluster(2, 2, builder); + createMultiDCSchema(c.session); + try { + + init(c, 12); + query(c, 12); + + assertQueried("127.0.0.1", 0); + assertQueried("127.0.0.2", 0); + assertQueried("127.0.0.3", 6); + assertQueried("127.0.0.4", 6); + + } catch (Throwable e) { + 
c.errorOut(); + throw e; + } finally { + c.discard(); + } + } + + @Test + public void tokenAwareTest() throws Throwable { + + Cluster.Builder builder = new Cluster.Builder().withLoadBalancingPolicyFactory(LoadBalancingPolicy.TokenAware.Factory.create(LoadBalancingPolicy.RoundRobin.Factory.INSTANCE)); + CCMBridge.CCMCluster c = CCMBridge.buildCluster(2, builder); + createSchema(c.session); + try { + + init(c, 12); + query(c, 12); + + // Not the best test ever; we should use OPP and check that we hit the + // right nodes. But since M3P is hard-coded for now, let's just check + // that we hit only one node. + assertQueried("127.0.0.1", 0); + assertQueried("127.0.0.2", 12); + + resetCoordinators(); + c.bridge.bootstrapNode(3); + waitFor("127.0.0.3", c.cluster, 10); + + query(c, 12); + + // We should still be hitting only one node + assertQueried("127.0.0.1", 0); + assertQueried("127.0.0.2", 12); + assertQueried("127.0.0.3", 0); + + } catch (Throwable e) { + c.errorOut(); + throw e; + } finally { + c.discard(); + } + } +} diff --git a/driver-core/src/test/java/com/datastax/driver/core/PreparedStatementTest.java b/driver-core/src/test/java/com/datastax/driver/core/PreparedStatementTest.java index 8bf9ff48a52..bde9d9ee0c4 100644 --- a/driver-core/src/test/java/com/datastax/driver/core/PreparedStatementTest.java +++ b/driver-core/src/test/java/com/datastax/driver/core/PreparedStatementTest.java @@ -2,8 +2,6 @@ import java.util.*; -import org.junit.Before; -import org.junit.BeforeClass; import org.junit.Test; import static org.junit.Assert.*; diff --git a/driver-core/src/test/java/com/datastax/driver/core/TestUtils.java b/driver-core/src/test/java/com/datastax/driver/core/TestUtils.java index ab87195f414..1e604b109f5 100644 --- a/driver-core/src/test/java/com/datastax/driver/core/TestUtils.java +++ b/driver-core/src/test/java/com/datastax/driver/core/TestUtils.java @@ -11,6 +11,7 @@ public abstract class TestUtils { public static final String CREATE_KEYSPACE_SIMPLE_FORMAT = "CREATE KEYSPACE %s WITH replication = { 'class' : 'SimpleStrategy', 'replication_factor' : %d }"; + public static final String CREATE_KEYSPACE_GENERIC_FORMAT = "CREATE KEYSPACE %s WITH replication = { 'class' : '%s', %s }"; public static final String SIMPLE_KEYSPACE = "ks"; @@ -225,4 +226,36 @@ public static Object getFixedValue(final DataType type) { } throw new RuntimeException("Missing handling of " + type); } + + // Wait for a node to be up and running + // This is used because there is some delay between when a node has been + // added through ccm and when it's actually available for querying + public static void waitFor(String node, Cluster cluster, int maxTry) { + InetAddress address; + try { + address = InetAddress.getByName(node); + } catch (Exception e) { + // That's a problem but that's not *our* problem + return; + } + + ClusterMetadata metadata = cluster.getMetadata(); + for (int i = 0; i < maxTry; ++i) { + for (Host host : metadata.getAllHosts()) { + if (host.getAddress().equals(address) && host.getMonitor().isUp()) + return; + } + try { Thread.sleep(1000); } catch (Exception e) {} + } + + for (Host host : metadata.getAllHosts()) { + if (host.getAddress().equals(address)) { + if (host.getMonitor().isUp()) + return; + else + throw new IllegalStateException(node + " is part of the cluster but is not UP after " + maxTry + "s"); + } + } + throw new IllegalStateException(node + " is not part of the cluster after " + maxTry + "s"); + } } diff --git a/pom.xml b/pom.xml index 4a0348bdb75..f1c135a5b3a 100644 --- a/pom.xml +++ 
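The bootstrapNode/waitFor pair added above is the pattern the load balancing tests rely on when growing a cluster mid-test. A minimal sketch of that pattern, using only the CCMBridge/TestUtils API from the diffs above; the two-node setup and the round-robin policy factory are illustrative choices, not prescribed:

    // Sketch: grow a running two-node test cluster by one node, then wait
    // for the driver to actually see the new node before querying it.
    Cluster.Builder builder = new Cluster.Builder().withLoadBalancingPolicyFactory(LoadBalancingPolicy.RoundRobin.Factory.INSTANCE);
    CCMBridge.CCMCluster c = CCMBridge.buildCluster(2, builder);
    try {
        c.bridge.bootstrapNode(3);                     // runs: ccm add node3 -i 127.0.0.3 -b; ccm node3 start
        TestUtils.waitFor("127.0.0.3", c.cluster, 10); // polls cluster metadata up to ~10s until node3 is UP
        // ... exercise c.session against the now three-node cluster ...
    } catch (Throwable e) {
        c.errorOut(); // makes discard() keep the ccm logs for post-mortem
        throw e;
    } finally {
        c.discard();
    }

Note that discard() only removes the ccm cluster when errorOut() was never called; otherwise it stops the nodes and keeps their data directory around for debugging.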
b/pom.xml @@ -13,13 +13,12 @@ driver-jdbc driver-examples - + - org.apache.cassandra cassandra-all - 1.2.0-SNAPSHOT + 1.2.0-beta2 From 76ed3f6a458e11cef79856f1c161198c7dbc7f32 Mon Sep 17 00:00:00 2001 From: Sylvain Lebresne Date: Wed, 14 Nov 2012 11:34:01 +0100 Subject: [PATCH 073/719] refresh tokens on node move and better tests --- .../com/datastax/driver/core/Cluster.java | 3 ++ .../driver/core/ControlConnection.java | 2 +- .../driver/core/LoadBalancingPolicyTest.java | 35 +++++++++++++++---- .../com/datastax/driver/core/TestUtils.java | 13 +++++-- 4 files changed, 43 insertions(+), 10 deletions(-) diff --git a/driver-core/src/main/java/com/datastax/driver/core/Cluster.java b/driver-core/src/main/java/com/datastax/driver/core/Cluster.java index 6a6837c3de5..cd6d47e3f43 100644 --- a/driver-core/src/main/java/com/datastax/driver/core/Cluster.java +++ b/driver-core/src/main/java/com/datastax/driver/core/Cluster.java @@ -686,6 +686,9 @@ public void run() { case REMOVED_NODE: removeHost(metadata.getHost(tpc.node.getAddress())); break; + case MOVED_NODE: + controlConnection.refreshNodeListAndTokenMap(); + break; } break; case STATUS_CHANGE: diff --git a/driver-core/src/main/java/com/datastax/driver/core/ControlConnection.java b/driver-core/src/main/java/com/datastax/driver/core/ControlConnection.java index 3320200ae32..535bc8e196b 100644 --- a/driver-core/src/main/java/com/datastax/driver/core/ControlConnection.java +++ b/driver-core/src/main/java/com/datastax/driver/core/ControlConnection.java @@ -199,7 +199,7 @@ static void refreshSchema(Connection connection, String keyspace, String table, cluster.metadata.rebuildSchema(keyspace, table, ksFuture == null ? null : ksFuture.get(), cfFuture.get(), colsFuture.get()); } - private void refreshNodeListAndTokenMap() { + public void refreshNodeListAndTokenMap() { Connection c = connectionRef.get(); // At startup, when we add the initial nodes, this will be null, which is ok if (c == null) diff --git a/driver-core/src/test/java/com/datastax/driver/core/LoadBalancingPolicyTest.java b/driver-core/src/test/java/com/datastax/driver/core/LoadBalancingPolicyTest.java index b4d31784d99..3d7055f3a4e 100644 --- a/driver-core/src/test/java/com/datastax/driver/core/LoadBalancingPolicyTest.java +++ b/driver-core/src/test/java/com/datastax/driver/core/LoadBalancingPolicyTest.java @@ -16,6 +16,7 @@ public class LoadBalancingPolicyTest { private static final String TABLE = "test"; private Map coordinators = new HashMap(); + private PreparedStatement prepared; private void createSchema(Session session) throws NoHostAvailableException { session.execute(String.format(CREATE_KEYSPACE_SIMPLE_FORMAT, SIMPLE_KEYSPACE, 1)); @@ -60,13 +61,25 @@ private void init(CCMBridge.CCMCluster c, int n) throws NoHostAvailableException // Also note that we don't use tracing because this would trigger requests that screw up the test for (int i = 0; i < n; ++i) c.session.execute(String.format("INSERT INTO %s(k, i) VALUES (0, 0)", TABLE)); + + prepared = c.session.prepare("SELECT * FROM " + TABLE + " WHERE k = ?"); } private void query(CCMBridge.CCMCluster c, int n) throws NoHostAvailableException { - ByteBuffer routingKey = ByteBuffer.allocate(4); - routingKey.putInt(0, 0); - for (int i = 0; i < n; ++i) - addCoordinator(c.session.execute(new SimpleStatement(String.format("SELECT * FROM %s WHERE k = 0", TABLE)).setRoutingKey(routingKey))); + query(c, n, false); + } + + private void query(CCMBridge.CCMCluster c, int n, boolean usePrepared) throws NoHostAvailableException { + if 
(usePrepared) { + BoundStatement bs = prepared.bind(0); + for (int i = 0; i < n; ++i) + addCoordinator(c.session.execute(bs)); + } else { + ByteBuffer routingKey = ByteBuffer.allocate(4); + routingKey.putInt(0, 0); + for (int i = 0; i < n; ++i) + addCoordinator(c.session.execute(new SimpleStatement(String.format("SELECT * FROM %s WHERE k = 0", TABLE)).setRoutingKey(routingKey))); + } } @Test @@ -85,7 +98,7 @@ public void roundRobinTest() throws Throwable { resetCoordinators(); c.bridge.bootstrapNode(3); - waitFor("127.0.0.3", c.cluster, 10); + waitFor("127.0.0.3", c.cluster, 20); query(c, 12); @@ -127,7 +140,15 @@ public void DCAwareRoundRobinTest() throws Throwable { @Test public void tokenAwareTest() throws Throwable { + tokenAwareTest(false); + } + @Test + public void tokenAwarePreparedTest() throws Throwable { + tokenAwareTest(true); + } + + public void tokenAwareTest(boolean usePrepared) throws Throwable { Cluster.Builder builder = new Cluster.Builder().withLoadBalancingPolicyFactory(LoadBalancingPolicy.TokenAware.Factory.create(LoadBalancingPolicy.RoundRobin.Factory.INSTANCE)); CCMBridge.CCMCluster c = CCMBridge.buildCluster(2, builder); createSchema(c.session); @@ -144,9 +165,9 @@ public void tokenAwareTest() throws Throwable { resetCoordinators(); c.bridge.bootstrapNode(3); - waitFor("127.0.0.3", c.cluster, 10); + waitFor("127.0.0.3", c.cluster, 20); - query(c, 12); + query(c, 12, usePrepared); // We should still be hitting only one node assertQueried("127.0.0.1", 0); diff --git a/driver-core/src/test/java/com/datastax/driver/core/TestUtils.java b/driver-core/src/test/java/com/datastax/driver/core/TestUtils.java index 1e604b109f5..64f736aa40b 100644 --- a/driver-core/src/test/java/com/datastax/driver/core/TestUtils.java +++ b/driver-core/src/test/java/com/datastax/driver/core/TestUtils.java @@ -5,11 +5,16 @@ import java.nio.ByteBuffer; import java.util.*; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + /** * A number of static fields/methods handy for tests. 
*/ public abstract class TestUtils { + private static final Logger logger = LoggerFactory.getLogger(TestUtils.class); + public static final String CREATE_KEYSPACE_SIMPLE_FORMAT = "CREATE KEYSPACE %s WITH replication = { 'class' : 'SimpleStrategy', 'replication_factor' : %d }"; public static final String CREATE_KEYSPACE_GENERIC_FORMAT = "CREATE KEYSPACE %s WITH replication = { 'class' : '%s', %s }"; @@ -250,12 +255,16 @@ public static void waitFor(String node, Cluster cluster, int maxTry) { for (Host host : metadata.getAllHosts()) { if (host.getAddress().equals(address)) { - if (host.getMonitor().isUp()) + if (host.getMonitor().isUp()) { return; - else + } else { + // logging it because this gives us the timestamp of when this happens + logger.info(node + " is part of the cluster but is not UP after " + maxTry + "s"); throw new IllegalStateException(node + " is part of the cluster but is not UP after " + maxTry + "s"); + } } } + logger.info(node + " is not part of the cluster after " + maxTry + "s"); throw new IllegalStateException(node + " is not part of the cluster after " + maxTry + "s"); } } From 6e56f576f76303b7ff3cfe55807cd074ebb3db88 Mon Sep 17 00:00:00 2001 From: Sylvain Lebresne Date: Wed, 14 Nov 2012 13:06:22 +0100 Subject: [PATCH 074/719] Wait for schema agreement when schema is modified --- .../com/datastax/driver/core/Cluster.java | 3 ++ .../driver/core/ControlConnection.java | 44 +++++++++++++++++++ .../java/com/datastax/driver/core/Host.java | 2 +- .../driver/core/LoadBalancingPolicyTest.java | 6 --- 4 files changed, 48 insertions(+), 7 deletions(-) diff --git a/driver-core/src/main/java/com/datastax/driver/core/Cluster.java b/driver-core/src/main/java/com/datastax/driver/core/Cluster.java index cd6d47e3f43..389cc593e5c 100644 --- a/driver-core/src/main/java/com/datastax/driver/core/Cluster.java +++ b/driver-core/src/main/java/com/datastax/driver/core/Cluster.java @@ -642,9 +642,12 @@ public void run() { public void refreshSchema(final Connection connection, final SimpleFuture future, final ResultSet rs, final String keyspace, final String table) { if (logger.isDebugEnabled()) logger.debug("Refreshing schema for {}{}", keyspace == null ? "" : keyspace, table == null ? "" : "." + table); + executor.submit(new Runnable() { public void run() { try { + // Before refreshing the schema, wait for schema agreement so that querying a table just after having created it doesn't fail. + ControlConnection.waitForSchemaAgreement(connection, metadata); ControlConnection.refreshSchema(connection, keyspace, table, Cluster.Manager.this); } catch (Exception e) { logger.error("Error during schema refresh ({}). The schema from Cluster.getMetadata() might appear stale. 
Asynchronously submitting job to fix.", e.getMessage()); diff --git a/driver-core/src/main/java/com/datastax/driver/core/ControlConnection.java b/driver-core/src/main/java/com/datastax/driver/core/ControlConnection.java index 535bc8e196b..26cc229bf9d 100644 --- a/driver-core/src/main/java/com/datastax/driver/core/ControlConnection.java +++ b/driver-core/src/main/java/com/datastax/driver/core/ControlConnection.java @@ -22,6 +22,9 @@ class ControlConnection implements Host.StateListener { private static final Logger logger = LoggerFactory.getLogger(ControlConnection.class); + // TODO: we might want to make that configurable + private static final long MAX_SCHEMA_AGREEMENT_WAIT_MS = 10000; + private static final String SELECT_KEYSPACES = "SELECT * FROM system.schema_keyspaces"; private static final String SELECT_COLUMN_FAMILIES = "SELECT * FROM system.schema_columnfamilies"; private static final String SELECT_COLUMNS = "SELECT * FROM system.schema_columns"; @@ -31,6 +34,9 @@ class ControlConnection implements Host.StateListener { //private static final String SELECT_LOCAL = "SELECT cluster_name, data_center, rack, tokens, partitioner FROM system.local WHERE key='local'"; private static final String SELECT_LOCAL = "SELECT cluster_name, data_center, rack, tokens FROM system.local WHERE key='local'"; + private static final String SELECT_SCHEMA_PEERS = "SELECT peer, schema_version FROM system.peers"; + private static final String SELECT_SCHEMA_LOCAL = "SELECT schema_version FROM system.local WHERE key='local'"; + private final AtomicReference connectionRef = new AtomicReference(); private final Cluster.Manager cluster; @@ -290,6 +296,44 @@ private void refreshNodeListAndTokenMap(Connection connection) throws Connection cluster.metadata.rebuildTokenMap(partitioner, tokenMap); } + static boolean waitForSchemaAgreement(Connection connection, ClusterMetadata metadata) throws ConnectionException, BusyConnectionException, ExecutionException, InterruptedException { + + long start = System.currentTimeMillis(); + long elapsed = 0; + while (elapsed < MAX_SCHEMA_AGREEMENT_WAIT_MS) { + ResultSet.Future peersFuture = new ResultSet.Future(null, new QueryMessage(SELECT_SCHEMA_PEERS, ConsistencyLevel.DEFAULT_CASSANDRA_CL)); + // TODO: fix once we have rc1 + //ResultSet.Future localFuture = new ResultSet.Future(null, new QueryMessage(SELECT_SCHEMA_LOCAL, ConsistencyLevel.DEFAULT_CASSANDRA_CL)); + connection.write(peersFuture.callback); + //connection.write(localFuture.callback); + + Set versions = new HashSet(); + + //CQLRow localRow = localFuture.get().fetchOne(); + //if (localRow != null && !localRow.isNull("schema_version")) + // versions.add(row.getUUID("schema_version")); + + for (CQLRow row : peersFuture.get()) { + if (row.isNull("peer") || row.isNull("schema_version")) + continue; + + Host peer = metadata.getHost(row.getInet("peer")); + if (peer != null && peer.getMonitor().isUp()) + versions.add(row.getUUID("schema_version")); + } + + if (versions.size() <= 1) + return true; + + // let's not flood the node too much + try { Thread.sleep(200); } catch (InterruptedException e) {}; + + elapsed = System.currentTimeMillis() - start; + } + + return false; + } + public void onUp(Host host) { balancingPolicy.onUp(host); } diff --git a/driver-core/src/main/java/com/datastax/driver/core/Host.java b/driver-core/src/main/java/com/datastax/driver/core/Host.java index 0ccdf9dbb29..4f3d2ee98e2 100644 --- a/driver-core/src/main/java/com/datastax/driver/core/Host.java +++ 
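The agreement check above boils down to polling the schema_version columns until all peers converge; as the commented-out lines note, the local node's own version is skipped until rc1. For illustration only, the same idea as a standalone helper written against the public Session/CQLRow API. This helper is hypothetical and not part of the patch; imports from java.util and the driver core package are assumed, and unlike the driver-internal version it does not filter out peers that are currently down:

    // Hypothetical sketch of a client-side schema-agreement wait.
    // Polls system.peers until at most one distinct schema_version remains,
    // mirroring waitForSchemaAgreement above (local node ignored, 200ms back-off).
    static boolean awaitSchemaAgreement(Session session, long maxWaitMs)
            throws NoHostAvailableException, InterruptedException {
        long start = System.currentTimeMillis();
        while (System.currentTimeMillis() - start < maxWaitMs) {
            Set<UUID> versions = new HashSet<UUID>();
            for (CQLRow row : session.execute("SELECT peer, schema_version FROM system.peers")) {
                if (!row.isNull("schema_version"))
                    versions.add(row.getUUID("schema_version"));
            }
            if (versions.size() <= 1)
                return true;   // all reported peer versions agree (or there are no peers)
            Thread.sleep(200); // let's not flood the node too much
        }
        return false;
    }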
b/driver-core/src/main/java/com/datastax/driver/core/Host.java @@ -163,10 +163,10 @@ private void setDown() { * registered listener that the node is up. */ void reset() { - isUp = true; policy.reset(); for (Host.StateListener listener : listeners) listener.onUp(Host.this); + isUp = true; } boolean signalConnectionFailure(ConnectionException exception) { diff --git a/driver-core/src/test/java/com/datastax/driver/core/LoadBalancingPolicyTest.java b/driver-core/src/test/java/com/datastax/driver/core/LoadBalancingPolicyTest.java index 3d7055f3a4e..62ba5e386c4 100644 --- a/driver-core/src/test/java/com/datastax/driver/core/LoadBalancingPolicyTest.java +++ b/driver-core/src/test/java/com/datastax/driver/core/LoadBalancingPolicyTest.java @@ -22,9 +22,6 @@ private void createSchema(Session session) throws NoHostAvailableException { session.execute(String.format(CREATE_KEYSPACE_SIMPLE_FORMAT, SIMPLE_KEYSPACE, 1)); session.execute("USE " + SIMPLE_KEYSPACE); session.execute(String.format("CREATE TABLE %s (k int PRIMARY KEY, i int)", TABLE)); - - // Let the schema propagate (TODO: add a schema agreement check) - try { Thread.sleep(300); } catch (Exception e) {} } private void createMultiDCSchema(Session session) throws NoHostAvailableException { @@ -32,9 +29,6 @@ private void createMultiDCSchema(Session session) throws NoHostAvailableExceptio session.execute(String.format(CREATE_KEYSPACE_GENERIC_FORMAT, SIMPLE_KEYSPACE, "NetworkTopologyStrategy", "'dc1' : 1, 'dc2' : 1")); session.execute("USE " + SIMPLE_KEYSPACE); session.execute(String.format("CREATE TABLE %s (k int PRIMARY KEY, i int)", TABLE)); - - // Let the schema propagate (TODO: add a schema agreement check) - try { Thread.sleep(300); } catch (Exception e) {} } private void addCoordinator(ResultSet rs) { From 3c071a0205c208e0392d87abc1371038731e1e05 Mon Sep 17 00:00:00 2001 From: Sylvain Lebresne Date: Wed, 14 Nov 2012 19:48:43 +0100 Subject: [PATCH 075/719] Adds beginning of query builder (SELECT + DMLs) --- .../datastax/driver/core/CQLStatement.java | 5 + .../com/datastax/driver/core/DataType.java | 36 +++ .../datastax/driver/core/SimpleStatement.java | 3 +- .../core/utils/querybuilder/Assignment.java | 221 ++++++++++++++++++ .../driver/core/utils/querybuilder/Batch.java | 63 +++++ .../utils/querybuilder/BuiltStatement.java | 82 +++++++ .../core/utils/querybuilder/Clause.java | 85 +++++++ .../core/utils/querybuilder/Delete.java | 88 +++++++ .../core/utils/querybuilder/Insert.java | 98 ++++++++ .../core/utils/querybuilder/Ordering.java | 28 +++ .../core/utils/querybuilder/QueryBuilder.java | 102 ++++++++ .../core/utils/querybuilder/Select.java | 100 ++++++++ .../core/utils/querybuilder/Update.java | 79 +++++++ .../driver/core/utils/querybuilder/Using.java | 33 +++ .../driver/core/utils/querybuilder/Utils.java | 193 +++++++++++++++ .../utils/QueryBuilderRoutingKeyTest.java | 66 ++++++ .../driver/core/utils/QueryBuilderTest.java | 109 +++++++++ 17 files changed, 1389 insertions(+), 2 deletions(-) create mode 100644 driver-core/src/main/java/com/datastax/driver/core/utils/querybuilder/Assignment.java create mode 100644 driver-core/src/main/java/com/datastax/driver/core/utils/querybuilder/Batch.java create mode 100644 driver-core/src/main/java/com/datastax/driver/core/utils/querybuilder/BuiltStatement.java create mode 100644 driver-core/src/main/java/com/datastax/driver/core/utils/querybuilder/Clause.java create mode 100644 driver-core/src/main/java/com/datastax/driver/core/utils/querybuilder/Delete.java create mode 100644 
driver-core/src/main/java/com/datastax/driver/core/utils/querybuilder/Insert.java create mode 100644 driver-core/src/main/java/com/datastax/driver/core/utils/querybuilder/Ordering.java create mode 100644 driver-core/src/main/java/com/datastax/driver/core/utils/querybuilder/QueryBuilder.java create mode 100644 driver-core/src/main/java/com/datastax/driver/core/utils/querybuilder/Select.java create mode 100644 driver-core/src/main/java/com/datastax/driver/core/utils/querybuilder/Update.java create mode 100644 driver-core/src/main/java/com/datastax/driver/core/utils/querybuilder/Using.java create mode 100644 driver-core/src/main/java/com/datastax/driver/core/utils/querybuilder/Utils.java create mode 100644 driver-core/src/test/java/com/datastax/driver/core/utils/QueryBuilderRoutingKeyTest.java create mode 100644 driver-core/src/test/java/com/datastax/driver/core/utils/QueryBuilderTest.java diff --git a/driver-core/src/main/java/com/datastax/driver/core/CQLStatement.java b/driver-core/src/main/java/com/datastax/driver/core/CQLStatement.java index 38083d78b71..9b21b597e44 100644 --- a/driver-core/src/main/java/com/datastax/driver/core/CQLStatement.java +++ b/driver-core/src/main/java/com/datastax/driver/core/CQLStatement.java @@ -15,4 +15,9 @@ public abstract class CQLStatement extends Query { * @return a valid CQL query string. */ public abstract String getQueryString(); + + @Override + public String toString() { + return getQueryString(); + } } diff --git a/driver-core/src/main/java/com/datastax/driver/core/DataType.java b/driver-core/src/main/java/com/datastax/driver/core/DataType.java index 545e3501efe..df7d581d8ba 100644 --- a/driver-core/src/main/java/com/datastax/driver/core/DataType.java +++ b/driver-core/src/main/java/com/datastax/driver/core/DataType.java @@ -1,5 +1,11 @@ package com.datastax.driver.core; +import java.nio.ByteBuffer; + +import org.apache.cassandra.db.marshal.MarshalException; + +import com.datastax.driver.core.exceptions.InvalidTypeException; + /** * Supported data types for columns. */ @@ -49,6 +55,18 @@ public enum Kind { NATIVE, COLLECTION } */ public Collection asCollection(); + /** + * Parses a string value for the type this object represents, returning its + * Cassandra binary representation. + * + * @param value the value to parse. + * @return the binary representation of {@code value}. + * + * @throws InvalidTypeException if {@code value} is not a valid string + * representation for this type. + */ + public ByteBuffer parseString(String value); + /** * Native types supported by Cassandra. */ @@ -79,6 +97,15 @@ public enum Native implements DataType { public String toString() { return super.toString().toLowerCase(); } + + public ByteBuffer parseString(String value) + { + try { + return Codec.getCodec(this).fromString(value); + } catch (MarshalException e) { + throw new InvalidTypeException(String.format("Cannot parse '%s' as a %s value (%s)", value, this, e.getMessage())); + } + } } /** @@ -110,6 +137,15 @@ protected Collection(Kind kind) { public Native asNative() { throw new IllegalStateException("Not a native type, but a collection one"); } public Collection asCollection() { return this; } + public ByteBuffer parseString(String value) + { + try { + return Codec.getCodec(this).fromString(value); + } catch (MarshalException e) { + throw new InvalidTypeException(String.format("Cannot parse '%s' as a %s value (%s)", value, this, e.getMessage())); + } + } + /** * The type of lists. 
*/ diff --git a/driver-core/src/main/java/com/datastax/driver/core/SimpleStatement.java b/driver-core/src/main/java/com/datastax/driver/core/SimpleStatement.java index 7dd378762da..15cd27a7bbb 100644 --- a/driver-core/src/main/java/com/datastax/driver/core/SimpleStatement.java +++ b/driver-core/src/main/java/com/datastax/driver/core/SimpleStatement.java @@ -101,8 +101,7 @@ static ByteBuffer compose(ByteBuffer... buffers) { return out; } - private static void putShortLength(ByteBuffer bb, int length) - { + private static void putShortLength(ByteBuffer bb, int length) { bb.put((byte) ((length >> 8) & 0xFF)); bb.put((byte) (length & 0xFF)); } diff --git a/driver-core/src/main/java/com/datastax/driver/core/utils/querybuilder/Assignment.java b/driver-core/src/main/java/com/datastax/driver/core/utils/querybuilder/Assignment.java new file mode 100644 index 00000000000..8dfbae53cf1 --- /dev/null +++ b/driver-core/src/main/java/com/datastax/driver/core/utils/querybuilder/Assignment.java @@ -0,0 +1,221 @@ +package com.datastax.driver.core.utils.querybuilder; + +import java.util.*; + +import static com.datastax.driver.core.utils.querybuilder.Utils.*; + +public abstract class Assignment extends Utils.Appendeable { + + protected final String name; + + private Assignment(String name) { + this.name = name; + }; + + String name() { + return name; + } + + public static Assignment set(String name, Object value) { + return new SetAssignment(name, value); + } + + public static Assignment incr(String name) { + return incr(name, 1L); + } + + public static Assignment incr(String name, long value) { + return new CounterAssignment(name, value, true); + } + + public static Assignment decr(String name) { + return decr(name, 1L); + } + + public static Assignment decr(String name, long value) { + return new CounterAssignment(name, value, false); + } + + public static Assignment prepend(String name, Object value) { + return new ListPrependAssignment(name, Collections.singletonList(value)); + } + + public static Assignment prependAll(String name, List list) { + return new ListPrependAssignment(name, list); + } + + public static Assignment append(String name, Object value) { + return new CollectionAssignment(name, Collections.singletonList(value), true); + } + + public static Assignment appendAll(String name, List list) { + return new CollectionAssignment(name, list, true); + } + + public static Assignment discard(String name, Object value) { + return new CollectionAssignment(name, Collections.singletonList(value), false); + } + + public static Assignment discardAll(String name, List list) { + return new CollectionAssignment(name, list, false); + } + + public static Assignment setIdx(String name, int idx, Object value) { + return new ListSetIdxAssignment(name, idx, value); + } + + public static Assignment add(String name, Object value) { + return new CollectionAssignment(name, Collections.singleton(value), true); + } + + public static Assignment addAll(String name, Set set) { + return new CollectionAssignment(name, set, true); + } + + public static Assignment remove(String name, Object value) { + return new CollectionAssignment(name, Collections.singleton(value), false); + } + + public static Assignment removeAll(String name, Set set) { + return new CollectionAssignment(name, set, false); + } + + public static Assignment put(String name, Object key, Object value) { + return new MapPutAssignment(name, key, value); + } + + public static Assignment putAll(String name, Map value) { + return new CollectionAssignment(name, 
value, true); + } + + private static class SetAssignment extends Assignment { + + private final Object value; + + SetAssignment(String name, Object value) { + super(name); + this.value = value; + } + + void appendTo(StringBuilder sb) { + appendName(name, sb); + sb.append("="); + appendValue(value, sb); + } + + Object firstValue() { + return value; + } + } + + private static abstract class NoRoutingAssignment extends Assignment { + + NoRoutingAssignment(String name) { + super(name); + } + + @Override + String name() { + // This can't be a routing key + return null; + } + + String firstValue() { + return null; + } + } + + private static class CounterAssignment extends NoRoutingAssignment { + + private final long value; + private final boolean isIncr; + + CounterAssignment(String name, long value, boolean isIncr) { + super(name); + if (!isIncr && value < 0) { + this.value = -value; + this.isIncr = true; + } else { + this.value = value; + this.isIncr = isIncr; + } + } + + void appendTo(StringBuilder sb) { + appendName(name, sb).append("="); + appendName(name, sb).append(isIncr ? "+" : "-").append(value); + } + + } + + private static class ListPrependAssignment extends NoRoutingAssignment { + + private final List value; + + ListPrependAssignment(String name, List value) { + super(name); + this.value = value; + } + + void appendTo(StringBuilder sb) { + appendName(name, sb).append("="); + appendList(value, sb); + sb.append("+"); + appendName(name, sb); + } + } + + private static class ListSetIdxAssignment extends NoRoutingAssignment { + + private final int idx; + private final Object value; + + ListSetIdxAssignment(String name, int idx, Object value) { + super(name); + this.idx = idx; + this.value = value; + } + + void appendTo(StringBuilder sb) { + appendName(name, sb).append("[").append(idx).append("]="); + appendValue(value, sb); + } + } + + private static class CollectionAssignment extends NoRoutingAssignment { + + private final Object collection; + private final boolean isAdd; + + CollectionAssignment(String name, Object collection, boolean isAdd) { + super(name); + this.collection = collection; + this.isAdd = isAdd; + } + + void appendTo(StringBuilder sb) { + appendName(name, sb).append("="); + appendName(name, sb).append(isAdd ? 
"+" : "-"); + appendCollection(collection, sb); + } + } + + private static class MapPutAssignment extends NoRoutingAssignment { + + private final Object key; + private final Object value; + + MapPutAssignment(String name, Object key, Object value) { + super(name); + this.key = key; + this.value = value; + } + + void appendTo(StringBuilder sb) { + appendName(name, sb).append("["); + appendValue(key, sb); + sb.append("]="); + appendValue(value, sb); + } + } +} diff --git a/driver-core/src/main/java/com/datastax/driver/core/utils/querybuilder/Batch.java b/driver-core/src/main/java/com/datastax/driver/core/utils/querybuilder/Batch.java new file mode 100644 index 00000000000..a0493b3ad00 --- /dev/null +++ b/driver-core/src/main/java/com/datastax/driver/core/utils/querybuilder/Batch.java @@ -0,0 +1,63 @@ +package com.datastax.driver.core.utils.querybuilder; + +import java.nio.ByteBuffer; + +import com.datastax.driver.core.CQLStatement; + +public class Batch extends CQLStatement { + + private final ByteBuffer routingKey; + private final CQLStatement[] statements; + private Using[] usings; + + private StringBuilder builder; + + Batch(CQLStatement[] statements) { + if (statements.length == 0) + throw new IllegalArgumentException("Cannot build a BATCH without any statement"); + + this.statements = statements; + ByteBuffer rk = null; + for (int i = 0; i < statements.length; i++) { + rk = statements[i].getRoutingKey(); + if (rk != null) + break; + } + this.routingKey = rk; + } + + public String getQueryString() { + if (builder != null) + return builder.toString(); + + builder = new StringBuilder(); + builder.append("BEGIN BATCH"); + + if (usings != null && usings.length > 0) { + builder.append(" USING "); + Utils.joinAndAppend(null, builder, " AND ", usings); + } + builder.append(" "); + + for (int i = 0; i < statements.length; i++) { + String str = statements[i].getQueryString(); + builder.append(str); + if (!str.trim().endsWith(";")) + builder.append(";"); + } + builder.append("APPLY BATCH;"); + return builder.toString(); + } + + public Batch using(Using... 
usings) { + if (this.usings != null) + throw new IllegalStateException("A USING clause has already been provided"); + + this.usings = usings; + return this; + } + + public ByteBuffer getRoutingKey() { + return routingKey; + } +} diff --git a/driver-core/src/main/java/com/datastax/driver/core/utils/querybuilder/BuiltStatement.java b/driver-core/src/main/java/com/datastax/driver/core/utils/querybuilder/BuiltStatement.java new file mode 100644 index 00000000000..c5e2fba021d --- /dev/null +++ b/driver-core/src/main/java/com/datastax/driver/core/utils/querybuilder/BuiltStatement.java @@ -0,0 +1,82 @@ +package com.datastax.driver.core.utils.querybuilder; + +import java.nio.ByteBuffer; +import java.util.List; + +import com.datastax.driver.core.ColumnMetadata; +import com.datastax.driver.core.CQLStatement; +import com.datastax.driver.core.TableMetadata; + +abstract class BuiltStatement extends CQLStatement { + + private final List partitionKey; + private final ByteBuffer[] routingKey; + + protected final StringBuilder builder = new StringBuilder(); + + protected BuiltStatement() { + this.partitionKey = null; + this.routingKey = null; + } + + protected BuiltStatement(TableMetadata tableMetadata) { + this.partitionKey = tableMetadata.getPartitionKey(); + this.routingKey = new ByteBuffer[tableMetadata.getPartitionKey().size()]; + } + + public String getQueryString() { + return builder.append(";").toString(); + } + + protected StringBuilder appendName(String name) { + return Utils.appendName(name, builder); + } + + // TODO: Correctly document the InvalidTypeException + void maybeAddRoutingKey(String name, Object value) { + if (routingKey == null || name == null) + return; + + for (int i = 0; i < partitionKey.size(); i++) { + if (name.equals(partitionKey.get(i).getName())) { + routingKey[i] = partitionKey.get(i).getType().parseString(Utils.toRawString(value)); + return; + } + } + } + + public ByteBuffer getRoutingKey() { + if (routingKey == null) + return null; + + for (ByteBuffer bb : routingKey) + if (bb == null) + return null; + + return routingKey.length == 1 + ? routingKey[0] + : compose(routingKey); + } + + // This is a duplicate of the one in SimpleStatement, but I don't want to expose this publicly so... + static ByteBuffer compose(ByteBuffer... 
buffers) { + int totalLength = 0; + for (ByteBuffer bb : buffers) + totalLength += 2 + bb.remaining() + 1; + + ByteBuffer out = ByteBuffer.allocate(totalLength); + for (ByteBuffer bb : buffers) + { + putShortLength(out, bb.remaining()); + out.put(bb); + out.put((byte) 0); + } + out.flip(); + return out; + } + + private static void putShortLength(ByteBuffer bb, int length) { + bb.put((byte) ((length >> 8) & 0xFF)); + bb.put((byte) (length & 0xFF)); + } +} diff --git a/driver-core/src/main/java/com/datastax/driver/core/utils/querybuilder/Clause.java b/driver-core/src/main/java/com/datastax/driver/core/utils/querybuilder/Clause.java new file mode 100644 index 00000000000..d840aeb0758 --- /dev/null +++ b/driver-core/src/main/java/com/datastax/driver/core/utils/querybuilder/Clause.java @@ -0,0 +1,85 @@ +package com.datastax.driver.core.utils.querybuilder; + +import java.net.InetAddress; +import java.util.Date; +import java.util.UUID; + +public abstract class Clause extends Utils.Appendeable { + + protected final String name; + + private Clause(String name) { + this.name = name; + }; + + String name() { + return name; + } + + public static Clause eq(String name, Object value) { + return new SimpleClause(name, "=", value); + } + + public static Clause in(String name, Object... values) { + return new InClause(name, values); + } + + public static Clause lt(String name, Object value) { + return new SimpleClause(name, "<", value); + } + + public static Clause lte(String name, Object value) { + return new SimpleClause(name, "<=", value); + } + + public static Clause gt(String name, Object value) { + return new SimpleClause(name, ">", value); + } + + public static Clause gte(String name, Object value) { + return new SimpleClause(name, ">=", value); + } + + private static class SimpleClause extends Clause { + + private final String op; + private final Object value; + + private SimpleClause(String name, String op, Object value) { + super(name); + this.op = op; + this.value = value; + } + + void appendTo(StringBuilder sb) { + Utils.appendName(name, sb).append(op); + Utils.appendValue(value, sb); + } + + Object firstValue() { + return value; + } + } + + private static class InClause extends Clause { + + private final Object[] values; + + private InClause(String name, Object[] values) { + super(name); + this.values = values; + + if (values == null || values.length == 0) + throw new IllegalArgumentException("Missing values for IN clause"); + } + + void appendTo(StringBuilder sb) { + Utils.appendName(name, sb).append(" IN ("); + Utils.joinAndAppendValues(sb, ",", values).append(")"); + } + + Object firstValue() { + return values[0]; + } + } +} diff --git a/driver-core/src/main/java/com/datastax/driver/core/utils/querybuilder/Delete.java b/driver-core/src/main/java/com/datastax/driver/core/utils/querybuilder/Delete.java new file mode 100644 index 00000000000..96eb630a4f0 --- /dev/null +++ b/driver-core/src/main/java/com/datastax/driver/core/utils/querybuilder/Delete.java @@ -0,0 +1,88 @@ +package com.datastax.driver.core.utils.querybuilder; + +import com.datastax.driver.core.TableMetadata; + +public class Delete extends BuiltStatement { + + Delete(String keyspace, String table, String[] columnNames, Clause[] clauses, Using[] usings) { + super(); + init(keyspace, table, columnNames, clauses, usings); + } + + Delete(TableMetadata table, String[] columnNames, Clause[] clauses, Using[] usings) { + super(table); + init(table.getKeyspace().getName(), table.getName(), columnNames, clauses, usings); + } + + private 
void init(String keyspaceName, String tableName, String[] columnNames, Clause[] clauses, Using[] usings) { + builder.append("DELETE "); + Utils.joinAndAppendNames(builder, ",", columnNames); + + builder.append(" FROM "); + if (keyspaceName != null) + appendName(keyspaceName).append("."); + appendName(tableName); + + if (usings != null && usings.length > 0) { + builder.append(" USING "); + Utils.joinAndAppend(null, builder, " AND ", usings); + } + + builder.append(" WHERE "); + Utils.joinAndAppend(this, builder, ",", clauses); + } + + public static class Builder { + + private final String[] columnNames; + + private TableMetadata tableMetadata; + + private String keyspace; + private String table; + + private Using[] usings; + + Builder(String[] columnNames) { + this.columnNames = columnNames; + } + + public Builder from(String table) { + if (table != null && tableMetadata != null) + throw new IllegalStateException("A FROM clause has already been provided"); + + return from(null, table); + } + + public Builder from(String keyspace, String table) { + if (table != null && tableMetadata != null) + throw new IllegalStateException("A FROM clause has already been provided"); + + this.keyspace = keyspace; + this.table = table; + return this; + } + + public Builder from(TableMetadata table) { + if (table != null && tableMetadata != null) + throw new IllegalStateException("A FROM clause has already been provided"); + + this.tableMetadata = table; + return this; + } + + public Builder using(Using... usings) { + if (this.usings != null) + throw new IllegalStateException("A USING clause has already been provided"); + + this.usings = usings; + return this; + } + + public Delete where(Clause... clauses) { + return tableMetadata == null + ? new Delete(keyspace, table, columnNames, clauses, usings) + : new Delete(tableMetadata, columnNames, clauses, usings); + } + } +} diff --git a/driver-core/src/main/java/com/datastax/driver/core/utils/querybuilder/Insert.java b/driver-core/src/main/java/com/datastax/driver/core/utils/querybuilder/Insert.java new file mode 100644 index 00000000000..84010993335 --- /dev/null +++ b/driver-core/src/main/java/com/datastax/driver/core/utils/querybuilder/Insert.java @@ -0,0 +1,98 @@ +package com.datastax.driver.core.utils.querybuilder; + +import com.datastax.driver.core.TableMetadata; + +public class Insert extends BuiltStatement { + + private boolean usingsProvided; + + Insert(String keyspace, String table, String[] columnNames, Object[] values) { + super(); + init(keyspace, table, columnNames, values); + } + + Insert(TableMetadata table, String[] columnNames, Object[] values) { + super(table); + init(table.getKeyspace().getName(), table.getName(), columnNames, values); + } + + private void init(String keyspaceName, String tableName, String[] columnNames, Object[] values) { + builder.append("INSERT INTO "); + if (keyspaceName != null) + appendName(keyspaceName).append("."); + appendName(tableName); + builder.append("("); + Utils.joinAndAppendNames(builder, ",", columnNames); + builder.append(") VALUES ("); + Utils.joinAndAppendValues(builder, ",", values); + builder.append(")"); + + for (int i = 0; i < columnNames.length; i++) + maybeAddRoutingKey(columnNames[i], values[i]); + } + + public Insert using(Using... 
usings) { + if (usingsProvided) + throw new IllegalStateException("A USING clause has already been provided"); + + usingsProvided = true; + + if (usings.length == 0) + return this; + + builder.append(" USING "); + Utils.joinAndAppend(null, builder, " AND ", usings); + return this; + } + + public static class Builder { + + private final String[] columnNames; + + private TableMetadata tableMetadata; + + private String keyspace; + private String table; + + Builder(String[] columnNames) { + if (columnNames.length == 0) + throw new IllegalArgumentException("Invalid empty column names"); + + this.columnNames = columnNames; + } + + public Builder into(String table) { + if (table != null && tableMetadata != null) + throw new IllegalStateException("An INTO clause has already been provided"); + + return into(null, table); + } + + public Builder into(String keyspace, String table) { + if (table != null && tableMetadata != null) + throw new IllegalStateException("An INTO clause has already been provided"); + + this.keyspace = keyspace; + this.table = table; + return this; + } + + public Builder into(TableMetadata table) { + if (table != null && tableMetadata != null) + throw new IllegalStateException("An INTO clause has already been provided"); + + this.tableMetadata = table; + return this; + } + + public Insert values(Object... values) { + + if (values.length != columnNames.length) + throw new IllegalArgumentException(String.format("Number of provided values (%d) doesn't match the number of inserted columns (%d)", values.length, columnNames.length)); + + return tableMetadata == null + ? new Insert(keyspace, table, columnNames, values) + : new Insert(tableMetadata, columnNames, values); + } + } +} diff --git a/driver-core/src/main/java/com/datastax/driver/core/utils/querybuilder/Ordering.java b/driver-core/src/main/java/com/datastax/driver/core/utils/querybuilder/Ordering.java new file mode 100644 index 00000000000..74aef0cab56 --- /dev/null +++ b/driver-core/src/main/java/com/datastax/driver/core/utils/querybuilder/Ordering.java @@ -0,0 +1,28 @@ +package com.datastax.driver.core.utils.querybuilder; + +public class Ordering extends Utils.Appendeable { + + private final String name; + private final boolean isDesc; + + private Ordering(String name, boolean isDesc) { + this.name = name; + this.isDesc = isDesc; + } + + public static Ordering asc(String columnName) { + return new Ordering(columnName, false); + } + + public static Ordering desc(String columnName) { + return new Ordering(columnName, true); + } + + void appendTo(StringBuilder sb) { + Utils.appendName(name, sb); + sb.append(isDesc ? " DESC" : " ASC"); + } + + String name() { return null; } + String firstValue() { return null; } +} diff --git a/driver-core/src/main/java/com/datastax/driver/core/utils/querybuilder/QueryBuilder.java b/driver-core/src/main/java/com/datastax/driver/core/utils/querybuilder/QueryBuilder.java new file mode 100644 index 00000000000..dad397a45bd --- /dev/null +++ b/driver-core/src/main/java/com/datastax/driver/core/utils/querybuilder/QueryBuilder.java @@ -0,0 +1,102 @@ +package com.datastax.driver.core.utils.querybuilder; + +import com.datastax.driver.core.CQLStatement; +import com.datastax.driver.core.TableMetadata; + +public abstract class QueryBuilder { + + private QueryBuilder() {} + + private static final String[] ALL = new String[0]; + private static final String[] COUNT_ALL = new String[]{ "count(*)" }; + + public static Select.Builder select(String... 
columns) { + return new Select.Builder(columns); + } + + public static String writeTime(String columnName) { + StringBuilder sb = new StringBuilder(); + sb.append("writetime("); + Utils.appendName(columnName, sb); + sb.append(")"); + return sb.toString(); + } + + public static String[] all() { + return ALL; + } + + public static String[] count() { + return COUNT_ALL; + } + + public static String ttl(String columnName) { + StringBuilder sb = new StringBuilder(); + sb.append("ttl("); + Utils.appendName(columnName, sb); + sb.append(")"); + return sb.toString(); + } + + public static String quote(String columnName) { + StringBuilder sb = new StringBuilder(); + sb.append("\""); + Utils.appendName(columnName, sb); + sb.append("\""); + return sb.toString(); + } + + public static String token(String columnName) { + StringBuilder sb = new StringBuilder(); + sb.append("token("); + Utils.appendName(columnName, sb); + sb.append(")"); + return sb.toString(); + } + + public static String token(String... columnNames) { + StringBuilder sb = new StringBuilder(); + sb.append("token("); + Utils.joinAndAppendNames(sb, ",", columnNames); + sb.append(")"); + return sb.toString(); + } + + public static Insert.Builder insert(String... columns) { + return new Insert.Builder(columns); + } + + public static Update.Builder update(String table) { + return new Update.Builder(null, table); + } + + public static Update.Builder update(String keyspace, String table) { + return new Update.Builder(keyspace, table); + } + + public static Update.Builder update(TableMetadata table) { + return new Update.Builder(table); + } + + public static Delete.Builder delete(String... columns) { + return new Delete.Builder(columns); + } + + public static String listElt(String columnName, int idx) { + StringBuilder sb = new StringBuilder(); + Utils.appendName(columnName, sb); + return sb.append("[").append(idx).append("]").toString(); + } + + public static String mapElt(String columnName, Object key) { + StringBuilder sb = new StringBuilder(); + Utils.appendName(columnName, sb); + sb.append("["); + Utils.appendFlatValue(key, sb); + return sb.append("]").toString(); + } + + public static Batch batch(CQLStatement... 
statements) { + return new Batch(statements); + } +} diff --git a/driver-core/src/main/java/com/datastax/driver/core/utils/querybuilder/Select.java b/driver-core/src/main/java/com/datastax/driver/core/utils/querybuilder/Select.java new file mode 100644 index 00000000000..7a38bf9dc40 --- /dev/null +++ b/driver-core/src/main/java/com/datastax/driver/core/utils/querybuilder/Select.java @@ -0,0 +1,100 @@ +package com.datastax.driver.core.utils.querybuilder; + +import com.datastax.driver.core.TableMetadata; + +public class Select extends BuiltStatement { + + private boolean whereProvided; + private boolean orderByProvided; + private boolean limitProvided; + + Select(String keyspace, String table, String[] columnNames) { + super(); + init(keyspace, table, columnNames); + } + + Select(TableMetadata table, String[] columnNames) { + super(table); + init(table.getKeyspace().getName(), table.getName(), columnNames); + } + + private void init(String keyspaceName, String tableName, String[] columnNames) { + builder.append("SELECT "); + if (columnNames.length == 0) { + builder.append("*"); + } else { + Utils.joinAndAppendNames(builder, ",", columnNames); + } + builder.append(" FROM "); + if (keyspaceName != null) + appendName(keyspaceName).append("."); + appendName(tableName); + } + + public Select where(Clause clause) { + if (whereProvided) + throw new IllegalStateException("A WHERE clause has already been provided"); + + whereProvided = true; + builder.append(" WHERE "); + + clause.appendTo(builder); + maybeAddRoutingKey(clause.name(), clause.firstValue()); + return this; + } + + public Select where(Clause... clauses) { + if (whereProvided) + throw new IllegalStateException("A WHERE clause has already been provided"); + + whereProvided = true; + builder.append(" WHERE "); + + Utils.joinAndAppend(this, builder, " AND ", clauses); + + for (int i = 0; i < clauses.length; ++i) + maybeAddRoutingKey(clauses[i].name(), clauses[i].firstValue()); + return this; + } + + public Select orderBy(Ordering... 
orders) { + if (orderByProvided) + throw new IllegalStateException("An ORDER BY clause has already been provided"); + + orderByProvided = true; + builder.append(" ORDER BY ("); + Utils.joinAndAppend(null, builder, ",", orders); + builder.append(")"); + return this; + } + + public Select limit(int limit) { + if (limitProvided) + throw new IllegalStateException("A LIMIT value has already been provided"); + + limitProvided = true; + builder.append(" LIMIT ").append(limit); + return this; + } + + public static class Builder { + + private final String[] columnNames; + + Builder(String[] columnNames) { + this.columnNames = columnNames; + } + + public Select from(String table) { + return new Select(null, table, columnNames); + } + + public Select from(String keyspace, String table) { + return new Select(keyspace, table, columnNames); + } + + public Select from(TableMetadata table) { + return new Select(table, columnNames); + } + } +} diff --git a/driver-core/src/main/java/com/datastax/driver/core/utils/querybuilder/Update.java b/driver-core/src/main/java/com/datastax/driver/core/utils/querybuilder/Update.java new file mode 100644 index 00000000000..d3f24f8ebed --- /dev/null +++ b/driver-core/src/main/java/com/datastax/driver/core/utils/querybuilder/Update.java @@ -0,0 +1,79 @@ +package com.datastax.driver.core.utils.querybuilder; + +import com.datastax.driver.core.TableMetadata; + +public class Update extends BuiltStatement { + + Update(String keyspace, String table, Assignment[] assignments, Clause[] clauses, Using[] usings) { + super(); + init(keyspace, table, assignments, clauses, usings); + } + + Update(TableMetadata table, Assignment[] assignments, Clause[] clauses, Using[] usings) { + super(table); + init(table.getKeyspace().getName(), table.getName(), assignments, clauses, usings); + } + + private void init(String keyspaceName, String tableName, Assignment[] assignments, Clause[] clauses, Using[] usings) { + builder.append("UPDATE "); + if (keyspaceName != null) + appendName(keyspaceName).append("."); + appendName(tableName); + + if (usings != null && usings.length > 0) { + builder.append(" USING "); + Utils.joinAndAppend(null, builder, " AND ", usings); + } + + builder.append(" SET "); + Utils.joinAndAppend(null, builder, ",", assignments); + + builder.append(" WHERE "); + Utils.joinAndAppend(this, builder, ",", clauses); + } + + public static class Builder { + + private final TableMetadata tableMetadata; + + private final String keyspace; + private final String table; + + private Assignment[] assignments; + private Using[] usings; + + Builder(String keyspace, String table) { + this.keyspace = keyspace; + this.table = table; + this.tableMetadata = null; + } + + Builder(TableMetadata tableMetadata) { + this.tableMetadata = tableMetadata; + this.keyspace = null; + this.table = null; + } + + public Builder using(Using... usings) { + if (this.usings != null) + throw new IllegalStateException("A USING clause has already been provided"); + + this.usings = usings; + return this; + } + + public Builder set(Assignment... assignments) { + if (this.assignments != null) + throw new IllegalStateException("A SET clause has already been provided"); + + this.assignments = assignments; + return this; + } + + public Update where(Clause... clauses) { + return table == null + ? 
new Update(tableMetadata, assignments, clauses, usings) + : new Update(keyspace, table, assignments, clauses, usings); + } + } +} diff --git a/driver-core/src/main/java/com/datastax/driver/core/utils/querybuilder/Using.java b/driver-core/src/main/java/com/datastax/driver/core/utils/querybuilder/Using.java new file mode 100644 index 00000000000..7de9a394f3a --- /dev/null +++ b/driver-core/src/main/java/com/datastax/driver/core/utils/querybuilder/Using.java @@ -0,0 +1,33 @@ +package com.datastax.driver.core.utils.querybuilder; + +public class Using extends Utils.Appendeable { + + private final String optionName; + private final long value; + + private Using(String optionName, long value) { + this.optionName = optionName; + this.value = value; + } + + public static Using timestamp(long timestamp) { + if (timestamp < 0) + throw new IllegalArgumentException("Invalid timestamp, must be positive"); + + return new Using("TIMESTAMP", timestamp); + } + + public static Using ttl(int ttl) { + if (ttl < 0) + throw new IllegalArgumentException("Invalid ttl, must be positive"); + + return new Using("TTL", ttl); + } + + void appendTo(StringBuilder sb) { + sb.append(optionName).append(" ").append(value); + } + + String name() { return null; } + String firstValue() { return null; } +} diff --git a/driver-core/src/main/java/com/datastax/driver/core/utils/querybuilder/Utils.java b/driver-core/src/main/java/com/datastax/driver/core/utils/querybuilder/Utils.java new file mode 100644 index 00000000000..f332833947b --- /dev/null +++ b/driver-core/src/main/java/com/datastax/driver/core/utils/querybuilder/Utils.java @@ -0,0 +1,193 @@ +package com.datastax.driver.core.utils.querybuilder; + +import java.net.InetAddress; +import java.util.*; +import java.util.regex.Pattern; + +// Static utilities private to the query builder +abstract class Utils { + + private static final Pattern cnamePattern = Pattern.compile("\\w+(?:\\[.+\\])?", Pattern.CASE_INSENSITIVE); + private static final Pattern fctsPattern = Pattern.compile("(?:count|writetime|ttl|token)\\(.*", Pattern.CASE_INSENSITIVE); + + static StringBuilder joinAndAppend(StringBuilder sb, String separator, String[] values) { + for (int i = 0; i < values.length; i++) { + if (i > 0) + sb.append(separator); + sb.append(values[i]); + } + return sb; + } + + static StringBuilder joinAndAppend(BuiltStatement stmt, StringBuilder sb, String separator, Appendeable[] values) { + for (int i = 0; i < values.length; i++) { + if (i > 0) + sb.append(separator); + values[i].appendTo(sb); + if (stmt != null) + stmt.maybeAddRoutingKey(values[i].name(), values[i].firstValue()); + } + return sb; + } + + static StringBuilder joinAndAppendNames(StringBuilder sb, String separator, String[] values) { + for (int i = 0; i < values.length; i++) { + if (i > 0) + sb.append(separator); + appendName(values[i], sb); + } + return sb; + } + + static StringBuilder joinAndAppendValues(StringBuilder sb, String separator, Object[] values) { + for (int i = 0; i < values.length; i++) { + if (i > 0) + sb.append(separator); + appendValue(values[i], sb); + } + return sb; + } + + static StringBuilder appendValue(Object value, StringBuilder sb) { + return appendValue(value, sb, false); + } + + static StringBuilder appendFlatValue(Object value, StringBuilder sb) { + appendFlatValue(value, sb, false); + return sb; + } + + private static StringBuilder appendValue(Object value, StringBuilder sb, boolean rawValue) { + // That is kind of lame but lacking a better solution + if (appendValueIfLiteral(value, sb)) + return 
sb; + + if (appendValueIfCollection(value, sb, rawValue)) + return sb; + + if (rawValue) + return sb.append(value.toString()); + else + return appendValueString(value.toString(), sb); + } + + private static void appendFlatValue(Object value, StringBuilder sb, boolean rawValue) { + if (appendValueIfLiteral(value, sb)) + return; + + if (rawValue) + sb.append(value.toString()); + else + appendValueString(value.toString(), sb); + } + + private static boolean appendValueIfLiteral(Object value, StringBuilder sb) { + if (value instanceof Integer || value instanceof Long || value instanceof Float || value instanceof Double || value instanceof UUID) { + sb.append(value); + return true; + } else if (value instanceof InetAddress) { + sb.append(((InetAddress)value).getHostAddress()); + return true; + } else if (value instanceof Date) { + sb.append(((Date)value).getTime()); + return true; + } else { + return false; + } + } + + private static boolean appendValueIfCollection(Object value, StringBuilder sb, boolean rawValue) { + if (value instanceof List) { + appendList((List)value, sb, rawValue); + return true; + } else if (value instanceof Set) { + appendSet((Set)value, sb, rawValue); + return true; + } else if (value instanceof Map) { + appendMap((Map)value, sb, rawValue); + return true; + } else { + return false; + } + } + + static StringBuilder appendCollection(Object value, StringBuilder sb) { + boolean wasCollection = appendValueIfCollection(value, sb, false); + assert wasCollection; + return sb; + } + + static StringBuilder appendList(List l, StringBuilder sb) { + return appendList(l, sb, false); + } + + private static StringBuilder appendList(List l, StringBuilder sb, boolean rawValue) { + sb.append("["); + for (int i = 0; i < l.size(); i++) { + if (i > 0) + sb.append(","); + appendFlatValue(l.get(i), sb, rawValue); + } + sb.append("]"); + return sb; + } + + static StringBuilder appendSet(Set s, StringBuilder sb) { + return appendSet(s, sb, false); + } + + private static StringBuilder appendSet(Set s, StringBuilder sb, boolean rawValue) { + sb.append("{"); + boolean first = true; + for (Object elt : s) { + if (first) first = false; else sb.append(","); + appendFlatValue(elt, sb, rawValue); + } + sb.append("}"); + return sb; + } + + static StringBuilder appendMap(Map m, StringBuilder sb) { + return appendMap(m, sb, false); + } + + private static StringBuilder appendMap(Map m, StringBuilder sb, boolean rawValue) { + sb.append("{"); + boolean first = true; + for (Map.Entry entry : m.entrySet()) { + if (first) + first = false; + else + sb.append(","); + appendFlatValue(entry.getKey(), sb, rawValue); + sb.append(":"); + appendFlatValue(entry.getValue(), sb, rawValue); + } + sb.append("}"); + return sb; + } + + private static StringBuilder appendValueString(String value, StringBuilder sb) { + return sb.append("'").append(value.replace("'", "''")).append("'"); + } + + static String toRawString(Object value) { + return appendValue(value, new StringBuilder(), true).toString(); + } + + static StringBuilder appendName(String name, StringBuilder sb) { + name = name.trim(); + if (cnamePattern.matcher(name).matches() || name.startsWith("\"") || fctsPattern.matcher(name).matches()) + sb.append(name); + else + sb.append("\"").append(name).append("\""); + return sb; + } + + static abstract class Appendeable { + abstract void appendTo(StringBuilder sb); + + abstract String name(); + abstract Object firstValue(); + } +} diff --git 
a/driver-core/src/test/java/com/datastax/driver/core/utils/QueryBuilderRoutingKeyTest.java b/driver-core/src/test/java/com/datastax/driver/core/utils/QueryBuilderRoutingKeyTest.java new file mode 100644 index 00000000000..1329f4316ae --- /dev/null +++ b/driver-core/src/test/java/com/datastax/driver/core/utils/QueryBuilderRoutingKeyTest.java @@ -0,0 +1,66 @@ +package com.datastax.driver.core.utils; + +import java.nio.ByteBuffer; +import java.util.*; + +import org.junit.Test; +import static org.junit.Assert.*; + +import com.datastax.driver.core.*; +import static com.datastax.driver.core.utils.querybuilder.Assignment.*; +import static com.datastax.driver.core.utils.querybuilder.QueryBuilder.*; +import static com.datastax.driver.core.utils.querybuilder.Clause.*; +import static com.datastax.driver.core.utils.querybuilder.Ordering.*; +import static com.datastax.driver.core.utils.querybuilder.Using.*; + +public class QueryBuilderRoutingKeyTest extends CCMBridge.PerClassSingleNodeCluster { + + private static final String TABLE_TEXT = "test_text"; + private static final String TABLE_INT = "test_int"; + + protected Collection getTableDefinitions() { + return Arrays.asList(String.format("CREATE TABLE %s (k text PRIMARY KEY, a int, b int)", TABLE_TEXT), + String.format("CREATE TABLE %s (k int PRIMARY KEY, a int, b int)", TABLE_INT)); + } + + @Test + public void textRoutingKeyTest() throws Exception { + + CQLStatement query; + TableMetadata table = cluster.getMetadata().getKeyspace(TestUtils.SIMPLE_KEYSPACE).getTable(TABLE_TEXT); + assertNotNull(table); + + String txt = "If she weighs the same as a duck... she's made of wood."; + query = insert("k", "a", "b").into(table).values(txt, 1, 2); + assertEquals(ByteBuffer.wrap(txt.getBytes()), query.getRoutingKey()); + session.execute(query); + + query = select().from(table).where(eq("k", txt)); + assertEquals(ByteBuffer.wrap(txt.getBytes()), query.getRoutingKey()); + CQLRow row = session.execute(query).fetchOne(); + assertEquals(txt, row.getString("k")); + assertEquals(1, row.getInt("a")); + assertEquals(2, row.getInt("b")); + } + + @Test + public void intRoutingKeyTest() throws Exception { + + CQLStatement query; + TableMetadata table = cluster.getMetadata().getKeyspace(TestUtils.SIMPLE_KEYSPACE).getTable(TABLE_INT); + assertNotNull(table); + + query = insert("k", "a", "b").into(table).values(42, 1, 2); + ByteBuffer bb = ByteBuffer.allocate(4); + bb.putInt(0, 42); + assertEquals(bb, query.getRoutingKey()); + session.execute(query); + + query = select().from(table).where(eq("k", 42)); + assertEquals(bb, query.getRoutingKey()); + CQLRow row = session.execute(query).fetchOne(); + assertEquals(42, row.getInt("k")); + assertEquals(1, row.getInt("a")); + assertEquals(2, row.getInt("b")); + } +} diff --git a/driver-core/src/test/java/com/datastax/driver/core/utils/QueryBuilderTest.java b/driver-core/src/test/java/com/datastax/driver/core/utils/QueryBuilderTest.java new file mode 100644 index 00000000000..5e2d1a78c12 --- /dev/null +++ b/driver-core/src/test/java/com/datastax/driver/core/utils/QueryBuilderTest.java @@ -0,0 +1,109 @@ +package com.datastax.driver.core.utils; + +import java.net.InetAddress; +import java.util.*; + +import org.junit.Test; +import static org.junit.Assert.*; + +import com.datastax.driver.core.utils.querybuilder.*; +import static com.datastax.driver.core.utils.querybuilder.Assignment.*; +import static com.datastax.driver.core.utils.querybuilder.QueryBuilder.*; +import static com.datastax.driver.core.utils.querybuilder.Clause.*; +import 
static com.datastax.driver.core.utils.querybuilder.Ordering.*; +import static com.datastax.driver.core.utils.querybuilder.Using.*; + +public class QueryBuilderTest { + + @Test + public void selectTest() throws Exception { + + String query; + Select select; + + query = "SELECT * FROM foo WHERE k=4 AND c>'a' AND c<='z';"; + select = select(all()).from("foo").where(eq("k", 4), gt("c", "a"), lte("c", "z")); + assertEquals(query, select.toString()); + + query = "SELECT a,b,\"C\" FROM foo WHERE a IN (127.0.0.1,127.0.0.3) AND \"C\"='foo' ORDER BY (a ASC,b DESC) LIMIT 42;"; + select = select("a", "b", quote("C")).from("foo") + .where(in("a", InetAddress.getByName("127.0.0.1"), InetAddress.getByName("127.0.0.3")), + eq(quote("C"), "foo")) + .orderBy(asc("a"), desc("b")) + .limit(42); + assertEquals(query, select.toString()); + + query = "SELECT writetime(a),ttl(a) FROM foo;"; + select = select(writeTime("a"), ttl("a")).from("foo"); + assertEquals(query, select.toString()); + + query = "SELECT count(*) FROM foo;"; + select = select(count()).from("foo"); + assertEquals(query, select.toString()); + } + + @Test + public void insertTest() throws Exception { + + String query; + Insert insert; + + query = "INSERT INTO foo(a,b,\"C\",d) VALUES (123,127.0.0.1,'foo''bar',{'x':3,'y':2}) USING TIMESTAMP 42 AND TTL 24;"; + insert = insert("a", "b", quote("C"), "d").into("foo") + .values(123, InetAddress.getByName("127.0.0.1"), "foo'bar", new TreeMap(){{ put("x", 3); put("y", 2); }}) + .using(timestamp(42), ttl(24)); + assertEquals(query, insert.toString()); + + query = "INSERT INTO foo(a,b) VALUES ({2,3,4},3.4) USING TTL 24 AND TIMESTAMP 42;"; + insert = insert("a", "b").into("foo").values(new TreeSet(){{ add(2); add(3); add(4); }}, 3.4).using(ttl(24), timestamp(42)); + assertEquals(query, insert.toString()); + } + + @Test + public void updateTest() throws Exception { + + String query; + Update update; + + query = "UPDATE foo.bar USING TIMESTAMP 42 SET a=12,b=[3,2,1],c=c+3 WHERE k=2;"; + update = update("foo", "bar").using(timestamp(42)).set(set("a", 12), set("b", Arrays.asList(3, 2, 1)), incr("c", 3)).where(eq("k", 2)); + assertEquals(query, update.toString()); + + query = "UPDATE foo SET a[2]='foo',b=[3,2,1]+b,c=c-{'a'} WHERE k=2;"; + update = update("foo").set(setIdx("a", 2, "foo"), prependAll("b", Arrays.asList(3, 2, 1)), remove("c", "a")).where(eq("k", 2)); + assertEquals(query, update.toString()); + } + + @Test + public void deleteTest() throws Exception { + + String query; + Delete delete; + + query = "DELETE a,b,c FROM foo USING TIMESTAMP 0 WHERE k=1;"; + delete = delete("a", "b", "c").from("foo").using(timestamp(0)).where(eq("k", 1)); + assertEquals(query, delete.toString()); + + query = "DELETE a[3],b['foo'],c FROM foo WHERE k=1;"; + delete = delete(listElt("a", 3), mapElt("b", "foo"), "c").from("foo").where(eq("k", 1)); + assertEquals(query, delete.toString()); + } + + @Test + public void batchTest() throws Exception { + String query; + Batch batch; + + query = "BEGIN BATCH USING TIMESTAMP 42 "; + query += "INSERT INTO foo(a,b) VALUES ({2,3,4},3.4);"; + query += "UPDATE foo SET a[2]='foo',b=[3,2,1]+b,c=c-{'a'} WHERE k=2;"; + query += "DELETE a[3],b['foo'],c FROM foo WHERE k=1;"; + query += "APPLY BATCH;"; + batch = batch( + insert("a", "b").into("foo").values(new TreeSet(){{ add(2); add(3); add(4); }}, 3.4), + update("foo").set(setIdx("a", 2, "foo"), prependAll("b", Arrays.asList(3, 2, 1)), remove("c", "a")).where(eq("k", 2)), + delete(listElt("a", 3), mapElt("b", "foo"), 
"c").from("foo").where(eq("k", 1)) + ).using(timestamp(42)); + assertEquals(query, batch.toString()); + } +} From 12e2b1349c2c80fcbe90c9257507324144d7d1fc Mon Sep 17 00:00:00 2001 From: Sylvain Lebresne Date: Fri, 16 Nov 2012 12:52:27 +0100 Subject: [PATCH 076/719] Refactory/simplify policies API --- .../core/AbstractReconnectionHandler.java | 14 +- .../com/datastax/driver/core/Cluster.java | 44 +- .../driver/core/ControlConnection.java | 7 +- .../java/com/datastax/driver/core/Query.java | 10 +- .../com/datastax/driver/core/Session.java | 2 +- .../datastax/driver/core/SimpleStatement.java | 2 +- .../policies/ConstantReconnectionPolicy.java | 42 ++ .../policies/DCAwareRoundRobinPolicy.java | 214 ++++++++ .../core/policies/DefaultRetryPolicy.java | 105 ++++ .../DowngradingConsistencyRetryPolicy.java | 168 ++++++ .../ExponentialReconnectionPolicy.java | 60 +++ .../core/policies/FallthroughRetryPolicy.java | 28 + .../core/policies/LoadBalancingPolicy.java | 482 +----------------- .../core/policies/LoggingRetryPolicy.java | 79 +++ .../driver/core/policies/Policies.java | 61 ++- .../core/policies/ReconnectionPolicy.java | 138 +---- .../driver/core/policies/RetryPolicy.java | 345 ------------- .../core/policies/RoundRobinPolicy.java | 108 ++++ .../core/policies/TokenAwarePolicy.java | 127 +++++ .../driver/core/LoadBalancingPolicyTest.java | 6 +- 20 files changed, 1061 insertions(+), 981 deletions(-) create mode 100644 driver-core/src/main/java/com/datastax/driver/core/policies/ConstantReconnectionPolicy.java create mode 100644 driver-core/src/main/java/com/datastax/driver/core/policies/DCAwareRoundRobinPolicy.java create mode 100644 driver-core/src/main/java/com/datastax/driver/core/policies/DefaultRetryPolicy.java create mode 100644 driver-core/src/main/java/com/datastax/driver/core/policies/DowngradingConsistencyRetryPolicy.java create mode 100644 driver-core/src/main/java/com/datastax/driver/core/policies/ExponentialReconnectionPolicy.java create mode 100644 driver-core/src/main/java/com/datastax/driver/core/policies/FallthroughRetryPolicy.java create mode 100644 driver-core/src/main/java/com/datastax/driver/core/policies/LoggingRetryPolicy.java create mode 100644 driver-core/src/main/java/com/datastax/driver/core/policies/RoundRobinPolicy.java create mode 100644 driver-core/src/main/java/com/datastax/driver/core/policies/TokenAwarePolicy.java diff --git a/driver-core/src/main/java/com/datastax/driver/core/AbstractReconnectionHandler.java b/driver-core/src/main/java/com/datastax/driver/core/AbstractReconnectionHandler.java index d43cc8ae636..cf6bba19446 100644 --- a/driver-core/src/main/java/com/datastax/driver/core/AbstractReconnectionHandler.java +++ b/driver-core/src/main/java/com/datastax/driver/core/AbstractReconnectionHandler.java @@ -14,15 +14,15 @@ abstract class AbstractReconnectionHandler implements Runnable { private static final Logger logger = LoggerFactory.getLogger(AbstractReconnectionHandler.class); private final ScheduledExecutorService executor; - private final ReconnectionPolicy policy; + private final ReconnectionPolicy.ReconnectionSchedule schedule; private final AtomicReference currentAttempt; private volatile boolean readyForNext; private volatile ScheduledFuture localFuture; - public AbstractReconnectionHandler(ScheduledExecutorService executor, ReconnectionPolicy policy, AtomicReference currentAttempt) { + public AbstractReconnectionHandler(ScheduledExecutorService executor, ReconnectionPolicy.ReconnectionSchedule schedule, AtomicReference currentAttempt) { 
this.executor = executor; - this.policy = policy; + this.schedule = schedule; this.currentAttempt = currentAttempt; } @@ -36,7 +36,7 @@ public AbstractReconnectionHandler(ScheduledExecutorService executor, Reconnecti protected boolean onAuthenticationException(AuthenticationException e, long nextDelayMs) { return false; } public void start() { - long firstDelay = policy.nextDelayMs(); + long firstDelay = schedule.nextDelayMs(); logger.debug("First reconnection scheduled in {}ms", firstDelay); localFuture = executor.schedule(this, firstDelay, TimeUnit.MILLISECONDS); @@ -66,14 +66,14 @@ public void run() { onReconnection(tryReconnect()); currentAttempt.compareAndSet(localFuture, null); } catch (ConnectionException e) { - long nextDelay = policy.nextDelayMs(); + long nextDelay = schedule.nextDelayMs(); if (onConnectionException(e, nextDelay)) reschedule(nextDelay); else currentAttempt.compareAndSet(localFuture, null); } catch (AuthenticationException e) { logger.error(e.getMessage()); - long nextDelay = policy.nextDelayMs(); + long nextDelay = schedule.nextDelayMs(); if (onAuthenticationException(e, nextDelay)) { reschedule(nextDelay); } else { @@ -81,7 +81,7 @@ public void run() { currentAttempt.compareAndSet(localFuture, null); } } catch (Exception e) { - long nextDelay = policy.nextDelayMs(); + long nextDelay = schedule.nextDelayMs(); if (onUnknownException(e, nextDelay)) reschedule(nextDelay); else diff --git a/driver-core/src/main/java/com/datastax/driver/core/Cluster.java b/driver-core/src/main/java/com/datastax/driver/core/Cluster.java index 389cc593e5c..38d83b21dfc 100644 --- a/driver-core/src/main/java/com/datastax/driver/core/Cluster.java +++ b/driver-core/src/main/java/com/datastax/driver/core/Cluster.java @@ -61,6 +61,7 @@ public class Cluster { private Cluster(List contactPoints, int port, Policies policies, AuthInfoProvider authProvider) throws NoHostAvailableException { this.manager = new Manager(contactPoints, port, policies, authProvider); + this.manager.init(); } /** @@ -198,8 +199,8 @@ public static class Builder implements Initializer { private int port = DEFAULT_PORT; private AuthInfoProvider authProvider = AuthInfoProvider.NONE; - private LoadBalancingPolicy.Factory loadBalancingPolicyFactory; - private ReconnectionPolicy.Factory reconnectionPolicyFactory; + private LoadBalancingPolicy loadBalancingPolicy; + private ReconnectionPolicy reconnectionPolicy; private RetryPolicy retryPolicy; public List getContactPoints() { @@ -296,30 +297,30 @@ public Builder addContactPoints(InetAddress... addresses) { } /** - * Configure the load balancing policy (factory) to use for the new cluster. + * Configure the load balancing policy to use for the new cluster. *

- * If no load balancing policy factory is set through this method, - * {@link Policies#DEFAULT_LOAD_BALANCING_POLICY_FACTORY} will be used instead. + * If no load balancing policy is set through this method, + * {@link Policies#DEFAULT_LOAD_BALANCING_POLICY} will be used instead. * - * @param factory the load balancing policy factory to use + * @param policy the load balancing policy to use * @return this Builder */ - public Builder withLoadBalancingPolicyFactory(LoadBalancingPolicy.Factory factory) { - this.loadBalancingPolicyFactory = factory; + public Builder withLoadBalancingPolicy(LoadBalancingPolicy policy) { + this.loadBalancingPolicy = policy; return this; } /** - * Configure the reconnection policy (factory) to use for the new cluster. + * Configure the reconnection policy to use for the new cluster. *

- * If no reconnection policy factory is set through this method,
- * {@link Policies#DEFAULT_RECONNECTION_POLICY_FACTORY} will be used instead.
+ * If no reconnection policy is set through this method,
+ * {@link Policies#DEFAULT_RECONNECTION_POLICY} will be used instead.
*
- * @param factory the reconnection policy factory to use
+ * @param policy the reconnection policy to use
* @return this Builder
*/
- public Builder withReconnectionPolicyFactory(ReconnectionPolicy.Factory factory) {
- this.reconnectionPolicyFactory = factory;
+ public Builder withReconnectionPolicy(ReconnectionPolicy policy) {
+ this.reconnectionPolicy = policy;
return this;
}
@@ -348,8 +349,8 @@ public Builder withRetryPolicy(RetryPolicy policy) {
*/
public Policies getPolicies() {
return new Policies(
- loadBalancingPolicyFactory == null ? Policies.DEFAULT_LOAD_BALANCING_POLICY_FACTORY : loadBalancingPolicyFactory,
- reconnectionPolicyFactory == null ? Policies.DEFAULT_RECONNECTION_POLICY_FACTORY : reconnectionPolicyFactory,
+ loadBalancingPolicy == null ? Policies.DEFAULT_LOAD_BALANCING_POLICY : loadBalancingPolicy,
+ reconnectionPolicy == null ? Policies.DEFAULT_RECONNECTION_POLICY : reconnectionPolicy,
retryPolicy == null ? Policies.DEFAULT_RETRY_POLICY : retryPolicy
);
}
@@ -476,7 +477,14 @@ private Manager(List contactPoints, int port, Policies policies, Au
addHost(address, false);
this.controlConnection = new ControlConnection(this, metadata);
- controlConnection.connect();
+ this.controlConnection.connect();
+ }
+
+ // This is separated from the constructor because this references the
+ // Cluster object, whose manager won't be properly initialized until
+ // the constructor returns.
+ private void init() {
+ this.configuration.getPolicies().getLoadBalancingPolicy().init(Cluster.this, metadata.getAllHosts());
}
Cluster getCluster() {
@@ -528,7 +536,7 @@ public void onDown(final Host host) {
// Note: we basically waste the first successful reconnection, but it's probably not a big deal
logger.debug("{} is down, scheduling connection retries", host);
- new AbstractReconnectionHandler(reconnectionExecutor, configuration.getPolicies().getReconnectionPolicyFactory().create(), host.reconnectionAttempt) {
+ new AbstractReconnectionHandler(reconnectionExecutor, configuration.getPolicies().getReconnectionPolicy().newSchedule(), host.reconnectionAttempt) {
protected Connection tryReconnect() throws ConnectionException {
return connectionFactory.open(host);
diff --git a/driver-core/src/main/java/com/datastax/driver/core/ControlConnection.java b/driver-core/src/main/java/com/datastax/driver/core/ControlConnection.java
index 26cc229bf9d..e9f906e8f32 100644
--- a/driver-core/src/main/java/com/datastax/driver/core/ControlConnection.java
+++ b/driver-core/src/main/java/com/datastax/driver/core/ControlConnection.java
@@ -42,14 +42,15 @@ class ControlConnection implements Host.StateListener {
private final Cluster.Manager cluster;
private final LoadBalancingPolicy balancingPolicy;
- private final ReconnectionPolicy.Factory reconnectionPolicyFactory = ReconnectionPolicy.Exponential.makeFactory(2 * 1000, 5 * 60 * 1000);
+ private final ReconnectionPolicy reconnectionPolicy = new ExponentialReconnectionPolicy(2 * 1000, 5 * 60 * 1000);
private final AtomicReference reconnectionAttempt = new AtomicReference();
private volatile boolean isShutdown;
public ControlConnection(Cluster.Manager manager, ClusterMetadata metadata) {
this.cluster = manager;
- this.balancingPolicy =
LoadBalancingPolicy.RoundRobin.Factory.INSTANCE.create(manager.getCluster(), metadata.allHosts()); + this.balancingPolicy = new RoundRobinPolicy(); + this.balancingPolicy.init(manager.getCluster(), metadata.allHosts()); } // Only for the initial connection. Does not schedule retries if it fails @@ -74,7 +75,7 @@ private void reconnect() { setNewConnection(reconnectInternal()); } catch (NoHostAvailableException e) { logger.error("[Control connection] Cannot connect to any host, scheduling retry"); - new AbstractReconnectionHandler(cluster.reconnectionExecutor, reconnectionPolicyFactory.create(), reconnectionAttempt) { + new AbstractReconnectionHandler(cluster.reconnectionExecutor, reconnectionPolicy.newSchedule(), reconnectionAttempt) { protected Connection tryReconnect() throws ConnectionException { try { return reconnectInternal(); diff --git a/driver-core/src/main/java/com/datastax/driver/core/Query.java b/driver-core/src/main/java/com/datastax/driver/core/Query.java index 181c0970ee9..c3075b8e32c 100644 --- a/driver-core/src/main/java/com/datastax/driver/core/Query.java +++ b/driver-core/src/main/java/com/datastax/driver/core/Query.java @@ -82,11 +82,11 @@ public boolean isTracing() { *

* The routing key is optional in the sense that implementers are free to * return {@code null}. The routing key is an hint used for token aware routing (see - * {@link LoadBalancingPolicy.TokenAware}), and if provided should - * correspond to the binary value for the query partition key. However, not - * providing a routing key never causes a query to fail and if the load - * balancing policy used is not token aware, then the routing key can be - * safely ignored. + * {@link com.datastax.driver.core.policies.TokenAwarePolicy}), and + * if provided should correspond to the binary value for the query + * partition key. However, not providing a routing key never causes a query + * to fail and if the load balancing policy used is not token aware, then + * the routing key can be safely ignored. * * @return the routing key for this query or {@code null}. */ diff --git a/driver-core/src/main/java/com/datastax/driver/core/Session.java b/driver-core/src/main/java/com/datastax/driver/core/Session.java index 4d35d1a7574..857776b9b27 100644 --- a/driver-core/src/main/java/com/datastax/driver/core/Session.java +++ b/driver-core/src/main/java/com/datastax/driver/core/Session.java @@ -248,7 +248,7 @@ public Manager(Cluster cluster, Collection hosts) { this.cluster = cluster; this.pools = new ConcurrentHashMap(hosts.size()); - this.loadBalancer = cluster.manager.configuration.getPolicies().getLoadBalancingPolicyFactory().create(cluster, hosts); + this.loadBalancer = cluster.manager.configuration.getPolicies().getLoadBalancingPolicy(); this.poolsState = new HostConnectionPool.PoolState(); for (Host host : hosts) diff --git a/driver-core/src/main/java/com/datastax/driver/core/SimpleStatement.java b/driver-core/src/main/java/com/datastax/driver/core/SimpleStatement.java index 15cd27a7bbb..d5e0e4055ea 100644 --- a/driver-core/src/main/java/com/datastax/driver/core/SimpleStatement.java +++ b/driver-core/src/main/java/com/datastax/driver/core/SimpleStatement.java @@ -53,7 +53,7 @@ public ByteBuffer getRoutingKey() { * load balancing policy but is never mandatory. *

* If the partition key for the query is composite, use the
- * {@link #setPartitionKey(ByteBuffer...)} method instead to build the
+ * {@link #setRoutingKey(ByteBuffer...)} method instead to build the
* routing key.
*
* @param routingKey the raw (binary) value to use as routing key.
diff --git a/driver-core/src/main/java/com/datastax/driver/core/policies/ConstantReconnectionPolicy.java b/driver-core/src/main/java/com/datastax/driver/core/policies/ConstantReconnectionPolicy.java
new file mode 100644
index 00000000000..ca902c5f805
--- /dev/null
+++ b/driver-core/src/main/java/com/datastax/driver/core/policies/ConstantReconnectionPolicy.java
@@ -0,0 +1,42 @@
+package com.datastax.driver.core.policies;
+
+/**
+ * A reconnection policy that waits a constant time between each reconnection attempt.
+ */
+public class ConstantReconnectionPolicy implements ReconnectionPolicy {
+
+ private final long delayMs;
+
+ /**
+ * Creates a reconnection policy that waits the provided constant time
+ * between each reconnection attempt.
+ *
+ * @param constantDelayMs the constant delay in milliseconds to use.
+ */
+ public ConstantReconnectionPolicy(long constantDelayMs) {
+ if (constantDelayMs < 0)
+ throw new IllegalArgumentException(String.format("Invalid negative delay (got %d)", constantDelayMs));
+
+ this.delayMs = constantDelayMs;
+ }
+
+ public ConstantSchedule newSchedule() {
+ return new ConstantSchedule();
+ }
+
+ public class ConstantSchedule implements ReconnectionSchedule {
+
+ /**
+ * The delay before the next reconnection.
+ *
+ * @return the fixed delay set by the {@code
+ * ConstantReconnectionPolicy} that created this schedule.
+ */
+ public long nextDelayMs() {
+ return delayMs;
+ }
+ }
+}
diff --git a/driver-core/src/main/java/com/datastax/driver/core/policies/DCAwareRoundRobinPolicy.java b/driver-core/src/main/java/com/datastax/driver/core/policies/DCAwareRoundRobinPolicy.java
new file mode 100644
index 00000000000..4ad269d8265
--- /dev/null
+++ b/driver-core/src/main/java/com/datastax/driver/core/policies/DCAwareRoundRobinPolicy.java
@@ -0,0 +1,214 @@
+package com.datastax.driver.core.policies;
+
+import java.nio.ByteBuffer;
+import java.util.*;
+import java.util.concurrent.atomic.AtomicInteger;
+import java.util.concurrent.ConcurrentHashMap;
+import java.util.concurrent.ConcurrentMap;
+import java.util.concurrent.CopyOnWriteArrayList;
+
+import com.google.common.collect.AbstractIterator;
+
+import com.datastax.driver.core.*;
+
+/**
+ * A data-center aware Round-robin load balancing policy.
+ *

+ * This policy provides round-robin queries over the nodes of the local
+ * datacenter. It also includes in the query plans returned a configurable
+ * number of hosts in the remote datacenters, but those are always tried
+ * after the local nodes. In other words, this policy guarantees that no
+ * host in a remote datacenter will be queried unless no host in the local
+ * datacenter can be reached.
+ *

+ * If used with a single datacenter, this policy is equivalent to the
+ * {@code RoundRobinPolicy} policy, but its DC awareness
+ * incurs a slight overhead so the {@code RoundRobinPolicy}
+ * policy could be preferred to this policy in that case.
+ */
+public class DCAwareRoundRobinPolicy implements LoadBalancingPolicy {
+
+ private final ConcurrentMap<String, CopyOnWriteArrayList<Host>> perDcLiveHosts = new ConcurrentHashMap<String, CopyOnWriteArrayList<Host>>();
+ private final AtomicInteger index = new AtomicInteger();
+ private final String localDc;
+ private final int usedHostsPerRemoteDc;
+
+ /**
+ * Creates a new datacenter aware round robin policy given the name of
+ * the local datacenter.
+ *

+ * The name of the local datacenter provided must be the local + * datacenter name as known by Cassandra. + *

+ * The policy created will ignore all remote hosts. In other words,
+ * this is equivalent to {@code new DCAwareRoundRobinPolicy(localDc, 0)}.
+ *
+ * @param localDc the name of the local datacenter (as known by
+ * Cassandra).
+ */
+ public DCAwareRoundRobinPolicy(String localDc) {
+ this(localDc, 0);
+ }
+
+ /**
+ * Creates a new DCAwareRoundRobin policy given the name of the local
+ * datacenter, and that uses the provided number of hosts per remote
+ * datacenter as failover for the local hosts.
+ *

+ * The name of the local datacenter provided must be the local
+ * datacenter name as known by Cassandra.
+ *
+ * @param localDc the name of the local datacenter (as known by
+ * Cassandra).
+ * @param usedHostsPerRemoteDc the number of hosts per remote
+ * datacenter that this policy should consider. This policy's
+ * {@code distance} method will return a
+ * {@code HostDistance.REMOTE} distance for only {@code
+ * usedHostsPerRemoteDc} hosts per remote datacenter. Other hosts
+ * of the remote datacenters will be ignored (and thus no
+ * connections to them will be maintained).
+ */
+ public DCAwareRoundRobinPolicy(String localDc, int usedHostsPerRemoteDc) {
+ this.localDc = localDc;
+ this.usedHostsPerRemoteDc = usedHostsPerRemoteDc;
+ }
+
+ public void init(Cluster cluster, Collection<Host> hosts) {
+ this.index.set(new Random().nextInt(Math.max(hosts.size(), 1)));
+
+ for (Host host : hosts) {
+ String dc = dc(host);
+ CopyOnWriteArrayList<Host> prev = perDcLiveHosts.get(dc);
+ if (prev == null)
+ perDcLiveHosts.put(dc, new CopyOnWriteArrayList<Host>(Collections.singletonList(host)));
+ else
+ prev.addIfAbsent(host);
+ }
+ }
+
+ private String dc(Host host) {
+ String dc = host.getDatacenter();
+ return dc == null ? localDc : dc;
+ }
+
+ /**
+ * Returns the HostDistance for the provided host.
+ *

+ * This policy considers nodes in the local datacenter as {@code LOCAL}.
+ * For each remote datacenter, it considers a configurable number of
+ * hosts as {@code REMOTE} and the rest as {@code IGNORED}.
+ *

+ * To configure how many hosts in each remote datacenter are considered
+ * {@code REMOTE}, see {@link #DCAwareRoundRobinPolicy(String, int)}.
+ *
+ * @param host the host of which to return the distance.
+ * @return the HostDistance to {@code host}.
+ */
+ public HostDistance distance(Host host) {
+ String dc = dc(host);
+ if (dc.equals(localDc))
+ return HostDistance.LOCAL;
+
+ CopyOnWriteArrayList<Host> dcHosts = perDcLiveHosts.get(dc);
+ if (dcHosts == null || usedHostsPerRemoteDc == 0)
+ return HostDistance.IGNORED;
+
+ // We need to clone, otherwise our subList call is not thread safe
+ dcHosts = (CopyOnWriteArrayList<Host>)dcHosts.clone();
+ return dcHosts.subList(0, Math.min(dcHosts.size(), usedHostsPerRemoteDc)).contains(host)
+ ? HostDistance.REMOTE
+ : HostDistance.IGNORED;
+ }
+
+ /**
+ * Returns the hosts to use for a new query.
+ *

+ * The returned plan will always try each known host in the local + * datacenter first, and then, if none of the local host is reacheable, + * will try up to a configurable number of other host per remote datacenter. + * The order of the local node in the returned query plan will follow a + * Round-robin algorithm. + * + * @param query the query for which to build the plan. + * @return a new query plan, i.e. an iterator indicating which host to + * try first for querying, which one to use as failover, etc... + */ + public Iterator newQueryPlan(Query query) { + + CopyOnWriteArrayList localLiveHosts = perDcLiveHosts.get(localDc); + final List hosts = localLiveHosts == null ? Collections.emptyList() : (List)localLiveHosts.clone(); + final int startIdx = index.getAndIncrement(); + + // Overflow protection; not theoretically thread safe but should be good enough + if (startIdx > Integer.MAX_VALUE - 10000) + index.set(0); + + return new AbstractIterator() { + + private int idx = startIdx; + private int remainingLocal = hosts.size(); + + // For remote Dcs + private Iterator remoteDcs; + private List currentDcHosts; + private int currentDcRemaining; + + protected Host computeNext() { + if (remainingLocal > 0) { + remainingLocal--; + return hosts.get(idx++ % hosts.size()); + } + + if (currentDcHosts != null && currentDcRemaining > 0) { + currentDcRemaining--; + return currentDcHosts.get(idx++ % currentDcHosts.size()); + } + + if (remoteDcs == null) { + Set copy = new HashSet(perDcLiveHosts.keySet()); + copy.remove(localDc); + remoteDcs = copy.iterator(); + } + + if (!remoteDcs.hasNext()) + return endOfData(); + + String nextRemoteDc = remoteDcs.next(); + CopyOnWriteArrayList nextDcHosts = perDcLiveHosts.get(nextRemoteDc); + if (nextDcHosts != null) { + currentDcHosts = (List)nextDcHosts.clone(); + currentDcRemaining = Math.min(usedHostsPerRemoteDc, currentDcHosts.size()); + } + + return computeNext(); + } + }; + } + + public void onUp(Host host) { + String dc = dc(host); + CopyOnWriteArrayList dcHosts = perDcLiveHosts.get(dc); + if (dcHosts == null) { + CopyOnWriteArrayList newMap = new CopyOnWriteArrayList(Collections.singletonList(host)); + dcHosts = perDcLiveHosts.putIfAbsent(dc, newMap); + // If we've successfully put our new host, we're good, otherwise we've been beaten so continue + if (dcHosts == null) + return; + } + dcHosts.addIfAbsent(host); + } + + public void onDown(Host host) { + CopyOnWriteArrayList dcHosts = perDcLiveHosts.get(dc(host)); + if (dcHosts != null) + dcHosts.remove(host); + } + + public void onAdd(Host host) { + onUp(host); + } + + public void onRemove(Host host) { + onDown(host); + } +} diff --git a/driver-core/src/main/java/com/datastax/driver/core/policies/DefaultRetryPolicy.java b/driver-core/src/main/java/com/datastax/driver/core/policies/DefaultRetryPolicy.java new file mode 100644 index 00000000000..47e9737eea7 --- /dev/null +++ b/driver-core/src/main/java/com/datastax/driver/core/policies/DefaultRetryPolicy.java @@ -0,0 +1,105 @@ +package com.datastax.driver.core.policies; + +import com.datastax.driver.core.*; + +/** + * The default retry policy. + *

+ * This policy retries queries in only two cases:
+ * <ul>
+ *   <li>On a read timeout, if enough replicas replied but data was not retrieved.</li>
+ *   <li>On a write timeout, if we time out while writing the distributed log used by batch statements.</li>
+ * </ul>
+ * <p>
+ * This retry policy is conservative in that it will never retry with a + * different consistency level than the one of the initial operation. + *

+ * In some cases, it may be convenient to use a more aggressive retry policy + * like {@link DowngradingConsistencyRetryPolicy}. + */ +public class DefaultRetryPolicy implements RetryPolicy { + + public static final DefaultRetryPolicy INSTANCE = new DefaultRetryPolicy(); + + private DefaultRetryPolicy() {} + + /** + * Defines whether to retry and at which consistency level on a read timeout. + *

+ * This method triggers a maximum of one retry, and only if enough
+ * replicas had responded to the read request but data was not retrieved
+ * amongst those. Indeed, that case usually means that enough replicas
+ * are alive to satisfy the consistency but the coordinator picked a
+ * dead one for data retrieval, not having detected that replica as dead
+ * yet. The reasoning for retrying then is that by the time we get the
+ * timeout the dead replica will likely have been detected as dead and
+ * the retry has a high chance of success.
+ *
+ * @param cl the original consistency level of the read that timed out.
+ * @param requiredResponses the number of responses that were required to
+ * achieve the requested consistency level.
+ * @param receivedResponses the number of responses that had been received
+ * by the time the timeout exception was raised.
+ * @param dataRetrieved whether actual data (as opposed to data checksums)
+ * was present in the received responses.
+ * @param nbRetry the number of retries already performed for this operation.
+ * @return {@code RetryDecision.retry(cl)} if no retry attempt has yet been tried and
+ * {@code receivedResponses >= requiredResponses && !dataRetrieved}, {@code RetryDecision.rethrow()} otherwise.
+ */
+ public RetryDecision onReadTimeout(ConsistencyLevel cl, int requiredResponses, int receivedResponses, boolean dataRetrieved, int nbRetry) {
+ if (nbRetry != 0)
+ return RetryDecision.rethrow();
+
+ return receivedResponses >= requiredResponses && !dataRetrieved ? RetryDecision.retry(cl) : RetryDecision.rethrow();
+ }
+
+ /**
+ * Defines whether to retry and at which consistency level on a write timeout.
+ *

+ * This method triggers a maximum of one retry, and only in the case of
+ * a {@code WriteType.BATCH_LOG} write. The reasoning for the retry in
+ * that case is that the write to the distributed batch log is tried by the
+ * coordinator of the write against a small subset of all the nodes alive
+ * in the local datacenter. Hence, a timeout usually means that none of
+ * the nodes in that subset were alive but the coordinator hasn't
+ * detected them as dead. By the time we get the timeout the dead
+ * nodes will likely have been detected as dead and the retry thus has a
+ * high chance of success.
+ *
+ * @param cl the original consistency level of the write that timed out.
+ * @param writeType the type of the write that timed out.
+ * @param requiredAcks the number of acknowledgments that were required to
+ * achieve the requested consistency level.
+ * @param receivedAcks the number of acknowledgments that had been received
+ * by the time the timeout exception was raised.
+ * @param nbRetry the number of retries already performed for this operation.
+ * @return {@code RetryDecision.retry(cl)} if no retry attempt has yet been tried and
+ * {@code writeType == WriteType.BATCH_LOG}, {@code RetryDecision.rethrow()} otherwise.
+ */
+ public RetryDecision onWriteTimeout(ConsistencyLevel cl, WriteType writeType, int requiredAcks, int receivedAcks, int nbRetry) {
+ if (nbRetry != 0)
+ return RetryDecision.rethrow();
+
+ // If the batch log write failed, retry the operation as this might just mean we were unlucky at picking candidates
+ return writeType == WriteType.BATCH_LOG ? RetryDecision.retry(cl) : RetryDecision.rethrow();
+ }
+
+ /**
+ * Defines whether to retry and at which consistency level on an
+ * unavailable exception.
+ *

+ * This method never retries as a retry on an unavailable exception
+ * using the same consistency level has almost no chance of success.
+ *
+ * @param cl the original consistency level for the operation.
+ * @param requiredReplica the number of replicas that should have been
+ * (known) alive for the operation to be attempted.
+ * @param aliveReplica the number of replicas that were known to be alive by
+ * the coordinator of the operation.
+ * @param nbRetry the number of retries already performed for this operation.
+ * @return {@code RetryDecision.rethrow()}.
+ */
+ public RetryDecision onUnavailable(ConsistencyLevel cl, int requiredReplica, int aliveReplica, int nbRetry) {
+ return RetryDecision.rethrow();
+ }
+}
diff --git a/driver-core/src/main/java/com/datastax/driver/core/policies/DowngradingConsistencyRetryPolicy.java b/driver-core/src/main/java/com/datastax/driver/core/policies/DowngradingConsistencyRetryPolicy.java
new file mode 100644
index 00000000000..c67a2648a40
--- /dev/null
+++ b/driver-core/src/main/java/com/datastax/driver/core/policies/DowngradingConsistencyRetryPolicy.java
@@ -0,0 +1,168 @@
+package com.datastax.driver.core.policies;
+
+import com.datastax.driver.core.*;
+
+/**
+ * A retry policy that sometimes retries with a lower consistency level than
+ * the one initially requested.
+ *

+ * BEWARE: This policy may retry queries using a lower consistency
+ * level than the one initially requested. By doing so, it may break
+ * consistency guarantees. In other words, if you use this retry policy,
+ * there are cases (documented below) where a read at {@code QUORUM}
+ * may not see a preceding write at {@code QUORUM}. Do not use this
+ * policy unless you have understood the cases where this can happen and
+ * are ok with that. It is also highly recommended to always wrap this
+ * policy into {@link LoggingRetryPolicy} to log the occurrences of
+ * such consistency breaks.
+ *

+ * This policy implements the same retries as the {@link DefaultRetryPolicy}
+ * policy. But on top of that, it also retries in the following cases:
+ * <ul>
+ *   <li>On a read timeout: if the number of replicas that responded is
+ * greater than one but lower than is required by the requested
+ * consistency level, the operation is retried at a lower consistency
+ * level.</li>
+ *   <li>On a write timeout: if the operation is a {@code
+ * WriteType.UNLOGGED_BATCH} and at least one replica acknowledged the
+ * write, the operation is retried at a lower consistency level.
+ * Furthermore, for other operations, if at least one replica acknowledged
+ * the write, the timeout is ignored.</li>
+ *   <li>On an unavailable exception: if at least one replica is alive, the
+ * operation is retried at a lower consistency level.</li>
+ * </ul>
+ * <p>
+ * The reasoning behind this retry policy is the following. If, based
+ * on the information the Cassandra coordinator node returns, retrying the
+ * operation with the initially requested consistency has a chance to
+ * succeed, do it. Otherwise, if, based on that information, we know the
+ * initially requested consistency level cannot be achieved currently, then:
+ * <ul>
+ *   <li>For writes, ignore the exception (thus silently failing the
+ * consistency requirement) if we know the write has been persisted on at
+ * least one replica.</li>
+ *   <li>For reads, try reading at a lower consistency level (thus silently
+ * failing the consistency requirement).</li>
+ * </ul>
+ * In other words, this policy implements the idea that if the requested + * consistency level cannot be achieved, the next best thing for writes is + * to make sure the data is persisted, and that reading something is better + * than reading nothing, even if there is a risk of reading stale data. + */ +public class DowngradingConsistencyRetryPolicy implements RetryPolicy { + + public static final DowngradingConsistencyRetryPolicy INSTANCE = new DowngradingConsistencyRetryPolicy(); + + private DowngradingConsistencyRetryPolicy() {} + + private RetryDecision maxLikelyToWorkCL(int knownOk) { + if (knownOk >= 3) + return RetryDecision.retry(ConsistencyLevel.THREE); + else if (knownOk >= 2) + return RetryDecision.retry(ConsistencyLevel.TWO); + else if (knownOk >= 1) + return RetryDecision.retry(ConsistencyLevel.ONE); + else + return RetryDecision.rethrow(); + } + + /** + * Defines whether to retry and at which consistency level on a read timeout. + *

+ * This method triggers a maximum of one retry. If fewer replicas
+ * responded than required by the consistency level (but at least one
+ * replica did respond), the operation is retried at a lower
+ * consistency level. If enough replicas responded but data was not
+ * retrieved, the operation is retried with the initial consistency
+ * level. Otherwise, an exception is thrown.
+ *
+ * @param cl the original consistency level of the read that timed out.
+ * @param requiredResponses the number of responses that were required to
+ * achieve the requested consistency level.
+ * @param receivedResponses the number of responses that had been received
+ * by the time the timeout exception was raised.
+ * @param dataRetrieved whether actual data (as opposed to data checksums)
+ * was present in the received responses.
+ * @param nbRetry the number of retries already performed for this operation.
+ * @return a RetryDecision as defined above.
+ */
+ public RetryDecision onReadTimeout(ConsistencyLevel cl, int requiredResponses, int receivedResponses, boolean dataRetrieved, int nbRetry) {
+ if (nbRetry != 0)
+ return RetryDecision.rethrow();
+
+ if (receivedResponses < requiredResponses) {
+ // Tries the biggest CL that is expected to work
+ return maxLikelyToWorkCL(receivedResponses);
+ }
+
+ return !dataRetrieved ? RetryDecision.retry(cl) : RetryDecision.rethrow();
+ }
+
+ /**
+ * Defines whether to retry and at which consistency level on a write timeout.
+ *

+ * This method triggers a maximum of one retry. If {@code writeType ==
+ * WriteType.BATCH_LOG}, the write is retried with the initial
+ * consistency level. If {@code writeType == WriteType.UNLOGGED_BATCH}
+ * and at least one replica acknowledged, the write is retried with a
+ * lower consistency level (with an unlogged batch, a write timeout can
+ * always mean that part of the batch hasn't been persisted at
+ * all, even if {@code receivedAcks > 0}). For other {@code writeType},
+ * if we know the write has been persisted on at least one replica, we
+ * ignore the exception. Otherwise, an exception is thrown.
+ *
+ * @param cl the original consistency level of the write that timed out.
+ * @param writeType the type of the write that timed out.
+ * @param requiredAcks the number of acknowledgments that were required to
+ * achieve the requested consistency level.
+ * @param receivedAcks the number of acknowledgments that had been received
+ * by the time the timeout exception was raised.
+ * @param nbRetry the number of retries already performed for this operation.
+ * @return a RetryDecision as defined above.
+ */
+ public RetryDecision onWriteTimeout(ConsistencyLevel cl, WriteType writeType, int requiredAcks, int receivedAcks, int nbRetry) {
+ if (nbRetry != 0)
+ return RetryDecision.rethrow();
+
+ switch (writeType) {
+ case SIMPLE:
+ case BATCH:
+ // Since we provide atomicity there is no point in retrying
+ return RetryDecision.ignore();
+ case COUNTER:
+ // We should not retry counters, period!
+ return RetryDecision.ignore();
+ case UNLOGGED_BATCH:
+ // Since only part of the batch could have been persisted,
+ // retry with whatever consistency should allow to persist all
+ return maxLikelyToWorkCL(receivedAcks);
+ case BATCH_LOG:
+ return RetryDecision.retry(cl);
+ }
+ return RetryDecision.rethrow();
+ }
+
+ /**
+ * Defines whether to retry and at which consistency level on an
+ * unavailable exception.
+ *

+ * This method triggers a maximum of one retry. If at least one replica
+ * is known to be alive, the operation is retried at a lower consistency
+ * level.
+ *
+ * @param cl the original consistency level for the operation.
+ * @param requiredReplica the number of replicas that should have been
+ * (known) alive for the operation to be attempted.
+ * @param aliveReplica the number of replicas that were known to be alive by
+ * the coordinator of the operation.
+ * @param nbRetry the number of retries already performed for this operation.
+ * @return a RetryDecision as defined above.
+ */
+ public RetryDecision onUnavailable(ConsistencyLevel cl, int requiredReplica, int aliveReplica, int nbRetry) {
+ if (nbRetry != 0)
+ return RetryDecision.rethrow();
+
+ // Tries the biggest CL that is expected to work
+ return maxLikelyToWorkCL(aliveReplica);
+ }
+}
diff --git a/driver-core/src/main/java/com/datastax/driver/core/policies/ExponentialReconnectionPolicy.java b/driver-core/src/main/java/com/datastax/driver/core/policies/ExponentialReconnectionPolicy.java
new file mode 100644
index 00000000000..b252ffd454d
--- /dev/null
+++ b/driver-core/src/main/java/com/datastax/driver/core/policies/ExponentialReconnectionPolicy.java
@@ -0,0 +1,60 @@
+package com.datastax.driver.core.policies;
+
+/**
+ * A reconnection policy that waits exponentially longer between each
+ * reconnection attempt (but keeps a constant delay once a maximum delay is
+ * reached).
+ */
+public class ExponentialReconnectionPolicy implements ReconnectionPolicy {
+
+ private final long baseDelayMs;
+ private final long maxDelayMs;
+
+ /**
+ * Creates a reconnection policy waiting exponentially longer for each new attempt.
+ *
+ * @param baseDelayMs the base delay in milliseconds to use for
+ * the schedules created by this policy.
+ * @param maxDelayMs the maximum delay to wait between two attempts.
+ */
+ public ExponentialReconnectionPolicy(long baseDelayMs, long maxDelayMs) {
+ if (baseDelayMs < 0 || maxDelayMs < 0)
+ throw new IllegalArgumentException("Invalid negative delay");
+ if (maxDelayMs < baseDelayMs)
+ throw new IllegalArgumentException(String.format("maxDelayMs (got %d) cannot be smaller than baseDelayMs (got %d)", maxDelayMs, baseDelayMs));
+
+ this.baseDelayMs = baseDelayMs;
+ this.maxDelayMs = maxDelayMs;
+ }
+
+ public ExponentialSchedule newSchedule() {
+ return new ExponentialSchedule();
+ }
+
+ public class ExponentialSchedule implements ReconnectionSchedule {
+
+ private int attempts;
+
+ /**
+ * The delay before the next reconnection.
+ *

+ * For this schedule, reconnection attempt {@code i} will be tried
+ * {@code 2^i * baseDelayMs} milliseconds after the previous one
+ * (unless {@code maxDelayMs} has been reached, in which case all
+ * following attempts will be done with a delay of {@code maxDelayMs}),
+ * where {@code baseDelayMs} (and {@code maxDelayMs}) are the
+ * delays set by the {@code ExponentialReconnectionPolicy} from
+ * which this schedule has been created.
+ *
+ * @return the delay before the next reconnection.
+ */
+ public long nextDelayMs() {
+ // We "overflow" at 64 attempts but I doubt this matters
+ if (attempts >= 64)
+ return maxDelayMs;
+
+ long next = baseDelayMs * (1L << attempts++);
+ return next > maxDelayMs ? maxDelayMs : next;
+ }
+ }
+}
diff --git a/driver-core/src/main/java/com/datastax/driver/core/policies/FallthroughRetryPolicy.java b/driver-core/src/main/java/com/datastax/driver/core/policies/FallthroughRetryPolicy.java
new file mode 100644
index 00000000000..5474de2d678
--- /dev/null
+++ b/driver-core/src/main/java/com/datastax/driver/core/policies/FallthroughRetryPolicy.java
@@ -0,0 +1,28 @@
+package com.datastax.driver.core.policies;
+
+import com.datastax.driver.core.*;
+
+/**
+ * A retry policy that never retries (nor ignores).
+ *

+ * All of the methods of this retry policy unconditionally return {@link RetryPolicy.RetryDecision#rethrow}.
+ * If this policy is used, retry will have to be implemented in business code.
+ */
+public class FallthroughRetryPolicy implements RetryPolicy {
+
+ public static final FallthroughRetryPolicy INSTANCE = new FallthroughRetryPolicy();
+
+ private FallthroughRetryPolicy() {}
+
+ public RetryPolicy.RetryDecision onReadTimeout(ConsistencyLevel cl, int requiredResponses, int receivedResponses, boolean dataRetrieved, int nbRetry) {
+ return RetryPolicy.RetryDecision.rethrow();
+ }
+
+ public RetryPolicy.RetryDecision onWriteTimeout(ConsistencyLevel cl, WriteType writeType, int requiredAcks, int receivedAcks, int nbRetry) {
+ return RetryPolicy.RetryDecision.rethrow();
+ }
+
+ public RetryPolicy.RetryDecision onUnavailable(ConsistencyLevel cl, int requiredReplica, int aliveReplica, int nbRetry) {
+ return RetryPolicy.RetryDecision.rethrow();
+ }
+}
diff --git a/driver-core/src/main/java/com/datastax/driver/core/policies/LoadBalancingPolicy.java b/driver-core/src/main/java/com/datastax/driver/core/policies/LoadBalancingPolicy.java
index 5b9709822c5..08ed71b4c1f 100644
--- a/driver-core/src/main/java/com/datastax/driver/core/policies/LoadBalancingPolicy.java
+++ b/driver-core/src/main/java/com/datastax/driver/core/policies/LoadBalancingPolicy.java
@@ -1,11 +1,7 @@
package com.datastax.driver.core.policies;
-import java.nio.ByteBuffer;
-import java.util.*;
-import java.util.concurrent.*;
-import java.util.concurrent.atomic.AtomicInteger;
-
-import com.google.common.collect.AbstractIterator;
+import java.util.Collection;
+import java.util.Iterator;
import com.datastax.driver.core.*;
@@ -26,6 +22,18 @@
*/
public interface LoadBalancingPolicy extends Host.StateListener {
+ /**
+ * Initialize this load balancing policy.
+ *

+ * Note that the driver guarantees that it will call this method exactly + * once per policy object and will do so before any call to another of the + * methods of the policy. + * + * @param cluster the {@code Cluster} instance for which the policy is created. + * @param hosts the initial hosts to use. + */ + public void init(Cluster cluster, Collection hosts); + /** * Returns the distance assigned by this policy to the provided host. *

@@ -59,466 +67,4 @@ public interface LoadBalancingPolicy extends Host.StateListener { * successfully to one of the host. */ public Iterator newQueryPlan(Query query); - - /** - * Simple factory interface to allow creating {@link LoadBalancingPolicy} instances. - */ - public interface Factory { - - /** - * Creates a new LoadBalancingPolicy instance over the provided (initial) {@code hosts}. - * - * @param cluster the {@code Cluster} instance for which the policy is created. - * @param hosts the initial hosts to use. - * @return the newly created {@link LoadBalancingPolicy} instance. - */ - public LoadBalancingPolicy create(Cluster cluster, Collection hosts); - } - - /** - * A Round-robin load balancing policy. - *

- * This policy queries nodes in a round-robin fashion. For a given query, - * if an host fail, the next one (following the round-robin order) is - * tried, until all hosts have been tried. - *

- * This policy is not datacenter aware and will include every known - * Cassandra host in its round robin algorithm. If you use multiple - * datacenter this will be inefficient and you will want to use the - * DCAwareRoundRobin load balancing policy instead. - */ - public static class RoundRobin implements LoadBalancingPolicy { - - private final CopyOnWriteArrayList liveHosts; - private final AtomicInteger index = new AtomicInteger(); - - private RoundRobin(Collection hosts) { - this.liveHosts = new CopyOnWriteArrayList(hosts); - this.index.set(new Random().nextInt(Math.max(hosts.size(), 1))); - } - - /** - * Return the HostDistance for the provided host. - *

- * This policy consider all nodes as local. This is generally the right - * thing to do in a single datacenter deployement. If you use multiple - * datacenter, see {@link DCAwareRoundRobin} instead. - * - * @param host the host of which to return the distance of. - * @return the HostDistance to {@code host}. - */ - public HostDistance distance(Host host) { - return HostDistance.LOCAL; - } - - /** - * Returns the hosts to use for a new query. - *

- * The returned plan will try each known host of the cluster. Upon each - * call to this method, the ith host of the plans returned will cycle - * over all the host of the cluster in a round-robin fashion. - * - * @return a new query plan, i.e. an iterator indicating which host to - * try first for querying, which one to use as failover, etc... - */ - public Iterator newQueryPlan(Query query) { - - // We clone liveHosts because we want a version of the list that - // cannot change concurrently of the query plan iterator (this - // would be racy). We use clone() as it don't involve a copy of the - // underlying array (and thus we rely on liveHosts being a CopyOnWriteArrayList). - final List hosts = (List)liveHosts.clone(); - final int startIdx = index.getAndIncrement(); - - // Overflow protection; not theoretically thread safe but should be good enough - if (startIdx > Integer.MAX_VALUE - 10000) - index.set(0); - - return new AbstractIterator() { - - private int idx = startIdx; - private int remaining = hosts.size(); - - protected Host computeNext() { - if (remaining <= 0) - return endOfData(); - - remaining--; - return hosts.get(idx++ % hosts.size()); - } - }; - } - - public void onUp(Host host) { - liveHosts.addIfAbsent(host); - } - - public void onDown(Host host) { - liveHosts.remove(host); - } - - public void onAdd(Host host) { - onUp(host); - } - - public void onRemove(Host host) { - onDown(host); - } - - /** - * A {@code LoadBalancingPolicy.Factory} that creates RoundRobin - * policies (on the whole cluster). - */ - public static class Factory implements LoadBalancingPolicy.Factory { - - public static final Factory INSTANCE = new Factory(); - - private Factory() {} - - public LoadBalancingPolicy create(Cluster cluster, Collection hosts) { - return new RoundRobin(hosts); - } - } - } - - /** - * A data-center aware Round-robin load balancing policy. - *

- * This policy provides round-robin queries over the node of the local - * datacenter. It also includes in the query plans returned a configurable - * number of hosts in the remote datacenters, but those are always tried - * after the local nodes. In other words, this policy guarantees that no - * host in a remote datacenter will be queried unless no host in the local - * datacenter can be reached. - *

- * If used with a single datacenter, this policy is equivalent to the - * {@code LoadBalancingPolicy.RoundRobin} policy, but its DC awareness - * incurs a slight overhead so the {@code LoadBalancingPolicy.RoundRobin} - * policy could be prefered to this policy in that case. - */ - public static class DCAwareRoundRobin implements LoadBalancingPolicy { - - private final ConcurrentMap> perDcLiveHosts = new ConcurrentHashMap>(); - private final AtomicInteger index = new AtomicInteger(); - private final String localDc; - private final int usedHostsPerRemoteDc; - - private DCAwareRoundRobin(Collection hosts, String localDc, int usedHostsPerRemoteDc) { - this.index.set(new Random().nextInt(Math.max(hosts.size(), 1))); - this.localDc = localDc; - this.usedHostsPerRemoteDc = usedHostsPerRemoteDc; - - for (Host host : hosts) { - String dc = dc(host); - CopyOnWriteArrayList prev = perDcLiveHosts.get(dc); - if (prev == null) - perDcLiveHosts.put(dc, new CopyOnWriteArrayList(Collections.singletonList(host))); - else - prev.addIfAbsent(host); - } - } - - private String dc(Host host) { - String dc = host.getDatacenter(); - return dc == null ? localDc : dc; - } - - /** - * Return the HostDistance for the provided host. - *
- * This policy considers nodes in the local datacenter as {@code LOCAL}.
- * For each remote datacenter, it considers a configurable number of
- * hosts as {@code REMOTE} and the rest as {@code IGNORED}.
- *
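For illustration, a sketch of the resulting distances for a policy built with
localDc "DC1" and usedHostsPerRemoteDc = 1 (the host variables are
hypothetical; the classification follows the distance method just below):

    policy.distance(hostInDc1);        // HostDistance.LOCAL
    policy.distance(firstLiveDc2Host); // HostDistance.REMOTE (first live host of DC2)
    policy.distance(otherDc2Host);     // HostDistance.IGNORED (beyond the configured limit)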
- * To configure how many hosts in each remote datacenter are considered
- * {@code REMOTE}, see {@link Factory#create(String, int)}.
- *
- * @param host the host of which to return the distance.
- * @return the HostDistance to {@code host}.
- */
- public HostDistance distance(Host host) {
- String dc = dc(host);
- if (dc.equals(localDc))
- return HostDistance.LOCAL;
-
- CopyOnWriteArrayList<Host> dcHosts = perDcLiveHosts.get(dc);
- if (dcHosts == null || usedHostsPerRemoteDc == 0)
- return HostDistance.IGNORED;
-
- // We need to clone, otherwise our subList call is not thread safe
- dcHosts = (CopyOnWriteArrayList<Host>)dcHosts.clone();
- return dcHosts.subList(0, Math.min(dcHosts.size(), usedHostsPerRemoteDc)).contains(host)
- ? HostDistance.REMOTE
- : HostDistance.IGNORED;
- }
-
- /**
- * Returns the hosts to use for a new query.
- *
- * The returned plan will always try each known host in the local
- * datacenter first, and then, if none of the local hosts is reachable,
- * will try up to a configurable number of other hosts per remote datacenter.
- * The order of the local nodes in the returned query plan will follow a
- * round-robin algorithm.
- *
- * @return a new query plan, i.e. an iterator indicating which host to
- * try first for querying, which one to use as failover, etc...
- */
- public Iterator<Host> newQueryPlan(Query query) {
-
- CopyOnWriteArrayList<Host> localLiveHosts = perDcLiveHosts.get(localDc);
- final List<Host> hosts = localLiveHosts == null ? Collections.<Host>emptyList() : (List<Host>)localLiveHosts.clone();
- final int startIdx = index.getAndIncrement();
-
- // Overflow protection; not theoretically thread safe but should be good enough
- if (startIdx > Integer.MAX_VALUE - 10000)
- index.set(0);
-
- return new AbstractIterator<Host>() {
-
- private int idx = startIdx;
- private int remainingLocal = hosts.size();
-
- // For remote Dcs
- private Iterator<String> remoteDcs;
- private List<Host> currentDcHosts;
- private int currentDcRemaining;
-
- protected Host computeNext() {
- if (remainingLocal > 0) {
- remainingLocal--;
- return hosts.get(idx++ % hosts.size());
- }
-
- if (currentDcHosts != null && currentDcRemaining > 0) {
- currentDcRemaining--;
- return currentDcHosts.get(idx++ % currentDcHosts.size());
- }
-
- if (remoteDcs == null) {
- Set<String> copy = new HashSet<String>(perDcLiveHosts.keySet());
- copy.remove(localDc);
- remoteDcs = copy.iterator();
- }
-
- if (!remoteDcs.hasNext())
- return endOfData();
-
- String nextRemoteDc = remoteDcs.next();
- CopyOnWriteArrayList<Host> nextDcHosts = perDcLiveHosts.get(nextRemoteDc);
- if (nextDcHosts != null) {
- currentDcHosts = (List<Host>)nextDcHosts.clone();
- currentDcRemaining = Math.min(usedHostsPerRemoteDc, currentDcHosts.size());
- }
-
- return computeNext();
- }
- };
- }
-
- public void onUp(Host host) {
- String dc = dc(host);
- CopyOnWriteArrayList<Host> dcHosts = perDcLiveHosts.get(dc);
- if (dcHosts == null) {
- CopyOnWriteArrayList<Host> newMap = new CopyOnWriteArrayList<Host>(Collections.singletonList(host));
- dcHosts = perDcLiveHosts.putIfAbsent(dc, newMap);
- // If we've successfully put our new host, we're good, otherwise we've been beaten so continue
- if (dcHosts == null)
- return;
- }
- dcHosts.addIfAbsent(host);
- }
-
- public void onDown(Host host) {
- CopyOnWriteArrayList<Host> dcHosts = perDcLiveHosts.get(dc(host));
- if (dcHosts != null)
- dcHosts.remove(host);
- }
-
- public void onAdd(Host host) {
- onUp(host);
- }
-
- public void onRemove(Host host) {
- onDown(host);
- }
-
- /**
- * A {@code LoadBalancingPolicy.Factory} that creates DCAwareRoundRobin
- * policies.
- */
- public static class Factory implements LoadBalancingPolicy.Factory {
-
- public static final int DEFAULT_USED_HOSTS_PER_REMOTE_DC = 0;
-
- private final String localDc;
- private final int usedHostsPerRemoteDc;
-
- private Factory(String localDc, int usedHostsPerRemoteDc) {
- this.localDc = localDc;
- this.usedHostsPerRemoteDc = usedHostsPerRemoteDc;
- }
-
- /**
- * Creates a new DCAwareRoundRobin policy factory given the name of
- * the local datacenter.
- *
- * The name of the local datacenter provided must be the local - * datacenter name as known by Cassandra. - *
- * The policy created by the returned factory will ignore all
- * remote hosts. In other words, this is equivalent to
- * {@code create(localDc, 0)}.
- *
- * @param localDc the name of the local datacenter (as known by
- * Cassandra).
- * @return the newly created factory.
- */
- public static Factory create(String localDc) {
- return new Factory(localDc, DEFAULT_USED_HOSTS_PER_REMOTE_DC);
- }
-
- /**
- * Creates a new DCAwareRoundRobin policy factory given the name of
- * the local datacenter, that uses the provided number of hosts per
- * remote datacenter as failover for the local hosts.
- *
- * The name of the local datacenter provided must be the local
- * datacenter name as known by Cassandra.
- *
- * @param localDc the name of the local datacenter (as known by
- * Cassandra).
- * @param usedHostsPerRemoteDc the number of hosts per remote
- * datacenter that policies created by the returned factory should
- * consider. Created policies' {@code distance} method will return a
- * {@code HostDistance.REMOTE} distance for only {@code
- * usedHostsPerRemoteDc} hosts per remote datacenter. Other hosts
- * of the remote datacenters will be ignored (and thus no
- * connections to them will be maintained).
- * @return the newly created factory.
- */
- public static Factory create(String localDc, int usedHostsPerRemoteDc) {
- return new Factory(localDc, usedHostsPerRemoteDc);
- }
-
- public LoadBalancingPolicy create(Cluster cluster, Collection<Host> hosts) {
- return new DCAwareRoundRobin(hosts, localDc, usedHostsPerRemoteDc);
- }
- }
- }
-
- /**
- * A wrapper load balancing policy that adds token awareness to a child policy.
- *
- * This policy encapsulates another policy. The resulting policy works in - * the following way: - *
- * <ul>
- *   <li>the {@code distance} method is inherited from the child policy.</li>
- *   <li>the iterator returned by the {@code newQueryPlan} method will first
- * return the {@code LOCAL} replicas for the query (based on {@link Query#getRoutingKey})
- * if possible (i.e. if the query {@code getRoutingKey} method
- * doesn't return {@code null} and if {@link ClusterMetadata#getReplicas}
- * returns a non-empty set of replicas for that partition key). If no
- * local replica can be either found or successfully contacted, the rest
- * of the query plan will fall back to the child policy.</li>
- * </ul>
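A usage sketch, mirroring the test code updated later in this section (the
child policy here is a plain round-robin):

    Cluster.Builder builder = new Cluster.Builder()
        .withLoadBalancingPolicyFactory(
            LoadBalancingPolicy.TokenAware.Factory.create(
                LoadBalancingPolicy.RoundRobin.Factory.INSTANCE));

The child policy still decides host distances; the wrapper only reorders each
query plan so that local replicas of the routing key are tried first.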
- * Do note that only replica for which the child policy {@code distance} - * method returns {@code HostDistance.LOCAL} will be considered having - * priority. For example, if you wrap {@link DCAwareRoundRobin} with this - * token aware policy, replicas from remote data centers may only be - * returned after all the host of the local data center. - */ - public static class TokenAware implements LoadBalancingPolicy { - - private final ClusterMetadata clusterMetadata; - private final LoadBalancingPolicy childPolicy; - - private TokenAware(Cluster cluster, LoadBalancingPolicy childPolicy) { - this.clusterMetadata = cluster.getMetadata(); - this.childPolicy = childPolicy; - } - - public HostDistance distance(Host host) { - return childPolicy.distance(host); - } - - public Iterator newQueryPlan(final Query query) { - - ByteBuffer partitionKey = query.getRoutingKey(); - if (partitionKey == null) - return childPolicy.newQueryPlan(query); - - final Set replicas = clusterMetadata.getReplicas(partitionKey); - if (replicas.isEmpty()) - return childPolicy.newQueryPlan(query); - - return new AbstractIterator() { - - private final Iterator iter = replicas.iterator(); - private Iterator childIterator; - - protected Host computeNext() { - while (iter.hasNext()) { - Host host = iter.next(); - if (host.getMonitor().isUp() && childPolicy.distance(host) == HostDistance.LOCAL) - return host; - } - - if (childIterator == null) - childIterator = childPolicy.newQueryPlan(query); - - while (childIterator.hasNext()) { - Host host = childIterator.next(); - // Skip it if it was already a local replica - if (!replicas.contains(host) || childPolicy.distance(host) != HostDistance.LOCAL) - return host; - } - return endOfData(); - } - }; - } - - public void onUp(Host host) { - childPolicy.onUp(host); - } - - public void onDown(Host host) { - childPolicy.onDown(host); - } - - public void onAdd(Host host) { - childPolicy.onAdd(host); - } - - public void onRemove(Host host) { - childPolicy.onRemove(host); - } - - public static class Factory implements LoadBalancingPolicy.Factory { - - private final LoadBalancingPolicy.Factory childFactory; - - private Factory(LoadBalancingPolicy.Factory childFactory) { - this.childFactory = childFactory; - } - - /** - * Creates a new {@code TokenAware} policy factory that wraps - * policies build by the provided child load balancing policy - * factory. - * - * @param childFactory the factory for the load balancing policy to - * wrap with token awareness. - * @return the newly created factory. - */ - public static Factory create(LoadBalancingPolicy.Factory childFactory) { - return new Factory(childFactory); - } - - public LoadBalancingPolicy create(Cluster cluster, Collection hosts) { - return new TokenAware(cluster, childFactory.create(cluster, hosts)); - } - } - } } diff --git a/driver-core/src/main/java/com/datastax/driver/core/policies/LoggingRetryPolicy.java b/driver-core/src/main/java/com/datastax/driver/core/policies/LoggingRetryPolicy.java new file mode 100644 index 00000000000..9b3f1e779b7 --- /dev/null +++ b/driver-core/src/main/java/com/datastax/driver/core/policies/LoggingRetryPolicy.java @@ -0,0 +1,79 @@ +package com.datastax.driver.core.policies; + +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; +import org.apache.log4j.Level; + +import com.datastax.driver.core.*; + +/** + * A retry policy that wraps another policy, logging the decision made by its sub-policy. + *
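A minimal usage sketch (DefaultRetryPolicy.INSTANCE is the default policy of
this package; any other RetryPolicy could be wrapped the same way):

    RetryPolicy policy = new LoggingRetryPolicy(DefaultRetryPolicy.INSTANCE);

Each IGNORE or RETRY decision made by the wrapped policy is then logged at
INFO level before being returned unchanged.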
+ * Note that this policy only logs the IGNORE and RETRY decisions (since
+ * RETHROW decisions just amount to propagating the Cassandra exception). The
+ * logging is done at the INFO level.
+ */
+public class LoggingRetryPolicy implements RetryPolicy {
+
+ private static final Logger logger = LoggerFactory.getLogger(LoggingRetryPolicy.class);
+ private final RetryPolicy policy;
+
+ /**
+ * Creates a new {@code RetryPolicy} that logs the decisions of {@code policy}.
+ *
+ * @param policy the policy to wrap. The policy created by this constructor
+ * will return the same decisions as {@code policy} but will log them.
+ */
+ public LoggingRetryPolicy(RetryPolicy policy) {
+ this.policy = policy;
+ }
+
+ private static ConsistencyLevel cl(ConsistencyLevel cl, RetryDecision decision) {
+ return decision.getRetryConsistencyLevel() == null ? cl : decision.getRetryConsistencyLevel();
+ }
+
+ public RetryDecision onReadTimeout(ConsistencyLevel cl, int requiredResponses, int receivedResponses, boolean dataRetrieved, int nbRetry) {
+ RetryDecision decision = policy.onReadTimeout(cl, requiredResponses, receivedResponses, dataRetrieved, nbRetry);
+ switch (decision.getType()) {
+ case IGNORE:
+ String f1 = "Ignoring read timeout (initial consistency: %s, required responses: %d, received responses: %d, data retrieved: %b, retries: %d)";
+ logger.info(String.format(f1, cl, requiredResponses, receivedResponses, dataRetrieved, nbRetry));
+ break;
+ case RETRY:
+ String f2 = "Retrying on read timeout at consistency %s (initial consistency: %s, required responses: %d, received responses: %d, data retrieved: %b, retries: %d)";
+ logger.info(String.format(f2, cl(cl, decision), cl, requiredResponses, receivedResponses, dataRetrieved, nbRetry));
+ break;
+ }
+ return decision;
+ }
+
+ public RetryDecision onWriteTimeout(ConsistencyLevel cl, WriteType writeType, int requiredAcks, int receivedAcks, int nbRetry) {
+ RetryDecision decision = policy.onWriteTimeout(cl, writeType, requiredAcks, receivedAcks, nbRetry);
+ switch (decision.getType()) {
+ case IGNORE:
+ String f1 = "Ignoring write timeout (initial consistency: %s, write type: %s, required acknowledgments: %d, received acknowledgments: %d, retries: %d)";
+ logger.info(String.format(f1, cl, writeType, requiredAcks, receivedAcks, nbRetry));
+ break;
+ case RETRY:
+ String f2 = "Retrying on write timeout at consistency %s (initial consistency: %s, write type: %s, required acknowledgments: %d, received acknowledgments: %d, retries: %d)";
+ logger.info(String.format(f2, cl(cl, decision), cl, writeType, requiredAcks, receivedAcks, nbRetry));
+ break;
+ }
+ return decision;
+ }
+
+ public RetryDecision onUnavailable(ConsistencyLevel cl, int requiredReplica, int aliveReplica, int nbRetry) {
+ RetryDecision decision = policy.onUnavailable(cl, requiredReplica, aliveReplica, nbRetry);
+ switch (decision.getType()) {
+ case IGNORE:
+ String f1 = "Ignoring unavailable exception (initial consistency: %s, required replica: %d, alive replica: %d, retries: %d)";
+ logger.info(String.format(f1, cl, requiredReplica, aliveReplica, nbRetry));
+ break;
+ case RETRY:
+ String f2 = "Retrying on unavailable exception at consistency %s (initial consistency: %s, required replica: %d, alive replica: %d, retries: %d)";
+ logger.info(String.format(f2, cl(cl, decision), cl, requiredReplica, aliveReplica, nbRetry));
+ break;
+ }
+ return decision;
+ }
+}
diff --git a/driver-core/src/main/java/com/datastax/driver/core/policies/Policies.java
b/driver-core/src/main/java/com/datastax/driver/core/policies/Policies.java index 3372ee0403a..eafe47a1993 100644 --- a/driver-core/src/main/java/com/datastax/driver/core/policies/Policies.java +++ b/driver-core/src/main/java/com/datastax/driver/core/policies/Policies.java @@ -1,30 +1,65 @@ package com.datastax.driver.core.policies; +/** + * Policies configured for a {@link com.datastax.driver.core.Cluster} instance. + */ public class Policies { - public static final LoadBalancingPolicy.Factory DEFAULT_LOAD_BALANCING_POLICY_FACTORY = LoadBalancingPolicy.RoundRobin.Factory.INSTANCE; - public static final ReconnectionPolicy.Factory DEFAULT_RECONNECTION_POLICY_FACTORY = ReconnectionPolicy.Exponential.makeFactory(2 * 1000, 5 * 60 * 1000); - public static final RetryPolicy DEFAULT_RETRY_POLICY = RetryPolicy.Default.INSTANCE; + /** + * The default load balancing policy. + *
+ * The default load balancing policy is {@link RoundRobinPolicy}. + */ + public static final LoadBalancingPolicy DEFAULT_LOAD_BALANCING_POLICY = new RoundRobinPolicy(); - private final LoadBalancingPolicy.Factory loadBalancingPolicyFactory; - private final ReconnectionPolicy.Factory reconnectionPolicyFactory; + /** + * The default reconnection policy. + *
+ * The default reconnection policy is an {@link ExponentialReconnectionPolicy}
+ * where the base delay is 1 second and the max delay is 10 minutes.
+ */
+ public static final ReconnectionPolicy DEFAULT_RECONNECTION_POLICY = new ExponentialReconnectionPolicy(1000, 10 * 60 * 1000);
+
+ /**
+ * The default retry policy.
+ *
+ * The default retry policy is {@link DefaultRetryPolicy}.
+ */
+ public static final RetryPolicy DEFAULT_RETRY_POLICY = DefaultRetryPolicy.INSTANCE;
+
+ private final LoadBalancingPolicy loadBalancingPolicy;
+ private final ReconnectionPolicy reconnectionPolicy;
 private final RetryPolicy retryPolicy;

- public Policies(LoadBalancingPolicy.Factory loadBalancingPolicyFactory,
- ReconnectionPolicy.Factory reconnectionPolicyFactory,
+ /**
+ * Creates a new {@code Policies} object using the provided policies.
+ *
+ * @param loadBalancingPolicy the load balancing policy to use.
+ * @param reconnectionPolicy the reconnection policy to use.
+ * @param retryPolicy the retry policy to use.
+ */
+ public Policies(LoadBalancingPolicy loadBalancingPolicy,
+ ReconnectionPolicy reconnectionPolicy,
 RetryPolicy retryPolicy) {
- this.loadBalancingPolicyFactory = loadBalancingPolicyFactory;
- this.reconnectionPolicyFactory = reconnectionPolicyFactory;
+ this.loadBalancingPolicy = loadBalancingPolicy;
+ this.reconnectionPolicy = reconnectionPolicy;
 this.retryPolicy = retryPolicy;
 }

- public LoadBalancingPolicy.Factory getLoadBalancingPolicyFactory() {
- return loadBalancingPolicyFactory;
+ /**
+ * The load balancing policy in use.
+ *
+ * The load balancing policy defines how Cassandra hosts are picked for queries. + * + * @return the load balancing policy in use. + */ + public LoadBalancingPolicy getLoadBalancingPolicy() { + return loadBalancingPolicy; } - public ReconnectionPolicy.Factory getReconnectionPolicyFactory() { - return reconnectionPolicyFactory; + public ReconnectionPolicy getReconnectionPolicy() { + return reconnectionPolicy; } public RetryPolicy getRetryPolicy() { diff --git a/driver-core/src/main/java/com/datastax/driver/core/policies/ReconnectionPolicy.java b/driver-core/src/main/java/com/datastax/driver/core/policies/ReconnectionPolicy.java index 0e291d09c14..8a3ce8ca2fb 100644 --- a/driver-core/src/main/java/com/datastax/driver/core/policies/ReconnectionPolicy.java +++ b/driver-core/src/main/java/com/datastax/driver/core/policies/ReconnectionPolicy.java @@ -4,135 +4,39 @@ * Policy that decides how often the reconnection to a dead node is attempted. * * Each time a node is detected dead (because a connection error occurs), a new - * {@code ReconnectionPolicy} instance is created (based on which {@link - * ReconnectionPolicy.Factory} has been configured). Then each call to the - * {@link #nextDelayMs} method of this instance will decide when the next - * reconnection attempt to this node will be tried. + * {@code ReconnectionSchedule} instance is created (through the {@link #newSchedule()}). + * Then each call to the {@link ReconnectionSchedule#nextDelayMs} method of + * this instance will decide when the next reconnection attempt to this node + * will be tried. * - * Note that independently of the reconnection policy, the driver will attempt - * a reconnection if it received a push notification from the Cassandra cluster - * that the node is UP again. So this reconnection policy is mainly useful in - * case where the client loose connection to a node without that node actually - * being down. - * - * The default {@link ReconnectionPolicy.Exponential} policy is usually + * Note that if the driver receives a push notification from the Cassandra cluster + * that a node is UP, any existing {@code ReconnectionSchedule} on that node + * will be cancelled and a new one will be created (in effect, the driver reset + * the scheduler). + * + * The default {@link ExponentialReconnectionPolicy} policy is usually * adequate. */ public interface ReconnectionPolicy { - /** - * When to attempt the next reconnection. - * - * This method will be called once when the host is detected down to - * schedul the first reconnection attempt, and then once after each failed - * reconnection attempt to schedule the next one. Hence each call to this - * method are free to return a different value. - * - * @return a time in milliseconds to wait before attempting the next - * reconnection. - */ - public long nextDelayMs(); + public ReconnectionSchedule newSchedule(); /** - * Simple factory interface to create {@link ReconnectionPolicy} instances. + * Schedules reconnection attempts to a node. */ - public interface Factory { - - /** - * Creates a new connection policy instance. - * - * @return a new {@code ReconnectionPolicy} instance. - */ - public ReconnectionPolicy create(); - } - - /** - * A reconnection policy that waits a constant time between each reconnection attempt. 
- */ - public static class Constant implements ReconnectionPolicy { - - private final long delayMs; - - private Constant(long delayMs) { - if (delayMs < 0) - throw new IllegalArgumentException(String.format("Invalid negative delay (got %d) for ReconnectionPolicy", delayMs)); - - this.delayMs = delayMs; - } - - public long nextDelayMs() { - return delayMs; - } + public interface ReconnectionSchedule { /** - * Creates a reconnection policy factory that creates {@link - * ReconnectionPolicy.Constant} policies with the provided constant wait - * time. + * When to attempt the next reconnection. * - * @param constantDelayMs the constant delay in milliseconds to use for - * the reconnection policy created by the factory returned by this - * method. - * @return a reconnection policy factory that creates {@code - * Reconnection.Constant} policies with a {@code constantDelayMs} - * milliseconds delay between reconnection attempts. - */ - public static ReconnectionPolicy.Factory makeFactory(final long constantDelayMs) { - return new ReconnectionPolicy.Factory() { - public ReconnectionPolicy create() { - return new Constant(constantDelayMs); - } - }; - } - } - - /** - * A reconnection policy that waits exponentially longer between each - * reconnection attempt (but keeps a constant delay once a maximum delay is - * reached). - */ - public static class Exponential implements ReconnectionPolicy { - - private final long baseDelayMs; - private final long maxDelayMs; - private int attempts; - - private Exponential(long baseDelayMs, long maxDelayMs) { - if (baseDelayMs < 0 || maxDelayMs < 0) - throw new IllegalArgumentException("Invalid negative delay for ReconnectionPolicy"); - if (maxDelayMs < baseDelayMs) - throw new IllegalArgumentException(String.format("maxDelayMs (got %d) cannot be smaller than baseDelayMs (got %d)", maxDelayMs, baseDelayMs)); - - this.baseDelayMs = baseDelayMs; - this.maxDelayMs = maxDelayMs; - } - - public long nextDelayMs() { - ++attempts; - return baseDelayMs * (1 << attempts); - } - - /** - * Creates a reconnection policy factory that creates {@link - * ReconnectionPolicy.Exponential} policies with the provided base and - * max delays. + * This method will be called once when the host is detected down to + * schedule the first reconnection attempt, and then once after each failed + * reconnection attempt to schedule the next one. Hence each call to this + * method are free to return a different value. * - * @param baseDelayMs the base delay in milliseconds to use for - * the reconnection policy created by the factory returned by this - * method. Reconnection attempt {@code i} will be tried - * {@code 2^i * baseDelayMs} milliseconds after the previous one - * (unless {@code maxDelayMs} has been reached, in which case all - * following attempts will be done with a delay of {@code maxDelayMs}). - * @param maxDelayMs the maximum delay to wait between two attempts. - * @return a reconnection policy factory that creates {@code - * Reconnection.Constant} policies with a {@code constantDelayMs} - * milliseconds delay between reconnection attempts. + * @return a time in milliseconds to wait before attempting the next + * reconnection. 
*/ - public static ReconnectionPolicy.Factory makeFactory(final long baseDelayMs, final long maxDelayMs) { - return new ReconnectionPolicy.Factory() { - public ReconnectionPolicy create() { - return new Exponential(baseDelayMs, maxDelayMs); - } - }; - } + public long nextDelayMs(); } } diff --git a/driver-core/src/main/java/com/datastax/driver/core/policies/RetryPolicy.java b/driver-core/src/main/java/com/datastax/driver/core/policies/RetryPolicy.java index 4ff9f43e643..6be5c769614 100644 --- a/driver-core/src/main/java/com/datastax/driver/core/policies/RetryPolicy.java +++ b/driver-core/src/main/java/com/datastax/driver/core/policies/RetryPolicy.java @@ -1,9 +1,5 @@ package com.datastax.driver.core.policies; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; -import org.apache.log4j.Level; - import com.datastax.driver.core.*; /** @@ -144,345 +140,4 @@ public static RetryDecision ignore() { * be thrown for the operation. */ public RetryDecision onUnavailable(ConsistencyLevel cl, int requiredReplica, int aliveReplica, int nbRetry); - - /** - * The default retry policy. - *
- * This policy retries queries in only two cases: - *
- * <ul>
- *   <li>On a read timeout, if enough replicas replied but data was not retrieved.</li>
- *   <li>On a write timeout, if we time out while writing the distributed log used by batch statements.</li>
- * </ul>
- * This retry policy is conservative in that it will never retry with a - * different consistency level than the one of the initial operation. - */ - public static class Default implements RetryPolicy { - - public static final Default INSTANCE = new Default(); - - private Default() {} - - /** - * Defines whether to retry and at which consistency level on a read timeout. - *
- * This method triggers a maximum of one retry, and only if enough
- * replicas had responded to the read request but data was not retrieved
- * amongst those. Indeed, that case usually means that enough replicas
- * are alive to satisfy the consistency but the coordinator picked a
- * dead one for data retrieval, not having detected that replica as dead
- * yet. The reasoning for retrying then is that by the time we get the
- * timeout the dead replica will likely have been detected as dead and
- * the retry has a high chance of success.
- *
- * @param cl the original consistency level of the read that timed out.
- * @param requiredResponses the number of responses that were required to
- * achieve the requested consistency level.
- * @param receivedResponses the number of responses that had been received
- * by the time the timeout exception was raised.
- * @param dataRetrieved whether actual data (as opposed to data checksums)
- * was present in the received responses.
- * @param nbRetry the number of retries already performed for this operation.
- * @return {@code RetryDecision.retry(cl)} if no retry attempt has yet been tried and
- * {@code receivedResponses >= requiredResponses && !dataRetrieved}, {@code RetryDecision.rethrow()} otherwise.
- */
- public RetryDecision onReadTimeout(ConsistencyLevel cl, int requiredResponses, int receivedResponses, boolean dataRetrieved, int nbRetry) {
- if (nbRetry != 0)
- return RetryDecision.rethrow();
-
- return receivedResponses >= requiredResponses && !dataRetrieved ? RetryDecision.retry(cl) : RetryDecision.rethrow();
- }
-
- /**
- * Defines whether to retry and at which consistency level on a write timeout.
- *
- * This method triggers a maximum of one retry, and only in the case of - * a {@code WriteType.BATCH_LOG} write. The reasoning for the retry in - * that case is that write to the distributed batch log is tried by the - * coordinator of the write against a small subset of all the node alive - * in the local datacenter. Hence, a timeout usually means that none of - * the nodes in that subset were alive but the coordinator hasn't - * detected them as dead. By the time we get the timeout the dead - * nodes will likely have been detected as dead and the retry has thus a - * high change of success. - * - * @param cl the original consistency level of the write that timeouted. - * @param writeType the type of the write that timeouted. - * @param requiredAcks the number of acknowledgments that were required to - * achieve the requested consistency level. - * @param receivedAcks the number of acknowledgments that had been received - * by the time the timeout exception was raised. - * @param nbRetry the number of retry already performed for this operation. - * @return {@code RetryDecision.retry(cl)} if no retry attempt has yet been tried and - * {@code writeType == WriteType.BATCH_LOG}, {@code RetryDecision.rethrow()} otherwise. - */ - public RetryDecision onWriteTimeout(ConsistencyLevel cl, WriteType writeType, int requiredAcks, int receivedAcks, int nbRetry) { - if (nbRetry != 0) - return RetryDecision.rethrow(); - - // If the batch log write failed, retry the operation as this might just be we were unlucky at picking candidtes - return writeType == WriteType.BATCH_LOG ? RetryDecision.retry(cl) : RetryDecision.rethrow(); - } - - /** - * Defines whether to retry and at which consistency level on an - * unavailable exception. - *
- * This method never retries, as a retry on an unavailable exception
- * using the same consistency level has almost no chance of success.
- *
- * @param cl the original consistency level for the operation.
- * @param requiredReplica the number of replicas that should have been
- * (known) alive for the operation to be attempted.
- * @param aliveReplica the number of replicas that were known to be alive by
- * the coordinator of the operation.
- * @param nbRetry the number of retries already performed for this operation.
- * @return {@code RetryDecision.rethrow()}.
- */
- public RetryDecision onUnavailable(ConsistencyLevel cl, int requiredReplica, int aliveReplica, int nbRetry) {
- return RetryDecision.rethrow();
- }
- }
-
- /**
- * A retry policy that sometimes retries with a lower consistency level than
- * the one initially requested.
- *
- * BEWARE: This policy may retry queries using a lower consistency
- * level than the one initially requested. By doing so, it may break
- * consistency guarantees. In other words, if you use this retry policy,
- * there are cases (documented below) where a read at {@code QUORUM}
- * may not see a preceding write at {@code QUORUM}. Do not use this
- * policy unless you have understood the cases where this can happen and
- * are ok with that. It is also highly recommended to always wrap this
- * policy into {@link RetryPolicy.RetryLogger} to log the occurrences of
- * such consistency breaks.
- *
- * This policy implements the same retries as the {@link Default} policy.
- * But on top of that, it also retries in the following cases:
- *
- * <ul>
- *   <li>On a read timeout: if the number of replicas that responded is
- * greater than one but lower than is required by the requested
- * consistency level, the operation is retried at a lower consistency
- * level.</li>
- *   <li>On a write timeout: if the operation is a {@code
- * WriteType.UNLOGGED_BATCH} and at least one replica acknowledged the
- * write, the operation is retried at a lower consistency level.
- * Furthermore, for other operations, if at least one replica acknowledged
- * the write, the timeout is ignored.</li>
- *   <li>On an unavailable exception: if at least one replica is alive, the
- * operation is retried at a lower consistency level.</li>
- * </ul>
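A hypothetical walk-through (values assumed; the decision follows the
maxLikelyToWorkCL helper shown below): for a read at QUORUM with replication
factor 3, two responses are required, so:

    // onReadTimeout(ConsistencyLevel.QUORUM, 2, 1, false, 0)
    //   receivedResponses (1) < requiredResponses (2)
    //   -> maxLikelyToWorkCL(1)
    //   -> RetryDecision.retry(ConsistencyLevel.ONE)

The read is retried at ONE, trading the requested consistency for a response.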
- * The reasoning behind this retry policy is the following one. If, based
- * on the information the Cassandra coordinator node returns, retrying the
- * operation with the initially requested consistency has a chance to
- * succeed, do it. Otherwise, if based on this information we know the
- * initially requested consistency level cannot be achieved currently, then:
- *
- * <ul>
- *   <li>For writes, ignore the exception (thus silently failing the
- * consistency requirement) if we know the write has been persisted on at
- * least one replica.</li>
- *   <li>For reads, try reading at a lower consistency level (thus silently
- * failing the consistency requirement).</li>
- * </ul>
- * In other words, this policy implements the idea that if the requested - * consistency level cannot be achieved, the next best thing for writes is - * to make sure the data is persisted, and that reading something is better - * than reading nothing, even if there is a risk of reading stale data. - */ - public static class DowngradingConsistency implements RetryPolicy { - - public static final DowngradingConsistency INSTANCE = new DowngradingConsistency(); - - private DowngradingConsistency() {} - - private RetryDecision maxLikelyToWorkCL(int knownOk) { - if (knownOk >= 3) - return RetryDecision.retry(ConsistencyLevel.THREE); - else if (knownOk >= 2) - return RetryDecision.retry(ConsistencyLevel.TWO); - else if (knownOk >= 1) - return RetryDecision.retry(ConsistencyLevel.ONE); - else - return RetryDecision.rethrow(); - } - - /** - * Defines whether to retry and at which consistency level on a read timeout. - *
- * This method triggers a maximum of one retry. If less replica - * responsed than required by the consistency level (but at least one - * replica did respond), the operation is retried at a lower - * consistency level. If enough replica responded but data was not - * retrieve, the operation is retried with the initial consistency - * level. Otherwise, an exception is thrown. - * - * @param cl the original consistency level of the read that timeouted. - * @param requiredResponses the number of responses that were required to - * achieve the requested consistency level. - * @param receivedResponses the number of responses that had been received - * by the time the timeout exception was raised. - * @param dataRetrieved whether actual data (by opposition to data checksum) - * was present in the received responses. - * @param nbRetry the number of retry already performed for this operation. - * @return a RetryDecision as defined above. - */ - public RetryDecision onReadTimeout(ConsistencyLevel cl, int requiredResponses, int receivedResponses, boolean dataRetrieved, int nbRetry) { - if (nbRetry != 0) - return RetryDecision.rethrow(); - - if (receivedResponses < requiredResponses) { - // Tries the biggest CL that is expected to work - return maxLikelyToWorkCL(receivedResponses); - } - - return !dataRetrieved ? RetryDecision.retry(cl) : RetryDecision.rethrow(); - } - - /** - * Defines whether to retry and at which consistency level on a write timeout. - *
- * This method triggers a maximum of one retry. If {@code writeType == - * WriteType.BATCH_LOG}, the write is retried with the initial - * consistency level. If {@code writeType == WriteType.UNLOGGED_BATCH} - * and at least one replica acknowleged, the write is retried with a - * lower consistency level (with unlogged batch, a write timeout can - * always mean that part of the batch haven't been persisted at - * all, even if {@code receivedAcks > 0}). For other {@code writeType}, - * if we know the write has been persisted on at least one replica, we - * ignore the exception. Otherwise, an exception is thrown. - * - * @param cl the original consistency level of the write that timeouted. - * @param writeType the type of the write that timeouted. - * @param requiredAcks the number of acknowledgments that were required to - * achieve the requested consistency level. - * @param receivedAcks the number of acknowledgments that had been received - * by the time the timeout exception was raised. - * @param nbRetry the number of retry already performed for this operation. - * @return a RetryDecision as defined above. - */ - public RetryDecision onWriteTimeout(ConsistencyLevel cl, WriteType writeType, int requiredAcks, int receivedAcks, int nbRetry) { - if (nbRetry != 0) - return RetryDecision.rethrow(); - - switch (writeType) { - case SIMPLE: - case BATCH: - // Since we provide atomicity there is no point in retrying - return RetryDecision.ignore(); - case COUNTER: - // We should not retry counters, period! - return RetryDecision.ignore(); - case UNLOGGED_BATCH: - // Since only part of the batch could have been persisted, - // retry with whatever consistency should allow to persist all - return maxLikelyToWorkCL(receivedAcks); - case BATCH_LOG: - return RetryDecision.retry(cl); - } - return RetryDecision.rethrow(); - } - - /** - * Defines whether to retry and at which consistency level on an - * unavailable exception. - *
- * This method triggers a maximum of one retry. If at least one replica - * is know to be alive, the operation is retried at a lower consistency - * level. - * - * @param cl the original consistency level for the operation. - * @param requiredReplica the number of replica that should have been - * (known) alive for the operation to be attempted. - * @param aliveReplica the number of replica that were know to be alive by - * the coordinator of the operation. - * @param nbRetry the number of retry already performed for this operation. - * @return a RetryDecision as defined above. - */ - public RetryDecision onUnavailable(ConsistencyLevel cl, int requiredReplica, int aliveReplica, int nbRetry) { - if (nbRetry != 0) - return RetryDecision.rethrow(); - - // Tries the biggest CL that is expected to work - return maxLikelyToWorkCL(aliveReplica); - } - } - - /** - * A retry policy that wraps another policy, logging the decision made by its sub-policy. - *
- * Note that this policy only log the IGNORE and RETRY decisions (since - * RETHROW decisions just amount to propate the cassandra exception). The - * logging is done at the INFO level. - */ - public static class RetryLogger implements RetryPolicy { - - private static final Logger logger = LoggerFactory.getLogger(RetryLogger.class); - private final RetryPolicy policy; - - private RetryLogger(RetryPolicy policy) { - this.policy = policy; - } - - /** - * Creates a new {@code RetryPolicy} that logs the decision of {@code policy}. - * - * @param policy the policy to wrap. The policy created by this method - * will return the same decision than {@code policy} but will log them. - * @return the newly create logging policy. - */ - public static RetryPolicy wrap(RetryPolicy policy) { - return new RetryLogger(policy); - } - - private static ConsistencyLevel cl(ConsistencyLevel cl, RetryDecision decision) { - return decision.retryCL == null ? cl : decision.retryCL; - } - - public RetryDecision onReadTimeout(ConsistencyLevel cl, int requiredResponses, int receivedResponses, boolean dataRetrieved, int nbRetry) { - RetryDecision decision = policy.onReadTimeout(cl, requiredResponses, receivedResponses, dataRetrieved, nbRetry); - switch (decision.type) { - case IGNORE: - String f1 = "Ignoring read timeout (initial consistency: %s, required responses: %i, received responses: %i, data retrieved: %b, retries: %i)"; - logger.info(String.format(f1, cl, requiredResponses, receivedResponses, dataRetrieved, nbRetry)); - break; - case RETRY: - String f2 = "Retrying on read timeout at consistency %s (initial consistency: %s, required responses: %i, received responses: %i, data retrieved: %b, retries: %i)"; - logger.info(String.format(f2, cl(cl, decision), cl, requiredResponses, receivedResponses, dataRetrieved, nbRetry)); - break; - } - return decision; - } - - public RetryDecision onWriteTimeout(ConsistencyLevel cl, WriteType writeType, int requiredAcks, int receivedAcks, int nbRetry) { - RetryDecision decision = policy.onWriteTimeout(cl, writeType, requiredAcks, receivedAcks, nbRetry); - switch (decision.type) { - case IGNORE: - String f1 = "Ignoring write timeout (initial consistency: %s, write type: %s, required acknowledgments: %i, received acknowledgments: %i, retries: %i)"; - logger.info(String.format(f1, cl, writeType, requiredAcks, receivedAcks, nbRetry)); - break; - case RETRY: - String f2 = "Retrying on write timeout at consistency %s(initial consistency: %s, write type: %s, required acknowledgments: %i, received acknowledgments: %i, retries: %i)"; - logger.info(String.format(f2, cl(cl, decision), cl, writeType, requiredAcks, receivedAcks, nbRetry)); - break; - } - return decision; - } - - public RetryDecision onUnavailable(ConsistencyLevel cl, int requiredReplica, int aliveReplica, int nbRetry) { - RetryDecision decision = policy.onUnavailable(cl, requiredReplica, aliveReplica, nbRetry); - switch (decision.type) { - case IGNORE: - String f1 = "Ignoring unavailable exception (initial consistency: %s, required replica: %i, alive replica: %i, retries: %i)"; - logger.info(String.format(f1, cl, requiredReplica, aliveReplica, nbRetry)); - break; - case RETRY: - String f2 = "Retrying on unavailable exception at consistency %s (initial consistency: %s, required replica: %i, alive replica: %i, retries: %i)"; - logger.info(String.format(f2, cl(cl, decision), cl, requiredReplica, aliveReplica, nbRetry)); - break; - } - return decision; - } - } } diff --git 
a/driver-core/src/main/java/com/datastax/driver/core/policies/RoundRobinPolicy.java b/driver-core/src/main/java/com/datastax/driver/core/policies/RoundRobinPolicy.java
new file mode 100644
index 00000000000..dd04d220494
--- /dev/null
+++ b/driver-core/src/main/java/com/datastax/driver/core/policies/RoundRobinPolicy.java
@@ -0,0 +1,108 @@
+package com.datastax.driver.core.policies;
+
+import java.nio.ByteBuffer;
+import java.util.*;
+import java.util.concurrent.atomic.AtomicInteger;
+import java.util.concurrent.CopyOnWriteArrayList;
+
+import com.google.common.collect.AbstractIterator;
+
+import com.datastax.driver.core.*;
+
+/**
+ * A Round-robin load balancing policy.
+ *
+ * This policy queries nodes in a round-robin fashion. For a given query,
+ * if a host fails, the next one (following the round-robin order) is
+ * tried, until all hosts have been tried.
+ *
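A minimal usage sketch (this is the configuration the updated
LoadBalancingPolicyTest below switches to):

    Cluster.Builder builder = new Cluster.Builder()
        .withLoadBalancingPolicy(new RoundRobinPolicy());

Unlike the earlier factory-based API, the policy instance is passed directly
and receives the cluster and the initial hosts through its init method.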
+ * This policy is not datacenter aware and will include every known
+ * Cassandra host in its round robin algorithm. If you use multiple
+ * datacenters this will be inefficient and you will want to use the
+ * {@link DCAwareRoundRobinPolicy} load balancing policy instead.
+ */
+public class RoundRobinPolicy implements LoadBalancingPolicy {
+
+ private final CopyOnWriteArrayList<Host> liveHosts = new CopyOnWriteArrayList<Host>();
+ private final AtomicInteger index = new AtomicInteger();
+
+ /**
+ * Creates a load balancing policy that picks hosts to query in a round robin
+ * fashion (on all the hosts of the Cassandra cluster).
+ */
+ public RoundRobinPolicy() {}
+
+ public void init(Cluster cluster, Collection<Host> hosts) {
+ this.liveHosts.addAll(hosts);
+ this.index.set(new Random().nextInt(Math.max(hosts.size(), 1)));
+ }
+
+ /**
+ * Return the HostDistance for the provided host.
+ *
+ * This policy considers all nodes as local. This is generally the right
+ * thing to do in a single datacenter deployment. If you use multiple
+ * datacenters, see {@link DCAwareRoundRobinPolicy} instead.
+ *
+ * @param host the host of which to return the distance.
+ * @return the HostDistance to {@code host}.
+ */
+ public HostDistance distance(Host host) {
+ return HostDistance.LOCAL;
+ }
+
+ /**
+ * Returns the hosts to use for a new query.
+ *
+ * The returned plan will try each known host of the cluster. Upon each
+ * call to this method, the ith host of the plans returned will cycle
+ * over all the hosts of the cluster in a round-robin fashion.
+ *
+ * @param query the query for which to build the plan.
+ * @return a new query plan, i.e. an iterator indicating which host to
+ * try first for querying, which one to use as failover, etc...
+ */
+ public Iterator<Host> newQueryPlan(Query query) {
+
+ // We clone liveHosts because we want a version of the list that
+ // cannot change concurrently with the query plan iterator (this
+ // would be racy). We use clone() as it doesn't involve a copy of the
+ // underlying array (and thus we rely on liveHosts being a CopyOnWriteArrayList).
+ final List<Host> hosts = (List<Host>)liveHosts.clone();
+ final int startIdx = index.getAndIncrement();
+
+ // Overflow protection; not theoretically thread safe but should be good enough
+ if (startIdx > Integer.MAX_VALUE - 10000)
+ index.set(0);
+
+ return new AbstractIterator<Host>() {
+
+ private int idx = startIdx;
+ private int remaining = hosts.size();
+
+ protected Host computeNext() {
+ if (remaining <= 0)
+ return endOfData();
+
+ remaining--;
+ return hosts.get(idx++ % hosts.size());
+ }
+ };
+ }
+
+ public void onUp(Host host) {
+ liveHosts.addIfAbsent(host);
+ }
+
+ public void onDown(Host host) {
+ liveHosts.remove(host);
+ }
+
+ public void onAdd(Host host) {
+ onUp(host);
+ }
+
+ public void onRemove(Host host) {
+ onDown(host);
+ }
+}
diff --git a/driver-core/src/main/java/com/datastax/driver/core/policies/TokenAwarePolicy.java b/driver-core/src/main/java/com/datastax/driver/core/policies/TokenAwarePolicy.java
new file mode 100644
index 00000000000..fb099305060
--- /dev/null
+++ b/driver-core/src/main/java/com/datastax/driver/core/policies/TokenAwarePolicy.java
@@ -0,0 +1,127 @@
+package com.datastax.driver.core.policies;
+
+import java.nio.ByteBuffer;
+import java.util.Collection;
+import java.util.Iterator;
+import java.util.Set;
+
+import com.google.common.collect.AbstractIterator;
+
+import com.datastax.driver.core.*;
+
+/**
+ * A wrapper load balancing policy that adds token awareness to a child policy.
+ *
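A minimal usage sketch (matching the updated test below, wrapping a plain
round-robin policy):

    Cluster.Builder builder = new Cluster.Builder()
        .withLoadBalancingPolicy(new TokenAwarePolicy(new RoundRobinPolicy()));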
+ * This policy encapsulates another policy. The resulting policy works in + * the following way: + *
+ * <ul>
+ *   <li>the {@code distance} method is inherited from the child policy.</li>
+ *   <li>the iterator returned by the {@code newQueryPlan} method will first
+ * return the {@code LOCAL} replicas for the query (based on {@link Query#getRoutingKey})
+ * if possible (i.e. if the query {@code getRoutingKey} method
+ * doesn't return {@code null} and if {@link ClusterMetadata#getReplicas}
+ * returns a non-empty set of replicas for that partition key). If no
+ * local replica can be either found or successfully contacted, the rest
+ * of the query plan will fall back to the child policy.</li>
+ * </ul>
+ * Do note that only replicas for which the child policy {@code distance}
+ * method returns {@code HostDistance.LOCAL} will be considered as having
+ * priority. For example, if you wrap {@link DCAwareRoundRobinPolicy} with this
+ * token aware policy, replicas from remote data centers may only be
+ * returned after all the hosts of the local data center.
+ */
+public class TokenAwarePolicy implements LoadBalancingPolicy {
+
+ private final LoadBalancingPolicy childPolicy;
+ private ClusterMetadata clusterMetadata;
+
+ /**
+ * Creates a new {@code TokenAware} policy that wraps the provided child
+ * load balancing policy.
+ *
+ * @param childPolicy the load balancing policy to wrap with token
+ * awareness.
+ */
+ public TokenAwarePolicy(LoadBalancingPolicy childPolicy) {
+ this.childPolicy = childPolicy;
+ }
+
+ public void init(Cluster cluster, Collection<Host> hosts) {
+ clusterMetadata = cluster.getMetadata();
+ childPolicy.init(cluster, hosts);
+ }
+
+ /**
+ * Return the HostDistance for the provided host.
+ *
+ * @param host the host of which to return the distance.
+ * @return the HostDistance to {@code host} as returned by the wrapped policy.
+ */
+ public HostDistance distance(Host host) {
+ return childPolicy.distance(host);
+ }
+
+ /**
+ * Returns the hosts to use for a new query.
+ *
+ * The returned plan will first return replicas (whose {@code HostDistance} + * for the child policy is {@code LOCAL}) for the query if it can determine + * them (i.e. mainly if {@code query.getRoutingKey()} is not {@code null}). + * Following what it will return the plan of the child policy. + * + * @param query the query for which to build the plan. + * @return the new query plan. + */ + public Iterator newQueryPlan(final Query query) { + + ByteBuffer partitionKey = query.getRoutingKey(); + if (partitionKey == null) + return childPolicy.newQueryPlan(query); + + final Set replicas = clusterMetadata.getReplicas(partitionKey); + if (replicas.isEmpty()) + return childPolicy.newQueryPlan(query); + + return new AbstractIterator() { + + private final Iterator iter = replicas.iterator(); + private Iterator childIterator; + + protected Host computeNext() { + while (iter.hasNext()) { + Host host = iter.next(); + if (host.getMonitor().isUp() && childPolicy.distance(host) == HostDistance.LOCAL) + return host; + } + + if (childIterator == null) + childIterator = childPolicy.newQueryPlan(query); + + while (childIterator.hasNext()) { + Host host = childIterator.next(); + // Skip it if it was already a local replica + if (!replicas.contains(host) || childPolicy.distance(host) != HostDistance.LOCAL) + return host; + } + return endOfData(); + } + }; + } + + public void onUp(Host host) { + childPolicy.onUp(host); + } + + public void onDown(Host host) { + childPolicy.onDown(host); + } + + public void onAdd(Host host) { + childPolicy.onAdd(host); + } + + public void onRemove(Host host) { + childPolicy.onRemove(host); + } +} diff --git a/driver-core/src/test/java/com/datastax/driver/core/LoadBalancingPolicyTest.java b/driver-core/src/test/java/com/datastax/driver/core/LoadBalancingPolicyTest.java index 62ba5e386c4..753a8b70e94 100644 --- a/driver-core/src/test/java/com/datastax/driver/core/LoadBalancingPolicyTest.java +++ b/driver-core/src/test/java/com/datastax/driver/core/LoadBalancingPolicyTest.java @@ -79,7 +79,7 @@ private void query(CCMBridge.CCMCluster c, int n, boolean usePrepared) throws No @Test public void roundRobinTest() throws Throwable { - Cluster.Builder builder = new Cluster.Builder().withLoadBalancingPolicyFactory(LoadBalancingPolicy.RoundRobin.Factory.INSTANCE); + Cluster.Builder builder = new Cluster.Builder().withLoadBalancingPolicy(new RoundRobinPolicy()); CCMBridge.CCMCluster c = CCMBridge.buildCluster(2, builder); createSchema(c.session); try { @@ -111,7 +111,7 @@ public void roundRobinTest() throws Throwable { @Test public void DCAwareRoundRobinTest() throws Throwable { - Cluster.Builder builder = new Cluster.Builder().withLoadBalancingPolicyFactory(LoadBalancingPolicy.DCAwareRoundRobin.Factory.create("dc2")); + Cluster.Builder builder = new Cluster.Builder().withLoadBalancingPolicy(new DCAwareRoundRobinPolicy("dc2")); CCMBridge.CCMCluster c = CCMBridge.buildCluster(2, 2, builder); createMultiDCSchema(c.session); try { @@ -143,7 +143,7 @@ public void tokenAwarePreparedTest() throws Throwable { } public void tokenAwareTest(boolean usePrepared) throws Throwable { - Cluster.Builder builder = new Cluster.Builder().withLoadBalancingPolicyFactory(LoadBalancingPolicy.TokenAware.Factory.create(LoadBalancingPolicy.RoundRobin.Factory.INSTANCE)); + Cluster.Builder builder = new Cluster.Builder().withLoadBalancingPolicy(new TokenAwarePolicy(new RoundRobinPolicy())); CCMBridge.CCMCluster c = CCMBridge.buildCluster(2, builder); createSchema(c.session); try { From 
d7c43c2aab469d086839cbf49ac7ee90107e8743 Mon Sep 17 00:00:00 2001 From: Sylvain Lebresne Date: Fri, 16 Nov 2012 13:29:47 +0100 Subject: [PATCH 077/719] Put ResultSet.Future into it's own class --- .../driver/core/ControlConnection.java | 16 +- .../com/datastax/driver/core/QueryTrace.java | 4 +- .../com/datastax/driver/core/ResultSet.java | 230 +---------------- .../datastax/driver/core/ResultSetFuture.java | 236 ++++++++++++++++++ .../com/datastax/driver/core/Session.java | 18 +- .../datastax/driver/core/SimpleFuture.java | 2 +- .../policies/ConstantReconnectionPolicy.java | 5 +- .../policies/DCAwareRoundRobinPolicy.java | 2 +- .../driver/core/policies/Policies.java | 2 +- .../core/policies/TokenAwarePolicy.java | 2 +- 10 files changed, 260 insertions(+), 257 deletions(-) create mode 100644 driver-core/src/main/java/com/datastax/driver/core/ResultSetFuture.java diff --git a/driver-core/src/main/java/com/datastax/driver/core/ControlConnection.java b/driver-core/src/main/java/com/datastax/driver/core/ControlConnection.java index e9f906e8f32..cbfbe0c92d8 100644 --- a/driver-core/src/main/java/com/datastax/driver/core/ControlConnection.java +++ b/driver-core/src/main/java/com/datastax/driver/core/ControlConnection.java @@ -192,11 +192,11 @@ static void refreshSchema(Connection connection, String keyspace, String table, whereClause += " AND columnfamily_name = '" + table + "'"; } - ResultSet.Future ksFuture = table == null - ? new ResultSet.Future(null, new QueryMessage(SELECT_KEYSPACES + whereClause, ConsistencyLevel.DEFAULT_CASSANDRA_CL)) - : null; - ResultSet.Future cfFuture = new ResultSet.Future(null, new QueryMessage(SELECT_COLUMN_FAMILIES + whereClause, ConsistencyLevel.DEFAULT_CASSANDRA_CL)); - ResultSet.Future colsFuture = new ResultSet.Future(null, new QueryMessage(SELECT_COLUMNS + whereClause, ConsistencyLevel.DEFAULT_CASSANDRA_CL)); + ResultSetFuture ksFuture = table == null + ? 
new ResultSetFuture(null, new QueryMessage(SELECT_KEYSPACES + whereClause, ConsistencyLevel.DEFAULT_CASSANDRA_CL)) + : null; + ResultSetFuture cfFuture = new ResultSetFuture(null, new QueryMessage(SELECT_COLUMN_FAMILIES + whereClause, ConsistencyLevel.DEFAULT_CASSANDRA_CL)); + ResultSetFuture colsFuture = new ResultSetFuture(null, new QueryMessage(SELECT_COLUMNS + whereClause, ConsistencyLevel.DEFAULT_CASSANDRA_CL)); if (ksFuture != null) connection.write(ksFuture.callback); @@ -232,8 +232,8 @@ public void refreshNodeListAndTokenMap() { private void refreshNodeListAndTokenMap(Connection connection) throws ConnectionException, BusyConnectionException, ExecutionException, InterruptedException { // Make sure we're up to date on nodes and tokens - ResultSet.Future peersFuture = new ResultSet.Future(null, new QueryMessage(SELECT_PEERS, ConsistencyLevel.DEFAULT_CASSANDRA_CL)); - ResultSet.Future localFuture = new ResultSet.Future(null, new QueryMessage(SELECT_LOCAL, ConsistencyLevel.DEFAULT_CASSANDRA_CL)); + ResultSetFuture peersFuture = new ResultSetFuture(null, new QueryMessage(SELECT_PEERS, ConsistencyLevel.DEFAULT_CASSANDRA_CL)); + ResultSetFuture localFuture = new ResultSetFuture(null, new QueryMessage(SELECT_LOCAL, ConsistencyLevel.DEFAULT_CASSANDRA_CL)); connection.write(peersFuture.callback); connection.write(localFuture.callback); @@ -302,7 +302,7 @@ static boolean waitForSchemaAgreement(Connection connection, ClusterMetadata met long start = System.currentTimeMillis(); long elapsed = 0; while (elapsed < MAX_SCHEMA_AGREEMENT_WAIT_MS) { - ResultSet.Future peersFuture = new ResultSet.Future(null, new QueryMessage(SELECT_SCHEMA_PEERS, ConsistencyLevel.DEFAULT_CASSANDRA_CL)); + ResultSetFuture peersFuture = new ResultSetFuture(null, new QueryMessage(SELECT_SCHEMA_PEERS, ConsistencyLevel.DEFAULT_CASSANDRA_CL)); // TODO: fix once we have rc1 //ResultSet.Future localFuture = new ResultSet.Future(null, new QueryMessage(SELECT_SCHEMA_LOCAL, ConsistencyLevel.DEFAULT_CASSANDRA_CL)); connection.write(peersFuture.callback); diff --git a/driver-core/src/main/java/com/datastax/driver/core/QueryTrace.java b/driver-core/src/main/java/com/datastax/driver/core/QueryTrace.java index 6ea831fa694..c4814f96cea 100644 --- a/driver-core/src/main/java/com/datastax/driver/core/QueryTrace.java +++ b/driver-core/src/main/java/com/datastax/driver/core/QueryTrace.java @@ -149,8 +149,8 @@ private void maybeFetchTrace() { private void doFetchTrace() { try { - ResultSet.Future sessionsFuture = session.executeQuery(new QueryMessage(String.format(SELECT_SESSIONS_FORMAT, traceId), ConsistencyLevel.DEFAULT_CASSANDRA_CL), Query.DEFAULT); - ResultSet.Future eventsFuture = session.executeQuery(new QueryMessage(String.format(SELECT_EVENTS_FORMAT, traceId), ConsistencyLevel.DEFAULT_CASSANDRA_CL), Query.DEFAULT); + ResultSetFuture sessionsFuture = session.executeQuery(new QueryMessage(String.format(SELECT_SESSIONS_FORMAT, traceId), ConsistencyLevel.DEFAULT_CASSANDRA_CL), Query.DEFAULT); + ResultSetFuture eventsFuture = session.executeQuery(new QueryMessage(String.format(SELECT_EVENTS_FORMAT, traceId), ConsistencyLevel.DEFAULT_CASSANDRA_CL), Query.DEFAULT); CQLRow sessRow = sessionsFuture.get().fetchOne(); if (sessRow != null) { diff --git a/driver-core/src/main/java/com/datastax/driver/core/ResultSet.java b/driver-core/src/main/java/com/datastax/driver/core/ResultSet.java index 158b99eb014..5a0d271a6ed 100644 --- a/driver-core/src/main/java/com/datastax/driver/core/ResultSet.java +++ 
b/driver-core/src/main/java/com/datastax/driver/core/ResultSet.java @@ -3,19 +3,10 @@ import java.net.InetAddress; import java.nio.ByteBuffer; import java.util.*; -import java.util.concurrent.ExecutionException; import java.util.concurrent.TimeoutException; -import java.util.concurrent.TimeUnit; -import org.apache.cassandra.cql3.ColumnSpecification; -import org.apache.cassandra.transport.Message; -import org.apache.cassandra.transport.ProtocolException; -import org.apache.cassandra.transport.ServerError; -import org.apache.cassandra.transport.messages.ErrorMessage; import org.apache.cassandra.transport.messages.ResultMessage; -import com.datastax.driver.core.exceptions.*; - import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -45,7 +36,7 @@ private ResultSet(ColumnDefinitions metadata, Queue> rows, Quer this.queriedHost = queriedHost; } - private static ResultSet fromMessage(ResultMessage msg, Session.Manager session, InetAddress queriedHost) { + static ResultSet fromMessage(ResultMessage msg, Session.Manager session, InetAddress queriedHost) { UUID tracingId = msg.getTracingId(); QueryTrace trace = tracingId == null ? null : new QueryTrace(tracingId, session); @@ -171,223 +162,4 @@ public String toString() { sb.append(", ").append(metadata).append("]"); return sb.toString(); } - - public static class Future extends SimpleFuture - { - private final Session.Manager session; - private final Message.Request request; - final ResponseCallback callback = new ResponseCallback(); - - Future(Session.Manager session, Message.Request request) { - this.session = session; - this.request = request; - } - - // The only reason this exists is because we don't want to expose its - // method publicly (otherwise Future could have implemented - // Connection.ResponseCallback directly) - class ResponseCallback implements Connection.ResponseCallback { - - public Message.Request request() { - return request; - } - - public void onSet(Connection connection, Message.Response response) { - try { - switch (response.type) { - case RESULT: - ResultMessage rm = (ResultMessage)response; - switch (rm.kind) { - case SET_KEYSPACE: - // propagate the keyspace change to other connections - session.poolsState.setKeyspace(((ResultMessage.SetKeyspace)rm).keyspace); - set(ResultSet.fromMessage(rm, session, connection.address)); - break; - case SCHEMA_CHANGE: - ResultMessage.SchemaChange scc = (ResultMessage.SchemaChange)rm; - ResultSet rs = ResultSet.fromMessage(rm, session, connection.address); - switch (scc.change) { - case CREATED: - if (scc.columnFamily.isEmpty()) { - session.cluster.manager.refreshSchema(connection, Future.this, rs, null, null); - } else { - session.cluster.manager.refreshSchema(connection, Future.this, rs, scc.keyspace, null); - } - break; - case DROPPED: - if (scc.columnFamily.isEmpty()) { - // If that the one keyspace we are logged in, reset to null (it shouldn't really happen but ...) 
- if (scc.keyspace.equals(session.poolsState.keyspace)) - session.poolsState.setKeyspace(null); - session.cluster.manager.refreshSchema(connection, Future.this, rs, null, null); - } else { - session.cluster.manager.refreshSchema(connection, Future.this, rs, scc.keyspace, null); - } - break; - case UPDATED: - if (scc.columnFamily.isEmpty()) { - session.cluster.manager.refreshSchema(connection, Future.this, rs, scc.keyspace, null); - } else { - session.cluster.manager.refreshSchema(connection, Future.this, rs, scc.keyspace, scc.columnFamily); - } - break; - } - break; - default: - set(ResultSet.fromMessage(rm, session, connection.address)); - break; - } - break; - case ERROR: - setException(convertException(((ErrorMessage)response).error)); - break; - default: - // This mean we have probably have a bad node, so defunct the connection - connection.defunct(new ConnectionException(connection.address, String.format("Got unexpected %s response", response.type))); - setException(new DriverInternalError(String.format("Got unexpected %s response from %s", response.type, connection.address))); - break; - } - } catch (RuntimeException e) { - // If we get a bug here, the client will not get it, so better forwarding the error - setException(new DriverInternalError("Unexpected error while processing response from " + connection.address, e)); - } - } - - public void onException(Connection connection, Exception exception) { - setException(exception); - } - } - - /** - * Waits for the query to return and return its result. - * - * This method is usually more convenient than {@link #get} as it: - *
<ul>
- *   <li>It waits for the result uninterruptibly, and so doesn't throw
- * {@link InterruptedException}.</li>
- *   <li>It returns meaningful exceptions, instead of having to deal
- * with ExecutionException.</li>
- * </ul>
- *
- * As such, it is the preferred way to get the future result. - * - * @throws NoHostAvailableException if no host in the cluster can be - * contacted successfully to execute this query. - * @throws QueryExecutionException if the query triggered an execution - * exception, i.e. an exception thrown by Cassandra when it cannot execute - * the query with the requested consistency level successfully. - * @throws QueryValidationException if the query if invalid (syntax error, - * unauthorized or any other validation problem). - */ - public ResultSet getUninterruptibly() throws NoHostAvailableException { - try { - while (true) { - try { - return super.get(); - } catch (InterruptedException e) { - // We said 'uninterruptibly' - } - } - } catch (ExecutionException e) { - extractCauseFromExecutionException(e); - throw new AssertionError(); - } - } - - /** - * Waits for the given time for the query to return and return its - * result if available. - * - * This method is usually more convenient than {@link #get} as it: - *
<ul>
- *   <li>It waits for the result uninterruptibly, and so doesn't throw
- * {@link InterruptedException}.</li>
- *   <li>It returns meaningful exceptions, instead of having to deal
- * with ExecutionException.</li>
- * </ul>
- *
- * As such, it is the preferred way to get the future result. - * - * @throws NoHostAvailableException if no host in the cluster can be - * contacted successfully to execute this query. - * @throws QueryExecutionException if the query triggered an execution - * exception, i.e. an exception thrown by Cassandra when it cannot execute - * the query with the requested consistency level successfully. - * @throws QueryValidationException if the query if invalid (syntax error, - * unauthorized or any other validation problem). - * @throws TimeoutException if the wait timed out (Note that this is - * different from a Cassandra timeout, which is a {@code - * QueryExecutionException}). - */ - public ResultSet getUninterruptibly(long timeout, TimeUnit unit) throws NoHostAvailableException, TimeoutException { - long start = System.nanoTime(); - long timeoutNanos = unit.toNanos(timeout); - try { - while (true) { - try { - return super.get(timeoutNanos, TimeUnit.NANOSECONDS); - } catch (InterruptedException e) { - // We said 'uninterruptibly' - long now = System.nanoTime(); - long elapsedNanos = now - start; - timeout = timeoutNanos - elapsedNanos; - start = now; - } - } - } catch (ExecutionException e) { - extractCauseFromExecutionException(e); - throw new AssertionError(); - } - } - - static void extractCauseFromExecutionException(ExecutionException e) throws NoHostAvailableException { - extractCause(e.getCause()); - } - - static void extractCause(Throwable cause) throws NoHostAvailableException { - if (cause instanceof NoHostAvailableException) - throw (NoHostAvailableException)cause; - else if (cause instanceof QueryExecutionException) - throw (QueryExecutionException)cause; - else if (cause instanceof DriverUncheckedException) - throw (DriverUncheckedException)cause; - else - throw new DriverInternalError("Unexpected exception thrown", cause); - } - - static Exception convertException(org.apache.cassandra.exceptions.TransportException te) { - switch (te.code()) { - case SERVER_ERROR: - return new DriverInternalError("An unexpected error occured server side: " + te.getMessage()); - case PROTOCOL_ERROR: - return new DriverInternalError("An unexpected protocol error occured. 
This is a bug in this library, please report: " + te.getMessage()); - case UNAVAILABLE: - org.apache.cassandra.exceptions.UnavailableException ue = (org.apache.cassandra.exceptions.UnavailableException)te; - return new UnavailableException(ConsistencyLevel.from(ue.consistency), ue.required, ue.alive); - case OVERLOADED: - return new DriverInternalError("Queried host was overloaded; this shouldn't happen, another node should have been tried"); - case IS_BOOTSTRAPPING: - return new DriverInternalError("Queried host was boostrapping; this shouldn't happen, another node should have been tried"); - case TRUNCATE_ERROR: - return new TruncateException(te.getMessage()); - case WRITE_TIMEOUT: - org.apache.cassandra.exceptions.WriteTimeoutException wte = (org.apache.cassandra.exceptions.WriteTimeoutException)te; - return new WriteTimeoutException(ConsistencyLevel.from(wte.consistency), WriteType.from(wte.writeType), wte.received, wte.blockFor); - case READ_TIMEOUT: - org.apache.cassandra.exceptions.ReadTimeoutException rte = (org.apache.cassandra.exceptions.ReadTimeoutException)te; - return new ReadTimeoutException(ConsistencyLevel.from(rte.consistency), rte.received, rte.blockFor, rte.dataPresent); - case SYNTAX_ERROR: - return new SyntaxError(te.getMessage()); - case UNAUTHORIZED: - return new UnauthorizedException(te.getMessage()); - case INVALID: - return new InvalidQueryException(te.getMessage()); - case CONFIG_ERROR: - return new InvalidConfigurationInQueryException(te.getMessage()); - case ALREADY_EXISTS: - org.apache.cassandra.exceptions.AlreadyExistsException aee = (org.apache.cassandra.exceptions.AlreadyExistsException)te; - return new AlreadyExistsException(aee.ksName, aee.cfName); - default: - return new DriverInternalError("Unknown error return code: " + te.code()); - } - } - } } diff --git a/driver-core/src/main/java/com/datastax/driver/core/ResultSetFuture.java b/driver-core/src/main/java/com/datastax/driver/core/ResultSetFuture.java new file mode 100644 index 00000000000..0a8c7de5897 --- /dev/null +++ b/driver-core/src/main/java/com/datastax/driver/core/ResultSetFuture.java @@ -0,0 +1,236 @@ +package com.datastax.driver.core; + +import java.util.*; +import java.util.concurrent.ExecutionException; +import java.util.concurrent.TimeoutException; +import java.util.concurrent.TimeUnit; + +import org.apache.cassandra.transport.Message; +import org.apache.cassandra.transport.messages.ErrorMessage; +import org.apache.cassandra.transport.messages.ResultMessage; + +import com.datastax.driver.core.exceptions.*; + +/** + * A future on a {@link ResultSet}. + * + * Note that this class implements guava's {@code + * ListenableFuture} and can thus be used with guava's future utilities. 
+ */ +public class ResultSetFuture extends SimpleFuture +{ + private final Session.Manager session; + private final Message.Request request; + final ResponseCallback callback = new ResponseCallback(); + + ResultSetFuture(Session.Manager session, Message.Request request) { + this.session = session; + this.request = request; + } + + // The only reason this exists is because we don't want to expose its + // method publicly (otherwise Future could have implemented + // Connection.ResponseCallback directly) + class ResponseCallback implements Connection.ResponseCallback { + + public Message.Request request() { + return request; + } + + public void onSet(Connection connection, Message.Response response) { + try { + switch (response.type) { + case RESULT: + ResultMessage rm = (ResultMessage)response; + switch (rm.kind) { + case SET_KEYSPACE: + // propagate the keyspace change to other connections + set(ResultSet.fromMessage(rm, session, connection.address)); + break; + case SCHEMA_CHANGE: + ResultMessage.SchemaChange scc = (ResultMessage.SchemaChange)rm; + ResultSet rs = ResultSet.fromMessage(rm, session, connection.address); + switch (scc.change) { + case CREATED: + if (scc.columnFamily.isEmpty()) { + session.cluster.manager.refreshSchema(connection, ResultSetFuture.this, rs, null, null); + } else { + session.cluster.manager.refreshSchema(connection, ResultSetFuture.this, rs, scc.keyspace, null); + } + break; + case DROPPED: + if (scc.columnFamily.isEmpty()) { + // If that the one keyspace we are logged in, reset to null (it shouldn't really happen but ...) + if (scc.keyspace.equals(session.poolsState.keyspace)) + session.poolsState.setKeyspace(null); + session.cluster.manager.refreshSchema(connection, ResultSetFuture.this, rs, null, null); + } else { + session.cluster.manager.refreshSchema(connection, ResultSetFuture.this, rs, scc.keyspace, null); + } + break; + case UPDATED: + if (scc.columnFamily.isEmpty()) { + session.cluster.manager.refreshSchema(connection, ResultSetFuture.this, rs, scc.keyspace, null); + } else { + session.cluster.manager.refreshSchema(connection, ResultSetFuture.this, rs, scc.keyspace, scc.columnFamily); + } + break; + } + break; + default: + set(ResultSet.fromMessage(rm, session, connection.address)); + break; + } + break; + case ERROR: + setException(convertException(((ErrorMessage)response).error)); + break; + default: + // This mean we have probably have a bad node, so defunct the connection + connection.defunct(new ConnectionException(connection.address, String.format("Got unexpected %s response", response.type))); + setException(new DriverInternalError(String.format("Got unexpected %s response from %s", response.type, connection.address))); + break; + } + } catch (RuntimeException e) { + // If we get a bug here, the client will not get it, so better forwarding the error + setException(new DriverInternalError("Unexpected error while processing response from " + connection.address, e)); + } + } + + public void onException(Connection connection, Exception exception) { + setException(exception); + } + } + + /** + * Waits for the query to return and return its result. + * + * This method is usually more convenient than {@link #get} as it: + *
<ul>
+ *   <li>It waits for the result uninterruptibly, and so doesn't throw
+ * {@link InterruptedException}.</li>
+ *   <li>It returns meaningful exceptions, instead of having to deal
+ * with ExecutionException.</li>
+ * </ul>
+ *
+ * As such, it is the preferred way to get the future result. + * + * @throws NoHostAvailableException if no host in the cluster can be + * contacted successfully to execute this query. + * @throws QueryExecutionException if the query triggered an execution + * exception, i.e. an exception thrown by Cassandra when it cannot execute + * the query with the requested consistency level successfully. + * @throws QueryValidationException if the query if invalid (syntax error, + * unauthorized or any other validation problem). + */ + public ResultSet getUninterruptibly() throws NoHostAvailableException { + try { + while (true) { + try { + return super.get(); + } catch (InterruptedException e) { + // We said 'uninterruptibly' + } + } + } catch (ExecutionException e) { + extractCauseFromExecutionException(e); + throw new AssertionError(); + } + } + + /** + * Waits for the given time for the query to return and return its + * result if available. + * + * This method is usually more convenient than {@link #get} as it: + *
<ul>
+ *   <li>It waits for the result uninterruptibly, and so doesn't throw
+ * {@link InterruptedException}.</li>
+ *   <li>It returns meaningful exceptions, instead of having to deal
+ * with ExecutionException.</li>
+ * </ul>
+ *
+ * As such, it is the preferred way to get the future result. + * + * @throws NoHostAvailableException if no host in the cluster can be + * contacted successfully to execute this query. + * @throws QueryExecutionException if the query triggered an execution + * exception, i.e. an exception thrown by Cassandra when it cannot execute + * the query with the requested consistency level successfully. + * @throws QueryValidationException if the query if invalid (syntax error, + * unauthorized or any other validation problem). + * @throws TimeoutException if the wait timed out (Note that this is + * different from a Cassandra timeout, which is a {@code + * QueryExecutionException}). + */ + public ResultSet getUninterruptibly(long timeout, TimeUnit unit) throws NoHostAvailableException, TimeoutException { + long start = System.nanoTime(); + long timeoutNanos = unit.toNanos(timeout); + try { + while (true) { + try { + return super.get(timeoutNanos, TimeUnit.NANOSECONDS); + } catch (InterruptedException e) { + // We said 'uninterruptibly' + long now = System.nanoTime(); + long elapsedNanos = now - start; + timeout = timeoutNanos - elapsedNanos; + start = now; + } + } + } catch (ExecutionException e) { + extractCauseFromExecutionException(e); + throw new AssertionError(); + } + } + + static void extractCauseFromExecutionException(ExecutionException e) throws NoHostAvailableException { + extractCause(e.getCause()); + } + + static void extractCause(Throwable cause) throws NoHostAvailableException { + if (cause instanceof NoHostAvailableException) + throw (NoHostAvailableException)cause; + else if (cause instanceof QueryExecutionException) + throw (QueryExecutionException)cause; + else if (cause instanceof DriverUncheckedException) + throw (DriverUncheckedException)cause; + else + throw new DriverInternalError("Unexpected exception thrown", cause); + } + + static Exception convertException(org.apache.cassandra.exceptions.TransportException te) { + switch (te.code()) { + case SERVER_ERROR: + return new DriverInternalError("An unexpected error occured server side: " + te.getMessage()); + case PROTOCOL_ERROR: + return new DriverInternalError("An unexpected protocol error occured. 
This is a bug in this library, please report: " + te.getMessage()); + case UNAVAILABLE: + org.apache.cassandra.exceptions.UnavailableException ue = (org.apache.cassandra.exceptions.UnavailableException)te; + return new UnavailableException(ConsistencyLevel.from(ue.consistency), ue.required, ue.alive); + case OVERLOADED: + return new DriverInternalError("Queried host was overloaded; this shouldn't happen, another node should have been tried"); + case IS_BOOTSTRAPPING: + return new DriverInternalError("Queried host was boostrapping; this shouldn't happen, another node should have been tried"); + case TRUNCATE_ERROR: + return new TruncateException(te.getMessage()); + case WRITE_TIMEOUT: + org.apache.cassandra.exceptions.WriteTimeoutException wte = (org.apache.cassandra.exceptions.WriteTimeoutException)te; + return new WriteTimeoutException(ConsistencyLevel.from(wte.consistency), WriteType.from(wte.writeType), wte.received, wte.blockFor); + case READ_TIMEOUT: + org.apache.cassandra.exceptions.ReadTimeoutException rte = (org.apache.cassandra.exceptions.ReadTimeoutException)te; + return new ReadTimeoutException(ConsistencyLevel.from(rte.consistency), rte.received, rte.blockFor, rte.dataPresent); + case SYNTAX_ERROR: + return new SyntaxError(te.getMessage()); + case UNAUTHORIZED: + return new UnauthorizedException(te.getMessage()); + case INVALID: + return new InvalidQueryException(te.getMessage()); + case CONFIG_ERROR: + return new InvalidConfigurationInQueryException(te.getMessage()); + case ALREADY_EXISTS: + org.apache.cassandra.exceptions.AlreadyExistsException aee = (org.apache.cassandra.exceptions.AlreadyExistsException)te; + return new AlreadyExistsException(aee.ksName, aee.cfName); + default: + return new DriverInternalError("Unknown error return code: " + te.code()); + } + } +} diff --git a/driver-core/src/main/java/com/datastax/driver/core/Session.java b/driver-core/src/main/java/com/datastax/driver/core/Session.java index 857776b9b27..3a91bd21da0 100644 --- a/driver-core/src/main/java/com/datastax/driver/core/Session.java +++ b/driver-core/src/main/java/com/datastax/driver/core/Session.java @@ -95,10 +95,9 @@ public ResultSet execute(Query query) throws NoHostAvailableException { * This method is a shortcut for {@code executeAsync(new SimpleStatement(query))}. * * @param query the CQL query to execute. - * @return the result of the query. That result will never be null but can - * be empty (and will be for any non SELECT query). + * @return a future on the result of the query. */ - public ResultSet.Future executeAsync(String query) { + public ResultSetFuture executeAsync(String query) { return executeAsync(new SimpleStatement(query)); } @@ -119,13 +118,12 @@ public ResultSet.Future executeAsync(String query) { * CQLStatement} or a {@code BoundStatement}). If it is a {@code * BoundStatement}, all variables must have been bound (the statement must * be ready). - * @return the result of the query. That result will never be null but can - * be empty (and will be for any non SELECT query). + * @return a future on the result of the query. * * @throws IllegalStateException if {@code query} is a {@code BoundStatement} * but {@code !query.isReady()}. 
*/ - public ResultSet.Future executeAsync(Query query) { + public ResultSetFuture executeAsync(Query query) { if (query instanceof CQLStatement) { return manager.executeQuery(new QueryMessage(((CQLStatement)query).getQueryString(), ConsistencyLevel.toCassandraCL(query.getConsistencyLevel())), query); @@ -192,7 +190,7 @@ private PreparedStatement toPreparedStatement(String query, Connection.Future fu } } } catch (ExecutionException e) { - ResultSet.Future.extractCauseFromExecutionException(e); + ResultSetFuture.extractCauseFromExecutionException(e); throw new AssertionError(); } @@ -209,7 +207,7 @@ private PreparedStatement toPreparedStatement(String query, Connection.Future fu throw new DriverInternalError(String.format("%s response received when prepared statement was expected", rm.kind)); } case ERROR: - ResultSet.Future.extractCause(ResultSet.Future.convertException(((ErrorMessage)response).error)); + ResultSetFuture.extractCause(ResultSetFuture.convertException(((ErrorMessage)response).error)); break; default: throw new DriverInternalError(String.format("%s response received when prepared statement was expected", response.type)); @@ -392,11 +390,11 @@ public void prepare(String query, InetAddress toExclude) { } } - public ResultSet.Future executeQuery(Message.Request msg, Query query) { + public ResultSetFuture executeQuery(Message.Request msg, Query query) { if (query.isTracing()) msg.setTracingRequested(); - ResultSet.Future future = new ResultSet.Future(this, msg); + ResultSetFuture future = new ResultSetFuture(this, msg); execute(future.callback, query); return future; } diff --git a/driver-core/src/main/java/com/datastax/driver/core/SimpleFuture.java b/driver-core/src/main/java/com/datastax/driver/core/SimpleFuture.java index f68741f9467..b29cba07662 100644 --- a/driver-core/src/main/java/com/datastax/driver/core/SimpleFuture.java +++ b/driver-core/src/main/java/com/datastax/driver/core/SimpleFuture.java @@ -12,7 +12,7 @@ class SimpleFuture extends AbstractFuture { /** * Creates a new {@code SimpleFuture}. */ - public static SimpleFuture create() { + static SimpleFuture create() { return new SimpleFuture(); } diff --git a/driver-core/src/main/java/com/datastax/driver/core/policies/ConstantReconnectionPolicy.java b/driver-core/src/main/java/com/datastax/driver/core/policies/ConstantReconnectionPolicy.java index ca902c5f805..0bbdb26d05a 100644 --- a/driver-core/src/main/java/com/datastax/driver/core/policies/ConstantReconnectionPolicy.java +++ b/driver-core/src/main/java/com/datastax/driver/core/policies/ConstantReconnectionPolicy.java @@ -9,12 +9,9 @@ public class ConstantReconnectionPolicy implements ReconnectionPolicy { /** * Creates a reconnection policy that creates with the provided constant wait - * time. + * time between reconnection attempts. * * @param constantDelayMs the constant delay in milliseconds to use. - * @return a reconnection policy factory that creates {@code - * Reconnection.Constant} policies with a {@code constantDelayMs} - * milliseconds delay between reconnection attempts. 
*/ public ConstantReconnectionPolicy(long constantDelayMs) { if (constantDelayMs < 0) diff --git a/driver-core/src/main/java/com/datastax/driver/core/policies/DCAwareRoundRobinPolicy.java b/driver-core/src/main/java/com/datastax/driver/core/policies/DCAwareRoundRobinPolicy.java index 4ad269d8265..f8c48d010c1 100644 --- a/driver-core/src/main/java/com/datastax/driver/core/policies/DCAwareRoundRobinPolicy.java +++ b/driver-core/src/main/java/com/datastax/driver/core/policies/DCAwareRoundRobinPolicy.java @@ -99,7 +99,7 @@ private String dc(Host host) { * hosts as {@code REMOTE} and the rest is {@code IGNORED}. *
<p>
* To configure how many host in each remote datacenter is considered - * {@code REMOTE}, see {@link Factory#create(String, int)}. + * {@code REMOTE}, see {@link #DCAwareRoundRobinPolicy(String, int)}. * * @param host the host of which to return the distance of. * @return the HostDistance to {@code host}. diff --git a/driver-core/src/main/java/com/datastax/driver/core/policies/Policies.java b/driver-core/src/main/java/com/datastax/driver/core/policies/Policies.java index eafe47a1993..3a67461ed01 100644 --- a/driver-core/src/main/java/com/datastax/driver/core/policies/Policies.java +++ b/driver-core/src/main/java/com/datastax/driver/core/policies/Policies.java @@ -35,7 +35,7 @@ public class Policies { * Creates a new {@code Policies} object using the provided policies. * * @param loadBalancingPolicy the load balancing policy to use. - * @param reconnectionPolicyFactory the reconnection policy to use. + * @param reconnectionPolicy the reconnection policy to use. * @param retryPolicy the retry policy to use. */ public Policies(LoadBalancingPolicy loadBalancingPolicy, diff --git a/driver-core/src/main/java/com/datastax/driver/core/policies/TokenAwarePolicy.java b/driver-core/src/main/java/com/datastax/driver/core/policies/TokenAwarePolicy.java index fb099305060..a759aa1dff8 100644 --- a/driver-core/src/main/java/com/datastax/driver/core/policies/TokenAwarePolicy.java +++ b/driver-core/src/main/java/com/datastax/driver/core/policies/TokenAwarePolicy.java @@ -27,7 +27,7 @@ *
<p>
* Do note that only replica for which the child policy {@code distance} * method returns {@code HostDistance.LOCAL} will be considered having - * priority. For example, if you wrap {@link DCAwareRoundRobin} with this + * priority. For example, if you wrap {@link DCAwareRoundRobinPolicy} with this * token aware policy, replicas from remote data centers may only be * returned after all the host of the local data center. */ From f46c9e68bc7dd41f67d3fc9d6eb5c4e5a247162b Mon Sep 17 00:00:00 2001 From: Sylvain Lebresne Date: Fri, 16 Nov 2012 16:57:09 +0100 Subject: [PATCH 078/719] Rework/greatly simplify DataType API --- .../datastax/driver/core/BoundStatement.java | 155 +++--- .../java/com/datastax/driver/core/CQLRow.java | 49 +- .../java/com/datastax/driver/core/Codec.java | 92 ++-- .../driver/core/ColumnDefinitions.java | 24 +- .../com/datastax/driver/core/DataType.java | 472 ++++++++++-------- .../driver/core/HostConnectionPool.java | 4 +- .../datastax/driver/core/ResultSetFuture.java | 1 + .../utils/querybuilder/BuiltStatement.java | 2 +- .../driver/core/PreparedStatementTest.java | 52 +- .../com/datastax/driver/core/TestUtils.java | 296 ++++++----- 10 files changed, 596 insertions(+), 551 deletions(-) diff --git a/driver-core/src/main/java/com/datastax/driver/core/BoundStatement.java b/driver-core/src/main/java/com/datastax/driver/core/BoundStatement.java index b7b17882f00..08e96383e7d 100644 --- a/driver-core/src/main/java/com/datastax/driver/core/BoundStatement.java +++ b/driver-core/src/main/java/com/datastax/driver/core/BoundStatement.java @@ -97,60 +97,55 @@ public BoundStatement bind(Object... values) { { Object toSet = values[i]; DataType columnType = statement.getVariables().getType(i); - if (columnType.isCollection()) { - switch (columnType.asCollection().getKind()) { - case LIST: - if (!(toSet instanceof List)) - throw new InvalidTypeException(String.format("Invalid type for value %d, column is a list but %s provided", i, toSet.getClass())); - - List l = (List)toSet; - // If the list is empty, it will never fail validation, but otherwise we should check the list given if of the right type - if (!l.isEmpty()) { - // Ugly? Yes - Class klass = l.get(0).getClass(); - DataType.Native eltType = (DataType.Native)((DataType.Collection.List)columnType).getElementsType(); - if (!Codec.isCompatibleSupertype(eltType, klass)) - throw new InvalidTypeException(String.format("Invalid type for value %d, column type is %s but provided list value are %s", i, columnType, klass)); - } - break; - case SET: - if (!(toSet instanceof Set)) - throw new InvalidTypeException(String.format("Invalid type for value %d, column is a set but %s provided", i, toSet.getClass())); - - Set s = (Set)toSet; - // If the list is empty, it will never fail validation, but otherwise we should check the list given if of the right type - if (!s.isEmpty()) { - // Ugly? 
Yes - Class klass = s.iterator().next().getClass(); - DataType.Native eltType = (DataType.Native)((DataType.Collection.List)columnType).getElementsType(); - if (!Codec.isCompatibleSupertype(eltType, klass)) - throw new InvalidTypeException(String.format("Invalid type for value %d, column type is %s but provided set value are %s", i, columnType, klass)); - } - break; - case MAP: - if (!(toSet instanceof Map)) - throw new InvalidTypeException(String.format("Invalid type for value %d, column is a map but %s provided", i, toSet.getClass())); - - Map m = (Map)toSet; - // If the list is empty, it will never fail validation, but otherwise we should check the list given if of the right type - if (!m.isEmpty()) { - // Ugly? Yes - Map.Entry entry = (Map.Entry)m.entrySet().iterator().next(); - Class keysClass = entry.getKey().getClass(); - Class valuesClass = entry.getValue().getClass(); - - DataType.Collection.Map mapType = (DataType.Collection.Map)columnType; - DataType.Native keysType = (DataType.Native)mapType.getKeysType(); - DataType.Native valuesType = (DataType.Native)mapType.getValuesType(); - if (!Codec.isCompatibleSupertype(keysType, keysClass) || !Codec.isCompatibleSupertype(valuesType, valuesClass)) - throw new InvalidTypeException(String.format("Invalid type for value %d, column type %s conflicts with provided type %s", i, mapType, toSet.getClass())); - } - break; - - } - } else { - if (!Codec.isCompatibleSupertype(columnType.asNative(), toSet.getClass())) - throw new InvalidTypeException(String.format("Invalid type for value %d, column type is %s but %s provided", i, columnType, toSet.getClass())); + switch (columnType.getName()) { + case LIST: + if (!(toSet instanceof List)) + throw new InvalidTypeException(String.format("Invalid type for value %d, column is a list but %s provided", i, toSet.getClass())); + + List l = (List)toSet; + // If the list is empty, it will never fail validation, but otherwise we should check the list given if of the right type + if (!l.isEmpty()) { + // Ugly? Yes + Class klass = l.get(0).getClass(); + if (!Codec.isCompatibleSupertype(columnType.getTypeArguments().get(0), klass)) + throw new InvalidTypeException(String.format("Invalid type for value %d, column type is %s but provided list value are %s", i, columnType, klass)); + } + break; + case SET: + if (!(toSet instanceof Set)) + throw new InvalidTypeException(String.format("Invalid type for value %d, column is a set but %s provided", i, toSet.getClass())); + + Set s = (Set)toSet; + // If the list is empty, it will never fail validation, but otherwise we should check the list given if of the right type + if (!s.isEmpty()) { + // Ugly? Yes + Class klass = s.iterator().next().getClass(); + if (!Codec.isCompatibleSupertype(columnType.getTypeArguments().get(0), klass)) + throw new InvalidTypeException(String.format("Invalid type for value %d, column type is %s but provided set value are %s", i, columnType, klass)); + } + break; + case MAP: + if (!(toSet instanceof Map)) + throw new InvalidTypeException(String.format("Invalid type for value %d, column is a map but %s provided", i, toSet.getClass())); + + Map m = (Map)toSet; + // If the list is empty, it will never fail validation, but otherwise we should check the list given if of the right type + if (!m.isEmpty()) { + // Ugly? 
Yes + Map.Entry entry = (Map.Entry)m.entrySet().iterator().next(); + Class keysClass = entry.getKey().getClass(); + Class valuesClass = entry.getValue().getClass(); + + DataType keysType = columnType.getTypeArguments().get(0); + DataType valuesType = columnType.getTypeArguments().get(1); + if (!Codec.isCompatibleSupertype(keysType, keysClass) || !Codec.isCompatibleSupertype(valuesType, valuesClass)) + throw new InvalidTypeException(String.format("Invalid type for value %d, column type %s conflicts with provided type %s", i, columnType, toSet.getClass())); + } + break; + default: + if (!Codec.isCompatibleSupertype(columnType, toSet.getClass())) + throw new InvalidTypeException(String.format("Invalid type for value %d, column type is %s but %s provided", i, columnType, toSet.getClass())); + break; } setValue(i, Codec.getCodec(columnType).decompose(toSet)); } @@ -201,7 +196,7 @@ public ByteBuffer getRoutingKey() { * @throws InvalidTypeException if column {@code i} is not of type BOOLEAN. */ public BoundStatement setBool(int i, boolean v) { - metadata().checkType(i, DataType.Native.BOOLEAN); + metadata().checkType(i, DataType.Name.BOOLEAN); return setValue(i, BooleanType.instance.decompose(v)); } @@ -227,8 +222,7 @@ public BoundStatement setBool(String name, boolean v) { * @throws InvalidTypeException if column {@code i} is not of type INT. */ public BoundStatement setInt(int i, int v) { - DataType.Native type = metadata().checkType(i, DataType.Native.INT); - + metadata().checkType(i, DataType.Name.INT); return setValue(i, Int32Type.instance.decompose(v)); } @@ -254,7 +248,7 @@ public BoundStatement setInt(String name, int v) { * @throws InvalidTypeException if column {@code i} is of type BIGINT or COUNTER. */ public BoundStatement setLong(int i, long v) { - DataType.Native type = metadata().checkType(i, DataType.Native.BIGINT, DataType.Native.COUNTER); + metadata().checkType(i, DataType.Name.BIGINT, DataType.Name.COUNTER); return setValue(i, LongType.instance.decompose(v)); } @@ -280,7 +274,7 @@ public BoundStatement setLong(String name, long v) { * @throws InvalidTypeException if column {@code i} is not of type TIMESTAMP. */ public BoundStatement setDate(int i, Date v) { - metadata().checkType(i, DataType.Native.TIMESTAMP); + metadata().checkType(i, DataType.Name.TIMESTAMP); return setValue(i, DateType.instance.decompose(v)); } @@ -306,7 +300,7 @@ public BoundStatement setDate(String name, Date v) { * @throws InvalidTypeException if column {@code i} is not of type FLOAT. */ public BoundStatement setFloat(int i, float v) { - DataType.Native type = metadata().checkType(i, DataType.Native.FLOAT); + metadata().checkType(i, DataType.Name.FLOAT); return setValue(i, FloatType.instance.decompose(v)); } @@ -332,7 +326,7 @@ public BoundStatement setFloat(String name, float v) { * @throws InvalidTypeException if column {@code i} is not of type DOUBLE. */ public BoundStatement setDouble(int i, double v) { - DataType.Native type = metadata().checkType(i, DataType.Native.DOUBLE); + metadata().checkType(i, DataType.Name.DOUBLE); return setValue(i, DoubleType.instance.decompose(v)); } @@ -359,9 +353,9 @@ public BoundStatement setDouble(String name, double v) { * following types: VARCHAR, TEXT or ASCII. 
*/ public BoundStatement setString(int i, String v) { - DataType.Native type = metadata().checkType(i, DataType.Native.VARCHAR, - DataType.Native.TEXT, - DataType.Native.ASCII); + DataType.Name type = metadata().checkType(i, DataType.Name.VARCHAR, + DataType.Name.TEXT, + DataType.Name.ASCII); switch (type) { case ASCII: return setValue(i, AsciiType.instance.decompose(v)); @@ -400,7 +394,7 @@ public BoundStatement setString(String name, String v) { * @throws InvalidTypeException if column {@code i} is not of type BLOB. */ public BoundStatement setBytes(int i, ByteBuffer v) { - DataType.Native type = metadata().checkType(i, DataType.Native.BLOB); + metadata().checkType(i, DataType.Name.BLOB); return setBytesUnsafe(i, v); } @@ -463,7 +457,7 @@ public BoundStatement setBytesUnsafe(String name, ByteBuffer v) { * @throws InvalidTypeException if column {@code i} is not of type VARINT. */ public BoundStatement setVarint(int i, BigInteger v) { - metadata().checkType(i, DataType.Native.VARINT); + metadata().checkType(i, DataType.Name.VARINT); return setValue(i, IntegerType.instance.decompose(v)); } @@ -489,7 +483,7 @@ public BoundStatement setVarint(String name, BigInteger v) { * @throws InvalidTypeException if column {@code i} is not of type DECIMAL. */ public BoundStatement setDecimal(int i, BigDecimal v) { - metadata().checkType(i, DataType.Native.DECIMAL); + metadata().checkType(i, DataType.Name.DECIMAL); return setValue(i, DecimalType.instance.decompose(v)); } @@ -517,13 +511,13 @@ public BoundStatement setDecimal(String name, BigDecimal v) { * not a type 1 UUID. */ public BoundStatement setUUID(int i, UUID v) { - DataType.Native type = metadata().checkType(i, DataType.Native.UUID, - DataType.Native.TIMEUUID); + DataType.Name type = metadata().checkType(i, DataType.Name.UUID, + DataType.Name.TIMEUUID); - if (type == DataType.Native.TIMEUUID && v.version() != 1) + if (type == DataType.Name.TIMEUUID && v.version() != 1) throw new InvalidTypeException(String.format("%s is not a Type 1 (time-based) UUID", v)); - return type == DataType.Native.UUID + return type == DataType.Name.UUID ? setValue(i, UUIDType.instance.decompose(v)) : setValue(i, TimeUUIDType.instance.decompose(v)); } @@ -552,7 +546,7 @@ public BoundStatement setUUID(String name, UUID v) { * @throws InvalidTypeException if column {@code i} is not of type INET. */ public BoundStatement setInet(int i, InetAddress v) { - metadata().checkType(i, DataType.Native.INET); + metadata().checkType(i, DataType.Name.INET); return setValue(i, InetAddressType.instance.decompose(v)); } @@ -581,7 +575,7 @@ public BoundStatement setInet(String name, InetAddress v) { */ public BoundStatement setList(int i, List v) { DataType type = metadata().getType(i); - if (!type.isCollection() || type.asCollection().getKind() != DataType.Collection.Kind.LIST) + if (type.getName() != DataType.Name.LIST) throw new InvalidTypeException(String.format("Column %s is of type %s, cannot set to a list", metadata().getName(i), type)); // If the list is empty, it will never fail validation, but otherwise we should check the list given if of the right type @@ -589,8 +583,7 @@ public BoundStatement setList(int i, List v) { // Ugly? 
Yes Class klass = v.get(0).getClass(); - DataType.Native eltType = (DataType.Native)((DataType.Collection.List)type).getElementsType(); - if (!Codec.isCompatibleSupertype(eltType, klass)) + if (!Codec.isCompatibleSupertype(type.getTypeArguments().get(0), klass)) throw new InvalidTypeException(String.format("Column %s is a %s, cannot set to a list of %s", metadata().getName(i), type, klass)); } @@ -624,7 +617,7 @@ public BoundStatement setList(String name, List v) { */ public BoundStatement setMap(int i, Map v) { DataType type = metadata().getType(i); - if (!type.isCollection() || type.asCollection().getKind() != DataType.Collection.Kind.MAP) + if (type.getName() != DataType.Name.MAP) throw new InvalidTypeException(String.format("Column %s is of type %s, cannot set to a map", metadata().getName(i), type)); if (!v.isEmpty()) { @@ -633,9 +626,8 @@ public BoundStatement setMap(int i, Map v) { Class keysClass = entry.getKey().getClass(); Class valuesClass = entry.getValue().getClass(); - DataType.Collection.Map mapType = (DataType.Collection.Map)type; - DataType.Native keysType = (DataType.Native)mapType.getKeysType(); - DataType.Native valuesType = (DataType.Native)mapType.getValuesType(); + DataType keysType = type.getTypeArguments().get(0); + DataType valuesType = type.getTypeArguments().get(1); if (!Codec.isCompatibleSupertype(keysType, keysClass) || !Codec.isCompatibleSupertype(valuesType, valuesClass)) throw new InvalidTypeException(String.format("Column %s is a %s, cannot set to a map of %s -> %s", metadata().getName(i), type, keysType, valuesType)); } @@ -670,15 +662,14 @@ public BoundStatement setMap(String name, Map v) { */ public BoundStatement setSet(int i, Set v) { DataType type = metadata().getType(i); - if (!type.isCollection() || type.asCollection().getKind() != DataType.Collection.Kind.SET) + if (type.getName() != DataType.Name.SET) throw new InvalidTypeException(String.format("Column %s is of type %s, cannot set to a set", metadata().getName(i), type)); if (!v.isEmpty()) { // Ugly? Yes Class klass = v.iterator().next().getClass(); - DataType.Native eltType = (DataType.Native)((DataType.Collection.Set)type).getElementsType(); - if (!Codec.isCompatibleSupertype(eltType, klass)) + if (!Codec.isCompatibleSupertype(type.getTypeArguments().get(0), klass)) throw new InvalidTypeException(String.format("Column %s is a %s, cannot set to a set of %s", metadata().getName(i), type, klass)); } diff --git a/driver-core/src/main/java/com/datastax/driver/core/CQLRow.java b/driver-core/src/main/java/com/datastax/driver/core/CQLRow.java index 14756c5c87a..08d09bc5199 100644 --- a/driver-core/src/main/java/com/datastax/driver/core/CQLRow.java +++ b/driver-core/src/main/java/com/datastax/driver/core/CQLRow.java @@ -76,7 +76,7 @@ public boolean isNull(String name) { * @throws InvalidTypeException if column {@code i} is not of type BOOLEAN. */ public boolean getBool(int i) { - metadata.checkType(i, DataType.Native.BOOLEAN); + metadata.checkType(i, DataType.Name.BOOLEAN); ByteBuffer value = data.get(i); if (value == null || value.remaining() == 0) @@ -111,7 +111,7 @@ public boolean getBool(String name) { * @throws InvalidTypeException if column {@code i} is not of type INT. 
*/ public int getInt(int i) { - metadata.checkType(i, DataType.Native.INT); + metadata.checkType(i, DataType.Name.INT); ByteBuffer value = data.get(i); if (value == null || value.remaining() == 0) @@ -146,7 +146,7 @@ public int getInt(String name) { * @throws InvalidTypeException if column {@code i} is not of type BIGINT or COUNTER. */ public long getLong(int i) { - DataType type = metadata.checkType(i, DataType.Native.BIGINT, DataType.Native.COUNTER); + metadata.checkType(i, DataType.Name.BIGINT, DataType.Name.COUNTER); ByteBuffer value = data.get(i); if (value == null || value.remaining() == 0) @@ -181,7 +181,7 @@ public long getLong(String name) { * @throws InvalidTypeException if column {@code i} is not of type TIMESTAMP. */ public Date getDate(int i) { - metadata.checkType(i, DataType.Native.TIMESTAMP); + metadata.checkType(i, DataType.Name.TIMESTAMP); ByteBuffer value = data.get(i); if (value == null || value.remaining() == 0) @@ -216,7 +216,7 @@ public Date getDate(String name) { * @throws InvalidTypeException if column {@code i} is not of type FLOAT. */ public float getFloat(int i) { - metadata.checkType(i, DataType.Native.FLOAT); + metadata.checkType(i, DataType.Name.FLOAT); ByteBuffer value = data.get(i); if (value == null || value.remaining() == 0) @@ -251,7 +251,7 @@ public float getFloat(String name) { * @throws InvalidTypeException if column {@code i} is not of type DOUBLE. */ public double getDouble(int i) { - DataType type = metadata.checkType(i, DataType.Native.DOUBLE); + metadata.checkType(i, DataType.Name.DOUBLE); ByteBuffer value = data.get(i); if (value == null || value.remaining() == 0) @@ -332,7 +332,7 @@ public ByteBuffer getBytesUnsafe(String name) { * @throws InvalidTypeException if column {@code i} type is not of type BLOB. */ public ByteBuffer getBytes(int i) { - DataType type = metadata.checkType(i, DataType.Native.BLOB); + metadata.checkType(i, DataType.Name.BLOB); return getBytesUnsafe(i); } @@ -366,15 +366,15 @@ public ByteBuffer getBytes(String name) { * VARCHAR, TEXT or ASCII. */ public String getString(int i) { - DataType type = metadata.checkType(i, DataType.Native.VARCHAR, - DataType.Native.TEXT, - DataType.Native.ASCII); + DataType.Name type = metadata.checkType(i, DataType.Name.VARCHAR, + DataType.Name.TEXT, + DataType.Name.ASCII); ByteBuffer value = data.get(i); if (value == null) return null; - return type == DataType.Native.ASCII + return type == DataType.Name.ASCII ? AsciiType.instance.compose(value) : UTF8Type.instance.compose(value); } @@ -406,7 +406,7 @@ public String getString(String name) { * @throws InvalidTypeException if column {@code i} is not of type VARINT. */ public BigInteger getVarint(int i) { - metadata.checkType(i, DataType.Native.VARINT); + metadata.checkType(i, DataType.Name.VARINT); ByteBuffer value = data.get(i); if (value == null || value.remaining() == 0) @@ -441,7 +441,7 @@ public BigInteger getVarint(String name) { * @throws InvalidTypeException if column {@code i} is not of type DECIMAL. */ public BigDecimal getDecimal(int i) { - metadata.checkType(i, DataType.Native.DECIMAL); + metadata.checkType(i, DataType.Name.DECIMAL); ByteBuffer value = data.get(i); if (value == null || value.remaining() == 0) @@ -477,13 +477,13 @@ public BigDecimal getDecimal(String name) { * or TIMEUUID. 
*/ public UUID getUUID(int i) { - DataType type = metadata.checkType(i, DataType.Native.UUID, DataType.Native.TIMEUUID); + DataType.Name type = metadata.checkType(i, DataType.Name.UUID, DataType.Name.TIMEUUID); ByteBuffer value = data.get(i); if (value == null || value.remaining() == 0) return null; - return type == DataType.Native.UUID + return type == DataType.Name.UUID ? UUIDType.instance.compose(value) : TimeUUIDType.instance.compose(value); } @@ -515,7 +515,7 @@ public UUID getUUID(String name) { * @throws InvalidTypeException if column {@code i} is not of type INET. */ public InetAddress getInet(int i) { - DataType type = metadata.checkType(i, DataType.Native.INET); + metadata.checkType(i, DataType.Name.INET); ByteBuffer value = data.get(i); if (value == null || value.remaining() == 0) @@ -556,11 +556,10 @@ public InetAddress getInet(String name) { */ public List getList(int i, Class elementsClass) { DataType type = metadata.getType(i); - if (!(type instanceof DataType.Collection.List)) + if (type.getName() != DataType.Name.LIST) throw new InvalidTypeException(String.format("Column %s is not of list type", metadata.getName(i))); - DataType.Native eltType = (DataType.Native)((DataType.Collection.List)type).getElementsType(); - if (!Codec.isCompatibleSubtype(eltType, elementsClass)) + if (!Codec.isCompatibleSubtype(type.getTypeArguments().get(0), elementsClass)) throw new InvalidTypeException(String.format("Column %s is a %s, cannot be retrieve as a list of %s", metadata.getName(i), type, elementsClass)); ByteBuffer value = data.get(i); @@ -606,11 +605,10 @@ public List getList(String name, Class elementsClass) { */ public Set getSet(int i, Class elementsClass) { DataType type = metadata.getType(i); - if (!(type instanceof DataType.Collection.Set)) + if (type.getName() != DataType.Name.SET) throw new InvalidTypeException(String.format("Column %s is not of set type", metadata.getName(i))); - DataType.Native eltType = (DataType.Native)((DataType.Collection.Set)type).getElementsType(); - if (!Codec.isCompatibleSubtype(eltType, elementsClass)) + if (!Codec.isCompatibleSubtype(type.getTypeArguments().get(0), elementsClass)) throw new InvalidTypeException(String.format("Column %s is a %s, cannot be retrieve as a set of %s", metadata.getName(i), type, elementsClass)); ByteBuffer value = data.get(i); @@ -657,12 +655,11 @@ public Set getSet(String name, Class elementsClass) { */ public Map getMap(int i, Class keysClass, Class valuesClass) { DataType type = metadata.getType(i); - if (!(type instanceof DataType.Collection.Map)) + if (type.getName() != DataType.Name.MAP) throw new InvalidTypeException(String.format("Column %s is not of map type", metadata.getName(i))); - DataType.Collection.Map mapType = (DataType.Collection.Map)type; - DataType.Native keysType = (DataType.Native)mapType.getKeysType(); - DataType.Native valuesType = (DataType.Native)mapType.getValuesType(); + DataType keysType = type.getTypeArguments().get(0); + DataType valuesType = type.getTypeArguments().get(1); if (!Codec.isCompatibleSubtype(keysType, keysClass) || !Codec.isCompatibleSubtype(valuesType, valuesClass)) throw new InvalidTypeException(String.format("Column %s is a %s, cannot be retrieve as a map of %s -> %s", metadata.getName(i), type, keysClass, valuesClass)); diff --git a/driver-core/src/main/java/com/datastax/driver/core/Codec.java b/driver-core/src/main/java/com/datastax/driver/core/Codec.java index b66f33e27ab..4b754c7c78c 100644 --- a/driver-core/src/main/java/com/datastax/driver/core/Codec.java +++ 
b/driver-core/src/main/java/com/datastax/driver/core/Codec.java @@ -17,36 +17,32 @@ */ class Codec { - private static Map, DataType.Native> rawNativeMap = new HashMap, DataType.Native>() {{ - put(AsciiType.instance, DataType.Native.ASCII); - put(LongType.instance, DataType.Native.BIGINT); - put(BytesType.instance, DataType.Native.BLOB); - put(BooleanType.instance, DataType.Native.BOOLEAN); - put(CounterColumnType.instance, DataType.Native.COUNTER); - put(DecimalType.instance, DataType.Native.DECIMAL); - put(DoubleType.instance, DataType.Native.DOUBLE); - put(FloatType.instance, DataType.Native.FLOAT); - put(InetAddressType.instance, DataType.Native.INET); - put(Int32Type.instance, DataType.Native.INT); - put(UTF8Type.instance, DataType.Native.TEXT); - put(DateType.instance, DataType.Native.TIMESTAMP); - put(UUIDType.instance, DataType.Native.UUID); - put(IntegerType.instance, DataType.Native.VARINT); - put(TimeUUIDType.instance, DataType.Native.TIMEUUID); + private static Map, DataType> rawNativeMap = new HashMap, DataType>() {{ + put(AsciiType.instance, DataType.ascii()); + put(LongType.instance, DataType.bigint()); + put(BytesType.instance, DataType.blob()); + put(BooleanType.instance, DataType.cboolean()); + put(CounterColumnType.instance, DataType.counter()); + put(DecimalType.instance, DataType.decimal()); + put(DoubleType.instance, DataType.cdouble()); + put(FloatType.instance, DataType.cfloat()); + put(InetAddressType.instance, DataType.inet()); + put(Int32Type.instance, DataType.cint()); + put(UTF8Type.instance, DataType.text()); + put(DateType.instance, DataType.timestamp()); + put(UUIDType.instance, DataType.uuid()); + put(IntegerType.instance, DataType.varint()); + put(TimeUUIDType.instance, DataType.timeuuid()); }}; private Codec() {} public static AbstractType getCodec(DataType type) { - if (type.isCollection()) - return (AbstractType)collectionCodec(type.asCollection()); - else - return (AbstractType)nativeCodec(type.asNative()); + return (AbstractType)getCodecInternal(type); } - private static AbstractType nativeCodec(DataType.Native type) { - - switch (type) { + private static AbstractType getCodecInternal(DataType type) { + switch (type.getName()) { case ASCII: return AsciiType.instance; case BIGINT: return LongType.instance; case BLOB: return BytesType.instance; @@ -63,26 +59,10 @@ private static AbstractType nativeCodec(DataType.Native type) { case VARCHAR: return UTF8Type.instance; case VARINT: return IntegerType.instance; case TIMEUUID: return TimeUUIDType.instance; - default: throw new RuntimeException("Unknown native type"); - } - } - - private static AbstractType collectionCodec(DataType.Collection type) { - - switch (type.getKind()) { - case LIST: - AbstractType listElts = getCodec(((DataType.Collection.List)type).getElementsType()); - return ListType.getInstance(listElts); - case SET: - AbstractType setElts = getCodec(((DataType.Collection.Set)type).getElementsType()); - return SetType.getInstance(setElts); - case MAP: - DataType.Collection.Map mt = (DataType.Collection.Map)type; - AbstractType mapKeys = getCodec(mt.getKeysType()); - AbstractType mapValues = getCodec(mt.getValuesType()); - return MapType.getInstance(mapKeys, mapValues); - default: - throw new RuntimeException("Unknown collection type"); + case LIST: return ListType.getInstance(getCodec(type.getTypeArguments().get(0))); + case SET: return SetType.getInstance(getCodec(type.getTypeArguments().get(0))); + case MAP: return MapType.getInstance(getCodec(type.getTypeArguments().get(0)), 
getCodec(type.getTypeArguments().get(1))); + default: throw new RuntimeException("Unknown type"); } } @@ -94,26 +74,20 @@ public static DataType rawTypeToDataType(AbstractType rawType) { if (rawType instanceof CollectionType) { switch (((CollectionType)rawType).kind) { case LIST: - DataType listElts = rawTypeToDataType(((ListType)rawType).elements); - return new DataType.Collection.List(listElts); + return DataType.list(rawTypeToDataType(((ListType)rawType).elements)); case SET: - DataType setElts = rawTypeToDataType(((SetType)rawType).elements); - return new DataType.Collection.Set(setElts); + return DataType.set(rawTypeToDataType(((SetType)rawType).elements)); case MAP: MapType mt = (MapType)rawType; - DataType mapKeys = rawTypeToDataType(mt.keys); - DataType mapValues = rawTypeToDataType(mt.values); - return new DataType.Collection.Map(mapKeys, mapValues); - default: - throw new DriverInternalError("Unknown collection type"); + return DataType.map(rawTypeToDataType(mt.keys), rawTypeToDataType(mt.values)); } } - throw new DriverInternalError("Unknown type: " + rawType); + throw new DriverInternalError("Unsupported type: " + rawType); } // Returns whether type can be safely subtyped to klass - public static boolean isCompatibleSubtype(DataType.Native type, Class klass) { - switch (type) { + public static boolean isCompatibleSubtype(DataType type, Class klass) { + switch (type.getName()) { case ASCII: return klass.isAssignableFrom(String.class); case BIGINT: return klass.isAssignableFrom(Long.class); case BLOB: return klass.isAssignableFrom(ByteBuffer.class); @@ -130,13 +104,13 @@ public static boolean isCompatibleSubtype(DataType.Native type, Class klass) { case VARCHAR: return klass.isAssignableFrom(String.class); case VARINT: return klass.isAssignableFrom(BigInteger.class); case TIMEUUID: return klass.isAssignableFrom(UUID.class); - default: throw new RuntimeException("Unknown native type"); + default: throw new RuntimeException("Unknown non-collection type " + type); } } // Returns whether klass can be safely subtyped to klass, i.e. if type is a supertype of klass - public static boolean isCompatibleSupertype(DataType.Native type, Class klass) { - switch (type) { + public static boolean isCompatibleSupertype(DataType type, Class klass) { + switch (type.getName()) { case ASCII: return String.class.isAssignableFrom(klass); case BIGINT: return Long.class.isAssignableFrom(klass); case BLOB: return ByteBuffer.class.isAssignableFrom(klass); @@ -153,7 +127,7 @@ public static boolean isCompatibleSupertype(DataType.Native type, Class klass) { case VARCHAR: return String.class.isAssignableFrom(klass); case VARINT: return BigInteger.class.isAssignableFrom(klass); case TIMEUUID: return UUID.class.isAssignableFrom(klass); - default: throw new RuntimeException("Unknown native type"); + default: throw new RuntimeException("Unknown non-collection type " + type); } } } diff --git a/driver-core/src/main/java/com/datastax/driver/core/ColumnDefinitions.java b/driver-core/src/main/java/com/datastax/driver/core/ColumnDefinitions.java index 566abdedca5..1187eae5c7f 100644 --- a/driver-core/src/main/java/com/datastax/driver/core/ColumnDefinitions.java +++ b/driver-core/src/main/java/com/datastax/driver/core/ColumnDefinitions.java @@ -169,13 +169,27 @@ void checkBounds(int i) { throw new ArrayIndexOutOfBoundsException(i); } - DataType.Native checkType(int i, DataType.Native... types) { + // Note: we avoid having a vararg method to avoid the array allocation that comes with it. 
+ void checkType(int i, DataType.Name name) { DataType defined = getType(i); - for (DataType.Native type : types) - if (type == defined) - return type; + if (name != defined.getName()) + throw new InvalidTypeException(String.format("Column %s is of type %s", getName(i), defined)); + } + + DataType.Name checkType(int i, DataType.Name name1, DataType.Name name2) { + DataType defined = getType(i); + if (name1 != defined.getName() && name2 != defined.getName()) + throw new InvalidTypeException(String.format("Column %s is of type %s", getName(i), defined)); + + return defined.getName(); + } + + DataType.Name checkType(int i, DataType.Name name1, DataType.Name name2, DataType.Name name3) { + DataType defined = getType(i); + if (name1 != defined.getName() && name2 != defined.getName() && name3 != defined.getName()) + throw new InvalidTypeException(String.format("Column %s is of type %s", getName(i), defined)); - throw new InvalidTypeException(String.format("Column %s is of type %s", getName(i), defined)); + return defined.getName(); } /** diff --git a/driver-core/src/main/java/com/datastax/driver/core/DataType.java b/driver-core/src/main/java/com/datastax/driver/core/DataType.java index df7d581d8ba..06b9774bd5b 100644 --- a/driver-core/src/main/java/com/datastax/driver/core/DataType.java +++ b/driver-core/src/main/java/com/datastax/driver/core/DataType.java @@ -1,76 +1,24 @@ package com.datastax.driver.core; import java.nio.ByteBuffer; +import java.util.Collections; +import java.util.EnumMap; +import java.util.List; +import java.util.Map; +import java.util.Set; import org.apache.cassandra.db.marshal.MarshalException; +import com.google.common.collect.ImmutableList; +import com.google.common.collect.ImmutableSet; import com.datastax.driver.core.exceptions.InvalidTypeException; /** - * Supported data types for columns. + * Data types supported by cassandra. */ -public interface DataType { +public class DataType { - /** - * A Cassandra type. - * - * There is two family of type: the native ones and the collection ones. - * One can decide if the type is a native type of a collection one using - * the {@link #isCollection method}. - * - * The {@code NATIVE} types are described in the - * CQL documentation. - * - * The {@code COLLECTION} types are the maps, lists and sets. - */ - public enum Kind { NATIVE, COLLECTION } - - /** - * Returns whether this type is a collection type. - * - * @return {@code true} if the type is a collection one, {@code false} if - * it is a native type. - */ - public boolean isCollection(); - - /** - * Returns this type as a {@link Native} type. - * - * @return this type as a {@link Native} type. - * - * @throws IllegalStateException if this type is not a {@link Native} type. - * You should use {@link #isCollection} to check if this type is a native one - * before calling this method. - */ - public Native asNative(); - - /** - * Returns this type as a {@link Collection} type. - * - * @return this type as a {@link Collection} type. - * - * @throws IllegalStateException if this type is not a {@link Collection} - * type. You should use {@link #isCollection} to check if this type is a - * collection one before calling this method. - */ - public Collection asCollection(); - - /** - * Parse a string value for the type this object represent, returning its - * Cassandra binary representation. - * - * @param value the value to parse. - * @return the binary representation of {@code value}. 
- * - * @throws InvalidTypeException if {@code value} is not a valid string - * representation for this type. - */ - public ByteBuffer parseString(String value); - - /** - * Native types supported by cassandra. - */ - public enum Native implements DataType { + public enum Name { ASCII, BIGINT, @@ -87,168 +35,302 @@ public enum Native implements DataType { UUID, VARCHAR, VARINT, - TIMEUUID; + TIMEUUID, + LIST, + SET, + MAP; - public boolean isCollection() { return false; } - public Native asNative() { return this; } - public Collection asCollection() { throw new IllegalStateException("Not a collection type, but a native one"); } + /** + * Whether this data type name represent the name of a collection type (e.g. list, set or map). + * + * @return whether this data type name represent the name of a collection type. + */ + public boolean isCollection() { + switch (this) { + case LIST: + case SET: + case MAP: + return true; + } + return false; + } @Override public String toString() { return super.toString().toLowerCase(); } + } - public ByteBuffer parseString(String value) - { - try { - return Codec.getCodec(this).fromString(value); - } catch (MarshalException e) { - throw new InvalidTypeException(String.format("Cannot parse '%s' as a %s value (%s)", value, this, e.getMessage())); - } + private final DataType.Name name; + private final List typeArguments; + + private static final Map primitiveTypeMap = new EnumMap(Name.class); + static { + for (Name name : Name.values()) { + if (!name.isCollection()) + primitiveTypeMap.put(name, new DataType(name, Collections.emptyList())); } } + private static final Set primitveTypeSet = ImmutableSet.copyOf(primitiveTypeMap.values()); + + private DataType(DataType.Name name, List typeArguments) { + this.name = name; + this.typeArguments = typeArguments; + } /** - * A collection type (lists, sets and maps). + * The ASCII type. + * + * @return The ASCII type. */ - public static abstract class Collection implements DataType { + public static DataType ascii() { + return primitiveTypeMap.get(Name.ASCII); + } - /** - * The kind of collection a collection type represents. - */ - public enum Kind { LIST, SET, MAP }; + /** + * The BIGINT type. + * + * @return The BIGINT type. + */ + public static DataType bigint() { + return primitiveTypeMap.get(Name.BIGINT); + } - private final Kind kind; + /** + * The BLOB type. + * + * @return The BLOB type. + */ + public static DataType blob() { + return primitiveTypeMap.get(Name.BLOB); + } - protected Collection(Kind kind) { - this.kind = kind; - } + /** + * The BOOLEAN type. + * + * @return The BOOLEAN type. + */ + public static DataType cboolean() { + return primitiveTypeMap.get(Name.BOOLEAN); + } - public boolean isCollection() { return true; } + /** + * The COUNTER type. + * + * @return The COUNTER type. + */ + public static DataType counter() { + return primitiveTypeMap.get(Name.COUNTER); + } - /** - * The kind of collection this type represents. - * - * @return the kind of collection (list, set or map) this type - * represents. - */ - public Kind getKind() { return kind; } + /** + * The DECIMAL type. + * + * @return The DECIMAL type. + */ + public static DataType decimal() { + return primitiveTypeMap.get(Name.DECIMAL); + } - public Native asNative() { throw new IllegalStateException("Not a native type, but a collection one"); } - public Collection asCollection() { return this; } + /** + * The DOUBLE type. + * + * @return The DOUBLE type. 
+ */ + public static DataType cdouble() { + return primitiveTypeMap.get(Name.DOUBLE); + } - public ByteBuffer parseString(String value) - { - try { - return Codec.getCodec(this).fromString(value); - } catch (MarshalException e) { - throw new InvalidTypeException(String.format("Cannot parse '%s' as a %s value (%s)", value, this, e.getMessage())); - } - } + /** + * The FLOAT type. + * + * @return The FLOAT type. + */ + public static DataType cfloat() { + return primitiveTypeMap.get(Name.FLOAT); + } - /** - * The type of lists. - */ - public static class List extends Collection { - private final DataType elementsType; - - /** - * Creates a list type with the provided element type. - * - * @param elementsType the type of the elements of the list. - */ - public List(DataType elementsType) { - super(Kind.LIST); - this.elementsType = elementsType; - } + /** + * The INET type. + * + * @return The INET type. + */ + public static DataType inet() { + return primitiveTypeMap.get(Name.INET); + } - /** - * The data type of the elements for this list type. - * - * @return the data type of the elements for this list type. - */ - public DataType getElementsType() { - return elementsType; - } + /** + * The INT type. + * + * @return The INT type. + */ + public static DataType cint() { + return primitiveTypeMap.get(Name.INT); + } - @Override - public String toString() { - return "list<" + elementsType + ">"; - } - } + /** + * The TEXT type. + * + * @return The TEXT type. + */ + public static DataType text() { + return primitiveTypeMap.get(Name.TEXT); + } - /** - * The type of sets. - */ - public static class Set extends Collection { - private final DataType elementsType; - - /** - * Creates a set type with the provided element type. - * - * @param elementsType the type of the elements of the set. - */ - public Set(DataType elementsType) { - super(Kind.SET); - this.elementsType = elementsType; - } + /** + * The TIMESTAMP type. + * + * @return The TIMESTAMP type. + */ + public static DataType timestamp() { + return primitiveTypeMap.get(Name.TIMESTAMP); + } - /** - * The data type of the elements for this set type. - * - * @return the data type of the elements for this set type. - */ - public DataType getElementsType() { - return elementsType; - } + /** + * The UUID type. + * + * @return The UUID type. + */ + public static DataType uuid() { + return primitiveTypeMap.get(Name.UUID); + } - @Override - public String toString() { - return "list<" + elementsType + ">"; - } - } + /** + * The VARCHAR type. + * + * @return The VARCHAR type. + */ + public static DataType varchar() { + return primitiveTypeMap.get(Name.VARCHAR); + } - /** - * The type of maps. - */ - public static class Map extends Collection { - private final DataType keysType; - private final DataType valuesType; - - /** - * Creates a map type with the provided key and value type. - * - * @param keysType the type of the keys of the map. - * @param valuesType the type of the keys of the map. - */ - public Map(DataType keysType, DataType valuesType) { - super(Kind.MAP); - this.keysType = keysType; - this.valuesType = valuesType; - } + /** + * The VARINT type. + * + * @return The VARINT type. + */ + public static DataType varint() { + return primitiveTypeMap.get(Name.VARINT); + } - /** - * The data type of the keys for this map type. - * - * @return the data type of the keys for this map type. - */ - public DataType getKeysType() { - return keysType; - } + /** + * The TIMEUUID type. + * + * @return The TIMEUUID type. 
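As an aside, a minimal sketch of the resulting factory-method API (list(), map() and toString() appear a few hunks below; the behavior shown follows directly from them):

    DataType varcharType = DataType.varchar();            // shared singleton
    assert varcharType == DataType.varchar();             // same instance on every call
    DataType listOfInts = DataType.list(DataType.cint()); // list<int>
    DataType textToUuid = DataType.map(DataType.text(), DataType.uuid());
    System.out.println(textToUuid);                       // prints "map<text, uuid>"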
+ */ + public static DataType timeuuid() { + return primitiveTypeMap.get(Name.TIMEUUID); + } - /** - * The data type of the values for this map type. - * - * @return the data type of the values for this map type. - */ - public DataType getValuesType() { - return valuesType; - } + /** + * The type of lists of {@code elementType} elements. + * + * @param elementType the type of the list elements. + * @return the type of lists of {@code elementType} elements. + */ + public static DataType list(DataType elementType) { + return new DataType(Name.LIST, ImmutableList.of(elementType)); + } - @Override - public String toString() { - return "map<" + keysType + ", " + valuesType + ">"; - } + /** + * The type of sets of {@code elementType} elements. + * + * @param elementType the type of the set elements. + * @return the type of sets of {@code elementType} elements. + */ + public static DataType set(DataType elementType) { + return new DataType(Name.SET, ImmutableList.of(elementType)); + } + + /** + * The type of maps of {@code keyType} to {@code valueType} elements. + * + * @param keyType the type of the map keys. + * @param valueType the type of the map values. + * @return the type of map of {@code keyType} to {@code valueType} elements. + */ + public static DataType map(DataType keyType, DataType valueType) { + return new DataType(Name.MAP, ImmutableList.of(keyType, valueType)); + } + + /** + * The name of that type. + * + * @return the name of that type. + */ + public Name getName() { + return name; + } + + /** + * The type arguments of this type. + *

+ * Note that only the collection types (LIST, MAP, SET) have type + * arguments. For the other types, this will return an empty list. + *

+ * For the collection types: + *

    + *
  • For lists and sets, this method returns one argument, the type of + * the elements.
  • + *
  • For maps, this method returns two arguments, the first one is the + * type of the map keys, the second one is the type of the map + * values.
  • + *
      + * + * @return an immutable list containing the type arguments of this type. + */ + public List getTypeArguments() { + return typeArguments; + } + + /** + * Parse a string value for the type this object represent, returning its + * Cassandra binary representation. + * + * @param value the value to parse. + * @return the binary representation of {@code value}. + * + * @throws InvalidTypeException if {@code value} is not a valid string + * representation for this type. + */ + public ByteBuffer parse(String value) { + try { + return Codec.getCodec(this).fromString(value); + } catch (MarshalException e) { + throw new InvalidTypeException(String.format("Cannot parse '%s' as a %s value (%s)", value, this, e.getMessage())); + } + } + + /** + * Returns whether this type is a collection one, i.e. a list, set or map type. + * + * @return whether this type is a collection one. + */ + public boolean isCollection() { + return name.isCollection(); + } + + /** + * Returns a set of all the primitive types, where primitive types are + * defined as the types that don't have type arguments (i.e. excluding + * lists, sets and maps). + * + * @return returns a set of all the primitive types. + */ + public static Set allPrimitiveTypes() { + return primitveTypeSet; + } + + @Override + public String toString() { + switch (name) { + case LIST: + case SET: + return name + "<" + typeArguments.get(0) + ">"; + case MAP: + return name + "<" + typeArguments.get(0) + ", " + typeArguments.get(1) + ">"; + default: + return name.toString(); } } } diff --git a/driver-core/src/main/java/com/datastax/driver/core/HostConnectionPool.java b/driver-core/src/main/java/com/datastax/driver/core/HostConnectionPool.java index eab1b23d20d..8ccbf6baeba 100644 --- a/driver-core/src/main/java/com/datastax/driver/core/HostConnectionPool.java +++ b/driver-core/src/main/java/com/datastax/driver/core/HostConnectionPool.java @@ -73,7 +73,9 @@ public Connection borrowConnection(long timeout, TimeUnit unit) throws Connectio scheduledForCreation.incrementAndGet(); manager.executor().submit(newConnectionTask); } - return waitForConnection(timeout, unit); + Connection c = waitForConnection(timeout, unit); + c.setKeyspace(manager.poolsState.keyspace); + return c; } int minInFlight = Integer.MAX_VALUE; diff --git a/driver-core/src/main/java/com/datastax/driver/core/ResultSetFuture.java b/driver-core/src/main/java/com/datastax/driver/core/ResultSetFuture.java index 0a8c7de5897..ac1dcc0ab21 100644 --- a/driver-core/src/main/java/com/datastax/driver/core/ResultSetFuture.java +++ b/driver-core/src/main/java/com/datastax/driver/core/ResultSetFuture.java @@ -45,6 +45,7 @@ public void onSet(Connection connection, Message.Response response) { switch (rm.kind) { case SET_KEYSPACE: // propagate the keyspace change to other connections + session.poolsState.setKeyspace(((ResultMessage.SetKeyspace)rm).keyspace); set(ResultSet.fromMessage(rm, session, connection.address)); break; case SCHEMA_CHANGE: diff --git a/driver-core/src/main/java/com/datastax/driver/core/utils/querybuilder/BuiltStatement.java b/driver-core/src/main/java/com/datastax/driver/core/utils/querybuilder/BuiltStatement.java index c5e2fba021d..4a967e51e6d 100644 --- a/driver-core/src/main/java/com/datastax/driver/core/utils/querybuilder/BuiltStatement.java +++ b/driver-core/src/main/java/com/datastax/driver/core/utils/querybuilder/BuiltStatement.java @@ -39,7 +39,7 @@ void maybeAddRoutingKey(String name, Object value) { for (int i = 0; i < partitionKey.size(); i++) { if 
(name.equals(partitionKey.get(i).getName())) { - routingKey[i] = partitionKey.get(i).getType().parseString(Utils.toRawString(value)); + routingKey[i] = partitionKey.get(i).getType().parse(Utils.toRawString(value)); return; } } diff --git a/driver-core/src/test/java/com/datastax/driver/core/PreparedStatementTest.java b/driver-core/src/test/java/com/datastax/driver/core/PreparedStatementTest.java index bde9d9ee0c4..542a606c847 100644 --- a/driver-core/src/test/java/com/datastax/driver/core/PreparedStatementTest.java +++ b/driver-core/src/test/java/com/datastax/driver/core/PreparedStatementTest.java @@ -20,15 +20,18 @@ public class PreparedStatementTest extends CCMBridge.PerClassSingleNodeCluster { private static final String ALL_SET_TABLE = "all_set"; private static final String ALL_MAP_TABLE = "all_map"; + private boolean exclude(DataType t) { + return t.getName() == DataType.Name.COUNTER; + } + protected Collection getTableDefinitions() { List defs = new ArrayList(4); StringBuilder sb = new StringBuilder(); sb.append("CREATE TABLE ").append(ALL_NATIVE_TABLE).append(" (k text PRIMARY KEY"); - for (DataType.Native type : DataType.Native.values()) { - // This must be handled separatly - if (type == DataType.Native.COUNTER) + for (DataType type : DataType.allPrimitiveTypes()) { + if (exclude(type)) continue; sb.append(", c_").append(type).append(" ").append(type); } @@ -37,9 +40,8 @@ protected Collection getTableDefinitions() { sb = new StringBuilder(); sb.append("CREATE TABLE ").append(ALL_LIST_TABLE).append(" (k text PRIMARY KEY"); - for (DataType.Native type : DataType.Native.values()) { - // This must be handled separatly - if (type == DataType.Native.COUNTER) + for (DataType type : DataType.allPrimitiveTypes()) { + if (exclude(type)) continue; sb.append(", c_list_").append(type).append(" list<").append(type).append(">"); } @@ -48,9 +50,9 @@ protected Collection getTableDefinitions() { sb = new StringBuilder(); sb.append("CREATE TABLE ").append(ALL_SET_TABLE).append(" (k text PRIMARY KEY"); - for (DataType.Native type : DataType.Native.values()) { + for (DataType type : DataType.allPrimitiveTypes()) { // This must be handled separatly - if (type == DataType.Native.COUNTER) + if (exclude(type)) continue; sb.append(", c_set_").append(type).append(" set<").append(type).append(">"); } @@ -59,14 +61,14 @@ protected Collection getTableDefinitions() { sb = new StringBuilder(); sb.append("CREATE TABLE ").append(ALL_MAP_TABLE).append(" (k text PRIMARY KEY"); - for (DataType.Native keyType : DataType.Native.values()) { + for (DataType keyType : DataType.allPrimitiveTypes()) { // This must be handled separatly - if (keyType == DataType.Native.COUNTER) + if (exclude(keyType)) continue; - for (DataType.Native valueType : DataType.Native.values()) { + for (DataType valueType : DataType.allPrimitiveTypes()) { // This must be handled separatly - if (valueType == DataType.Native.COUNTER) + if (exclude(valueType)) continue; sb.append(", c_map_").append(keyType).append("_").append(valueType).append(" map<").append(keyType).append(",").append(valueType).append(">"); } @@ -80,9 +82,9 @@ protected Collection getTableDefinitions() { @Test public void preparedNativeTest() throws NoHostAvailableException { // Test preparing/bounding for all native types - for (DataType.Native type : DataType.Native.values()) { + for (DataType type : DataType.allPrimitiveTypes()) { // This must be handled separatly - if (type == DataType.Native.COUNTER) + if (exclude(type)) continue; String name = "c_" + type; @@ -98,13 +100,13 @@ 
public void preparedNativeTest() throws NoHostAvailableException { @Test public void prepareListTest() throws NoHostAvailableException { // Test preparing/bounding for all possible list types - for (DataType.Native rawType : DataType.Native.values()) { + for (DataType rawType : DataType.allPrimitiveTypes()) { // This must be handled separatly - if (rawType == DataType.Native.COUNTER) + if (exclude(rawType)) continue; String name = "c_list_" + rawType; - DataType type = new DataType.Collection.List(rawType); + DataType type = DataType.list(rawType); List value = (List)getFixedValue(type);; PreparedStatement ps = session.prepare(String.format("INSERT INTO %s(k, %s) VALUES ('prepared_list', ?)", ALL_LIST_TABLE, name)); BoundStatement bs = ps.newBoundStatement(); @@ -118,13 +120,13 @@ public void prepareListTest() throws NoHostAvailableException { @Test public void prepareSetTest() throws NoHostAvailableException { // Test preparing/bounding for all possible set types - for (DataType.Native rawType : DataType.Native.values()) { + for (DataType rawType : DataType.allPrimitiveTypes()) { // This must be handled separatly - if (rawType == DataType.Native.COUNTER) + if (exclude(rawType)) continue; String name = "c_set_" + rawType; - DataType type = new DataType.Collection.Set(rawType); + DataType type = DataType.set(rawType); Set value = (Set)getFixedValue(type);; PreparedStatement ps = session.prepare(String.format("INSERT INTO %s(k, %s) VALUES ('prepared_set', ?)", ALL_SET_TABLE, name)); BoundStatement bs = ps.newBoundStatement(); @@ -138,18 +140,18 @@ public void prepareSetTest() throws NoHostAvailableException { @Test public void prepareMapTest() throws NoHostAvailableException { // Test preparing/bounding for all possible map types - for (DataType.Native rawKeyType : DataType.Native.values()) { + for (DataType rawKeyType : DataType.allPrimitiveTypes()) { // This must be handled separatly - if (rawKeyType == DataType.Native.COUNTER) + if (exclude(rawKeyType)) continue; - for (DataType.Native rawValueType : DataType.Native.values()) { + for (DataType rawValueType : DataType.allPrimitiveTypes()) { // This must be handled separatly - if (rawValueType == DataType.Native.COUNTER) + if (exclude(rawValueType)) continue; String name = "c_map_" + rawKeyType + "_" + rawValueType; - DataType type = new DataType.Collection.Map(rawKeyType, rawValueType); + DataType type = DataType.map(rawKeyType, rawValueType); Map value = (Map)getFixedValue(type);; PreparedStatement ps = session.prepare(String.format("INSERT INTO %s(k, %s) VALUES ('prepared_map', ?)", ALL_MAP_TABLE, name)); BoundStatement bs = ps.newBoundStatement(); diff --git a/driver-core/src/test/java/com/datastax/driver/core/TestUtils.java b/driver-core/src/test/java/com/datastax/driver/core/TestUtils.java index 64f736aa40b..8b8f43cfc35 100644 --- a/driver-core/src/test/java/com/datastax/driver/core/TestUtils.java +++ b/driver-core/src/test/java/com/datastax/driver/core/TestUtils.java @@ -21,122 +21,110 @@ public abstract class TestUtils { public static final String SIMPLE_KEYSPACE = "ks"; public static BoundStatement setBoundValue(BoundStatement bs, String name, DataType type, Object value) { - if (type.isCollection()) { - switch (type.asCollection().getKind()) { - case LIST: - bs.setList(name, (List)value); - break; - case SET: - bs.setSet(name, (Set)value); - break; - case MAP: - bs.setMap(name, (Map)value); - break; - } - - } else { - switch (type.asNative()) { - case ASCII: - bs.setString(name, (String)value); - break; - case BIGINT: - 
bs.setLong(name, (Long)value); - break; - case BLOB: - bs.setBytes(name, (ByteBuffer)value); - break; - case BOOLEAN: - bs.setBool(name, (Boolean)value); - break; - case COUNTER: - // Just a no-op, we shouldn't handle counters the same way than other types - break; - case DECIMAL: - bs.setDecimal(name, (BigDecimal)value); - break; - case DOUBLE: - bs.setDouble(name, (Double)value); - break; - case FLOAT: - bs.setFloat(name, (Float)value); - break; - case INET: - bs.setInet(name, (InetAddress)value); - break; - case INT: - bs.setInt(name, (Integer)value); - break; - case TEXT: - bs.setString(name, (String)value); - break; - case TIMESTAMP: - bs.setDate(name, (Date)value); - break; - case UUID: - bs.setUUID(name, (UUID)value); - break; - case VARCHAR: - bs.setString(name, (String)value); - break; - case VARINT: - bs.setVarint(name, (BigInteger)value); - break; - case TIMEUUID: - bs.setUUID(name, (UUID)value); - break; - default: - throw new RuntimeException("Missing handling of " + type); - } + switch (type.getName()) { + case ASCII: + bs.setString(name, (String)value); + break; + case BIGINT: + bs.setLong(name, (Long)value); + break; + case BLOB: + bs.setBytes(name, (ByteBuffer)value); + break; + case BOOLEAN: + bs.setBool(name, (Boolean)value); + break; + case COUNTER: + // Just a no-op, we shouldn't handle counters the same way than other types + break; + case DECIMAL: + bs.setDecimal(name, (BigDecimal)value); + break; + case DOUBLE: + bs.setDouble(name, (Double)value); + break; + case FLOAT: + bs.setFloat(name, (Float)value); + break; + case INET: + bs.setInet(name, (InetAddress)value); + break; + case INT: + bs.setInt(name, (Integer)value); + break; + case TEXT: + bs.setString(name, (String)value); + break; + case TIMESTAMP: + bs.setDate(name, (Date)value); + break; + case UUID: + bs.setUUID(name, (UUID)value); + break; + case VARCHAR: + bs.setString(name, (String)value); + break; + case VARINT: + bs.setVarint(name, (BigInteger)value); + break; + case TIMEUUID: + bs.setUUID(name, (UUID)value); + break; + case LIST: + bs.setList(name, (List)value); + break; + case SET: + bs.setSet(name, (Set)value); + break; + case MAP: + bs.setMap(name, (Map)value); + break; + default: + throw new RuntimeException("Missing handling of " + type); } return bs; } public static Object getValue(CQLRow row, String name, DataType type) { - if (type.isCollection()) { - switch (type.asCollection().getKind()) { - case LIST: - return row.getList(name, classOf(((DataType.Collection.List)type).getElementsType())); - case SET: - return row.getSet(name, classOf(((DataType.Collection.Set)type).getElementsType())); - case MAP: - DataType.Collection.Map mt = (DataType.Collection.Map)type; - return row.getMap(name, classOf(mt.getKeysType()), classOf(mt.getValuesType())); - } - } else { - switch (type.asNative()) { - case ASCII: - return row.getString(name); - case BIGINT: - return row.getLong(name); - case BLOB: - return row.getBytes(name); - case BOOLEAN: - return row.getBool(name); - case COUNTER: - return row.getLong(name); - case DECIMAL: - return row.getDecimal(name); - case DOUBLE: - return row.getDouble(name); - case FLOAT: - return row.getFloat(name); - case INET: - return row.getInet(name); - case INT: - return row.getInt(name); - case TEXT: - return row.getString(name); - case TIMESTAMP: - return row.getDate(name); - case UUID: - return row.getUUID(name); - case VARCHAR: - return row.getString(name); - case VARINT: - return row.getVarint(name); - case TIMEUUID: - return row.getUUID(name); - } + switch 
(type.getName()) { + case ASCII: + return row.getString(name); + case BIGINT: + return row.getLong(name); + case BLOB: + return row.getBytes(name); + case BOOLEAN: + return row.getBool(name); + case COUNTER: + return row.getLong(name); + case DECIMAL: + return row.getDecimal(name); + case DOUBLE: + return row.getDouble(name); + case FLOAT: + return row.getFloat(name); + case INET: + return row.getInet(name); + case INT: + return row.getInt(name); + case TEXT: + return row.getString(name); + case TIMESTAMP: + return row.getDate(name); + case UUID: + return row.getUUID(name); + case VARCHAR: + return row.getString(name); + case VARINT: + return row.getVarint(name); + case TIMEUUID: + return row.getUUID(name); + case LIST: + return row.getList(name, classOf(type.getTypeArguments().get(0))); + case SET: + return row.getSet(name, classOf(type.getTypeArguments().get(0))); + case MAP: + return row.getMap(name, classOf(type.getTypeArguments().get(0)), classOf(type.getTypeArguments().get(1))); } throw new RuntimeException("Missing handling of " + type); } @@ -144,7 +132,7 @@ public static Object getValue(CQLRow row, String name, DataType type) { private static Class classOf(DataType type) { assert !type.isCollection(); - switch (type.asNative()) { + switch (type.getName()) { case ASCII: case TEXT: case VARCHAR: @@ -180,51 +168,45 @@ private static Class classOf(DataType type) { // Always return the "same" value for each type public static Object getFixedValue(final DataType type) { try { - if (type.isCollection()) { - switch (type.asCollection().getKind()) { - case LIST: - return new ArrayList(){{ add(getFixedValue(((DataType.Collection.List)type).getElementsType())); }}; - case SET: - return new HashSet(){{ add(getFixedValue(((DataType.Collection.Set)type).getElementsType())); }}; - case MAP: - final DataType.Collection.Map mt = (DataType.Collection.Map)type; - return new HashMap(){{ put(getFixedValue(mt.getKeysType()), getFixedValue(mt.getValuesType())); }}; - } - } else { - switch (type.asNative()) { - case ASCII: - return "An ascii string"; - case BIGINT: - return 42L; - case BLOB: - return ByteBuffer.wrap(new byte[]{ (byte)4, (byte)12, (byte)1 }); - case BOOLEAN: - return true; - case COUNTER: - throw new UnsupportedOperationException("Cannot 'getSomeValue' for counters"); - case DECIMAL: - return new BigDecimal("3.1415926535897932384626433832795028841971693993751058209749445923078164062862089986280348253421170679"); - case DOUBLE: - return 3.142519; - case FLOAT: - return 3.142519f; - case INET: - return InetAddress.getByAddress(new byte[]{(byte)127, (byte)0, (byte)0, (byte)1}); - case INT: - return 24; - case TEXT: - return "A text string"; - case TIMESTAMP: - return new Date(1352288289L); - case UUID: - return UUID.fromString("087E9967-CCDC-4A9B-9036-05930140A41B"); - case VARCHAR: - return "A varchar string"; - case VARINT: - return new BigInteger("123456789012345678901234567890"); - case TIMEUUID: - return UUID.fromString("FE2B4360-28C6-11E2-81C1-0800200C9A66"); - } + switch (type.getName()) { + case ASCII: + return "An ascii string"; + case BIGINT: + return 42L; + case BLOB: + return ByteBuffer.wrap(new byte[]{ (byte)4, (byte)12, (byte)1 }); + case BOOLEAN: + return true; + case COUNTER: + throw new UnsupportedOperationException("Cannot 'getSomeValue' for counters"); + case DECIMAL: + return new BigDecimal("3.1415926535897932384626433832795028841971693993751058209749445923078164062862089986280348253421170679"); + case DOUBLE: + return 3.142519; + case FLOAT: + return 3.142519f; + case 
INET: + return InetAddress.getByAddress(new byte[]{(byte)127, (byte)0, (byte)0, (byte)1}); + case INT: + return 24; + case TEXT: + return "A text string"; + case TIMESTAMP: + return new Date(1352288289L); + case UUID: + return UUID.fromString("087E9967-CCDC-4A9B-9036-05930140A41B"); + case VARCHAR: + return "A varchar string"; + case VARINT: + return new BigInteger("123456789012345678901234567890"); + case TIMEUUID: + return UUID.fromString("FE2B4360-28C6-11E2-81C1-0800200C9A66"); + case LIST: + return new ArrayList(){{ add(getFixedValue(type.getTypeArguments().get(0))); }}; + case SET: + return new HashSet(){{ add(getFixedValue(type.getTypeArguments().get(0))); }}; + case MAP: + return new HashMap(){{ put(getFixedValue(type.getTypeArguments().get(0)), getFixedValue(type.getTypeArguments().get(1))); }}; } } catch (Exception e) { throw new RuntimeException(e); From f9f90f4d6f6a8dbf5b44f665896f55c6fc0e85c1 Mon Sep 17 00:00:00 2001 From: Sylvain Lebresne Date: Fri, 16 Nov 2012 20:55:20 +0100 Subject: [PATCH 079/719] Cleanups and more javadocs --- .../driver/core/AuthInfoProvider.java | 64 +--------- .../java/com/datastax/driver/core/Host.java | 4 +- .../driver/core/SimpleAuthInfoProvider.java | 65 ++++++++++ .../com/datastax/driver/core/WriteType.java | 2 +- .../core/exceptions/DriverException.java | 8 +- .../core/exceptions/DriverInternalError.java | 4 - .../exceptions/DriverUncheckedException.java | 8 +- .../policies/ConstantReconnectionPolicy.java | 25 ++-- .../ExponentialReconnectionPolicy.java | 47 ++++--- .../core/policies/FallthroughRetryPolicy.java | 51 ++++++-- .../driver/core/policies/Policies.java | 15 +++ .../core/policies/ReconnectionPolicy.java | 3 + .../core/utils/querybuilder/QueryBuilder.java | 116 ++++++++++++++++-- .../core/utils/querybuilder/package-info.java | 6 + 14 files changed, 302 insertions(+), 116 deletions(-) create mode 100644 driver-core/src/main/java/com/datastax/driver/core/SimpleAuthInfoProvider.java create mode 100644 driver-core/src/main/java/com/datastax/driver/core/utils/querybuilder/package-info.java diff --git a/driver-core/src/main/java/com/datastax/driver/core/AuthInfoProvider.java b/driver-core/src/main/java/com/datastax/driver/core/AuthInfoProvider.java index 9cc2bca8165..77d682641f6 100644 --- a/driver-core/src/main/java/com/datastax/driver/core/AuthInfoProvider.java +++ b/driver-core/src/main/java/com/datastax/driver/core/AuthInfoProvider.java @@ -1,7 +1,8 @@ package com.datastax.driver.core; import java.net.InetAddress; -import java.util.*; +import java.util.Collections; +import java.util.Map; /** * Authentication informations provider to connect to Cassandra nodes. @@ -13,7 +14,7 @@ public interface AuthInfoProvider { /** - * A provider that provides no authencation informations. + * A provider that provides no authentication informations. *

      * This is only useful for when no authentication is to be used. */ @@ -35,63 +36,4 @@ public Map getAuthInfos(InetAddress host) { * @return The authentication informations to use. */ public Map getAuthInfos(InetAddress host); - - /** - * A simple {@code AuthInfoProvider} implementation. - *

      - * This provider allows to programmatically define authentication - * information that will then apply to all hosts. - *

      - * Note that it is not safe to add new info to this provider once a - * Cluster instance has been created using this provider. - */ - public static class Simple implements AuthInfoProvider { - - private final Map credentials = new HashMap(); - - /** - * Creates a new, empty, simple authentication info provider. - */ - public Simple() {} - - /** - * Creates a new simple authentication info provider with the - * informations contained in {@code properties}. - * - * @param properties a map of authentication information to use. - */ - public Simple(Map properties) { - this(); - addAll(properties); - } - - public Map getAuthInfos(InetAddress host) { - return credentials; - } - - /** - * Adds a new property to the authentication info returned by this - * provider. - * - * @param property the name of the property to add. - * @param value the value to add for {@code property}. - * @return {@code this} object. - */ - public Simple add(String property, String value) { - credentials.put(property, value); - return this; - } - - /** - * Adds all the key-value pair provided as new authentication - * information returned by this provider. - * - * @param properties a map of authentication information to add. - * @return {@code this} object. - */ - public Simple addAll(Map properties) { - credentials.putAll(properties); - return this; - } - } } diff --git a/driver-core/src/main/java/com/datastax/driver/core/Host.java b/driver-core/src/main/java/com/datastax/driver/core/Host.java index 4f3d2ee98e2..6e0596db133 100644 --- a/driver-core/src/main/java/com/datastax/driver/core/Host.java +++ b/driver-core/src/main/java/com/datastax/driver/core/Host.java @@ -178,7 +178,7 @@ boolean signalConnectionFailure(ConnectionException exception) { } /** - * Interface for listener that are interested in hosts add, up, down and + * Interface for listeners that are interested in hosts add, up, down and * remove events. *

      * Note that particularly for up and down events, it is possible that the @@ -212,7 +212,7 @@ public interface StateListener { public void onDown(Host host); /** - * Called when a node is remove from the cluster. + * Called when a node is removed from the cluster. * * @param host the removed host. */ diff --git a/driver-core/src/main/java/com/datastax/driver/core/SimpleAuthInfoProvider.java b/driver-core/src/main/java/com/datastax/driver/core/SimpleAuthInfoProvider.java new file mode 100644 index 00000000000..c193b244378 --- /dev/null +++ b/driver-core/src/main/java/com/datastax/driver/core/SimpleAuthInfoProvider.java @@ -0,0 +1,65 @@ +package com.datastax.driver.core; + +import java.net.InetAddress; +import java.util.Collections; +import java.util.HashMap; +import java.util.Map; + +/** + * A simple {@code AuthInfoProvider} implementation. + *

      + * This provider allows to programmatically define authentication + * information that will then apply to all hosts. + *

      + * Note that it is not safe to add new info to this provider once a + * Cluster instance has been created using this provider. + */ +public class SimpleAuthInfoProvider implements AuthInfoProvider { + + private final Map credentials = new HashMap(); + + /** + * Creates a new, empty, simple authentication info provider. + */ + public SimpleAuthInfoProvider() {} + + /** + * Creates a new simple authentication info provider with the + * informations contained in {@code properties}. + * + * @param properties a map of authentication information to use. + */ + public SimpleAuthInfoProvider(Map properties) { + this(); + addAll(properties); + } + + public Map getAuthInfos(InetAddress host) { + return credentials; + } + + /** + * Adds a new property to the authentication info returned by this + * provider. + * + * @param property the name of the property to add. + * @param value the value to add for {@code property}. + * @return {@code this} object. + */ + public SimpleAuthInfoProvider add(String property, String value) { + credentials.put(property, value); + return this; + } + + /** + * Adds all the key-value pair provided as new authentication + * information returned by this provider. + * + * @param properties a map of authentication information to add. + * @return {@code this} object. + */ + public SimpleAuthInfoProvider addAll(Map properties) { + credentials.putAll(properties); + return this; + } +} diff --git a/driver-core/src/main/java/com/datastax/driver/core/WriteType.java b/driver-core/src/main/java/com/datastax/driver/core/WriteType.java index cdc49a00474..db662298770 100644 --- a/driver-core/src/main/java/com/datastax/driver/core/WriteType.java +++ b/driver-core/src/main/java/com/datastax/driver/core/WriteType.java @@ -3,7 +3,7 @@ /** * The type of a Cassandra write query. * - * This information is returned by Cassandra when a write timout is raised to + * This information is returned by Cassandra when a write timeout is raised to * indicate what type of write timeouted. This information is useful to decide * which retry policy to adopt. 
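A minimal usage sketch for the extracted SimpleAuthInfoProvider above (the withAuthInfoProvider builder option is an assumption, not shown in this patch):

    AuthInfoProvider auth = new SimpleAuthInfoProvider()
        .add("username", "cassandra")
        .add("password", "cassandra");
    Cluster cluster = Cluster.builder()
        .addContactPoint("127.0.0.1")
        .withAuthInfoProvider(auth) // assumed Cluster.Builder option
        .build();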
*/ diff --git a/driver-core/src/main/java/com/datastax/driver/core/exceptions/DriverException.java b/driver-core/src/main/java/com/datastax/driver/core/exceptions/DriverException.java index 1be49775ea6..3388c28ae06 100644 --- a/driver-core/src/main/java/com/datastax/driver/core/exceptions/DriverException.java +++ b/driver-core/src/main/java/com/datastax/driver/core/exceptions/DriverException.java @@ -5,19 +5,19 @@ */ public class DriverException extends Exception { - public DriverException() { + DriverException() { super(); } - public DriverException(String message) { + DriverException(String message) { super(message); } - public DriverException(Throwable cause) { + DriverException(Throwable cause) { super(cause); } - public DriverException(String message, Throwable cause) { + DriverException(String message, Throwable cause) { super(message, cause); } } diff --git a/driver-core/src/main/java/com/datastax/driver/core/exceptions/DriverInternalError.java b/driver-core/src/main/java/com/datastax/driver/core/exceptions/DriverInternalError.java index 1a3855a8335..77297a490fa 100644 --- a/driver-core/src/main/java/com/datastax/driver/core/exceptions/DriverInternalError.java +++ b/driver-core/src/main/java/com/datastax/driver/core/exceptions/DriverInternalError.java @@ -8,10 +8,6 @@ */ public class DriverInternalError extends RuntimeException { - public DriverInternalError() { - super(); - } - public DriverInternalError(String message) { super(message); } diff --git a/driver-core/src/main/java/com/datastax/driver/core/exceptions/DriverUncheckedException.java b/driver-core/src/main/java/com/datastax/driver/core/exceptions/DriverUncheckedException.java index 293cbf364c7..0effc66afa2 100644 --- a/driver-core/src/main/java/com/datastax/driver/core/exceptions/DriverUncheckedException.java +++ b/driver-core/src/main/java/com/datastax/driver/core/exceptions/DriverUncheckedException.java @@ -5,19 +5,19 @@ */ public class DriverUncheckedException extends RuntimeException { - public DriverUncheckedException() { + DriverUncheckedException() { super(); } - public DriverUncheckedException(String message) { + DriverUncheckedException(String message) { super(message); } - public DriverUncheckedException(Throwable cause) { + DriverUncheckedException(Throwable cause) { super(cause); } - public DriverUncheckedException(String message, Throwable cause) { + DriverUncheckedException(String message, Throwable cause) { super(message, cause); } } diff --git a/driver-core/src/main/java/com/datastax/driver/core/policies/ConstantReconnectionPolicy.java b/driver-core/src/main/java/com/datastax/driver/core/policies/ConstantReconnectionPolicy.java index 0bbdb26d05a..16607aa38b6 100644 --- a/driver-core/src/main/java/com/datastax/driver/core/policies/ConstantReconnectionPolicy.java +++ b/driver-core/src/main/java/com/datastax/driver/core/policies/ConstantReconnectionPolicy.java @@ -20,18 +20,27 @@ public ConstantReconnectionPolicy(long constantDelayMs) { this.delayMs = constantDelayMs; } - public ConstantSchedule newSchedule() { + /** + * The constant delay used by this reconnection policy. + * + * @return the constant delay used by this reconnection policy. + */ + public long getConstantDelayMs() { + return delayMs; + } + + /** + * A new schedule that uses a constant {@code getConstantDelayMs()} delay + * between reconnection attempt. + * + * @return the newly created schedule. 
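For illustration, a sketch of the schedule contract this policy documents (assuming ReconnectionSchedule is the nested interface declared on ReconnectionPolicy):

    ReconnectionPolicy policy = new ConstantReconnectionPolicy(5000); // 5 seconds
    ReconnectionPolicy.ReconnectionSchedule schedule = policy.newSchedule();
    schedule.nextDelayMs(); // 5000
    schedule.nextDelayMs(); // 5000 -- constant, regardless of the attempt count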
+ */ + public ReconnectionSchedule newSchedule() { return new ConstantSchedule(); } - public class ConstantSchedule implements ReconnectionSchedule { + private class ConstantSchedule implements ReconnectionSchedule { - /** - * The delay before the next reconnection. - * - * @return the fixed delay set by the {@code - * ConstantReconnectionPolicy} that created this schedule. - */ public long nextDelayMs() { return delayMs; } diff --git a/driver-core/src/main/java/com/datastax/driver/core/policies/ExponentialReconnectionPolicy.java b/driver-core/src/main/java/com/datastax/driver/core/policies/ExponentialReconnectionPolicy.java index b252ffd454d..170e4b2bffd 100644 --- a/driver-core/src/main/java/com/datastax/driver/core/policies/ExponentialReconnectionPolicy.java +++ b/driver-core/src/main/java/com/datastax/driver/core/policies/ExponentialReconnectionPolicy.java @@ -27,34 +27,47 @@ public ExponentialReconnectionPolicy(long baseDelayMs, long maxDelayMs) { this.maxDelayMs = maxDelayMs; } - public ExponentialSchedule newSchedule() { + /** + * The base delay in milliseconds for this policy (e.g. the delay before + * the first reconnection attempt). + * + * @return the base delay in milliseconds for this policy. + */ + public long getBaseDelayMs() { + return baseDelayMs; + } + + /** + * The maximum delay in milliseconds between reconnection attempts for this policy. + * + * @return the maximum delay in milliseconds between reconnection attempts for this policy. + */ + public long getMaxDelayMs() { + return maxDelayMs; + } + + /** + * A new schedule that used an exponentially growing delay between reconnection attempts. + *

      + * For this schedule, reconnection attempt {@code i} will be tried + * {@code Math.min(2^(i-1) * getBaseDelayMs(), getMaxDelayMs())} milliseconds after the previous one. + * + * @return the newly created schedule. + */ + public ReconnectionSchedule newSchedule() { return new ExponentialSchedule(); } - public class ExponentialSchedule implements ReconnectionSchedule { + private class ExponentialSchedule implements ReconnectionSchedule { private int attempts; - /** - * The delay before the next reconnection. - *

      - * For this schedule, reconnection attempt {@code i} will be tried - * {@code 2^i * baseDelayMs} milliseconds after the previous one - * (unless {@code maxDelayMs} has been reached, in which case all - * following attempts will be done with a delay of {@code maxDelayMs}), - * where {@code baseDelayMs} (and {@code maxDelayMs}) are the - * delays sets by the {@code ExponentialReconnectionPolicy} from - * which this schedule has been created. - * - * @return the delay before the next reconnection. - */ public long nextDelayMs() { // We "overflow" at 64 attempts but I doubt this matter if (attempts >= 64) return maxDelayMs; - long next = baseDelayMs * (1L << attempts++); - return next > maxDelayMs ? maxDelayMs : next; + return Math.min(baseDelayMs * (1L << attempts++), maxDelayMs); } } } diff --git a/driver-core/src/main/java/com/datastax/driver/core/policies/FallthroughRetryPolicy.java b/driver-core/src/main/java/com/datastax/driver/core/policies/FallthroughRetryPolicy.java index 5474de2d678..694632d0b66 100644 --- a/driver-core/src/main/java/com/datastax/driver/core/policies/FallthroughRetryPolicy.java +++ b/driver-core/src/main/java/com/datastax/driver/core/policies/FallthroughRetryPolicy.java @@ -8,21 +8,58 @@ * All of the methods of this retry policy unconditionally return {@link RetryPolicy.RetryDecision#rethrow}. * If this policy is used, retry will have to be implemented in business code. */ -public class FallthroughRetryPolicy { +public class FallthroughRetryPolicy implements RetryPolicy { public static final FallthroughRetryPolicy INSTANCE = new FallthroughRetryPolicy(); private FallthroughRetryPolicy() {} - public RetryPolicy.RetryDecision onReadTimeout(ConsistencyLevel cl, int requiredResponses, int receivedResponses, boolean dataRetrieved, int nbRetry) { - return RetryPolicy.RetryDecision.rethrow(); + /** + * Defines whether to retry and at which consistency level on a read timeout. + * + * @param cl the original consistency level of the read that timeouted. + * @param requiredResponses the number of responses that were required to + * achieve the requested consistency level. + * @param receivedResponses the number of responses that had been received + * by the time the timeout exception was raised. + * @param dataRetrieved whether actual data (by opposition to data checksum) + * was present in the received responses. + * @param nbRetry the number of retry already performed for this operation. + * @return {@code RetryDecision.rethrow()}. + */ + public RetryDecision onReadTimeout(ConsistencyLevel cl, int requiredResponses, int receivedResponses, boolean dataRetrieved, int nbRetry) { + return RetryDecision.rethrow(); } - public RetryPolicy.RetryDecision onWriteTimeout(ConsistencyLevel cl, WriteType writeType, int requiredAcks, int receivedAcks, int nbRetry) { - return RetryPolicy.RetryDecision.rethrow(); + /** + * Defines whether to retry and at which consistency level on a write timeout. + * + * @param cl the original consistency level of the write that timeouted. + * @param writeType the type of the write that timeouted. + * @param requiredAcks the number of acknowledgments that were required to + * achieve the requested consistency level. + * @param receivedAcks the number of acknowledgments that had been received + * by the time the timeout exception was raised. + * @param nbRetry the number of retry already performed for this operation. + * @return {@code RetryDecision.rethrow()}. 
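Stepping back to the exponential schedule changed above, the Math.min cap is easiest to see with concrete numbers; a sketch with baseDelayMs = 1000 and maxDelayMs = 8000 (ReconnectionSchedule again assumed nested on ReconnectionPolicy):

    ReconnectionPolicy.ReconnectionSchedule s =
        new ExponentialReconnectionPolicy(1000, 8000).newSchedule();
    s.nextDelayMs(); // 1000 (2^0 * base)
    s.nextDelayMs(); // 2000 (2^1 * base)
    s.nextDelayMs(); // 4000 (2^2 * base)
    s.nextDelayMs(); // 8000 (2^3 * base reaches the cap)
    s.nextDelayMs(); // 8000 (capped from here on)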
+ */ + public RetryDecision onWriteTimeout(ConsistencyLevel cl, WriteType writeType, int requiredAcks, int receivedAcks, int nbRetry) { + return RetryDecision.rethrow(); } - public RetryPolicy.RetryDecision onUnavailable(ConsistencyLevel cl, int requiredReplica, int aliveReplica, int nbRetry) { - return RetryPolicy.RetryDecision.rethrow(); + /** + * Defines whether to retry and at which consistency level on an + * unavailable exception. + * + * @param cl the original consistency level for the operation. + * @param requiredReplica the number of replica that should have been + * (known) alive for the operation to be attempted. + * @param aliveReplica the number of replica that were know to be alive by + * the coordinator of the operation. + * @param nbRetry the number of retry already performed for this operation. + * @return {@code RetryDecision.rethrow()}. + */ + public RetryDecision onUnavailable(ConsistencyLevel cl, int requiredReplica, int aliveReplica, int nbRetry) { + return RetryDecision.rethrow(); } } diff --git a/driver-core/src/main/java/com/datastax/driver/core/policies/Policies.java b/driver-core/src/main/java/com/datastax/driver/core/policies/Policies.java index 3a67461ed01..026fe84b18a 100644 --- a/driver-core/src/main/java/com/datastax/driver/core/policies/Policies.java +++ b/driver-core/src/main/java/com/datastax/driver/core/policies/Policies.java @@ -58,10 +58,25 @@ public LoadBalancingPolicy getLoadBalancingPolicy() { return loadBalancingPolicy; } + /** + * The reconnection policy in use. + *

      + * The reconnection policy defines how often the driver tries to reconnect to a dead node. + * + * @return the reconnection policy in use. + */ public ReconnectionPolicy getReconnectionPolicy() { return reconnectionPolicy; } + /** + * The retry policy in use. + *

      + * The retry policy defines in which conditions a query should be + * automatically retries by the driver. + * + * @return the retry policy in use. + */ public RetryPolicy getRetryPolicy() { return retryPolicy; } diff --git a/driver-core/src/main/java/com/datastax/driver/core/policies/ReconnectionPolicy.java b/driver-core/src/main/java/com/datastax/driver/core/policies/ReconnectionPolicy.java index 8a3ce8ca2fb..0710fcd1206 100644 --- a/driver-core/src/main/java/com/datastax/driver/core/policies/ReconnectionPolicy.java +++ b/driver-core/src/main/java/com/datastax/driver/core/policies/ReconnectionPolicy.java @@ -19,6 +19,9 @@ */ public interface ReconnectionPolicy { + /** + * Creates a new schedule for reconnection attempts. + */ public ReconnectionSchedule newSchedule(); /** diff --git a/driver-core/src/main/java/com/datastax/driver/core/utils/querybuilder/QueryBuilder.java b/driver-core/src/main/java/com/datastax/driver/core/utils/querybuilder/QueryBuilder.java index dad397a45bd..720fa743788 100644 --- a/driver-core/src/main/java/com/datastax/driver/core/utils/querybuilder/QueryBuilder.java +++ b/driver-core/src/main/java/com/datastax/driver/core/utils/querybuilder/QueryBuilder.java @@ -3,6 +3,21 @@ import com.datastax.driver.core.CQLStatement; import com.datastax.driver.core.TableMetadata; +/** + * Static methods to build a CQL3 query. + *

      + * The queries built by this builder will provide a value for the + * {@link com.datastax.driver.core.Query#getRoutingKey} method only when a + * {@link com.datastax.driver.core.TableMetadata} is provided to the builder. + * It is thus advised to do so if a {@link com.datastax.driver.core.policies.TokenAwarePolicy} + * is in use. + *

      + * The provider builders perform very little validation of the built query. + * There is thus no guarantee that a built query is valid, and it is + * definitively possible to create invalid queries. + *

      + * Note that it could be convenient to use an 'import static' to use the methods of this class. + */ public abstract class QueryBuilder { private QueryBuilder() {} @@ -10,26 +25,59 @@ private QueryBuilder() {} private static final String[] ALL = new String[0]; private static final String[] COUNT_ALL = new String[]{ "count(*)" }; + /** + * Start building a new SELECT query. + * + * @param columns the columns names that should be selected by the query. + * If empty, all columns are selected (it's a 'SELECT * ...'), but you can + * alternatively use {@link #all} to achieve the same effect (select all + * columns). + * @return an in-construction SELECT query (you will need to provide at + * least a FROM clause to complete the query). + */ public static Select.Builder select(String... columns) { return new Select.Builder(columns); } - public static String writeTime(String columnName) { - StringBuilder sb = new StringBuilder(); - sb.append("writetime("); - Utils.appendName(columnName, sb); - sb.append(")"); - return sb.toString(); - } - + /** + * Represents the selection of all columns (for either a SELECT or a DELETE query). + * + * @return an empty array. + */ public static String[] all() { return ALL; } + /** + * Count the returned rows in a SELECT query. + * + * @return an array containing "count(*)" as sole element. + */ public static String[] count() { return COUNT_ALL; } + /** + * Select the write time of the provided column. + * + * @param columnName the name of the column for which to select the write + * time. + * @return {@code "writeTime(" + columnName + ")"}. + */ + public static String writeTime(String columnName) { + StringBuilder sb = new StringBuilder(); + sb.append("writetime("); + Utils.appendName(columnName, sb); + sb.append(")"); + return sb.toString(); + } + + /** + * Select the ttl of the provided column. + * + * @param columnName the name of the column for which to select the ttl. + * @return {@code "ttl(" + columnName + ")"}. + */ public static String ttl(String columnName) { StringBuilder sb = new StringBuilder(); sb.append("ttl("); @@ -38,6 +86,12 @@ public static String ttl(String columnName) { return sb.toString(); } + /** + * Quotes a columnName to make it case sensitive. + * + * @param columnName the column name to quote. + * @return the quoted column name. + */ public static String quote(String columnName) { StringBuilder sb = new StringBuilder(); sb.append("\""); @@ -62,22 +116,62 @@ public static String token(String... columnNames) { return sb.toString(); } + /** + * Start building a new INSERT query. + * + * @param columns the columns names that should be inserted by the query. + * @return an in-construction INSERT query (At least a FROM and a VALUES + * clause needs to be provided to complete the query). + * + * @throws IllegalArgumentException if {@code columns} is empty. + */ public static Insert.Builder insert(String... columns) { return new Insert.Builder(columns); } + /** + * Start building a new UPDATE query. + * + * @param table the name of the table to update. + * @return an in-construction UPDATE query (At least a SET and a WHERE + * clause needs to be provided to complete the query). + */ public static Update.Builder update(String table) { return new Update.Builder(null, table); } + /** + * Start building a new UPDATE query. + * + * @param keyspace the name of the keyspace to use. + * @param table the name of the table to update. 
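A short sketch of the 'import static' style the class javadoc above recommends, using only helpers defined in this patch:

    import static com.datastax.driver.core.utils.querybuilder.QueryBuilder.*;

    writeTime("name");             // -> "writetime(name)"
    ttl("name");                   // -> "ttl(name)"
    quote("Name");                 // -> "\"Name\"" (case sensitive)
    select(count()).from("users"); // SELECT count(*) FROM users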
+ * @return an in-construction UPDATE query (At least a SET and a WHERE + * clause needs to be provided to complete the query). + */ public static Update.Builder update(String keyspace, String table) { return new Update.Builder(keyspace, table); } + /** + * Start building a new UPDATE query. + * + * @param table the name of the table to update. + * @return an in-construction UPDATE query (At least a SET and a WHERE + * clause needs to be provided to complete the query). + */ public static Update.Builder update(TableMetadata table) { return new Update.Builder(table); } + /** + * Start building a new DELETE query. + * + * @param columns the columns names that should be deleted by the query. + * If empty, all columns are deleted, but you can alternatively use {@link #all} + * to achieve the same effect (delete all columns). + * @return an in-construction DELETE query (At least a FROM and a WHERE + * clause needs to be provided to complete the query). + */ public static Delete.Builder delete(String... columns) { return new Delete.Builder(columns); } @@ -96,6 +190,12 @@ public static String mapElt(String columnName, Object key) { return sb.append("]").toString(); } + /** + * Built a new BATCH query on the provided statement. + * + * @param statements the statements to batch. + * @return a new {@code CQLStatement} that batch {@code statements}. + */ public static Batch batch(CQLStatement... statements) { return new Batch(statements); } diff --git a/driver-core/src/main/java/com/datastax/driver/core/utils/querybuilder/package-info.java b/driver-core/src/main/java/com/datastax/driver/core/utils/querybuilder/package-info.java new file mode 100644 index 00000000000..88d4cb05b57 --- /dev/null +++ b/driver-core/src/main/java/com/datastax/driver/core/utils/querybuilder/package-info.java @@ -0,0 +1,6 @@ +/** + * A CQL3 query builder. + *

      + * The main entry for this package is the {@code QueryBuilder} class. + */ +package com.datastax.driver.core.utils.querybuilder; From c8a7668651d5483e5ff7df2c915c72f8302968cd Mon Sep 17 00:00:00 2001 From: Sylvain Lebresne Date: Mon, 19 Nov 2012 10:57:33 +0100 Subject: [PATCH 080/719] Querybuilder javadoc --- .../driver/core/utils/querybuilder/Batch.java | 12 ++++ .../core/utils/querybuilder/Delete.java | 55 ++++++++++++++++- .../core/utils/querybuilder/Insert.java | 53 +++++++++++++++- .../core/utils/querybuilder/QueryBuilder.java | 28 +++++++++ .../core/utils/querybuilder/Select.java | 61 +++++++++++++++++++ .../core/utils/querybuilder/Update.java | 40 +++++++++++- 6 files changed, 240 insertions(+), 9 deletions(-) diff --git a/driver-core/src/main/java/com/datastax/driver/core/utils/querybuilder/Batch.java b/driver-core/src/main/java/com/datastax/driver/core/utils/querybuilder/Batch.java index a0493b3ad00..aaddd231d8e 100644 --- a/driver-core/src/main/java/com/datastax/driver/core/utils/querybuilder/Batch.java +++ b/driver-core/src/main/java/com/datastax/driver/core/utils/querybuilder/Batch.java @@ -4,6 +4,9 @@ import com.datastax.driver.core.CQLStatement; +/** + * A built BATCH statement. + */ public class Batch extends CQLStatement { private final ByteBuffer routingKey; @@ -49,6 +52,15 @@ public String getQueryString() { return builder.toString(); } + /** + * Adds a USING clause to this statement. + * + * @param usings the options to use. + * @return this statement. + * + * @throws IllegalStateException if a USING clause has already been + * provided. + */ public Batch using(Using... usings) { if (this.usings != null) throw new IllegalStateException("A USING clause has already been provided"); diff --git a/driver-core/src/main/java/com/datastax/driver/core/utils/querybuilder/Delete.java b/driver-core/src/main/java/com/datastax/driver/core/utils/querybuilder/Delete.java index 96eb630a4f0..dc79bc6fb84 100644 --- a/driver-core/src/main/java/com/datastax/driver/core/utils/querybuilder/Delete.java +++ b/driver-core/src/main/java/com/datastax/driver/core/utils/querybuilder/Delete.java @@ -2,6 +2,9 @@ import com.datastax.driver.core.TableMetadata; +/** + * A built DELETE statement. + */ public class Delete extends BuiltStatement { Delete(String keyspace, String table, String[] columnNames, Clause[] clauses, Using[] usings) { @@ -47,6 +50,14 @@ public static class Builder { this.columnNames = columnNames; } + /** + * Adds the table to delete from. + * + * @param table the name of the table to delete from. + * @return this builder. + * + * @throws IllegalStateException if a FROM clause has already been provided. + */ public Builder from(String table) { if (table != null && tableMetadata != null) throw new IllegalStateException("A FROM clause has already been provided"); @@ -54,6 +65,15 @@ public Builder from(String table) { return from(null, table); } + /** + * Adds the table to delete from. + * + * @param keyspace the name of the keyspace to delete from. + * @param table the name of the table to delete from. + * @return this builder. + * + * @throws IllegalStateException if a FROM clause has already been provided. + */ public Builder from(String keyspace, String table) { if (table != null && tableMetadata != null) throw new IllegalStateException("A FROM clause has already been provided"); @@ -63,6 +83,14 @@ public Builder from(String keyspace, String table) { return this; } + /** + * Adds the table to delete from. + * + * @param table the table to delete from. + * @return this builder. 
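As a sketch, batching two built statements (values(...) is defined on Insert.Builder later in this patch; the resulting query string wraps the statements in BEGIN BATCH ... APPLY BATCH):

    CQLStatement first  = insert("k", "v").into("t").values("key1", 1);
    CQLStatement second = insert("k", "v").into("t").values("key2", 2);
    Batch b = batch(first, second);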
+ * + * @throws IllegalStateException if a FROM clause has already been provided. + */ public Builder from(TableMetadata table) { if (table != null && tableMetadata != null) throw new IllegalStateException("A FROM clause has already been provided"); @@ -71,6 +99,15 @@ public Builder from(TableMetadata table) { return this; } + /** + * Adds a USING clause to this statement. + * + * @param usings the options to use. + * @return this builderj. + * + * @throws IllegalStateException if a USING clause has already been + * provided. + */ public Builder using(Using... usings) { if (this.usings != null) throw new IllegalStateException("A USING clause has already been provided"); @@ -79,10 +116,22 @@ public Builder using(Using... usings) { return this; } + /** + * Adds a WHERE clause to this statement. + * + * @param clause the clause to add. + * @return the newly built UPDATE statement. + * + * @throws IllegalStateException if WHERE clauses have already been + * provided. + */ public Delete where(Clause... clauses) { - return tableMetadata == null - ? new Delete(keyspace, table, columnNames, clauses, usings) - : new Delete(tableMetadata, columnNames, clauses, usings); + if (tableMetadata != null) + return new Delete(tableMetadata, columnNames, clauses, usings); + else if (table != null) + return new Delete(keyspace, table, columnNames, clauses, usings); + else + throw new IllegalStateException("Missing SET clause"); } } } diff --git a/driver-core/src/main/java/com/datastax/driver/core/utils/querybuilder/Insert.java b/driver-core/src/main/java/com/datastax/driver/core/utils/querybuilder/Insert.java index 84010993335..d131728f8b4 100644 --- a/driver-core/src/main/java/com/datastax/driver/core/utils/querybuilder/Insert.java +++ b/driver-core/src/main/java/com/datastax/driver/core/utils/querybuilder/Insert.java @@ -2,6 +2,9 @@ import com.datastax.driver.core.TableMetadata; +/** + * A built INSERT statement. + */ public class Insert extends BuiltStatement { private boolean usingsProvided; @@ -31,6 +34,15 @@ private void init(String keyspaceName, String tableName, String[] columnNames, O maybeAddRoutingKey(columnNames[i], values[i]); } + /** + * Adds a USING clause to this statement. + * + * @param usings the options to use. + * @return this statement. + * + * @throws IllegalStateException if a USING clause has already been + * provided. + */ public Insert using(Using... usings) { if (usingsProvided) throw new IllegalStateException("A USING clause has already been provided"); @@ -61,6 +73,12 @@ public static class Builder { this.columnNames = columnNames; } + /** + * Sets the table to insert into. + * + * @param table the name of the table to insert into. + * @return a new in-construction INSERT statement that inserts into {@code table}. + */ public Builder into(String table) { if (table != null && tableMetadata != null) throw new IllegalStateException("An INTO clause has already been provided"); @@ -68,6 +86,13 @@ public Builder into(String table) { return into(null, table); } + /** + * Sets the table to insert into. + * + * @param keyspace the name of the keyspace to insert into. + * @param table the name of the table to insert into. + * @return a new in-construction INSERT statement that inserts into {@code keyspace.table}. 
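A sketch of completing an INSERT with the builder above, one value per column in the order given to insert(...):

    Insert ins = insert("k", "c_int")
        .into("ks", "t")
        .values("key1", 24); // INSERT INTO ks.t(k, c_int) VALUES ('key1', 24)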
+ */ public Builder into(String keyspace, String table) { if (table != null && tableMetadata != null) throw new IllegalStateException("An INTO clause has already been provided"); @@ -77,6 +102,12 @@ public Builder into(String keyspace, String table) { return this; } + /** + * Sets the table to insert into. + * + * @param table the name of the table to insert into. + * @return a new in-construction INSERT statement that inserts into {@code table}. + */ public Builder into(TableMetadata table) { if (table != null && tableMetadata != null) throw new IllegalStateException("An INTO clause has already been provided"); @@ -85,14 +116,30 @@ public Builder into(TableMetadata table) { return this; } + /** + * Specify the values to insert for the insert columns. + * + * @param values the values to insert. The {@code i}th value + * corresponds to the {@code i}th column used when constructing this + * {@code Insert.Builder object}. + * @return the newly built UPDATE statement. + * + * @throws IllegalArgumentException if the number of provided values + * doesn't correspond to the number of columns used when constructing + * this {@code Insert.Builder object}. + * @throws IllegalStateException if no INTO clause have been defined. + */ public Insert values(Object... values) { if (values.length != columnNames.length) throw new IllegalArgumentException(String.format("Number of provided values (%d) doesn't match the number of inserted columns (%d)", values.length, columnNames.length)); - return tableMetadata == null - ? new Insert(keyspace, table, columnNames, values) - : new Insert(tableMetadata, columnNames, values); + if (tableMetadata != null) + return new Insert(tableMetadata, columnNames, values); + else if (table != null) + return new Insert(keyspace, table, columnNames, values); + else + throw new IllegalStateException("Missing INTO clause"); } } } diff --git a/driver-core/src/main/java/com/datastax/driver/core/utils/querybuilder/QueryBuilder.java b/driver-core/src/main/java/com/datastax/driver/core/utils/querybuilder/QueryBuilder.java index 720fa743788..baa6a99041e 100644 --- a/driver-core/src/main/java/com/datastax/driver/core/utils/querybuilder/QueryBuilder.java +++ b/driver-core/src/main/java/com/datastax/driver/core/utils/querybuilder/QueryBuilder.java @@ -100,6 +100,12 @@ public static String quote(String columnName) { return sb.toString(); } + /** + * The token of a column name. + * + * @param columnName the column name to take the token of. + * @return {@code "token(" + columnName + ")"}. + */ public static String token(String columnName) { StringBuilder sb = new StringBuilder(); sb.append("token("); @@ -108,6 +114,14 @@ public static String token(String columnName) { return sb.toString(); } + /** + * The token of column names. + *

+ * This variant is most useful when the partition key is composite. + * + * @param columnName the column names to take the token of. + * @return a string representing the token of the provided column names. + */ public static String token(String... columnNames) { StringBuilder sb = new StringBuilder(); sb.append("token("); @@ -176,12 +190,26 @@ public static Delete.Builder delete(String... columns) { return new Delete.Builder(columns); } + /** + * Selects an element of a list by index. + * + * @param columnName the name of the list column. + * @param idx the index to select. + * @return {@code columnName[idx]}. + */ public static String listElt(String columnName, int idx) { StringBuilder sb = new StringBuilder(); Utils.appendName(columnName, sb); return sb.append("[").append(idx).append("]").toString(); } + /** + * Selects an element of a map given a key. + * + * @param columnName the name of the map column. + * @param key the key to select with. + * @return {@code columnName[key]}. + */ public static String mapElt(String columnName, Object key) { StringBuilder sb = new StringBuilder(); Utils.appendName(columnName, sb); diff --git a/driver-core/src/main/java/com/datastax/driver/core/utils/querybuilder/Select.java b/driver-core/src/main/java/com/datastax/driver/core/utils/querybuilder/Select.java index 7a38bf9dc40..8173b9f1036 100644 --- a/driver-core/src/main/java/com/datastax/driver/core/utils/querybuilder/Select.java +++ b/driver-core/src/main/java/com/datastax/driver/core/utils/querybuilder/Select.java @@ -2,6 +2,9 @@ import com.datastax.driver.core.TableMetadata; +/** + * A built SELECT statement. + */ public class Select extends BuiltStatement { private boolean whereProvided; @@ -31,6 +34,15 @@ private void init(String keyspaceName, String tableName, String[] columnNames) { appendName(tableName); } + /** + * Adds a WHERE clause to this statement. + * + * @param clause the clause to add. + * @return this statement. + * + * @throws IllegalStateException if WHERE clauses have already been + * provided. + */ public Select where(Clause clause) { if (whereProvided) throw new IllegalStateException("A WHERE clause has already been provided"); @@ -43,6 +55,15 @@ public Select where(Clause clause) { return this; } + /** + * Adds WHERE clauses to this statement. + * + * @param clauses the clauses to add. + * @return this statement. + * + * @throws IllegalStateException if WHERE clauses have already been + * provided. + */ public Select where(Clause... clauses) { if (whereProvided) throw new IllegalStateException("A WHERE clause has already been provided"); @@ -57,6 +78,15 @@ public Select where(Clause... clauses) { return this; } + /** + * Adds an ORDER BY clause to this statement. + * + * @param orders the orderings to define for this query. + * @return this statement. + * + * @throws IllegalStateException if an ORDER BY clause has already been + * provided. + */ public Select orderBy(Ordering... orders) { if (orderByProvided) throw new IllegalStateException("An ORDER BY clause has already been provided"); @@ -68,6 +98,15 @@ public Select orderBy(Ordering... orders) { return this; } + /** + * Adds a LIMIT clause to this statement. + * + * @param limit the limit to set. + * @return this statement. + * + * @throws IllegalStateException if a LIMIT clause has already been + * provided.
+ */ public Select limit(int limit) { if (limitProvided) throw new IllegalStateException("A LIMIT value has already been provided"); @@ -77,6 +116,9 @@ public Select limit(int limit) { return this; } + /** + * An in-construction SELECT statement. + */ public static class Builder { private final String[] columnNames; @@ -85,14 +127,33 @@ public static class Builder { this.columnNames = columnNames; } + /** + * Adds the table to select from. + * + * @param table the name of the table to select from. + * @return a newly built SELECT statement that selects from {@code table}. + */ public Select from(String table) { return new Select(null, table, columnNames); } + /** + * Adds the table to select from. + * + * @param keyspace the name of the keyspace to select from. + * @param table the name of the table to select from. + * @return a newly built SELECT statement that selects from {@code keyspace.table}. + */ public Select from(String keyspace, String table) { return new Select(keyspace, table, columnNames); } + /** + * Adds the table to select from. + * + * @param table the table to select from. + * @return a newly built SELECT statement that selects from {@code table}. + */ public Select from(TableMetadata table) { return new Select(table, columnNames); } diff --git a/driver-core/src/main/java/com/datastax/driver/core/utils/querybuilder/Update.java b/driver-core/src/main/java/com/datastax/driver/core/utils/querybuilder/Update.java index d3f24f8ebed..e50b0002345 100644 --- a/driver-core/src/main/java/com/datastax/driver/core/utils/querybuilder/Update.java +++ b/driver-core/src/main/java/com/datastax/driver/core/utils/querybuilder/Update.java @@ -2,6 +2,9 @@ import com.datastax.driver.core.TableMetadata; +/** + * A built UPDATE statement. + */ public class Update extends BuiltStatement { Update(String keyspace, String table, Assignment[] assignments, Clause[] clauses, Using[] usings) { @@ -54,6 +57,15 @@ public static class Builder { this.table = null; } + /** + * Adds a USING clause to this statement. + * + * @param usings the options to use. + * @return this builder. + * + * @throws IllegalStateException if a USING clause has already been + * provided. + */ public Builder using(Using... usings) { if (this.usings != null) throw new IllegalStateException("A USING clause has already been provided"); @@ -62,6 +74,15 @@ public Builder using(Using... usings) { return this; } + /** + * Adds the columns modification/assignment to set with this UPDATE + * statement. + * + * @param assignements the assigments to set for this statement. + * @return this builder. + * + * @throws IllegalStateException if a SET clause has already been provided. + */ public Builder set(Assignment... assignments) { if (this.assignments != null) throw new IllegalStateException("A SET clause has already been provided"); @@ -70,10 +91,23 @@ public Builder set(Assignment... assignments) { return this; } + /** + * Adds a WHERE clause to this statement. + * + * @param clause the clause to add. + * @return the newly built UPDATE statement. + * + * @throws IllegalStateException if WHERE clauses have already been + * provided. + */ public Update where(Clause... clauses) { - return table == null - ?
new Update(tableMetadata, assignments, clauses, usings) - : new Update(keyspace, table, assignments, clauses, usings); + + if (tableMetadata != null) + return new Update(tableMetadata, assignments, clauses, usings); + else if (table != null) + return new Update(keyspace, table, assignments, clauses, usings); + else + throw new IllegalStateException("Missing SET clause"); } } } From eb6d88f10cd301121186574991f2c28ac74817c4 Mon Sep 17 00:00:00 2001 From: Sylvain Lebresne Date: Mon, 19 Nov 2012 11:37:26 +0100 Subject: [PATCH 081/719] README updates --- driver-core/README | 56 ++++++++++++++++++++++++++++++++++++---------- 1 file changed, 44 insertions(+), 12 deletions(-) diff --git a/driver-core/README b/driver-core/README index 18aa8280f30..704faa960d3 100644 --- a/driver-core/README +++ b/driver-core/README @@ -4,27 +4,59 @@ Driver Core The core module of the Datastax Java Driver for Apache Cassandra (C*). This module offers a simple (as in, not abstracted) but complete API to work with CQL3. The main goal of this module is to handle all the functionality related -to managing connections to a Cassandra cluster (but leaving potentially higher -level abstraction like object mapping to separate modules). +to managing connections to a Cassandra cluster (but leaving higher level +abstractions like object mapping to separate modules). + + +Features +-------- + +The features provided by this core module include: + - Asynchronous: the driver uses the new CQL binary protocol asynchronous + capabilities. Only a relatively low number of connections per node needs to + be maintained open to achieve good performance. + - Node discovery: the driver automatically discovers and uses all nodes of the + C* cluster, including newly bootstrapped ones. + - Configurable load balancing: the driver allows for custom routing/load + balancing of queries to C* nodes. Out of the box, round robin is provided + with optional data-center awareness (only nodes from the local data-center + are queried and have connections maintained to them) and optional token + awareness (i.e. the ability to prefer a replica for the query as coordinator). + - Transparent fail-over. If C* nodes fail (are not reachable), the driver + automatically and transparently tries other nodes and schedules + reconnections to the dead nodes in the background. + - C* tracing handling. Tracing can be set on a per-query basis and the driver + provides a convenient API to retrieve the trace. + - Convenient schema access. The driver exposes the C* schema in a usable way. + - Configurable retry policy. A retry policy can be set to define a precise + behavior to adopt on query execution exceptions (timeouts, unavailability). + This avoids having to litter client code with retry-related code. + Prerequisite ------------ This driver uses the binary protocol that will be introduced in C* 1.2. -This will thus only work with a version of C* >= 1.2. Since at the time of this -writing C* 1.2 hasn't been released yet, you will have to at least use the -beta2 release. Furthermore, the binary protocol server is not started with the -default configuration file coming with Cassandra 1.2, so in the cassandra.yaml -file, you will have to set at least: +It will thus only work with a version of C* >= 1.2. Since at the time of this +writing C* 1.2 hasn't been released yet, at least the beta2 release needs to be +used (the beta1 is known to *not* work with this driver).
Furthermore, the + binary protocol server is not started with the default configuration file + coming with Cassandra 1.2. In the cassandra.yaml file, you need to set: start_native_transport: true +If you want to run the (currently few) unit tests provided with this driver, +you will also need to have ccm installed (http://github.com/pcmanus/ccm) as the +tests use it. Also note that the first time you run the tests, ccm will +download/compile the source of C* under the hood, which may require some time +(that depends on your internet connection and machine). Installing ---------- -At the time of this writing, the driver has not been released yet, so you will -have to compile it manually. The build system is maven and should work as any -other maven project. +This driver has not been released yet and will need to be compiled manually. +The build system is maven and should work as for any other maven project. + Getting Started --------------- @@ -43,8 +75,8 @@ Please note that when we build the Cluster object, we only provide the address to 2 Cassandra hosts. We could have provided only one host or the 3 of them; this doesn't matter as long as the driver is able to contact one of the hosts provided as "contact points". Even if only one host was provided, the driver -will use this host to discover the other ones and use the whole cluster -automtically. This is also true for new nodes joining the cluster. +would use this host to discover the other ones and use the whole cluster +automatically. This is also true for new nodes joining the cluster. For now, please refer to the JavaDoc for more informations, more documentation will come later. From e0222299e4826e3ff22340516afdba727bd8de14 Mon Sep 17 00:00:00 2001 From: Sylvain Lebresne Date: Mon, 19 Nov 2012 16:48:41 +0100 Subject: [PATCH 082/719] Fix stress example --- driver-core/pom.xml | 4 +-- driver-examples/pom.xml | 3 --- .../com/datastax/driver/stress/Consumer.java | 4 +-- .../datastax/driver/stress/Generators.java | 5 ++-- .../driver/stress/QueryGenerator.java | 26 ++++++++----------- .../com/datastax/driver/stress/Stress.java | 9 +++---- pom.xml | 17 ++++++------ 7 files changed, 30 insertions(+), 38 deletions(-) diff --git a/driver-core/pom.xml b/driver-core/pom.xml index 6307f7a21dc..6276e12e7a7 100644 --- a/driver-core/pom.xml +++ b/driver-core/pom.xml @@ -44,8 +44,8 @@ org.codehaus.jackson - jackson-mapper-asl - 1.9.8 + jackson-core-asl + 1.4.0 diff --git a/driver-examples/pom.xml b/driver-examples/pom.xml index 9acbce78b8c..1889ef7d81e 100644 --- a/driver-examples/pom.xml +++ b/driver-examples/pom.xml @@ -17,9 +17,6 @@ stress - - - diff --git a/driver-examples/stress/src/main/java/com/datastax/driver/stress/Consumer.java b/driver-examples/stress/src/main/java/com/datastax/driver/stress/Consumer.java index 41aeca976e4..03dbd72dcf5 100644 --- a/driver-examples/stress/src/main/java/com/datastax/driver/stress/Consumer.java +++ b/driver-examples/stress/src/main/java/com/datastax/driver/stress/Consumer.java @@ -76,11 +76,11 @@ private static class Result { static final Result END_MARKER = new Result(null, null, null); - public final ResultSet.Future future; + public final ResultSetFuture future; public final TimerContext context; public final Meter requests; - public Result(ResultSet.Future future, TimerContext context, Meter requests) { + public Result(ResultSetFuture future, TimerContext context, Meter requests) { this.future = future; this.context = context; this.requests = requests; diff --git
a/driver-examples/stress/src/main/java/com/datastax/driver/stress/Generators.java b/driver-examples/stress/src/main/java/com/datastax/driver/stress/Generators.java index b743a23e7fe..2cf590a08ad 100644 --- a/driver-examples/stress/src/main/java/com/datastax/driver/stress/Generators.java +++ b/driver-examples/stress/src/main/java/com/datastax/driver/stress/Generators.java @@ -1,7 +1,6 @@ package com.datastax.driver.examples.stress; import com.datastax.driver.core.*; -import com.datastax.driver.core.configuration.*; import com.datastax.driver.core.exceptions.*; public class Generators { @@ -27,7 +26,7 @@ public boolean hasNext() { public QueryGenerator.Request next() { String query = String.format("INSERT INTO stress_cf(k, c, v) VALUES (%d, %d, %d)", i, i, i); ++i; - return new QueryGenerator.Request.SimpleQuery(query, new QueryOptions()); + return new QueryGenerator.Request.SimpleQuery(new SimpleStatement(query)); } public void remove() { @@ -61,7 +60,7 @@ public boolean hasNext() { public QueryGenerator.Request next() { BoundStatement b = stmt.bind(i, i, i); ++i; - return new QueryGenerator.Request.PreparedQuery(b, new QueryOptions()); + return new QueryGenerator.Request.PreparedQuery(b); } public void remove() { diff --git a/driver-examples/stress/src/main/java/com/datastax/driver/stress/QueryGenerator.java b/driver-examples/stress/src/main/java/com/datastax/driver/stress/QueryGenerator.java index 4dbac563a0b..80efd0a79a1 100644 --- a/driver-examples/stress/src/main/java/com/datastax/driver/stress/QueryGenerator.java +++ b/driver-examples/stress/src/main/java/com/datastax/driver/stress/QueryGenerator.java @@ -9,7 +9,7 @@ public abstract class QueryGenerator implements Iterator static final Request DONE_MARKER = new Request() { public ResultSet execute(Session session) throws NoHostAvailableException { return null; } - public ResultSet.Future executeAsync(Session session) throws NoHostAvailableException { return null; }; + public ResultSetFuture executeAsync(Session session) throws NoHostAvailableException { return null; }; }; protected final int iterations; @@ -28,43 +28,39 @@ public interface Request { public ResultSet execute(Session session) throws NoHostAvailableException; - public ResultSet.Future executeAsync(Session session) throws NoHostAvailableException; + public ResultSetFuture executeAsync(Session session) throws NoHostAvailableException; public static class SimpleQuery implements Request { - private final String query; - private final QueryOptions options; + private final Query query; - public SimpleQuery(String query, QueryOptions options) { + public SimpleQuery(Query query) { this.query = query; - this.options = options; } public ResultSet execute(Session session) throws NoHostAvailableException { - return session.execute(query, options); + return session.execute(query); } - public ResultSet.Future executeAsync(Session session) throws NoHostAvailableException { - return session.executeAsync(query, options); + public ResultSetFuture executeAsync(Session session) throws NoHostAvailableException { + return session.executeAsync(query); } } public static class PreparedQuery implements Request { private final BoundStatement query; - private final QueryOptions options; - public PreparedQuery(BoundStatement query, QueryOptions options) { + public PreparedQuery(BoundStatement query) { this.query = query; - this.options = options; } public ResultSet execute(Session session) throws NoHostAvailableException { - return session.executePrepared(query, options); + return 
session.execute(query); } - public ResultSet.Future executeAsync(Session session) throws NoHostAvailableException { - return session.executePreparedAsync(query, options); + public ResultSetFuture executeAsync(Session session) throws NoHostAvailableException { + return session.executeAsync(query); } } } diff --git a/driver-examples/stress/src/main/java/com/datastax/driver/stress/Stress.java b/driver-examples/stress/src/main/java/com/datastax/driver/stress/Stress.java index 56fe0af9308..715120aba6c 100644 --- a/driver-examples/stress/src/main/java/com/datastax/driver/stress/Stress.java +++ b/driver-examples/stress/src/main/java/com/datastax/driver/stress/Stress.java @@ -4,7 +4,6 @@ import java.util.concurrent.*; import com.datastax.driver.core.*; -import com.datastax.driver.core.configuration.*; import com.datastax.driver.core.exceptions.*; import joptsimple.OptionParser; @@ -33,19 +32,19 @@ public static void main(String[] args) throws Exception { register("insert", Generators.SIMPLE_INSERTER); register("insert_prepared", Generators.SIMPLE_PREPARED_INSERTER); - if (args.length < 2) { + if (args.length < 1) { System.err.println("Missing argument, you must at least provide the action to do"); System.exit(1); } - String action = args[1]; + String action = args[0]; if (!generators.containsKey(action)) { System.err.println(String.format("Unknown generator '%s' (known generators: %s)", action, generators.keySet())); System.exit(1); } - String[] opts = new String[args.length - 2]; - System.arraycopy(args, 2, opts, 0, opts.length); + String[] opts = new String[args.length - 1]; + System.arraycopy(args, 1, opts, 0, opts.length); OptionParser parser = new OptionParser(); diff --git a/pom.xml b/pom.xml index f1c135a5b3a..5e8f9a59a0f 100644 --- a/pom.xml +++ b/pom.xml @@ -45,15 +45,16 @@ maven-compiler-plugin - 2.5.1 - - 1.6 - 1.6 - true - true - true - + 2.5.1 + + 1.6 + 1.6 + true + true + true + + From a8ed6929504358b11d48703fa41103451dc7d0a7 Mon Sep 17 00:00:00 2001 From: Sylvain Lebresne Date: Mon, 19 Nov 2012 16:57:54 +0100 Subject: [PATCH 083/719] Remove jdbc repository --- README | 3 ++- driver-jdbc/pom.xml | 18 ------------------ pom.xml | 1 - 3 files changed, 2 insertions(+), 20 deletions(-) delete mode 100644 driver-jdbc/pom.xml diff --git a/README b/README index b67ecbca5ef..30f25d677bc 100644 --- a/README +++ b/README @@ -11,8 +11,9 @@ relatively low-level, API on top of which higher level layer can build. The driver contains the following modules: - driver-core: the core layer. - - driver-mapping: ... - driver-examples: example applications using the other modules. Those are only meant for demonstration purposes. +More modules including a simple object mapper will come shortly. + Please refer to the README of each module for more information. 
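The stress-example rework above is easiest to digest as a standalone snippet. What follows is a minimal sketch of the same pattern against the core API as it stands at this point of the series; it is not part of any patch, and the contact point and keyspace name (stress_ks) are placeholders (stress_cf is the table the generators above populate):

    import com.datastax.driver.core.*;

    public class AsyncInsertSketch {
        public static void main(String[] args) throws Exception {
            // Placeholder contact point and keyspace; adjust to your cluster.
            Cluster cluster = new Cluster.Builder().addContactPoints("127.0.0.1").build();
            Session session = cluster.connect("stress_ks");

            // A Query now carries its own options, so execute/executeAsync no
            // longer take the separate QueryOptions argument removed above.
            Query query = new SimpleStatement("INSERT INTO stress_cf(k, c, v) VALUES (0, 0, 0)");

            // executeAsync returns a ResultSetFuture, which is only waited on
            // when the result is actually needed.
            ResultSetFuture future = session.executeAsync(query);
            future.get();
        }
    }
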
diff --git a/driver-jdbc/pom.xml b/driver-jdbc/pom.xml deleted file mode 100644 index be3178d4123..00000000000 --- a/driver-jdbc/pom.xml +++ /dev/null @@ -1,18 +0,0 @@ - - 4.0.0 - - com.datastax.cassandra - cassandra-driver-parent - 0.1.0-SNAPSHOT - - cassandra-driver-jdbc - jar - Cassandra Java Driver - JDBC - http://www.datastax.com - - - - - - diff --git a/pom.xml b/pom.xml index 5e8f9a59a0f..f805f826de2 100644 --- a/pom.xml +++ b/pom.xml @@ -10,7 +10,6 @@ driver-core - driver-jdbc driver-examples From da71e4707f75f39afa12eaa9fbbe3b74bafbd264 Mon Sep 17 00:00:00 2001 From: Sylvain Lebresne Date: Mon, 19 Nov 2012 17:00:14 +0100 Subject: [PATCH 084/719] Fix formatting --- driver-core/README | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/driver-core/README b/driver-core/README index 704faa960d3..fd718b1d76c 100644 --- a/driver-core/README +++ b/driver-core/README @@ -64,7 +64,9 @@ Getting Started Suppose you have a Cassandra cluster running on 3 nodes whose hostnames are: cass1, cass2 and cass3. A simple example using this core driver could be: - Cluster cluster = new Cluster.Builder().addContactPoints("cass1", "cass2").build(); + Cluster cluster = new Cluster.Builder() + .addContactPoints("cass1", "cass2") + .build(); Session session = cluster.connect("db1"); for (CQLRow row : session.execute("SELECT * FROM table1")) From b3ee115ba54661c2459db2c41787137b339242eb Mon Sep 17 00:00:00 2001 From: Sylvain Lebresne Date: Mon, 19 Nov 2012 17:22:44 +0100 Subject: [PATCH 085/719] Fix missing refresh of token map when nodes are removed --- .../main/java/com/datastax/driver/core/ControlConnection.java | 1 + 1 file changed, 1 insertion(+) diff --git a/driver-core/src/main/java/com/datastax/driver/core/ControlConnection.java b/driver-core/src/main/java/com/datastax/driver/core/ControlConnection.java index cbfbe0c92d8..c311f8badd1 100644 --- a/driver-core/src/main/java/com/datastax/driver/core/ControlConnection.java +++ b/driver-core/src/main/java/com/datastax/driver/core/ControlConnection.java @@ -357,5 +357,6 @@ public void onAdd(Host host) { public void onRemove(Host host) { balancingPolicy.onRemove(host); + refreshNodeListAndTokenMap(); } } From 6ed8b018159078da25315a150aa2583b2cf3a507 Mon Sep 17 00:00:00 2001 From: Sylvain Lebresne Date: Mon, 19 Nov 2012 17:24:27 +0100 Subject: [PATCH 086/719] Add license file --- LICENSE | 202 ++++++++++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 202 insertions(+) create mode 100644 LICENSE diff --git a/LICENSE b/LICENSE new file mode 100644 index 00000000000..d6456956733 --- /dev/null +++ b/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. 
+ + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. 
If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
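Before the javadoc fixes in the next patch, it helps to see the query-builder surface those comments describe. Here is a hedged sketch of how the builders from patch 080 chain together; it is not part of any patch, the keyspace/table/column names (ks, t, a, b, k) are placeholders, and it assumes the select() factory, the two-argument from(), and eq() behave as the test code later in this series uses them, statically imported from QueryBuilder:

    import com.datastax.driver.core.CQLStatement;

    import static com.datastax.driver.core.utils.querybuilder.QueryBuilder.*;

    public class QueryBuilderSketch {
        public static void main(String[] args) {
            // SELECT a, b FROM ks.t WHERE k = 42: select() returns a Builder
            // that from() turns into a Select, and where() adds clauses to it.
            CQLStatement select = select("a", "b").from("ks", "t").where(eq("k", 42));

            // DELETE a FROM ks.t WHERE k = 42: for Delete the final call, here
            // where(), is what actually builds the statement, which is why the
            // builder throws if the FROM part is missing.
            CQLStatement delete = delete("a").from("ks", "t").where(eq("k", 42));

            System.out.println(select.getQueryString());
            System.out.println(delete.getQueryString());
        }
    }
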
From f6804eff2e25a80d7aa0849d58f5e230dfe920c1 Mon Sep 17 00:00:00 2001 From: Sylvain Lebresne Date: Mon, 19 Nov 2012 19:37:05 +0100 Subject: [PATCH 087/719] Add javadoc link to the README --- driver-core/README | 4 ++-- .../com/datastax/driver/core/utils/querybuilder/Delete.java | 2 +- .../datastax/driver/core/utils/querybuilder/QueryBuilder.java | 2 +- .../com/datastax/driver/core/utils/querybuilder/Update.java | 4 ++-- 4 files changed, 6 insertions(+), 6 deletions(-) diff --git a/driver-core/README b/driver-core/README index fd718b1d76c..697f8acb6f0 100644 --- a/driver-core/README +++ b/driver-core/README @@ -80,5 +80,5 @@ provided as "contact points". Even if only one host was provided, the driver would use this host to discover the other ones and use the whole cluster automatically. This is also true for new nodes joining the cluster. -For now, please refer to the JavaDoc for more informations, more documentation -will come later. +For now, please refer to the JavaDoc (http://www.datastax.com/drivers/java/apidocs/) +for more information; more documentation will come later. diff --git a/driver-core/src/main/java/com/datastax/driver/core/utils/querybuilder/Delete.java b/driver-core/src/main/java/com/datastax/driver/core/utils/querybuilder/Delete.java index dc79bc6fb84..a4f7e2c0689 100644 --- a/driver-core/src/main/java/com/datastax/driver/core/utils/querybuilder/Delete.java +++ b/driver-core/src/main/java/com/datastax/driver/core/utils/querybuilder/Delete.java @@ -119,7 +119,7 @@ public Builder using(Using... usings) { /** * Adds a WHERE clause to this statement. * - * @param clause the clause to add. + * @param clauses the clauses to add. * @return the newly built DELETE statement. * * @throws IllegalStateException if WHERE clauses have already been diff --git a/driver-core/src/main/java/com/datastax/driver/core/utils/querybuilder/QueryBuilder.java b/driver-core/src/main/java/com/datastax/driver/core/utils/querybuilder/QueryBuilder.java index baa6a99041e..36530015652 100644 --- a/driver-core/src/main/java/com/datastax/driver/core/utils/querybuilder/QueryBuilder.java +++ b/driver-core/src/main/java/com/datastax/driver/core/utils/querybuilder/QueryBuilder.java @@ -119,7 +119,7 @@ public static String token(String columnName) { *

* This variant is most useful when the partition key is composite. * - * @param columnName the column names to take the token of. + * @param columnNames the column names to take the token of. * @return a string representing the token of the provided column names. */ public static String token(String... columnNames) { diff --git a/driver-core/src/main/java/com/datastax/driver/core/utils/querybuilder/Update.java b/driver-core/src/main/java/com/datastax/driver/core/utils/querybuilder/Update.java index e50b0002345..921891bba53 100644 --- a/driver-core/src/main/java/com/datastax/driver/core/utils/querybuilder/Update.java +++ b/driver-core/src/main/java/com/datastax/driver/core/utils/querybuilder/Update.java @@ -78,7 +78,7 @@ public Builder using(Using... usings) { * Adds the columns modification/assignment to set with this UPDATE * statement. * - * @param assignements the assigments to set for this statement. + * @param assignments the assignments to set for this statement. * @return this builder. * * @throws IllegalStateException if a SET clause has already been provided. @@ -94,7 +94,7 @@ public Builder set(Assignment... assignments) { /** * Adds a WHERE clause to this statement. * - * @param clause the clause to add. + * @param clauses the clauses to add. * @return the newly built UPDATE statement. * * @throws IllegalStateException if WHERE clauses have already been From a3227c88e1a269fc47acb86a28b7ed70829d6e41 Mon Sep 17 00:00:00 2001 From: Sylvain Lebresne Date: Wed, 21 Nov 2012 09:15:35 +0100 Subject: [PATCH 088/719] Remove CQL header from class CQLRow and CQLStatement --- .../com/datastax/driver/core/Cluster.java | 2 +- .../datastax/driver/core/ClusterMetadata.java | 28 +++++++++---------- .../datastax/driver/core/ColumnMetadata.java | 6 ++-- .../driver/core/ControlConnection.java | 8 +++--- .../driver/core/KeyspaceMetadata.java | 2 +- .../java/com/datastax/driver/core/Query.java | 6 ++-- .../com/datastax/driver/core/QueryTrace.java | 4 +-- .../com/datastax/driver/core/ResultSet.java | 20 ++++++------- .../driver/core/{CQLRow.java => Row.java} | 14 +++++----- .../com/datastax/driver/core/Session.java | 8 +++--- .../datastax/driver/core/SimpleStatement.java | 4 +-- .../{CQLStatement.java => Statement.java} | 4 +-- .../datastax/driver/core/TableMetadata.java | 4 +-- .../driver/core/utils/querybuilder/Batch.java | 8 +++--- .../utils/querybuilder/BuiltStatement.java | 4 +-- .../core/utils/querybuilder/QueryBuilder.java | 6 ++-- .../driver/core/PreparedStatementTest.java | 10 +++---- .../com/datastax/driver/core/SessionTest.java | 2 +- .../com/datastax/driver/core/TestUtils.java | 2 +- .../utils/QueryBuilderRoutingKeyTest.java | 8 +++--- 20 files changed, 75 insertions(+), 75 deletions(-) rename driver-core/src/main/java/com/datastax/driver/core/{CQLRow.java => Row.java} (98%) rename driver-core/src/main/java/com/datastax/driver/core/{CQLStatement.java => Statement.java} (82%) diff --git a/driver-core/src/main/java/com/datastax/driver/core/Cluster.java b/driver-core/src/main/java/com/datastax/driver/core/Cluster.java index 38d83b21dfc..4c18c99decc 100644 --- a/driver-core/src/main/java/com/datastax/driver/core/Cluster.java +++ b/driver-core/src/main/java/com/datastax/driver/core/Cluster.java @@ -31,7 +31,7 @@ * Cluster cluster = new Cluster.Builder().addContactPoint("192.168.0.1").build(); * Session session = cluster.connect("db1"); * - * for (CQLRow row : session.execute("SELECT * FROM table1")) + * for (Row row : session.execute("SELECT * FROM table1")) * // do something ... *

*
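The hunk above is representative of everything this rename asks of client code: only the type name changes, not the accessors. Spelled out as a complete snippet against the renamed API (not part of the patch; the address, keyspace, table and the k column are the placeholders already used in the examples and tests of this series):

    import com.datastax.driver.core.*;

    public class RowSketch {
        public static void main(String[] args) throws Exception {
            Cluster cluster = new Cluster.Builder().addContactPoint("192.168.0.1").build();
            Session session = cluster.connect("db1");

            // ResultSet now iterates Row instead of CQLRow; getString and the
            // other typed getters are untouched by the rename.
            for (Row row : session.execute("SELECT * FROM table1"))
                System.out.println(row.getString("k"));
        }
    }
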

diff --git a/driver-core/src/main/java/com/datastax/driver/core/ClusterMetadata.java b/driver-core/src/main/java/com/datastax/driver/core/ClusterMetadata.java index 72f9878b44c..f4d9e6d3552 100644 --- a/driver-core/src/main/java/com/datastax/driver/core/ClusterMetadata.java +++ b/driver-core/src/main/java/com/datastax/driver/core/ClusterMetadata.java @@ -32,32 +32,32 @@ public class ClusterMetadata { // Synchronized to make it easy to detect dropped keyspaces synchronized void rebuildSchema(String keyspace, String table, ResultSet ks, ResultSet cfs, ResultSet cols) { - Map> cfDefs = new HashMap>(); - Map>> colsDefs = new HashMap>>(); + Map> cfDefs = new HashMap>(); + Map>> colsDefs = new HashMap>>(); // Gather cf defs - for (CQLRow row : cfs) { + for (Row row : cfs) { String ksName = row.getString(KeyspaceMetadata.KS_NAME); - List l = cfDefs.get(ksName); + List l = cfDefs.get(ksName); if (l == null) { - l = new ArrayList(); + l = new ArrayList(); cfDefs.put(ksName, l); } l.add(row); } // Gather columns per Cf - for (CQLRow row : cols) { + for (Row row : cols) { String ksName = row.getString(KeyspaceMetadata.KS_NAME); String cfName = row.getString(TableMetadata.CF_NAME); - Map> colsByCf = colsDefs.get(ksName); + Map> colsByCf = colsDefs.get(ksName); if (colsByCf == null) { - colsByCf = new HashMap>(); + colsByCf = new HashMap>(); colsDefs.put(ksName, colsByCf); } - List l = colsByCf.get(cfName); + List l = colsByCf.get(cfName); if (l == null) { - l = new ArrayList(); + l = new ArrayList(); colsByCf.put(cfName, l); } l.add(row); @@ -66,7 +66,7 @@ synchronized void rebuildSchema(String keyspace, String table, ResultSet ks, Res if (table == null) { assert ks != null; Set addedKs = new HashSet(); - for (CQLRow ksRow : ks) { + for (Row ksRow : ks) { String ksName = ksRow.getString(KeyspaceMetadata.KS_NAME); KeyspaceMetadata ksm = KeyspaceMetadata.build(ksRow); @@ -103,15 +103,15 @@ synchronized void rebuildSchema(String keyspace, String table, ResultSet ks, Res } } - private static void buildTableMetadata(KeyspaceMetadata ksm, List cfRows, Map> colsDefs) { - for (CQLRow cfRow : cfRows) { + private static void buildTableMetadata(KeyspaceMetadata ksm, List cfRows, Map> colsDefs) { + for (Row cfRow : cfRows) { String cfName = cfRow.getString(TableMetadata.CF_NAME); TableMetadata tm = TableMetadata.build(ksm, cfRow, !colsDefs.isEmpty()); if (colsDefs == null || colsDefs.get(cfName) == null) continue; - for (CQLRow colRow : colsDefs.get(cfName)) { + for (Row colRow : colsDefs.get(cfName)) { ColumnMetadata cm = ColumnMetadata.build(tm, colRow); } } diff --git a/driver-core/src/main/java/com/datastax/driver/core/ColumnMetadata.java b/driver-core/src/main/java/com/datastax/driver/core/ColumnMetadata.java index fed1d36889f..68cca9e8604 100644 --- a/driver-core/src/main/java/com/datastax/driver/core/ColumnMetadata.java +++ b/driver-core/src/main/java/com/datastax/driver/core/ColumnMetadata.java @@ -20,14 +20,14 @@ public class ColumnMetadata { private final DataType type; private final IndexMetadata index; - ColumnMetadata(TableMetadata table, String name, DataType type, CQLRow row) { + ColumnMetadata(TableMetadata table, String name, DataType type, Row row) { this.table = table; this.name = name; this.type = type; this.index = IndexMetadata.build(this, row); } - static ColumnMetadata build(TableMetadata tm, CQLRow row) { + static ColumnMetadata build(TableMetadata tm, Row row) { try { String name = row.getString(COLUMN_NAME); AbstractType t = TypeParser.parse(row.getString(VALIDATOR)); @@ -131,7 +131,7 
@@ public String asCQLQuery() { return String.format("CREATE INDEX %s ON %s.%s (%s)", name, table.getKeyspace().getName(), table.getName(), column.getName()); } - private static IndexMetadata build(ColumnMetadata column, CQLRow row) { + private static IndexMetadata build(ColumnMetadata column, Row row) { if (row == null) return null; diff --git a/driver-core/src/main/java/com/datastax/driver/core/ControlConnection.java b/driver-core/src/main/java/com/datastax/driver/core/ControlConnection.java index c311f8badd1..c3f23189906 100644 --- a/driver-core/src/main/java/com/datastax/driver/core/ControlConnection.java +++ b/driver-core/src/main/java/com/datastax/driver/core/ControlConnection.java @@ -241,7 +241,7 @@ private void refreshNodeListAndTokenMap(Connection connection) throws Connection Map> tokenMap = new HashMap>(); // Update cluster name, DC and rack for the one node we are connected to - CQLRow localRow = localFuture.get().fetchOne(); + Row localRow = localFuture.get().fetchOne(); if (localRow != null) { String clusterName = localRow.getString("cluster_name"); if (clusterName != null) @@ -266,7 +266,7 @@ private void refreshNodeListAndTokenMap(Connection connection) throws Connection List racks = new ArrayList(); List> allTokens = new ArrayList>(); - for (CQLRow row : peersFuture.get()) { + for (Row row : peersFuture.get()) { if (!row.isNull("peer")) { foundHosts.add(row.getInet("peer")); dcs.add(row.getString("data_center")); @@ -310,11 +310,11 @@ static boolean waitForSchemaAgreement(Connection connection, ClusterMetadata met Set versions = new HashSet(); - //CQLRow localRow = localFuture.get().fetchOne(); + //Row localRow = localFuture.get().fetchOne(); //if (localRow != null && !localRow.isNull("schema_version")) // versions.add(row.getUUID("schema_version")); - for (CQLRow row : peersFuture.get()) { + for (Row row : peersFuture.get()) { if (row.isNull("peer") || row.isNull("schema_version")) continue; diff --git a/driver-core/src/main/java/com/datastax/driver/core/KeyspaceMetadata.java b/driver-core/src/main/java/com/datastax/driver/core/KeyspaceMetadata.java index e3ab11d493d..db933eb57b4 100644 --- a/driver-core/src/main/java/com/datastax/driver/core/KeyspaceMetadata.java +++ b/driver-core/src/main/java/com/datastax/driver/core/KeyspaceMetadata.java @@ -24,7 +24,7 @@ private KeyspaceMetadata(String name, boolean durableWrites) { this.durableWrites = durableWrites; } - static KeyspaceMetadata build(CQLRow row) { + static KeyspaceMetadata build(Row row) { String name = row.getString(KS_NAME); boolean durableWrites = row.getBool(DURABLE_WRITES); diff --git a/driver-core/src/main/java/com/datastax/driver/core/Query.java b/driver-core/src/main/java/com/datastax/driver/core/Query.java index c3075b8e32c..90f34181153 100644 --- a/driver-core/src/main/java/com/datastax/driver/core/Query.java +++ b/driver-core/src/main/java/com/datastax/driver/core/Query.java @@ -5,19 +5,19 @@ /** * An executable query. *

- * This represents either a {@link CQLStatement} or a {@link BoundStatement} + * This represents either a {@link Statement} or a {@link BoundStatement} * along with the query options (consistency level, whether to trace the query, ...). */ public abstract class Query { - // An exception to the CQLStatement or BoundStatement rule above. This is + // An exception to the Statement or BoundStatement rule above. This is // used when preparing a statement and for other internal queries. Do not expose publicly. static final Query DEFAULT = new Query() { public ByteBuffer getRoutingKey() { return null; } }; private volatile ConsistencyLevel consistency; private volatile boolean traceQuery; - // We don't want to expose the constructor, because the code rely on this being only subclassed by CQLStatement and BoundStatement + // We don't want to expose the constructor, because the code relies on this being only subclassed by Statement and BoundStatement Query() { this.consistency = ConsistencyLevel.ONE; } diff --git a/driver-core/src/main/java/com/datastax/driver/core/QueryTrace.java b/driver-core/src/main/java/com/datastax/driver/core/QueryTrace.java index c4814f96cea..60835f85a7a 100644 --- a/driver-core/src/main/java/com/datastax/driver/core/QueryTrace.java +++ b/driver-core/src/main/java/com/datastax/driver/core/QueryTrace.java @@ -152,7 +152,7 @@ private void doFetchTrace() { ResultSetFuture sessionsFuture = session.executeQuery(new QueryMessage(String.format(SELECT_SESSIONS_FORMAT, traceId), ConsistencyLevel.DEFAULT_CASSANDRA_CL), Query.DEFAULT); ResultSetFuture eventsFuture = session.executeQuery(new QueryMessage(String.format(SELECT_EVENTS_FORMAT, traceId), ConsistencyLevel.DEFAULT_CASSANDRA_CL), Query.DEFAULT); - CQLRow sessRow = sessionsFuture.get().fetchOne(); + Row sessRow = sessionsFuture.get().fetchOne(); if (sessRow != null) { requestType = sessRow.getString("request"); if (!sessRow.isNull("duration")) @@ -164,7 +164,7 @@ private void doFetchTrace() { } events = new ArrayList(); - for (CQLRow evRow : eventsFuture.get()) { + for (Row evRow : eventsFuture.get()) { events.add(new Event(evRow.getString("activity"), evRow.getUUID("event_id").timestamp(), evRow.getInet("source"), diff --git a/driver-core/src/main/java/com/datastax/driver/core/ResultSet.java b/driver-core/src/main/java/com/datastax/driver/core/ResultSet.java index 5a0d271a6ed..ab10d5a1120 100644 --- a/driver-core/src/main/java/com/datastax/driver/core/ResultSet.java +++ b/driver-core/src/main/java/com/datastax/driver/core/ResultSet.java @@ -15,7 +15,7 @@ * * Note that this class is not thread-safe. */ -public class ResultSet implements Iterable { +public class ResultSet implements Iterable { private static final Logger logger = LoggerFactory.getLogger(ResultSet.class); @@ -96,8 +96,8 @@ public boolean isExhausted() { * @return the next row in this resultSet or null if this ResultSet is * exhausted.
*/ - public List fetchAll() { + public List fetchAll() { if (isExhausted()) return Collections.emptyList(); - List result = new ArrayList(rows.size()); - for (CQLRow row : this) + List result = new ArrayList(rows.size()); + for (Row row : this) result.add(row); return result; } @@ -128,15 +128,15 @@ public List fetchAll() { * @return an iterator that will consume and return the remaining rows of * this ResultSet. */ - public Iterator iterator() { - return new Iterator() { + public Iterator iterator() { + return new Iterator() { public boolean hasNext() { return !rows.isEmpty(); } - public CQLRow next() { - return CQLRow.fromData(metadata, rows.poll()); + public Row next() { + return Row.fromData(metadata, rows.poll()); } public void remove() { diff --git a/driver-core/src/main/java/com/datastax/driver/core/CQLRow.java b/driver-core/src/main/java/com/datastax/driver/core/Row.java similarity index 98% rename from driver-core/src/main/java/com/datastax/driver/core/CQLRow.java rename to driver-core/src/main/java/com/datastax/driver/core/Row.java index 08d09bc5199..adbc2374705 100644 --- a/driver-core/src/main/java/com/datastax/driver/core/CQLRow.java +++ b/driver-core/src/main/java/com/datastax/driver/core/Row.java @@ -13,27 +13,27 @@ /** * A CQL Row returned in a {@link ResultSet}. */ -public class CQLRow { +public class Row { private final ColumnDefinitions metadata; private final List data; - private CQLRow(ColumnDefinitions metadata, List data) { + private Row(ColumnDefinitions metadata, List data) { this.metadata = metadata; this.data = data; } - static CQLRow fromData(ColumnDefinitions metadata, List data) { + static Row fromData(ColumnDefinitions metadata, List data) { if (data == null) return null; - return new CQLRow(metadata, data); + return new Row(metadata, data); } /** - * The columns contained in this CQLRow. + * The columns contained in this Row. * - * @return the columns contained in this CQLRow. + * @return the columns contained in this Row. */ public ColumnDefinitions getColumnDefinitions() { return metadata; @@ -694,7 +694,7 @@ public Map getMap(String name, Class keysClass, Class valuesC @Override public String toString() { StringBuilder sb = new StringBuilder(); - sb.append("CQLRow["); + sb.append("Row["); for (int i = 0; i < metadata.size(); i++) { if (i != 0) sb.append(", "); diff --git a/driver-core/src/main/java/com/datastax/driver/core/Session.java b/driver-core/src/main/java/com/datastax/driver/core/Session.java index 3a91bd21da0..3575896093f 100644 --- a/driver-core/src/main/java/com/datastax/driver/core/Session.java +++ b/driver-core/src/main/java/com/datastax/driver/core/Session.java @@ -69,7 +69,7 @@ public ResultSet execute(String query) throws NoHostAvailableException { * by this method. * * @param query the CQL query to execute (that can be either a {@code - * CQLStatement} or a {@code BoundStatement}). If it is a {@code + * Statement} or a {@code BoundStatement}). If it is a {@code * BoundStatement}, all variables must have been bound (the statement must * be ready). * @return the result of the query. That result will never be null but can @@ -115,7 +115,7 @@ public ResultSetFuture executeAsync(String query) { * method) to make sure the query was successful. * * @param query the CQL query to execute (that can be either a {@code - * CQLStatement} or a {@code BoundStatement}). If it is a {@code + * Statement} or a {@code BoundStatement}). If it is a {@code * BoundStatement}, all variables must have been bound (the statement must * be ready). 
* @return a future on the result of the query. @@ -125,8 +125,8 @@ public ResultSetFuture executeAsync(String query) { */ public ResultSetFuture executeAsync(Query query) { - if (query instanceof CQLStatement) { - return manager.executeQuery(new QueryMessage(((CQLStatement)query).getQueryString(), ConsistencyLevel.toCassandraCL(query.getConsistencyLevel())), query); + if (query instanceof Statement) { + return manager.executeQuery(new QueryMessage(((Statement)query).getQueryString(), ConsistencyLevel.toCassandraCL(query.getConsistencyLevel())), query); } else { assert query instanceof BoundStatement : query; diff --git a/driver-core/src/main/java/com/datastax/driver/core/SimpleStatement.java b/driver-core/src/main/java/com/datastax/driver/core/SimpleStatement.java index d5e0e4055ea..855ccd1f771 100644 --- a/driver-core/src/main/java/com/datastax/driver/core/SimpleStatement.java +++ b/driver-core/src/main/java/com/datastax/driver/core/SimpleStatement.java @@ -3,10 +3,10 @@ import java.nio.ByteBuffer; /** - * A simple {@code CQLStatement} implementation built directly from a query + * A simple {@code Statement} implementation built directly from a query * string. */ -public class SimpleStatement extends CQLStatement { +public class SimpleStatement extends Statement { private final String query; private volatile ByteBuffer routingKey; diff --git a/driver-core/src/main/java/com/datastax/driver/core/CQLStatement.java b/driver-core/src/main/java/com/datastax/driver/core/Statement.java similarity index 82% rename from driver-core/src/main/java/com/datastax/driver/core/CQLStatement.java rename to driver-core/src/main/java/com/datastax/driver/core/Statement.java index 9b21b597e44..ab243727059 100644 --- a/driver-core/src/main/java/com/datastax/driver/core/CQLStatement.java +++ b/driver-core/src/main/java/com/datastax/driver/core/Statement.java @@ -5,9 +5,9 @@ *

* This class represents a query string along with query options. This class * can be extended but {@link SimpleStatement} is provided to build a {@code - * CQLStatement} directly from its query string. + * Statement} directly from its query string. */ -public abstract class CQLStatement extends Query { +public abstract class Statement extends Query { /** * The query string for this statement. diff --git a/driver-core/src/main/java/com/datastax/driver/core/TableMetadata.java b/driver-core/src/main/java/com/datastax/driver/core/TableMetadata.java index bbc3ce6a12b..3af5bcb564e 100644 --- a/driver-core/src/main/java/com/datastax/driver/core/TableMetadata.java +++ b/driver-core/src/main/java/com/datastax/driver/core/TableMetadata.java @@ -48,7 +48,7 @@ private TableMetadata(KeyspaceMetadata keyspace, this.options = options; } - static TableMetadata build(KeyspaceMetadata ksm, CQLRow row, boolean hasColumnMetadata) { + static TableMetadata build(KeyspaceMetadata ksm, Row row, boolean hasColumnMetadata) { try { String name = row.getString(CF_NAME); @@ -406,7 +406,7 @@ public static class Options { private final Map compaction = new HashMap(); private final Map compression = new HashMap(); - Options(CQLRow row, boolean isCompactStorage) { + Options(Row row, boolean isCompactStorage) { this.isCompactStorage = isCompactStorage; this.comment = row.isNull(COMMENT) ? "" : row.getString(COMMENT); this.readRepair = row.getDouble(READ_REPAIR); diff --git a/driver-core/src/main/java/com/datastax/driver/core/utils/querybuilder/Batch.java b/driver-core/src/main/java/com/datastax/driver/core/utils/querybuilder/Batch.java index aaddd231d8e..6b8e3aa3542 100644 --- a/driver-core/src/main/java/com/datastax/driver/core/utils/querybuilder/Batch.java +++ b/driver-core/src/main/java/com/datastax/driver/core/utils/querybuilder/Batch.java @@ -2,20 +2,20 @@ import java.nio.ByteBuffer; -import com.datastax.driver.core.CQLStatement; +import com.datastax.driver.core.Statement; /** * A built BATCH statement. 
*/ -public class Batch extends CQLStatement { +public class Batch extends Statement { private final ByteBuffer routingKey; - private final CQLStatement[] statements; + private final Statement[] statements; private Using[] usings; private StringBuilder builder; - Batch(CQLStatement[] statements) { + Batch(Statement[] statements) { if (statements.length == 0) throw new IllegalArgumentException("Cannot build a BATCH without any statement"); diff --git a/driver-core/src/main/java/com/datastax/driver/core/utils/querybuilder/BuiltStatement.java b/driver-core/src/main/java/com/datastax/driver/core/utils/querybuilder/BuiltStatement.java index 4a967e51e6d..e5bb8bbad0e 100644 --- a/driver-core/src/main/java/com/datastax/driver/core/utils/querybuilder/BuiltStatement.java +++ b/driver-core/src/main/java/com/datastax/driver/core/utils/querybuilder/BuiltStatement.java @@ -4,10 +4,10 @@ import java.util.List; import com.datastax.driver.core.ColumnMetadata; -import com.datastax.driver.core.CQLStatement; +import com.datastax.driver.core.Statement; import com.datastax.driver.core.TableMetadata; -abstract class BuiltStatement extends CQLStatement { +abstract class BuiltStatement extends Statement { private final List partitionKey; private final ByteBuffer[] routingKey; diff --git a/driver-core/src/main/java/com/datastax/driver/core/utils/querybuilder/QueryBuilder.java b/driver-core/src/main/java/com/datastax/driver/core/utils/querybuilder/QueryBuilder.java index 36530015652..78af7ee5ffa 100644 --- a/driver-core/src/main/java/com/datastax/driver/core/utils/querybuilder/QueryBuilder.java +++ b/driver-core/src/main/java/com/datastax/driver/core/utils/querybuilder/QueryBuilder.java @@ -1,6 +1,6 @@ package com.datastax.driver.core.utils.querybuilder; -import com.datastax.driver.core.CQLStatement; +import com.datastax.driver.core.Statement; import com.datastax.driver.core.TableMetadata; /** @@ -222,9 +222,9 @@ public static String mapElt(String columnName, Object key) { * Builds a new BATCH query on the provided statements. * * @param statements the statements to batch. - * @return a new {@code CQLStatement} that batch {@code statements}. + * @return a new {@code Statement} that batches {@code statements}. */ - public static Batch batch(CQLStatement... statements) { + public static Batch batch(Statement... statements) { return new Batch(statements); } } diff --git a/driver-core/src/test/java/com/datastax/driver/core/PreparedStatementTest.java b/driver-core/src/test/java/com/datastax/driver/core/PreparedStatementTest.java index 542a606c847..ae309de208e 100644 --- a/driver-core/src/test/java/com/datastax/driver/core/PreparedStatementTest.java +++ b/driver-core/src/test/java/com/datastax/driver/core/PreparedStatementTest.java @@ -11,7 +11,7 @@ /** * Prepared statement tests. * - * Note: this class also happens to test all the get methods from CQLRow. + * Note: this class also happens to test all the get methods from Row.
*/ public class PreparedStatementTest extends CCMBridge.PerClassSingleNodeCluster { @@ -92,7 +92,7 @@ public void preparedNativeTest() throws NoHostAvailableException { BoundStatement bs = ps.newBoundStatement(); session.execute(setBoundValue(bs, name, type, getFixedValue(type))); - CQLRow row = session.execute(String.format("SELECT %s FROM %s WHERE k='prepared_native'", name, ALL_NATIVE_TABLE)).fetchOne(); + Row row = session.execute(String.format("SELECT %s FROM %s WHERE k='prepared_native'", name, ALL_NATIVE_TABLE)).fetchOne(); assertEquals("For type " + type, getFixedValue(type), getValue(row, name, type)); } } @@ -112,7 +112,7 @@ public void prepareListTest() throws NoHostAvailableException { BoundStatement bs = ps.newBoundStatement(); session.execute(setBoundValue(bs, name, type, value)); - CQLRow row = session.execute(String.format("SELECT %s FROM %s WHERE k='prepared_list'", name, ALL_LIST_TABLE)).fetchOne(); + Row row = session.execute(String.format("SELECT %s FROM %s WHERE k='prepared_list'", name, ALL_LIST_TABLE)).fetchOne(); assertEquals("For type " + type, value, getValue(row, name, type)); } } @@ -132,7 +132,7 @@ public void prepareSetTest() throws NoHostAvailableException { BoundStatement bs = ps.newBoundStatement(); session.execute(setBoundValue(bs, name, type, value)); - CQLRow row = session.execute(String.format("SELECT %s FROM %s WHERE k='prepared_set'", name, ALL_SET_TABLE)).fetchOne(); + Row row = session.execute(String.format("SELECT %s FROM %s WHERE k='prepared_set'", name, ALL_SET_TABLE)).fetchOne(); assertEquals("For type " + type, value, getValue(row, name, type)); } } @@ -157,7 +157,7 @@ public void prepareMapTest() throws NoHostAvailableException { BoundStatement bs = ps.newBoundStatement(); session.execute(setBoundValue(bs, name, type, value)); - CQLRow row = session.execute(String.format("SELECT %s FROM %s WHERE k='prepared_map'", name, ALL_MAP_TABLE)).fetchOne(); + Row row = session.execute(String.format("SELECT %s FROM %s WHERE k='prepared_map'", name, ALL_MAP_TABLE)).fetchOne(); assertEquals("For type " + type, value, getValue(row, name, type)); } } diff --git a/driver-core/src/test/java/com/datastax/driver/core/SessionTest.java b/driver-core/src/test/java/com/datastax/driver/core/SessionTest.java index 4b5d2cb34b2..2b1bcf7b098 100644 --- a/driver-core/src/test/java/com/datastax/driver/core/SessionTest.java +++ b/driver-core/src/test/java/com/datastax/driver/core/SessionTest.java @@ -59,7 +59,7 @@ public void executePreparedTest() throws Exception { private static void checkExecuteResultSet(ResultSet rs, String key) { assertTrue(!rs.isExhausted()); - CQLRow row = rs.fetchOne(); + Row row = rs.fetchOne(); assertTrue(rs.isExhausted()); assertEquals(key, row.getString("k")); assertEquals("foo", row.getString("t")); diff --git a/driver-core/src/test/java/com/datastax/driver/core/TestUtils.java b/driver-core/src/test/java/com/datastax/driver/core/TestUtils.java index 8b8f43cfc35..90bd5d32dcb 100644 --- a/driver-core/src/test/java/com/datastax/driver/core/TestUtils.java +++ b/driver-core/src/test/java/com/datastax/driver/core/TestUtils.java @@ -85,7 +85,7 @@ public static BoundStatement setBoundValue(BoundStatement bs, String name, DataT return bs; } - public static Object getValue(CQLRow row, String name, DataType type) { + public static Object getValue(Row row, String name, DataType type) { switch (type.getName()) { case ASCII: return row.getString(name); diff --git a/driver-core/src/test/java/com/datastax/driver/core/utils/QueryBuilderRoutingKeyTest.java 
b/driver-core/src/test/java/com/datastax/driver/core/utils/QueryBuilderRoutingKeyTest.java index 1329f4316ae..440b9dac95d 100644 --- a/driver-core/src/test/java/com/datastax/driver/core/utils/QueryBuilderRoutingKeyTest.java +++ b/driver-core/src/test/java/com/datastax/driver/core/utils/QueryBuilderRoutingKeyTest.java @@ -26,7 +26,7 @@ protected Collection getTableDefinitions() { @Test public void textRoutingKeyTest() throws Exception { - CQLStatement query; + Statement query; TableMetadata table = cluster.getMetadata().getKeyspace(TestUtils.SIMPLE_KEYSPACE).getTable(TABLE_TEXT); assertNotNull(table); @@ -37,7 +37,7 @@ public void textRoutingKeyTest() throws Exception { query = select().from(table).where(eq("k", txt)); assertEquals(ByteBuffer.wrap(txt.getBytes()), query.getRoutingKey()); - CQLRow row = session.execute(query).fetchOne(); + Row row = session.execute(query).fetchOne(); assertEquals(txt, row.getString("k")); assertEquals(1, row.getInt("a")); assertEquals(2, row.getInt("b")); @@ -46,7 +46,7 @@ public void textRoutingKeyTest() throws Exception { @Test public void intRoutingKeyTest() throws Exception { - CQLStatement query; + Statement query; TableMetadata table = cluster.getMetadata().getKeyspace(TestUtils.SIMPLE_KEYSPACE).getTable(TABLE_INT); assertNotNull(table); @@ -58,7 +58,7 @@ public void intRoutingKeyTest() throws Exception { query = select().from(table).where(eq("k", 42)); assertEquals(bb, query.getRoutingKey()); - CQLRow row = session.execute(query).fetchOne(); + Row row = session.execute(query).fetchOne(); assertEquals(42, row.getInt("k")); assertEquals(1, row.getInt("a")); assertEquals(2, row.getInt("b")); From 91169419af270f1b7a4b7a7468b733fba0338bba Mon Sep 17 00:00:00 2001 From: Sylvain Lebresne Date: Wed, 21 Nov 2012 09:23:24 +0100 Subject: [PATCH 089/719] Add Cluster.builder() static shortcut --- driver-core/README | 2 +- driver-core/TODO | 7 ++++--- .../main/java/com/datastax/driver/core/Cluster.java | 13 ++++++++++++- .../java/com/datastax/driver/core/CCMBridge.java | 2 +- .../driver/core/LoadBalancingPolicyTest.java | 6 +++--- 5 files changed, 21 insertions(+), 9 deletions(-) diff --git a/driver-core/README b/driver-core/README index 697f8acb6f0..25db2da5109 100644 --- a/driver-core/README +++ b/driver-core/README @@ -64,7 +64,7 @@ Getting Started Suppose you have a Cassandra cluster running on 3 nodes whose hostnames are: cass1, cass2 and cass3. A simple example using this core driver could be: - Cluster cluster = new Cluster.Builder() + Cluster cluster = Cluster.builder() .addContactPoints("cass1", "cass2") .build(); Session session = cluster.connect("db1"); diff --git a/driver-core/TODO b/driver-core/TODO index 13af3111e59..89fdcb156c6 100644 --- a/driver-core/TODO +++ b/driver-core/TODO @@ -1,5 +1,6 @@ TODO: -- Add tests -- Add documentation -- Clean up dependencies (don't require the full Cassandra jar) +- More tests +- Add documentation (other than javadoc) +- Metrics +- Clean up dependencies (e.g. don't pull the full Cassandra jar) diff --git a/driver-core/src/main/java/com/datastax/driver/core/Cluster.java b/driver-core/src/main/java/com/datastax/driver/core/Cluster.java index 4c18c99decc..785a8a2766d 100644 --- a/driver-core/src/main/java/com/datastax/driver/core/Cluster.java +++ b/driver-core/src/main/java/com/datastax/driver/core/Cluster.java @@ -28,7 +28,7 @@ * This is the main entry point of the driver. A simple example of access to a * Cassandra cluster would be: *

- *   Cluster cluster = new Cluster.Builder().addContactPoint("192.168.0.1").build();
+ *   Cluster cluster = Cluster.builder().addContactPoint("192.168.0.1").build();
  *   Session session = cluster.connect("db1");
  *
  *   for (Row row : session.execute("SELECT * FROM table1"))
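(Illustrative sketch, not part of the patch: the same getting-started flow as a complete program, assuming a node reachable at 192.168.0.1 and an existing keyspace db1 containing a table table1.)

    import com.datastax.driver.core.*;

    public class GettingStarted {
        public static void main(String[] args) throws Exception {
            // Cluster.builder() is the shortcut introduced below for "new Cluster.Builder()".
            Cluster cluster = Cluster.builder()
                                     .addContactPoint("192.168.0.1")
                                     .build();
            Session session = cluster.connect("db1");

            // ResultSet is iterable, as the javadoc example above relies on.
            for (Row row : session.execute("SELECT * FROM table1"))
                // process each row with its typed getters (getString, getInt, ...)
                System.out.println(row);
        }
    }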
@@ -91,6 +91,17 @@ public static Cluster buildFrom(Initializer initializer) throws NoHostAvailableE
         return new Cluster(contactPoints, initializer.getPort(), initializer.getPolicies(), initializer.getAuthInfoProvider());
     }
 
+    /**
+     * Creates a new {@link Cluster.Builder} instance.
+     * <p>
+ * This is a shortcut for {@code new Cluster.Builder()}. + * + * @return the new cluster builder. + */ + public static Cluster.Builder builder() { + return new Cluster.Builder(); + } + /** * Creates a new session on this cluster. * diff --git a/driver-core/src/test/java/com/datastax/driver/core/CCMBridge.java b/driver-core/src/test/java/com/datastax/driver/core/CCMBridge.java index 05fbc863329..6f7da01e541 100644 --- a/driver-core/src/test/java/com/datastax/driver/core/CCMBridge.java +++ b/driver-core/src/test/java/com/datastax/driver/core/CCMBridge.java @@ -150,7 +150,7 @@ public static void createCluster() { schemaCreated = false; cassandraCluster = CCMBridge.create("test", 1); try { - cluster = new Cluster.Builder().addContactPoints("127.0.0.1").build(); + cluster = Cluster.builder().addContactPoints("127.0.0.1").build(); session = cluster.connect(); } catch (NoHostAvailableException e) { erroredOut = true; diff --git a/driver-core/src/test/java/com/datastax/driver/core/LoadBalancingPolicyTest.java b/driver-core/src/test/java/com/datastax/driver/core/LoadBalancingPolicyTest.java index 753a8b70e94..2cc21605a70 100644 --- a/driver-core/src/test/java/com/datastax/driver/core/LoadBalancingPolicyTest.java +++ b/driver-core/src/test/java/com/datastax/driver/core/LoadBalancingPolicyTest.java @@ -79,7 +79,7 @@ private void query(CCMBridge.CCMCluster c, int n, boolean usePrepared) throws No @Test public void roundRobinTest() throws Throwable { - Cluster.Builder builder = new Cluster.Builder().withLoadBalancingPolicy(new RoundRobinPolicy()); + Cluster.Builder builder = Cluster.builder().withLoadBalancingPolicy(new RoundRobinPolicy()); CCMBridge.CCMCluster c = CCMBridge.buildCluster(2, builder); createSchema(c.session); try { @@ -111,7 +111,7 @@ public void roundRobinTest() throws Throwable { @Test public void DCAwareRoundRobinTest() throws Throwable { - Cluster.Builder builder = new Cluster.Builder().withLoadBalancingPolicy(new DCAwareRoundRobinPolicy("dc2")); + Cluster.Builder builder = Cluster.builder().withLoadBalancingPolicy(new DCAwareRoundRobinPolicy("dc2")); CCMBridge.CCMCluster c = CCMBridge.buildCluster(2, 2, builder); createMultiDCSchema(c.session); try { @@ -143,7 +143,7 @@ public void tokenAwarePreparedTest() throws Throwable { } public void tokenAwareTest(boolean usePrepared) throws Throwable { - Cluster.Builder builder = new Cluster.Builder().withLoadBalancingPolicy(new TokenAwarePolicy(new RoundRobinPolicy())); + Cluster.Builder builder = Cluster.builder().withLoadBalancingPolicy(new TokenAwarePolicy(new RoundRobinPolicy())); CCMBridge.CCMCluster c = CCMBridge.buildCluster(2, builder); createSchema(c.session); try { From 4d04b109e0f86e4e5890ebbb52fd4287efd49071 Mon Sep 17 00:00:00 2001 From: Sylvain Lebresne Date: Wed, 21 Nov 2012 17:53:07 +0100 Subject: [PATCH 090/719] Minor modifications to stress examples --- .../com/datastax/driver/core/Cluster.java | 2 +- .../datastax/driver/stress/Generators.java | 83 ++++++++++++++----- .../driver/stress/QueryGenerator.java | 4 +- .../com/datastax/driver/stress/Stress.java | 49 +++++++---- 4 files changed, 101 insertions(+), 37 deletions(-) diff --git a/driver-core/src/main/java/com/datastax/driver/core/Cluster.java b/driver-core/src/main/java/com/datastax/driver/core/Cluster.java index 785a8a2766d..7f397b5d05d 100644 --- a/driver-core/src/main/java/com/datastax/driver/core/Cluster.java +++ b/driver-core/src/main/java/com/datastax/driver/core/Cluster.java @@ -47,7 +47,7 @@ public class Cluster { static { org.apache.log4j.Logger rootLogger = 
org.apache.log4j.Logger.getRootLogger(); if (!rootLogger.getAllAppenders().hasMoreElements()) { - rootLogger.setLevel(Level.DEBUG); + rootLogger.setLevel(Level.INFO); rootLogger.addAppender(new ConsoleAppender(new PatternLayout("%-5p [%t]: %m%n"))); } } diff --git a/driver-examples/stress/src/main/java/com/datastax/driver/stress/Generators.java b/driver-examples/stress/src/main/java/com/datastax/driver/stress/Generators.java index 2cf590a08ad..25ba98e23c2 100644 --- a/driver-examples/stress/src/main/java/com/datastax/driver/stress/Generators.java +++ b/driver-examples/stress/src/main/java/com/datastax/driver/stress/Generators.java @@ -1,22 +1,54 @@ package com.datastax.driver.examples.stress; +import java.nio.ByteBuffer; +import java.util.Random; + import com.datastax.driver.core.*; import com.datastax.driver.core.exceptions.*; +import joptsimple.OptionSet; + +import org.apache.cassandra.utils.ByteBufferUtil; + public class Generators { - public static final QueryGenerator.Builder SIMPLE_INSERTER = new QueryGenerator.Builder() { - public QueryGenerator create(final int iterations) { + private static ThreadLocal random = new ThreadLocal() { + protected Random initialValue() { + return new Random(); + } + }; + + private static void createCassandraStressTables(Session session, OptionSet options) throws NoHostAvailableException { + try { + session.execute("CREATE KEYSPACE stress WITH replication = { 'class' : 'SimpleStrategy', 'replication_factor' : 1 }"); + } catch (AlreadyExistsException e) { /* It's ok, ignore */ } + + session.execute("USE stress"); + + StringBuilder sb = new StringBuilder(); + sb.append("CREATE TABLE Standard1 (key int PRIMARY KEY"); + for (int i = 0; i < (Integer)options.valueOf("columns-per-row"); ++i) + sb.append(", C").append(i).append(" blob"); + sb.append(")"); + + try { + session.execute(sb.toString()); + } catch (AlreadyExistsException e) { /* It's ok, ignore */ } + } + + private static ByteBuffer makeValue(OptionSet options) { + byte[] value = new byte[(Integer)options.valueOf("value-size")]; + random.get().nextBytes(value); + return ByteBuffer.wrap(value); + } + + public static final QueryGenerator.Builder CASSANDRA_INSERTER = new QueryGenerator.Builder() { + public QueryGenerator create(final int iterations, final OptionSet options) { return new QueryGenerator(iterations) { private int i; public void createSchema(Session session) throws NoHostAvailableException { - try { session.execute("CREATE KEYSPACE stress_ks WITH replication = { 'class' : 'SimpleStrategy', 'replication_factor' : 1 }"); } catch (AlreadyExistsException e) { /* It's ok, ignore */ } - session.execute("USE stress_ks"); - - try { - session.execute("CREATE TABLE stress_cf (k int, c int, v int, PRIMARY KEY (k, c))"); - } catch (AlreadyExistsException e) { /* It's ok, ignore */ } + createCassandraStressTables(session, options); } public boolean hasNext() { @@ -24,9 +56,15 @@ public boolean hasNext() { } public QueryGenerator.Request next() { - String query = String.format("INSERT INTO stress_cf(k, c, v) VALUES (%d, %d, %d)", i, i, i); + StringBuilder sb = new StringBuilder(); + sb.append("UPDATE Standard1 SET "); + for (int i = 0; i < (Integer)options.valueOf("columns-per-row"); ++i) { + if (i > 0) sb.append(", "); + sb.append("C").append(i).append("='").append(ByteBufferUtil.bytesToHex(makeValue(options))).append("'"); + } + sb.append(" WHERE key = ").append(i); ++i; - return new QueryGenerator.Request.SimpleQuery(new SimpleStatement(query)); + return new QueryGenerator.Request.SimpleQuery(new 
SimpleStatement(sb.toString())); } public void remove() { @@ -36,21 +74,23 @@ public void remove() { } }; - public static final QueryGenerator.Builder SIMPLE_PREPARED_INSERTER = new QueryGenerator.Builder() { - public QueryGenerator create(final int iterations) { + public static final QueryGenerator.Builder CASSANDRA_PREPARED_INSERTER = new QueryGenerator.Builder() { + public QueryGenerator create(final int iterations, final OptionSet options) { return new QueryGenerator(iterations) { private int i; private PreparedStatement stmt; public void createSchema(Session session) throws NoHostAvailableException { - try { session.execute("CREATE KEYSPACE stress_ks WITH replication = { 'class' : 'SimpleStrategy', 'replication_factor' : 1 }"); } catch (AlreadyExistsException e) { /* It's ok, ignore */ } - session.execute("USE stress_ks"); - - try { - session.execute("CREATE TABLE stress_cf (k int, c int, v int, PRIMARY KEY (k, c))"); - } catch (AlreadyExistsException e) { /* It's ok, ignore */ } + createCassandraStressTables(session, options); - stmt = session.prepare("INSERT INTO stress_cf(k, c, v) VALUES (?, ?, ?)"); + StringBuilder sb = new StringBuilder(); + sb.append("UPDATE Standard1 SET "); + for (int i = 0; i < (Integer)options.valueOf("columns-per-row"); ++i) { + if (i > 0) sb.append(", "); + sb.append("C").append(i).append("=?"); + } + sb.append(" WHERE key = ?"); + stmt = session.prepare(sb.toString()); } public boolean hasNext() { @@ -58,7 +98,10 @@ public boolean hasNext() { } public QueryGenerator.Request next() { - BoundStatement b = stmt.bind(i, i, i); + BoundStatement b = stmt.newBoundStatement(); + b.setInt("key", i); + for (int i = 0; i < (Integer)options.valueOf("columns-per-row"); ++i) + b.setBytes("c" + i, makeValue(options)); ++i; return new QueryGenerator.Request.PreparedQuery(b); } diff --git a/driver-examples/stress/src/main/java/com/datastax/driver/stress/QueryGenerator.java b/driver-examples/stress/src/main/java/com/datastax/driver/stress/QueryGenerator.java index 80efd0a79a1..78f5e5b2204 100644 --- a/driver-examples/stress/src/main/java/com/datastax/driver/stress/QueryGenerator.java +++ b/driver-examples/stress/src/main/java/com/datastax/driver/stress/QueryGenerator.java @@ -5,6 +5,8 @@ import com.datastax.driver.core.*; import com.datastax.driver.core.exceptions.*; +import joptsimple.OptionSet; + public abstract class QueryGenerator implements Iterator { static final Request DONE_MARKER = new Request() { @@ -21,7 +23,7 @@ protected QueryGenerator(int iterations) { public abstract void createSchema(Session session) throws NoHostAvailableException; public interface Builder { - public QueryGenerator create(int iterations); + public QueryGenerator create(int iterations, OptionSet options); } public interface Request { diff --git a/driver-examples/stress/src/main/java/com/datastax/driver/stress/Stress.java b/driver-examples/stress/src/main/java/com/datastax/driver/stress/Stress.java index 715120aba6c..3294b0b922c 100644 --- a/driver-examples/stress/src/main/java/com/datastax/driver/stress/Stress.java +++ b/driver-examples/stress/src/main/java/com/datastax/driver/stress/Stress.java @@ -27,37 +27,56 @@ public static void register(String name, QueryGenerator.Builder generator) { generators.put(name, generator); } + private static void printHelp(OptionParser parser, Collection generators) throws Exception { + + System.out.println("Usage: stress [
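The generators above read their sizing knobs from a jopt-simple OptionSet. A sketch of how "columns-per-row" and "value-size" might be declared (the defaults here are illustrative assumptions, not taken from the patch):

    import joptsimple.OptionParser;
    import joptsimple.OptionSet;

    public class StressOptions {
        // Declares the two options read via options.valueOf(...) in Generators.
        public static OptionSet parse(String... args) {
            OptionParser parser = new OptionParser();
            // Number of blob columns written per row.
            parser.accepts("columns-per-row").withRequiredArg().ofType(Integer.class).defaultsTo(5);
            // Size, in bytes, of each randomly generated blob value.
            parser.accepts("value-size").withRequiredArg().ofType(Integer.class).defaultsTo(34);
            return parser.parse(args);
        }
    }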

 * Once a BoundStatement has values for all the variables of the {@link PreparedStatement}
 * it has been created from, it can be executed (through {@link Session#execute}).
+ * <p>
+ * The values of a BoundStatement can be set by either index or name. When
+ * setting them by name, names follow the case insensitivity rules explained in
+ * {@link ColumnDefinitions}.
 */
 public class BoundStatement extends Query {
diff --git a/driver-core/src/main/java/com/datastax/driver/core/ColumnDefinitions.java b/driver-core/src/main/java/com/datastax/driver/core/ColumnDefinitions.java
index 1187eae5c7f..21f58d1f28c 100644
--- a/driver-core/src/main/java/com/datastax/driver/core/ColumnDefinitions.java
+++ b/driver-core/src/main/java/com/datastax/driver/core/ColumnDefinitions.java
@@ -9,6 +9,34 @@
 /**
  * Metadata describing the columns returned in a {@link ResultSet} or a
  * {@link PreparedStatement}.
+ * <p>
+ * A {@code ColumnDefinitions} instance is mainly a list of
+ * {@code ColumnDefinitions.Definition}. The definition/metadata for a column
+ * can be accessed in one of two ways:
+ * <ul>
+ *   <li>by index</li>
+ *   <li>by name</li>
+ * </ul>
+ * <p>
+ * When accessed by name, column selection is case insentive. In case multiple
+ * columns only differ by the case of their name, then the column returned with
+ * be the one that has been defined in CQL without forcing case sensitivity
+ * (i.e. it has either been defined without quotes, or it is fully lowercase).
+ * If none of the columns have been thus defined, the first column matching
+ * (with case insensitivity) is returned. You can however always force the case
+ * of a selection by double quoting the name.
+ * <p>
+ * So for instance:
+ * <ul>
+ *   <li>If {@code cd} contains column {@code fOO}, then {@code cd.contains("foo")},
+ *   {@code cd.contains("fOO")} and {@code cd.contains("Foo")} will return {@code true}.</li>
+ *   <li>If {@code cd} contains both of {@code foo} and {@code FOO} then:
+ *     <ul>
+ *       <li>{@code cd.getType("foo")}, {@code cd.getType("fOO")} and {@code cd.getType("FOO")}
+ *       will all match column {@code foo}.</li>
+ *       <li>{@code cd.getType("\"FOO\"")} will match column {@code FOO}</li>
+ *     </ul>
+ *   </li>
+ * </ul>
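Spelled out as code, with {@code cd} a ColumnDefinitions over a table defining both a text column {@code foo} and an int column {@code FOO} (a sketch mirroring the caseTest further down, not part of the patch):

    // Unquoted names are matched case insensitively; the case-insensitively
    // defined column wins, so all of these resolve to the text column "foo".
    assert cd.getType("foo").equals(DataType.text());
    assert cd.getType("FOO").equals(DataType.text());

    // Double quoting forces an exact, case sensitive match, selecting the
    // column that was defined in CQL as "FOO".
    assert cd.getType("\"FOO\"").equals(DataType.cint());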
*/ public class ColumnDefinitions implements Iterable { @@ -22,8 +50,22 @@ public class ColumnDefinitions implements Iterable this.byIdx = defs; this.byName = new HashMap(defs.length); - for (int i = 0; i < defs.length; i++) - this.byName.put(defs[i].name, i); + for (int i = 0; i < defs.length; i++) { + String name = defs[i].name; + String lowerCased = name.toLowerCase(); + Integer previous = this.byName.put(lowerCased, i); + if (previous != null) { + // We have 2 columns that only differ by case. If one has been defined with + // "case insensitivity", set this one, otherwise keep the first found. + if (name.equals(lowerCased)) { + assert !defs[previous].name.equals(lowerCased); + this.byName.put(defs[previous].name, previous); + } else { + this.byName.put(lowerCased, previous); + this.byName.put(name, i); + } + } + } } /** @@ -43,7 +85,7 @@ public int size() { * {@code false} otherwise. */ public boolean contains(String name) { - return byName.containsKey(name); + return findIdx(name) != null; } /** @@ -156,8 +198,16 @@ public String toString() { return sb.toString(); } + Integer findIdx(String name) { + String trimmed = name.trim(); + if (trimmed.length() >= 2 && trimmed.charAt(0) == '"' && trimmed.charAt(trimmed.length() - 1) == '"') + return byName.get(name.substring(1, trimmed.length() - 1)); + + return byName.get(name.toLowerCase()); + } + int getIdx(String name) { - Integer idx = byName.get(name); + Integer idx = findIdx(name); if (idx == null) throw new IllegalArgumentException(name + " is not a column defined in this metadata"); @@ -202,8 +252,7 @@ public static class Definition { private final String name; private final DataType type; - private Definition(String keyspace, String table, String name, DataType type) { - + Definition(String keyspace, String table, String name, DataType type) { this.keyspace = keyspace; this.table = table; this.name = name; diff --git a/driver-core/src/main/java/com/datastax/driver/core/DataType.java b/driver-core/src/main/java/com/datastax/driver/core/DataType.java index 06b9774bd5b..e59817eb31f 100644 --- a/driver-core/src/main/java/com/datastax/driver/core/DataType.java +++ b/driver-core/src/main/java/com/datastax/driver/core/DataType.java @@ -1,6 +1,7 @@ package com.datastax.driver.core; import java.nio.ByteBuffer; +import java.util.Arrays; import java.util.Collections; import java.util.EnumMap; import java.util.List; @@ -321,6 +322,19 @@ public static Set allPrimitiveTypes() { return primitveTypeSet; } + @Override + public final int hashCode() { + return Arrays.hashCode(new Object[]{ name, typeArguments }); + } + + @Override + public final boolean equals(Object o) { + if(!(o instanceof DataType)) + return false; + + return name == ((DataType)o).name && typeArguments.equals(((DataType)o).typeArguments); + } + @Override public String toString() { switch (name) { diff --git a/driver-core/src/main/java/com/datastax/driver/core/Row.java b/driver-core/src/main/java/com/datastax/driver/core/Row.java index adbc2374705..afaac9590d4 100644 --- a/driver-core/src/main/java/com/datastax/driver/core/Row.java +++ b/driver-core/src/main/java/com/datastax/driver/core/Row.java @@ -12,6 +12,10 @@ /** * A CQL Row returned in a {@link ResultSet}. + *

+ * The values of a CQLRow can be retrieve by either index or name. When + * setting them by name, names follow the case insensitivity rules explained in + * {@link ColumnDefinitions}. */ public class Row { diff --git a/driver-core/src/test/java/com/datastax/driver/core/ColumnDefinitionsTest.java b/driver-core/src/test/java/com/datastax/driver/core/ColumnDefinitionsTest.java new file mode 100644 index 00000000000..bf9ff62ada9 --- /dev/null +++ b/driver-core/src/test/java/com/datastax/driver/core/ColumnDefinitionsTest.java @@ -0,0 +1,44 @@ +package com.datastax.driver.core; + +import org.junit.Test; +import static junit.framework.Assert.*; + +public class ColumnDefinitionsTest { + + @Test + public void caseTest() { + + ColumnDefinitions defs; + + defs = new ColumnDefinitions(new ColumnDefinitions.Definition[]{ + new ColumnDefinitions.Definition("ks", "cf", "aColumn", DataType.text()), + new ColumnDefinitions.Definition("ks", "cf", "fOO", DataType.text()), + new ColumnDefinitions.Definition("ks", "cf", "anotherColumn", DataType.text()) + }); + + assertTrue(defs.contains("foo")); + assertTrue(defs.contains("fOO")); + assertTrue(defs.contains("FOO")); + + defs = new ColumnDefinitions(new ColumnDefinitions.Definition[]{ + new ColumnDefinitions.Definition("ks", "cf", "aColumn", DataType.text()), + new ColumnDefinitions.Definition("ks", "cf", "foo", DataType.text()), + new ColumnDefinitions.Definition("ks", "cf", "anotherColumn", DataType.text()), + new ColumnDefinitions.Definition("ks", "cf", "FOO", DataType.cint()), + new ColumnDefinitions.Definition("ks", "cf", "with \" quote", DataType.text()), + new ColumnDefinitions.Definition("ks", "cf", "\"in quote\"", DataType.text()), + new ColumnDefinitions.Definition("ks", "cf", "in quote", DataType.cint()), + }); + + assertTrue(defs.getType("foo").equals(DataType.text())); + assertTrue(defs.getType("Foo").equals(DataType.text())); + assertTrue(defs.getType("FOO").equals(DataType.text())); + assertTrue(defs.getType("\"FOO\"").equals(DataType.cint())); + + assertTrue(defs.contains("with \" quote")); + + assertTrue(defs.getType("in quote").equals(DataType.cint())); + assertTrue(defs.getType("\"in quote\"").equals(DataType.cint())); + assertTrue(defs.getType("\"\"in quote\"\"").equals(DataType.text())); + } +} From 0e7e3e57392f4947db8e9d0b89e333a2b2d38018 Mon Sep 17 00:00:00 2001 From: Dave Brosius Date: Mon, 26 Nov 2012 23:32:28 -0500 Subject: [PATCH 094/719] don't return bad connections (null or dups) to the pool --- .../com/datastax/driver/core/RetryingCallback.java | 13 ++++++++----- 1 file changed, 8 insertions(+), 5 deletions(-) diff --git a/driver-core/src/main/java/com/datastax/driver/core/RetryingCallback.java b/driver-core/src/main/java/com/datastax/driver/core/RetryingCallback.java index f87a4b789d2..8b13cd109f9 100644 --- a/driver-core/src/main/java/com/datastax/driver/core/RetryingCallback.java +++ b/driver-core/src/main/java/com/datastax/driver/core/RetryingCallback.java @@ -79,22 +79,25 @@ private boolean query(Host host) { return true; } catch (ConnectionException e) { // If we have any problem with the connection, move to the next node. - currentPool.returnConnection(connection); + if (connection != null) + currentPool.returnConnection(connection); logError(host.getAddress(), e.getMessage()); return false; } catch (BusyConnectionException e) { // The pool shoudln't have give us a busy connection unless we've maxed up the pool, so move on to the next host. 
- currentPool.returnConnection(connection); + if (connection != null) + currentPool.returnConnection(connection); logError(host.getAddress(), e.getMessage()); return false; } catch (TimeoutException e) { // We timeout, log it but move to the next node. - currentPool.returnConnection(connection); + if (connection != null) + currentPool.returnConnection(connection); logError(host.getAddress(), "Timeout while trying to acquire available connection"); - currentPool.returnConnection(connection); return false; } catch (RuntimeException e) { - currentPool.returnConnection(connection); + if (connection != null) + currentPool.returnConnection(connection); logger.error("Unexpected error while querying " + host.getAddress(), e); logError(host.getAddress(), e.getMessage()); return false; From fe679549eee407ab9bd48d7ebb72140d52281d51 Mon Sep 17 00:00:00 2001 From: Sylvain Lebresne Date: Wed, 28 Nov 2012 10:32:20 +0100 Subject: [PATCH 095/719] Handle having the same column multiple time in ColumnDefinitions --- .../datastax/driver/core/BoundStatement.java | 38 ++++---- .../driver/core/ColumnDefinitions.java | 91 +++++++++++++------ .../datastax/driver/core/SocketOptions.java | 1 - .../core/utils/querybuilder/QueryBuilder.java | 2 +- .../driver/core/ColumnDefinitionsTest.java | 12 +++ 5 files changed, 98 insertions(+), 46 deletions(-) diff --git a/driver-core/src/main/java/com/datastax/driver/core/BoundStatement.java b/driver-core/src/main/java/com/datastax/driver/core/BoundStatement.java index ec8722a6688..a59e3dd7a16 100644 --- a/driver-core/src/main/java/com/datastax/driver/core/BoundStatement.java +++ b/driver-core/src/main/java/com/datastax/driver/core/BoundStatement.java @@ -18,7 +18,11 @@ *

* The values of a BoundStatement can be set by either index or name. When * setting them by name, names follow the case insensitivity rules explained in - * {@link ColumnDefinitions}. + * {@link ColumnDefinitions}. Noteworthily, if multiple bind variables + * correspond to the same column (as would be the case if you prepare + * {@code SELECT * FROM t WHERE x > ? AND x < ?}), you will have to set + * values by indexes (or the {@link #bind} method) as the methods to set by + * name only allows to set the first prepared occurrence of the column. */ public class BoundStatement extends Query { @@ -205,7 +209,7 @@ public BoundStatement setBool(int i, boolean v) { } /** - * Set the value for column {@code name} to the provided boolean. + * Set the value for (the first occurrence of) column {@code name} to the provided boolean. * * @return this BoundStatement. * @@ -231,7 +235,7 @@ public BoundStatement setInt(int i, int v) { } /** - * Set the value for column {@code name} to the provided integer. + * Set the value for (the first occurrence of) column {@code name} to the provided integer. * * @return this BoundStatement. * @@ -257,7 +261,7 @@ public BoundStatement setLong(int i, long v) { } /** - * Set the value for column {@code name} to the provided long. + * Set the value for (the first occurrence of) column {@code name} to the provided long. * * @return this BoundStatement. * @@ -283,7 +287,7 @@ public BoundStatement setDate(int i, Date v) { } /** - * Set the value for column {@code name} to the provided date. + * Set the value for (the first occurrence of) column {@code name} to the provided date. * * @return this BoundStatement. * @@ -309,7 +313,7 @@ public BoundStatement setFloat(int i, float v) { } /** - * Set the value for column {@code name} to the provided float. + * Set the value for (the first occurrence of) column {@code name} to the provided float. * * @return this BoundStatement. * @@ -335,7 +339,7 @@ public BoundStatement setDouble(int i, double v) { } /** - * Set the value for column {@code name} to the provided double. + * Set the value for (the first occurrence of) column {@code name} to the provided double. * * @return this BoundStatement. * @@ -372,7 +376,7 @@ public BoundStatement setString(int i, String v) { } /** - * Set the value for column {@code name} to the provided string. + * Set the value for (the first occurrence of) column {@code name} to the provided string. * * @return this BoundStatement. * @@ -403,7 +407,7 @@ public BoundStatement setBytes(int i, ByteBuffer v) { } /** - * Set the value for column {@code name} to the provided byte buffer. + * Set the value for (the first occurrence of) column {@code name} to the provided byte buffer. * * This method validate that the type of the column set is BLOB. If you * want to insert manually serialized data into columns of another type, @@ -436,7 +440,7 @@ public BoundStatement setBytesUnsafe(int i, ByteBuffer v) { } /** - * Set the value for column {@code name} to the provided byte buffer. + * Set the value for (the first occurrence of) column {@code name} to the provided byte buffer. * * Contrarily to {@link #setBytes}, this method does not check the * type of the column set. If you insert data that is not compatible with @@ -466,7 +470,7 @@ public BoundStatement setVarint(int i, BigInteger v) { } /** - * Set the value for column {@code name} to the provided big integer. + * Set the value for (the first occurrence of) column {@code name} to the provided big integer. * * @return this BoundStatement. 
* @@ -492,7 +496,7 @@ public BoundStatement setDecimal(int i, BigDecimal v) { } /** - * Set the value for column {@code name} to the provided big decimal. + * Set the value for (the first occurrence of) column {@code name} to the provided big decimal. * * @return this BoundStatement. * @@ -527,7 +531,7 @@ public BoundStatement setUUID(int i, UUID v) { } /** - * Set the value for column {@code name} to the provided UUID. + * Set the value for (the first occurrence of) column {@code name} to the provided UUID. * * @return this BoundStatement. * @@ -555,7 +559,7 @@ public BoundStatement setInet(int i, InetAddress v) { } /** - * Set the value for column {@code name} to the provided inet address. + * Set the value for (the first occurrence of) column {@code name} to the provided inet address. * * @return this BoundStatement. * @@ -595,7 +599,7 @@ public BoundStatement setList(int i, List v) { } /** - * Set the value for column {@code name} to the provided list. + * Set the value for (the first occurrence of) column {@code name} to the provided list. * * @return this BoundStatement. * @@ -640,7 +644,7 @@ public BoundStatement setMap(int i, Map v) { } /** - * Set the value for column {@code name} to the provided map. + * Set the value for (the first occurrence of) column {@code name} to the provided map. * * @return this BoundStatement. * @@ -681,7 +685,7 @@ public BoundStatement setSet(int i, Set v) { } /** - * Set the value for column {@code name} to the provided set. + * Set the value for (the first occurrence of) column {@code name} to the provided set. * * @return this BoundStatement. * diff --git a/driver-core/src/main/java/com/datastax/driver/core/ColumnDefinitions.java b/driver-core/src/main/java/com/datastax/driver/core/ColumnDefinitions.java index 21f58d1f28c..b5313912a00 100644 --- a/driver-core/src/main/java/com/datastax/driver/core/ColumnDefinitions.java +++ b/driver-core/src/main/java/com/datastax/driver/core/ColumnDefinitions.java @@ -18,13 +18,13 @@ *

  • by name
  • * *

- * When accessed by name, column selection is case insentive. In case multiple
+ * When accessed by name, column selection is case insensitive. In case multiple
 * columns only differ by the case of their name, then the column returned with
- * be the one that has been defined in CQL without forcing case sensitivity
- * (i.e. it has either been defined without quotes, or it is fully lowercase).
+ * be the first column that has been defined in CQL without forcing case sensitivity
+ * (i.e. it has either been defined without quotes or is fully lowercase).
 * If none of the columns have been thus defined, the first column matching
- * (with case insensitivity) is returned. You can however always force the case
- * of a selection by double quoting the name.
+ * (with case insensitivity) is returned. You can force the case of a selection
+ * by double quoting the name.
 * <p>
 * So for instance:
 * <ul>
      @@ -37,33 +37,31 @@ *
 *       <li>{@code cd.getType("\"FOO\"")} will match column {@code FOO}</li>
 *     </ul>
 *   </li>
 * </ul>
    * + * Note that the rules above means that if a {@code ColumnDefinitions} object + * contains multiple occurences of the exact same name (be it the same column + * multiple time, or columns from different tables with the same name), you + * will have to use selection by index to disambiguate. */ public class ColumnDefinitions implements Iterable { static final ColumnDefinitions EMPTY = new ColumnDefinitions(new Definition[0]); private final Definition[] byIdx; - private final Map byName; + private final Map byName; ColumnDefinitions(Definition[] defs) { this.byIdx = defs; - this.byName = new HashMap(defs.length); + this.byName = new HashMap(defs.length); for (int i = 0; i < defs.length; i++) { - String name = defs[i].name; - String lowerCased = name.toLowerCase(); - Integer previous = this.byName.put(lowerCased, i); + // Be optimistic, 99% of the time, previous will be null. + int[] previous = this.byName.put(defs[i].name.toLowerCase(), new int[]{ i }); if (previous != null) { - // We have 2 columns that only differ by case. If one has been defined with - // "case insensitivity", set this one, otherwise keep the first found. - if (name.equals(lowerCased)) { - assert !defs[previous].name.equals(lowerCased); - this.byName.put(defs[previous].name, previous); - } else { - this.byName.put(lowerCased, previous); - this.byName.put(name, i); - } + int[] indexes = new int[previous.length + 1]; + System.arraycopy(previous, 0, indexes, 0, previous.length); + indexes[indexes.length - 1] = i; + this.byName.put(defs[i].name.toLowerCase(), indexes); } } } @@ -85,7 +83,7 @@ public int size() { * {@code false} otherwise. */ public boolean contains(String name) { - return findIdx(name) != null; + return findIdx(name) > 0; } /** @@ -198,17 +196,39 @@ public String toString() { return sb.toString(); } - Integer findIdx(String name) { - String trimmed = name.trim(); - if (trimmed.length() >= 2 && trimmed.charAt(0) == '"' && trimmed.charAt(trimmed.length() - 1) == '"') - return byName.get(name.substring(1, trimmed.length() - 1)); + int findIdx(String name) { + boolean caseSensitive = false; + if (name.length() >= 2 && name.charAt(0) == '"' && name.charAt(name.length() - 1) == '"') { + name = name.substring(1, name.length() - 1); + caseSensitive = true; + } - return byName.get(name.toLowerCase()); + int[] indexes = byName.get(name.toLowerCase()); + if (indexes == null) { + return -1; + } else if (indexes.length == 1) { + return indexes[0]; + } else { + for (int i = 0; i < indexes.length; i++) { + int idx = indexes[i]; + if (caseSensitive) { + if (name.equals(byIdx[idx].name)) + return idx; + } else { + if (name.toLowerCase().equals(byIdx[idx].name)) + return idx; + } + } + if (caseSensitive) + return -1; + else + return indexes[0]; + } } int getIdx(String name) { - Integer idx = findIdx(name); - if (idx == null) + int idx = findIdx(name); + if (idx < 0) throw new IllegalArgumentException(name + " is not a column defined in this metadata"); return idx; @@ -298,5 +318,22 @@ public String getName() { public DataType getType() { return type; } + + @Override + public final int hashCode() { + return Arrays.hashCode(new Object[]{ keyspace, table, name, type}); + } + + @Override + public final boolean equals(Object o) { + if(!(o instanceof Definition)) + return false; + + Definition other = (Definition)o; + return keyspace.equals(other.keyspace) + && table.equals(other.table) + && name.equals(other.name) + && type.equals(other.type); + } } } diff --git 
a/driver-core/src/main/java/com/datastax/driver/core/SocketOptions.java b/driver-core/src/main/java/com/datastax/driver/core/SocketOptions.java index de98b952377..957a0762496 100644 --- a/driver-core/src/main/java/com/datastax/driver/core/SocketOptions.java +++ b/driver-core/src/main/java/com/datastax/driver/core/SocketOptions.java @@ -72,4 +72,3 @@ public void setSendBufferSize(int sendBufferSize) { this.sendBufferSize = sendBufferSize; } } - diff --git a/driver-core/src/main/java/com/datastax/driver/core/utils/querybuilder/QueryBuilder.java b/driver-core/src/main/java/com/datastax/driver/core/utils/querybuilder/QueryBuilder.java index 78af7ee5ffa..6dd36df2729 100644 --- a/driver-core/src/main/java/com/datastax/driver/core/utils/querybuilder/QueryBuilder.java +++ b/driver-core/src/main/java/com/datastax/driver/core/utils/querybuilder/QueryBuilder.java @@ -18,7 +18,7 @@ *

    * Note that it could be convenient to use an 'import static' to use the methods of this class. */ -public abstract class QueryBuilder { +public final class QueryBuilder { private QueryBuilder() {} diff --git a/driver-core/src/test/java/com/datastax/driver/core/ColumnDefinitionsTest.java b/driver-core/src/test/java/com/datastax/driver/core/ColumnDefinitionsTest.java index bf9ff62ada9..ef87d2891cc 100644 --- a/driver-core/src/test/java/com/datastax/driver/core/ColumnDefinitionsTest.java +++ b/driver-core/src/test/java/com/datastax/driver/core/ColumnDefinitionsTest.java @@ -41,4 +41,16 @@ public void caseTest() { assertTrue(defs.getType("\"in quote\"").equals(DataType.cint())); assertTrue(defs.getType("\"\"in quote\"\"").equals(DataType.text())); } + + @Test + public void multiDefinitionTest() { + + ColumnDefinitions defs = new ColumnDefinitions(new ColumnDefinitions.Definition[]{ + new ColumnDefinitions.Definition("ks", "cf1", "column", DataType.text()), + new ColumnDefinitions.Definition("ks", "cf2", "column", DataType.cint()), + new ColumnDefinitions.Definition("ks", "cf3", "column", DataType.cfloat()) + }); + + assertTrue(defs.getType("column").equals(DataType.text())); + } } From 82dd6a37de1a2aab6e9d023a4e7312b01fdb2120 Mon Sep 17 00:00:00 2001 From: Sylvain Lebresne Date: Wed, 28 Nov 2012 10:50:51 +0100 Subject: [PATCH 096/719] javadoc updates --- .../datastax/driver/core/BoundStatement.java | 90 ++++++++++++++++++- 1 file changed, 87 insertions(+), 3 deletions(-) diff --git a/driver-core/src/main/java/com/datastax/driver/core/BoundStatement.java b/driver-core/src/main/java/com/datastax/driver/core/BoundStatement.java index a59e3dd7a16..1d2113defa7 100644 --- a/driver-core/src/main/java/com/datastax/driver/core/BoundStatement.java +++ b/driver-core/src/main/java/com/datastax/driver/core/BoundStatement.java @@ -58,8 +58,10 @@ public boolean isReady() { /** * Returns whether the {@code i}th variable has been bound to a value. * - * @throws IndexOutOfBoundsException if {@code i < 0 || i >= this.preparedStatement().variables().size()}. + * @param i the index of the variable to check. * @return whether the {@code i}th variable has been bound to a value. + * + * @throws IndexOutOfBoundsException if {@code i < 0 || i >= this.preparedStatement().variables().size()}. */ public boolean isSet(int i) { metadata().checkBounds(i); @@ -67,11 +69,13 @@ public boolean isSet(int i) { } /** - * Returns whether the variable {@code name} has been bound to a value. + * Returns whether the (first occurrence of) variable {@code name} has been bound to a value. + * + * @param name the name of the variable to check. + * @return whether the (first occurrence of) variable {@code name} has been bound to a value. * * @throws IllegalArgumentException if {@code name} is not a prepared * variable, i.e. if {@code !this.preparedStatement().variables().names().contains(name)}. - * @return whether the {@code i}th variable has been bound to a value. */ public boolean isSet(String name) { return isSet(metadata().getIdx(name)); @@ -198,6 +202,8 @@ public ByteBuffer getRoutingKey() { /** * Set the {@code i}th value to the provided boolean. * + * @param i the index of the variable to set. + * @param v the value to set. * @return this BoundStatement. * * @throws IndexOutOfBoundsException if {@code i < 0 || i >= this.preparedStatement().variables().size()}. @@ -211,6 +217,9 @@ public BoundStatement setBool(int i, boolean v) { /** * Set the value for (the first occurrence of) column {@code name} to the provided boolean. 
* + * @param name the name of the variable to set (if multiple variables + * {@code name} are prepared, only the first one is set). + * @param v the value to set. * @return this BoundStatement. * * @throws IllegalArgumentException if {@code name} is not a prepared @@ -224,6 +233,8 @@ public BoundStatement setBool(String name, boolean v) { /** * Set the {@code i}th value to the provided integer. * + * @param i the index of the variable to set. + * @param v the value to set. * @return this BoundStatement. * * @throws IndexOutOfBoundsException if {@code i < 0 || i >= this.preparedStatement().variables().size()}. @@ -237,6 +248,9 @@ public BoundStatement setInt(int i, int v) { /** * Set the value for (the first occurrence of) column {@code name} to the provided integer. * + * @param name the name of the variable to set (if multiple variables + * {@code name} are prepared, only the first one is set). + * @param v the value to set. * @return this BoundStatement. * * @throws IllegalArgumentException if {@code name} is not a prepared @@ -250,6 +264,8 @@ public BoundStatement setInt(String name, int v) { /** * Set the {@code i}th value to the provided long. * + * @param i the index of the variable to set. + * @param v the value to set. * @return this BoundStatement. * * @throws IndexOutOfBoundsException if {@code i < 0 || i >= this.preparedStatement().variables().size()}. @@ -263,6 +279,9 @@ public BoundStatement setLong(int i, long v) { /** * Set the value for (the first occurrence of) column {@code name} to the provided long. * + * @param name the name of the variable to set (if multiple variables + * {@code name} are prepared, only the first one is set). + * @param v the value to set. * @return this BoundStatement. * * @throws IllegalArgumentException if {@code name} is not a prepared @@ -276,6 +295,8 @@ public BoundStatement setLong(String name, long v) { /** * Set the {@code i}th value to the provided date. * + * @param i the index of the variable to set. + * @param v the value to set. * @return this BoundStatement. * * @throws IndexOutOfBoundsException if {@code i < 0 || i >= this.preparedStatement().variables().size()}. @@ -289,6 +310,9 @@ public BoundStatement setDate(int i, Date v) { /** * Set the value for (the first occurrence of) column {@code name} to the provided date. * + * @param name the name of the variable to set (if multiple variables + * {@code name} are prepared, only the first one is set). + * @param v the value to set. * @return this BoundStatement. * * @throws IllegalArgumentException if {@code name} is not a prepared @@ -302,6 +326,8 @@ public BoundStatement setDate(String name, Date v) { /** * Set the {@code i}th value to the provided float. * + * @param i the index of the variable to set. + * @param v the value to set. * @return this BoundStatement. * * @throws IndexOutOfBoundsException if {@code i < 0 || i >= this.preparedStatement().variables().size()}. @@ -315,6 +341,9 @@ public BoundStatement setFloat(int i, float v) { /** * Set the value for (the first occurrence of) column {@code name} to the provided float. * + * @param name the name of the variable to set (if multiple variables + * {@code name} are prepared, only the first one is set). + * @param v the value to set. * @return this BoundStatement. * * @throws IllegalArgumentException if {@code name} is not a prepared @@ -328,6 +357,8 @@ public BoundStatement setFloat(String name, float v) { /** * Set the {@code i}th value to the provided double. * + * @param i the index of the variable to set. 
+ * @param v the value to set. * @return this BoundStatement. * * @throws IndexOutOfBoundsException if {@code i < 0 || i >= this.preparedStatement().variables().size()}. @@ -341,6 +372,9 @@ public BoundStatement setDouble(int i, double v) { /** * Set the value for (the first occurrence of) column {@code name} to the provided double. * + * @param name the name of the variable to set (if multiple variables + * {@code name} are prepared, only the first one is set). + * @param v the value to set. * @return this BoundStatement. * * @throws IllegalArgumentException if {@code name} is not a prepared @@ -354,6 +388,8 @@ public BoundStatement setDouble(String name, double v) { /** * Set the {@code i}th value to the provided string. * + * @param i the index of the variable to set. + * @param v the value to set. * @return this BoundStatement. * * @throws IndexOutOfBoundsException if {@code i < 0 || i >= this.preparedStatement().variables().size()}. @@ -378,6 +414,9 @@ public BoundStatement setString(int i, String v) { /** * Set the value for (the first occurrence of) column {@code name} to the provided string. * + * @param name the name of the variable to set (if multiple variables + * {@code name} are prepared, only the first one is set). + * @param v the value to set. * @return this BoundStatement. * * @throws IllegalArgumentException if {@code name} is not a prepared @@ -396,6 +435,8 @@ public BoundStatement setString(String name, String v) { * want to insert manually serialized data into columns of another type, * use {@link #setBytesUnsafe} instead. * + * @param i the index of the variable to set. + * @param v the value to set. * @return this BoundStatement. * * @throws IndexOutOfBoundsException if {@code i < 0 || i >= this.preparedStatement().variables().size()}. @@ -413,6 +454,9 @@ public BoundStatement setBytes(int i, ByteBuffer v) { * want to insert manually serialized data into columns of another type, * use {@link #setBytesUnsafe} instead. * + * @param name the name of the variable to set (if multiple variables + * {@code name} are prepared, only the first one is set). + * @param v the value to set. * @return this BoundStatement. * * @throws IllegalArgumentException if {@code name} is not a prepared @@ -431,6 +475,8 @@ public BoundStatement setBytes(String name, ByteBuffer v) { * the type of the column, you will get an {@code InvalidQueryException} at * execute time. * + * @param i the index of the variable to set. + * @param v the value to set. * @return this BoundStatement. * * @throws IndexOutOfBoundsException if {@code i < 0 || i >= this.preparedStatement().variables().size()}. @@ -447,6 +493,9 @@ public BoundStatement setBytesUnsafe(int i, ByteBuffer v) { * the type of the column, you will get an {@code InvalidQueryException} at * execute time. * + * @param name the name of the variable to set (if multiple variables + * {@code name} are prepared, only the first one is set). + * @param v the value to set. * @return this BoundStatement. * * @throws IllegalArgumentException if {@code name} is not a prepared @@ -459,6 +508,8 @@ public BoundStatement setBytesUnsafe(String name, ByteBuffer v) { /** * Set the {@code i}th value to the provided big integer. * + * @param i the index of the variable to set. + * @param v the value to set. * @return this BoundStatement. * * @throws IndexOutOfBoundsException if {@code i < 0 || i >= this.preparedStatement().variables().size()}. 
@@ -472,6 +523,9 @@ public BoundStatement setVarint(int i, BigInteger v) { /** * Set the value for (the first occurrence of) column {@code name} to the provided big integer. * + * @param name the name of the variable to set (if multiple variables + * {@code name} are prepared, only the first one is set). + * @param v the value to set. * @return this BoundStatement. * * @throws IllegalArgumentException if {@code name} is not a prepared @@ -485,6 +539,8 @@ public BoundStatement setVarint(String name, BigInteger v) { /** * Set the {@code i}th value to the provided big decimal. * + * @param i the index of the variable to set. + * @param v the value to set. * @return this BoundStatement. * * @throws IndexOutOfBoundsException if {@code i < 0 || i >= this.preparedStatement().variables().size()}. @@ -498,6 +554,9 @@ public BoundStatement setDecimal(int i, BigDecimal v) { /** * Set the value for (the first occurrence of) column {@code name} to the provided big decimal. * + * @param name the name of the variable to set (if multiple variables + * {@code name} are prepared, only the first one is set). + * @param v the value to set. * @return this BoundStatement. * * @throws IllegalArgumentException if {@code name} is not a prepared @@ -511,6 +570,8 @@ public BoundStatement setDecimal(String name, BigDecimal v) { /** * Set the {@code i}th value to the provided UUID. * + * @param i the index of the variable to set. + * @param v the value to set. * @return this BoundStatement. * * @throws IndexOutOfBoundsException if {@code i < 0 || i >= this.preparedStatement().variables().size()}. @@ -533,6 +594,9 @@ public BoundStatement setUUID(int i, UUID v) { /** * Set the value for (the first occurrence of) column {@code name} to the provided UUID. * + * @param name the name of the variable to set (if multiple variables + * {@code name} are prepared, only the first one is set). + * @param v the value to set. * @return this BoundStatement. * * @throws IllegalArgumentException if {@code name} is not a prepared @@ -548,6 +612,8 @@ public BoundStatement setUUID(String name, UUID v) { /** * Set the {@code i}th value to the provided inet address. * + * @param i the index of the variable to set. + * @param v the value to set. * @return this BoundStatement. * * @throws IndexOutOfBoundsException if {@code i < 0 || i >= this.preparedStatement().variables().size()}. @@ -561,6 +627,9 @@ public BoundStatement setInet(int i, InetAddress v) { /** * Set the value for (the first occurrence of) column {@code name} to the provided inet address. * + * @param name the name of the variable to set (if multiple variables + * {@code name} are prepared, only the first one is set). + * @param v the value to set. * @return this BoundStatement. * * @throws IllegalArgumentException if {@code name} is not a prepared @@ -574,6 +643,8 @@ public BoundStatement setInet(String name, InetAddress v) { /** * Set the {@code i}th value to the provided list. * + * @param i the index of the variable to set. + * @param v the value to set. * @return this BoundStatement. * * @throws IndexOutOfBoundsException if {@code i < 0 || i >= this.preparedStatement().variables().size()}. @@ -601,6 +672,9 @@ public BoundStatement setList(int i, List v) { /** * Set the value for (the first occurrence of) column {@code name} to the provided list. * + * @param name the name of the variable to set (if multiple variables + * {@code name} are prepared, only the first one is set). + * @param v the value to set. * @return this BoundStatement. 
* * @throws IllegalArgumentException if {@code name} is not a prepared @@ -616,6 +690,8 @@ public BoundStatement setList(String name, List v) { /** * Set the {@code i}th value to the provided map. * + * @param i the index of the variable to set. + * @param v the value to set. * @return this BoundStatement. * * @throws IndexOutOfBoundsException if {@code i < 0 || i >= this.preparedStatement().variables().size()}. @@ -646,6 +722,9 @@ public BoundStatement setMap(int i, Map v) { /** * Set the value for (the first occurrence of) column {@code name} to the provided map. * + * @param name the name of the variable to set (if multiple variables + * {@code name} are prepared, only the first one is set). + * @param v the value to set. * @return this BoundStatement. * * @throws IllegalArgumentException if {@code name} is not a prepared @@ -661,6 +740,8 @@ public BoundStatement setMap(String name, Map v) { /** * Set the {@code i}th value to the provided set. * + * @param i the index of the variable to set. + * @param v the value to set. * @return this BoundStatement. * * @throws IndexOutOfBoundsException if {@code i < 0 || i >= this.preparedStatement().variables().size()}. @@ -687,6 +768,9 @@ public BoundStatement setSet(int i, Set v) { /** * Set the value for (the first occurrence of) column {@code name} to the provided set. * + * @param name the name of the variable to set (if multiple variables + * {@code name} are prepared, only the first one is set). + * @param v the value to set. * @return this BoundStatement. * * @throws IllegalArgumentException if {@code name} is not a prepared From 1c3757938c21966762fa0112b29b898f7cee262c Mon Sep 17 00:00:00 2001 From: Sylvain Lebresne Date: Wed, 28 Nov 2012 18:07:15 +0100 Subject: [PATCH 097/719] Add utility class to work with time-based UUIDs --- .../com/datastax/driver/core/utils/UUIDs.java | 277 ++++++++++++++++++ .../datastax/driver/core/utils/UUIDsTest.java | 120 ++++++++ 2 files changed, 397 insertions(+) create mode 100644 driver-core/src/main/java/com/datastax/driver/core/utils/UUIDs.java create mode 100644 driver-core/src/test/java/com/datastax/driver/core/utils/UUIDsTest.java diff --git a/driver-core/src/main/java/com/datastax/driver/core/utils/UUIDs.java b/driver-core/src/main/java/com/datastax/driver/core/utils/UUIDs.java new file mode 100644 index 00000000000..7b50e1cd9f9 --- /dev/null +++ b/driver-core/src/main/java/com/datastax/driver/core/utils/UUIDs.java @@ -0,0 +1,277 @@ +package com.datastax.driver.core.utils; + +import java.net.InetAddress; +import java.net.UnknownHostException; +import java.security.MessageDigest; +import java.security.NoSuchAlgorithmException; +import java.util.Calendar; +import java.util.Properties; +import java.util.Random; +import java.util.TimeZone; +import java.util.UUID; +import java.util.concurrent.atomic.AtomicLong; + +/** + * Utility methods to work with UUID and most specifically with time-based ones + * (version 1). + */ +public final class UUIDs { + + // http://www.ietf.org/rfc/rfc4122.txt + private static final long START_EPOCH = makeEpoch(); + private static final long CLOCK_SEQ_AND_NODE = makeClockSeqAndNode(); + + /* + * The min and max possible lsb for a UUID. + * Note that his is not 0 and all 1's because Cassandra TimeUUIDType + * compares the lsb parts as a signed byte array comparison. So the min + * value is 8 times -128 and the max is 8 times +127. 
+     *
+     * Note that we ignore the uuid variant (namely, MIN_CLOCK_SEQ_AND_NODE
+     * has variant 2 as it should, but MAX_CLOCK_SEQ_AND_NODE has variant 0)
+     * because I don't trust all uuid implementations to have correctly set
+     * those (pycassa doesn't always, for instance).
+     */
+    private static final long MIN_CLOCK_SEQ_AND_NODE = 0x8080808080808080L;
+    private static final long MAX_CLOCK_SEQ_AND_NODE = 0x7f7f7f7f7f7f7f7fL;
+
+    private static final AtomicLong lastTimestamp = new AtomicLong(0L);
+
+    private static long makeEpoch() {
+        // UUID v1 timestamps must be in 100-nanosecond intervals since 00:00:00.000 15 Oct 1582.
+        Calendar c = Calendar.getInstance(TimeZone.getTimeZone("GMT-0"));
+        c.set(Calendar.YEAR, 1582);
+        c.set(Calendar.MONTH, Calendar.OCTOBER);
+        c.set(Calendar.DAY_OF_MONTH, 15);
+        c.set(Calendar.HOUR_OF_DAY, 0);
+        c.set(Calendar.MINUTE, 0);
+        c.set(Calendar.SECOND, 0);
+        c.set(Calendar.MILLISECOND, 0);
+        return c.getTimeInMillis();
+    }
+
+    private static long makeNode() {
+
+        // Ideally we'd use the MAC address, but java doesn't expose that. So
+        // instead we gather a number of pieces of local information and hash them.
+        InetAddress local;
+        try {
+            local = InetAddress.getLocalHost();
+        } catch (UnknownHostException e) {
+            try {
+                local = InetAddress.getByAddress(new byte[]{ 127, 0, 0, 1 });
+            } catch (UnknownHostException uhe) {
+                throw new RuntimeException(uhe);
+            }
+        }
+
+        Properties props = System.getProperties();
+
+        byte[] hash = hash(local.toString(),
+                           props.getProperty("java.vendor"),
+                           props.getProperty("java.vendor.url"),
+                           props.getProperty("java.version"),
+                           props.getProperty("os.arch"),
+                           props.getProperty("os.name"),
+                           props.getProperty("os.version"));
+
+        long node = 0;
+        for (int i = 0; i < 6; i++)
+            node |= (0x00000000000000ffL & (long)hash[i]) << (i*8);
+        // Since we haven't used the mac address, the spec says that the first bit must be 1.
+        return node & 0x8000000000000000L;
+    }
+
+    private static byte[] hash(String... data) {
+
+        MessageDigest digest;
+        try {
+            digest = MessageDigest.getInstance("MD5");
+        } catch (NoSuchAlgorithmException e) {
+            throw new RuntimeException(e);
+        }
+
+        for (String block : data)
+            if (block != null)
+                digest.update(block.getBytes());
+
+        return digest.digest();
+    }
+
+    private static long makeClockSeqAndNode() {
+        long clock = new Random(System.currentTimeMillis()).nextLong();
+        long node = makeNode();
+
+        long lsb = 0;
+        lsb |= (clock & 0x00000000000000FFL) << 48;
+        lsb |= (clock & 0x0000000000003F00L) << 48;
+        lsb |= 0x8000000000000000L;
+        lsb |= node;
+        return lsb;
+    }
+
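A reading aid for the least-significant-bits value assembled above (illustrative Java, not part of the patch): per RFC 4122, the lsb packs the variant, a 14-bit clock sequence and a 48-bit node id.

    // Decompose the lsb of a generated UUID (layout only; sketch assumes the UUIDs class below).
    long lsb = UUIDs.timeBased().getLeastSignificantBits();
    long clockSeq = (lsb >>> 48) & 0x3FFFL;    // the 14 clock-sequence bits
    long nodeId   = lsb & 0x0000FFFFFFFFFFFFL; // the 48 node bits
    assert (lsb >>> 62) == 2;                  // variant bits are binary 10
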
+    /**
+     * Creates a new random (version 4) UUID.
+     * <p>
+     * This method is just a convenience for {@code UUID.randomUUID()}.
+     *
+     * @return a newly generated, pseudo-random, version 4 UUID.
+     */
+    public static UUID random() {
+        return UUID.randomUUID();
+    }
+
+    /**
+     * Creates a new time-based (version 1) UUID.
+     * <p>
+     * UUIDs generated by this method are suitable for use with the
+     * {@code timeuuid} Cassandra type. In particular, the generated UUID
+     * includes the timestamp of its generation.
+     *
+     * @return a new time-based UUID.
+     */
+    public static UUID timeBased() {
+        return new UUID(makeMSB(getCurrentTimestamp()), CLOCK_SEQ_AND_NODE);
+    }
+
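A minimal usage sketch for the method above (plain client code, not part of the patch): the generated value can be written to a {@code timeuuid} column, and its creation time recovered later with {@code unixTimestamp} (defined further down in this file).

    UUID id = UUIDs.timeBased();
    long createdAt = UUIDs.unixTimestamp(id); // comparable to System.currentTimeMillis()
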
+    /**
+     * Creates a "fake" time-based UUID that sorts as the smallest possible
+     * version 1 UUID generated at the provided timestamp.
+     * <p>
+     * Such UUIDs are useful in queries to select a time range of a
+     * {@code timeuuid} column.
+     * <p>
+     * The UUIDs created by this method are not unique and as such are
+     * not suitable for anything other than querying a specific time
+     * range. In particular, you should not insert such UUIDs.
+     * <p>
+     * Also, the timestamp to provide as parameter must be a unix timestamp (as
+     * returned by {@link System#currentTimeMillis} or {@link java.util.Date#getTime}),
+     * not a count of 100-nanosecond intervals since 15 October 1582 as UUIDs
+     * use internally. In other words, given a UUID {@code uuid}, you should
+     * never do {@code startOf(uuid.timestamp())} but rather
+     * {@code startOf(unixTimestamp(uuid))}.
+     * <p>
+     * Lastly, please note that Cassandra's timeuuid sorting is not compatible
+     * with {@link UUID#compareTo} and hence the UUIDs created by this method
+     * are not necessarily a lower bound for that latter method.
+     *
+     * @param timestamp the unix timestamp for which the created UUID must be a
+     * lower bound.
+     * @return the smallest (for Cassandra timeuuid sorting) UUID of {@code timestamp}.
+     */
+    public static UUID startOf(long timestamp) {
+        return new UUID(makeMSB(fromUnixTimestamp(timestamp)), MIN_CLOCK_SEQ_AND_NODE);
+    }
+
+    /**
+     * Creates a "fake" time-based UUID that sorts as the biggest possible
+     * version 1 UUID generated at the provided timestamp.
+     * <p>
+     * Such UUIDs are useful in queries to select a time range of a
+     * {@code timeuuid} column.
+     * <p>
+     * The UUIDs created by this method are not unique and as such are
+     * not suitable for anything other than querying a specific time
+     * range. In particular, you should not insert such UUIDs.
+     * <p>
+     * Also, the timestamp to provide as parameter must be a unix timestamp (as
+     * returned by {@link System#currentTimeMillis} or {@link java.util.Date#getTime}),
+     * not a count of 100-nanosecond intervals since 15 October 1582 as UUIDs
+     * use internally. In other words, given a UUID {@code uuid}, you should
+     * never do {@code endOf(uuid.timestamp())} but rather
+     * {@code endOf(unixTimestamp(uuid))}.
+     * <p>
+     * Lastly, please note that Cassandra's timeuuid sorting is not compatible
+     * with {@link UUID#compareTo} and hence the UUIDs created by this method
+     * are not necessarily an upper bound for that latter method.
+     *
+     * @param timestamp the unix timestamp for which the created UUID must be an
+     * upper bound.
+     * @return the biggest (for Cassandra timeuuid sorting) UUID of {@code timestamp}.
+     */
+    public static UUID endOf(long timestamp) {
+        long uuidTstamp = fromUnixTimestamp(timestamp + 1) - 1;
+        return new UUID(makeMSB(uuidTstamp), MAX_CLOCK_SEQ_AND_NODE);
+    }
+
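A sketch of the range-query pattern {@code startOf}/{@code endOf} are designed for (the {@code events} table and {@code Session} are hypothetical, not part of this patch; timeuuid literals can be inlined unquoted in CQL):

    void lastHourOfEvents(Session session) {
        long to = System.currentTimeMillis();
        long from = to - 3600 * 1000; // the last hour
        session.execute("SELECT * FROM events WHERE day = 'today'"
                      + " AND id >= " + UUIDs.startOf(from)
                      + " AND id <= " + UUIDs.endOf(to));
    }
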
+    /**
+     * Returns the unix timestamp contained by the provided time-based UUID.
+     * <p>
+     * This method is not equivalent to {@code uuid.timestamp()}. More
+     * precisely, a version 1 UUID stores a timestamp that represents the
+     * number of 100-nanosecond intervals since midnight, 15 October 1582 and
+     * that is what {@code uuid.timestamp()} returns. This method however
+     * converts that timestamp to the equivalent unix timestamp in
+     * milliseconds, i.e. a timestamp representing a number of milliseconds
+     * since midnight, January 1, 1970 UTC. In particular, the timestamps
+     * returned by this method are comparable to the timestamps returned by
+     * {@link System#currentTimeMillis}, {@link java.util.Date#getTime}, etc.
+     *
+     * @param uuid the UUID to return the timestamp of.
+     * @return the unix timestamp of {@code uuid}.
+     *
+     * @throws IllegalArgumentException if {@code uuid} is not a version 1 UUID.
+     */
+    public static long unixTimestamp(UUID uuid) {
+        if (uuid.version() != 1)
+            throw new IllegalArgumentException(String.format("Can only retrieve the unix timestamp for version 1 uuid (provided version %d)", uuid.version()));
+
+        long timestamp = uuid.timestamp();
+        return (timestamp / 10000) + START_EPOCH;
+    }
+
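The distinction the javadoc above insists on, as a short sketch (illustrative values, not part of the patch):

    UUID uuid = UUIDs.timeBased();
    long raw = uuid.timestamp();             // 100ns ticks since 15 October 1582
    long millis = UUIDs.unixTimestamp(uuid); // milliseconds since 1 January 1970
    UUID lower = UUIDs.startOf(millis);      // correct
    // UUIDs.startOf(raw) would be wrong: raw is not a unix timestamp
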
+    /*
+     * Note that currently we use System.currentTimeMillis() for a base time in
+     * milliseconds, and then if we are in the same millisecond as the
+     * previous generation, we increment the number of nanoseconds.
+     * However, since the precision is 100-nanosecond intervals, we can only
+     * generate 10K UUIDs within a millisecond safely. If we detect we have
+     * already generated that many UUIDs within a millisecond (which, while
+     * admittedly unlikely in a real application, is very achievable on even
+     * modest machines), then we stall the generator (busy spin) until the next
+     * millisecond as required by the RFC.
+     */
+    private static long getCurrentTimestamp() {
+        while (true) {
+            long now = fromUnixTimestamp(System.currentTimeMillis());
+            long last = lastTimestamp.get();
+            if (now > last) {
+                if (lastTimestamp.compareAndSet(last, now))
+                    return now;
+            } else {
+                long lastMillis = millisOf(last);
+                // If the clock went back in time, bail out
+                if (millisOf(now) < millisOf(last))
+                    return lastTimestamp.incrementAndGet();
+
+                long candidate = last + 1;
+                // If we've generated more than 10k uuids in that millisecond,
+                // we restart the whole process until we get to the next millis.
+                // Otherwise, we try to use our candidate ... unless we've been
+                // beaten by another thread in which case we try again.
+                if (millisOf(candidate) == lastMillis && lastTimestamp.compareAndSet(last, candidate))
+                    return candidate;
+            }
+        }
+    }
+
+    // Package visible for testing
+    static long fromUnixTimestamp(long tstamp) {
+        return (tstamp - START_EPOCH) * 10000;
+    }
+
+    private static long millisOf(long timestamp) {
+        return timestamp / 10000;
+    }
+
+    // Package visible for testing
+    static long makeMSB(long timestamp) {
+        long msb = 0L;
+        msb |= (0x00000000ffffffffL & timestamp) << 32;
+        msb |= (0x0000ffff00000000L & timestamp) >>> 16;
+        msb |= (0x0fff000000000000L & timestamp) >>> 48;
+        msb |= 0x0000000000001000L; // sets the version to 1.
+        return msb;
+    }
+}
diff --git a/driver-core/src/test/java/com/datastax/driver/core/utils/UUIDsTest.java b/driver-core/src/test/java/com/datastax/driver/core/utils/UUIDsTest.java
new file mode 100644
index 00000000000..edf11e90751
--- /dev/null
+++ b/driver-core/src/test/java/com/datastax/driver/core/utils/UUIDsTest.java
@@ -0,0 +1,120 @@
+package com.datastax.driver.core.utils;
+
+import java.nio.ByteBuffer;
+import java.util.HashSet;
+import java.util.Random;
+import java.util.Set;
+import java.util.UUID;
+import java.util.concurrent.ConcurrentSkipListSet;
+
+import org.junit.Test;
+import static org.junit.Assert.*;
+
+import org.apache.cassandra.db.marshal.TimeUUIDType;
+
+public class UUIDsTest {
+
+    @Test
+    public void conformanceTest() {
+
+        // The UUIDs class does some computation at class initialization, which
+        // may screw up our assumption below that UUIDs.timeBased() takes less
+        // than 10ms, so force class loading now.
+        UUIDs.random();
+
+        long now = System.currentTimeMillis();
+        UUID uuid = UUIDs.timeBased();
+
+        assertEquals(1, uuid.version());
+        assertEquals(2, uuid.variant());
+
+        long tstamp = UUIDs.unixTimestamp(uuid);
+
+        // Check now and the uuid timestamp are within 10 milliseconds.
+        assert now <= tstamp && now >= tstamp - 10 : String.format("now = %d, tstamp = %d", now, tstamp);
+    }
+
+    @Test
+    public void uniquenessTest() {
+        // Generate 1M uuids and check we never get the same one twice
+
+        int nbGenerated = 1000000;
+        Set<UUID> generated = new HashSet<UUID>(nbGenerated);
+
+        for (int i = 0; i < nbGenerated; ++i)
+            generated.add(UUIDs.timeBased());
+
+        assertEquals(nbGenerated, generated.size());
+    }
+
+    @Test
+    public void multiThreadUniquenessTest() throws Exception {
+        int nbThread = 10;
+        int nbGenerated = 500000;
+        Set<UUID> generated = new ConcurrentSkipListSet<UUID>();
+
+        UUIDGenerator[] generators = new UUIDGenerator[nbThread];
+        for (int i = 0; i < nbThread; i++)
+            generators[i] = new UUIDGenerator(nbGenerated, generated);
+
+        for (int i = 0; i < nbThread; i++)
+            generators[i].start();
+
+        for (int i = 0; i < nbThread; i++)
+            generators[i].join();
+
+        assertEquals(nbThread * nbGenerated, generated.size());
+    }
+
+    @Test
+    public void timestampIncreasingTest() {
+        // Generate 1M uuids and check the timestamps are always increasing
+        int nbGenerated = 1000000;
+        long previous = 0;
+
+        for (int i = 0; i < nbGenerated; ++i) {
+            long current = UUIDs.timeBased().timestamp();
+            assert previous < current : String.format("previous = %d >= %d = current", previous, current);
+            previous = current;
+        }
+    }
+
+    @Test
+    public void startEndOfTest() {
+
+        Random random = new Random(System.currentTimeMillis());
+
+        int nbTstamp = 10;
+        int nbPerTstamp = 10;
+
+        for (int i = 0; i < nbTstamp; i++) {
+            long tstamp = (long)random.nextInt();
+            for (int j = 0; j < nbPerTstamp; j++) {
+                assertWithin(new UUID(UUIDs.makeMSB(UUIDs.fromUnixTimestamp(tstamp)), random.nextLong()), UUIDs.startOf(tstamp), UUIDs.endOf(tstamp));
+            }
+        }
+    }
+
+    private static void assertWithin(UUID uuid, UUID lowerBound, UUID upperBound) {
+        ByteBuffer uuidBytes = TimeUUIDType.instance.decompose(uuid);
+        ByteBuffer lb = TimeUUIDType.instance.decompose(lowerBound);
+        ByteBuffer ub = TimeUUIDType.instance.decompose(upperBound);
+        assertTrue(TimeUUIDType.instance.compare(lb, uuidBytes) <= 0);
+        assertTrue(TimeUUIDType.instance.compare(ub, uuidBytes) >= 0);
+    }
+
+    private static class UUIDGenerator extends Thread {
+
+        private final int toGenerate;
+        private final Set<UUID> generated;
+
+        UUIDGenerator(int toGenerate, Set<UUID> generated) {
+            this.toGenerate = toGenerate;
+            this.generated = generated;
+        }
+
+        public void run() {
+            for (int i = 0; i < toGenerate; ++i)
+                generated.add(UUIDs.timeBased());
+        }
+    }
+}
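The 10K-per-millisecond ceiling mentioned in the getCurrentTimestamp() comment above follows directly from the version 1 precision; a quick back-of-the-envelope (illustrative, not part of the patches):

    long slotsPerMilli = 1000000L / 100L;       // 10000 distinct 100ns timestamps per millisecond
    long maxPerSecond  = slotsPerMilli * 1000L; // so at most ~10M time-based UUIDs per second per process
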
From 049d81064ea638e9ac5af288de145229ea1bd35f Mon Sep 17 00:00:00 2001
From: Dave Brosius
Date: Thu, 29 Nov 2012 01:53:06 -0500
Subject: [PATCH 098/719] avoid NPE if colsDefs is null

---
 .../java/com/datastax/driver/core/ClusterMetadata.java | 7 ++++---
 1 file changed, 4 insertions(+), 3 deletions(-)

diff --git a/driver-core/src/main/java/com/datastax/driver/core/ClusterMetadata.java b/driver-core/src/main/java/com/datastax/driver/core/ClusterMetadata.java
index f4d9e6d3552..bd46c73d25a 100644
--- a/driver-core/src/main/java/com/datastax/driver/core/ClusterMetadata.java
+++ b/driver-core/src/main/java/com/datastax/driver/core/ClusterMetadata.java
@@ -104,15 +104,16 @@ synchronized void rebuildSchema(String keyspace, String table, ResultSet ks, Res
     }

     private static void buildTableMetadata(KeyspaceMetadata ksm, List<Row> cfRows, Map<String, List<Row>> colsDefs) {
+        boolean hasColumns = (colsDefs != null) && !colsDefs.isEmpty();
         for (Row cfRow : cfRows) {
             String cfName = cfRow.getString(TableMetadata.CF_NAME);
-            TableMetadata tm = TableMetadata.build(ksm, cfRow, !colsDefs.isEmpty());
+            TableMetadata tm = TableMetadata.build(ksm, cfRow, hasColumns);

-            if (colsDefs == null || colsDefs.get(cfName) == null)
+            if (!hasColumns || colsDefs.get(cfName) == null)
                 continue;

             for (Row colRow : colsDefs.get(cfName)) {
-                ColumnMetadata cm = ColumnMetadata.build(tm, colRow);
+                ColumnMetadata.build(tm, colRow);
             }
         }
     }
From 2d7ee018eac1f43a34228177175e29592e04a186 Mon Sep 17 00:00:00 2001
From: Sylvain Lebresne
Date: Thu, 29 Nov 2012 10:37:00 +0100
Subject: [PATCH 099/719] Minor updates/bug fixes

---
 .../main/java/com/datastax/driver/core/ResultSetFuture.java  | 2 +-
 .../main/java/com/datastax/driver/core/RetryingCallback.java | 2 --
 .../src/main/java/com/datastax/driver/core/utils/UUIDs.java  | 3 +--
 .../datastax/driver/core/utils/querybuilder/QueryBuilder.java | 2 +-
 4 files changed, 3 insertions(+), 6 deletions(-)

diff --git a/driver-core/src/main/java/com/datastax/driver/core/ResultSetFuture.java b/driver-core/src/main/java/com/datastax/driver/core/ResultSetFuture.java
index ac1dcc0ab21..c72fd05c43e 100644
--- a/driver-core/src/main/java/com/datastax/driver/core/ResultSetFuture.java
+++ b/driver-core/src/main/java/com/datastax/driver/core/ResultSetFuture.java
@@ -173,7 +173,7 @@ public ResultSet getUninterruptibly(long timeout, TimeUnit unit) throws NoHostAv
                 // We said 'uninterruptibly'
                 long now = System.nanoTime();
                 long elapsedNanos = now - start;
-                timeout = timeoutNanos - elapsedNanos;
+                timeoutNanos = timeoutNanos - elapsedNanos;
                 start = now;
             }
         }
diff --git a/driver-core/src/main/java/com/datastax/driver/core/RetryingCallback.java b/driver-core/src/main/java/com/datastax/driver/core/RetryingCallback.java
index 8b13cd109f9..fa6f08a4206 100644
--- a/driver-core/src/main/java/com/datastax/driver/core/RetryingCallback.java
+++ b/driver-core/src/main/java/com/datastax/driver/core/RetryingCallback.java
@@ -91,8 +91,6 @@ private boolean query(Host host) {
             return false;
         } catch (TimeoutException e) {
             // We timeout, log it but move to the next node.
- if (connection != null) - currentPool.returnConnection(connection); logError(host.getAddress(), "Timeout while trying to acquire available connection"); return false; } catch (RuntimeException e) { diff --git a/driver-core/src/main/java/com/datastax/driver/core/utils/UUIDs.java b/driver-core/src/main/java/com/datastax/driver/core/utils/UUIDs.java index 7b50e1cd9f9..d6b7067e8a3 100644 --- a/driver-core/src/main/java/com/datastax/driver/core/utils/UUIDs.java +++ b/driver-core/src/main/java/com/datastax/driver/core/utils/UUIDs.java @@ -103,8 +103,7 @@ private static long makeClockSeqAndNode() { long node = makeNode(); long lsb = 0; - lsb |= (clock & 0x00000000000000FFL) << 48; - lsb |= (clock & 0x0000000000003F00L) << 48; + lsb |= (clock & 0x0000000000003FFFL) << 48; lsb |= 0x8000000000000000L; lsb |= node; return lsb; diff --git a/driver-core/src/main/java/com/datastax/driver/core/utils/querybuilder/QueryBuilder.java b/driver-core/src/main/java/com/datastax/driver/core/utils/querybuilder/QueryBuilder.java index 6dd36df2729..c25dd233ab4 100644 --- a/driver-core/src/main/java/com/datastax/driver/core/utils/querybuilder/QueryBuilder.java +++ b/driver-core/src/main/java/com/datastax/driver/core/utils/querybuilder/QueryBuilder.java @@ -54,7 +54,7 @@ public static String[] all() { * @return an array containing "count(*)" as sole element. */ public static String[] count() { - return COUNT_ALL; + return Arrays.copyOf(COUNT_ALL, COUNT_ALL.length); } /** From 64bd74a10f60fd00b7a9a61635f22ee150bbb399 Mon Sep 17 00:00:00 2001 From: Sylvain Lebresne Date: Thu, 29 Nov 2012 10:37:52 +0100 Subject: [PATCH 100/719] Fix build --- .../datastax/driver/core/utils/querybuilder/QueryBuilder.java | 2 ++ 1 file changed, 2 insertions(+) diff --git a/driver-core/src/main/java/com/datastax/driver/core/utils/querybuilder/QueryBuilder.java b/driver-core/src/main/java/com/datastax/driver/core/utils/querybuilder/QueryBuilder.java index c25dd233ab4..84676f2f370 100644 --- a/driver-core/src/main/java/com/datastax/driver/core/utils/querybuilder/QueryBuilder.java +++ b/driver-core/src/main/java/com/datastax/driver/core/utils/querybuilder/QueryBuilder.java @@ -1,5 +1,7 @@ package com.datastax.driver.core.utils.querybuilder; +import java.util.Arrays; + import com.datastax.driver.core.Statement; import com.datastax.driver.core.TableMetadata; From 13edfdc07144c66e04b4aeebdf3a2c92407a1f30 Mon Sep 17 00:00:00 2001 From: Sylvain Lebresne Date: Thu, 29 Nov 2012 12:45:07 +0100 Subject: [PATCH 101/719] Slight improvement of UUID generation --- .../com/datastax/driver/core/utils/UUIDs.java | 102 ++++++++++++------ 1 file changed, 67 insertions(+), 35 deletions(-) diff --git a/driver-core/src/main/java/com/datastax/driver/core/utils/UUIDs.java b/driver-core/src/main/java/com/datastax/driver/core/utils/UUIDs.java index d6b7067e8a3..3b75437cdb2 100644 --- a/driver-core/src/main/java/com/datastax/driver/core/utils/UUIDs.java +++ b/driver-core/src/main/java/com/datastax/driver/core/utils/UUIDs.java @@ -1,12 +1,17 @@ package com.datastax.driver.core.utils; import java.net.InetAddress; +import java.net.NetworkInterface; import java.net.UnknownHostException; +import java.net.SocketException; import java.security.MessageDigest; import java.security.NoSuchAlgorithmException; import java.util.Calendar; +import java.util.Enumeration; +import java.util.HashSet; import java.util.Properties; import java.util.Random; +import java.util.Set; import java.util.TimeZone; import java.util.UUID; import java.util.concurrent.atomic.AtomicLong; @@ -52,50 +57,45 
@@ private static long makeEpoch() { private static long makeNode() { - // ideally, we'd use the MAC address, but java doesn't expose that. So - // instead gather a number of local information and hash that. - InetAddress local; + /* + * We don't have access to the MAC address (in pure JAVA at least) but + * need to generate a node part that identify this host as uniquely as + * possible. + * The spec says that one option is to take as many source that + * identify this node as possible and hash them together. That's what + * we do here by gathering all the ip of this host as well as a few + * other sources. + */ try { - local = InetAddress.getLocalHost(); - } catch (UnknownHostException e) { - try { - local = InetAddress.getByAddress(new byte[]{ 127, 0, 0, 1 }); - } catch (UnknownHostException uhe) { - throw new RuntimeException(uhe); - } - } - Properties props = System.getProperties(); + MessageDigest digest = MessageDigest.getInstance("MD5"); + for (String address : getAllLocalAddresses()) + update(digest, address); - byte[] hash = hash(local.toString(), - props.getProperty("java.vendor"), - props.getProperty("java.vendor.url"), - props.getProperty("java.version"), - props.getProperty("os.arch"), - props.getProperty("os.name"), - props.getProperty("os.version")); - - long node = 0; - for (int i = 0; i < 6; i++) - node |= (0x00000000000000ffL & (long)hash[i]) << (i*8); - // Since we haven't use the mac address, the spec says that the first bit must be 1. - return node & 0x8000000000000000L; - } + Properties props = System.getProperties(); + update(digest, props.getProperty("java.vendor")); + update(digest, props.getProperty("java.vendor.url")); + update(digest, props.getProperty("java.version")); + update(digest, props.getProperty("os.arch")); + update(digest, props.getProperty("os.name")); + update(digest, props.getProperty("os.version")); - private static byte[] hash(String... data) { + byte[] hash = digest.digest(); - MessageDigest digest; - try { - digest = MessageDigest.getInstance("MD5"); + long node = 0; + for (int i = 0; i < 6; i++) + node |= (0x00000000000000ffL & (long)hash[i]) << (i*8); + // Since we don't use the mac address, the spec says that multicast + // bit (least significant bit of the first octet of the node ID) must be 1. + return node | 0x0000010000000000L; } catch (NoSuchAlgorithmException e) { throw new RuntimeException(e); } + } - for (String block : data) - if (block != null) - digest.update(block.getBytes()); - - return digest.digest(); + private static void update(MessageDigest digest, String value) { + if (value != null) + digest.update(value.getBytes()); } private static long makeClockSeqAndNode() { @@ -273,4 +273,36 @@ static long makeMSB(long timestamp) { msb |= 0x0000000000001000L; // sets the version to 1. 
return msb; } + + private static Set getAllLocalAddresses() { + Set allIps = new HashSet(); + try { + InetAddress localhost = InetAddress.getLocalHost(); + allIps.add(localhost.toString()); + // Also return the hostname if available, it won't hurt (this does a dns lookup, it's only done once at startup) + allIps.add(localhost.getCanonicalHostName()); + InetAddress[] allMyIps = InetAddress.getAllByName(localhost.getCanonicalHostName()); + if (allMyIps != null) { + for (int i = 0; i < allMyIps.length; i++) + allIps.add(allMyIps[i].toString()); + } + } catch (UnknownHostException e) { + // Ignore, we'll try the network interfaces anyway + } + + try { + Enumeration en = NetworkInterface.getNetworkInterfaces(); + if (en != null) { + while (en.hasMoreElements()) { + Enumeration enumIpAddr = en.nextElement().getInetAddresses(); + while (enumIpAddr.hasMoreElements()) + allIps.add(enumIpAddr.nextElement().toString()); + } + } + } catch (SocketException e) { + // Ignore, if we relly go nothing so far, we'll throw an exception + } + + return allIps; + } } From a765293fe8006955f1b944013780fac99c2169d8 Mon Sep 17 00:00:00 2001 From: Sylvain Lebresne Date: Mon, 3 Dec 2012 19:48:24 +0100 Subject: [PATCH 102/719] Add keeping logs around on test fail --- .../src/test/java/com/datastax/driver/core/CCMBridge.java | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/driver-core/src/test/java/com/datastax/driver/core/CCMBridge.java b/driver-core/src/test/java/com/datastax/driver/core/CCMBridge.java index 05fbc863329..c788180a380 100644 --- a/driver-core/src/test/java/com/datastax/driver/core/CCMBridge.java +++ b/driver-core/src/test/java/com/datastax/driver/core/CCMBridge.java @@ -144,6 +144,10 @@ public static abstract class PerClassSingleNodeCluster { protected abstract Collection getTableDefinitions(); + public void errorOut() { + erroredOut = true; + } + @BeforeClass public static void createCluster() { erroredOut = false; From ead598d72ef31548c698598d30fa30aadcbe4906 Mon Sep 17 00:00:00 2001 From: Sylvain Lebresne Date: Fri, 30 Nov 2012 20:23:07 +0100 Subject: [PATCH 103/719] Refactor cluster initialization/configuration classes and add metrics --- driver-core/pom.xml | 7 + .../com/datastax/driver/core/Cluster.java | 224 +++++++++-------- .../datastax/driver/core/Configuration.java | 109 +++++++++ .../com/datastax/driver/core/Connection.java | 14 +- .../driver/core/ConnectionsConfiguration.java | 51 ---- .../driver/core/ControlConnection.java | 9 +- .../driver/core/HostConnectionPool.java | 6 +- .../{ClusterMetadata.java => Metadata.java} | 6 +- .../com/datastax/driver/core/Metrics.java | 230 ++++++++++++++++++ .../datastax/driver/core/PoolingOptions.java | 10 +- .../driver/core/PreparedStatement.java | 2 +- .../datastax/driver/core/ProtocolOptions.java | 43 +++- .../driver/core/RetryingCallback.java | 70 +++++- .../com/datastax/driver/core/Session.java | 2 +- .../datastax/driver/core/SocketOptions.java | 26 +- .../driver/core/policies/Policies.java | 4 + .../core/policies/TokenAwarePolicy.java | 4 +- driver-examples/stress/pom.xml | 2 +- .../com/datastax/driver/stress/Stress.java | 3 +- 19 files changed, 615 insertions(+), 207 deletions(-) create mode 100644 driver-core/src/main/java/com/datastax/driver/core/Configuration.java delete mode 100644 driver-core/src/main/java/com/datastax/driver/core/ConnectionsConfiguration.java rename driver-core/src/main/java/com/datastax/driver/core/{ClusterMetadata.java => Metadata.java} (99%) create mode 100644 
driver-core/src/main/java/com/datastax/driver/core/Metrics.java diff --git a/driver-core/pom.xml b/driver-core/pom.xml index 6276e12e7a7..8d3dd9d7511 100644 --- a/driver-core/pom.xml +++ b/driver-core/pom.xml @@ -47,6 +47,13 @@ jackson-core-asl 1.4.0 + + + com.yammer.metrics + metrics-core + 2.2.0 + + diff --git a/driver-core/src/main/java/com/datastax/driver/core/Cluster.java b/driver-core/src/main/java/com/datastax/driver/core/Cluster.java index 7f397b5d05d..d724efeda39 100644 --- a/driver-core/src/main/java/com/datastax/driver/core/Cluster.java +++ b/driver-core/src/main/java/com/datastax/driver/core/Cluster.java @@ -52,15 +52,10 @@ public class Cluster { } } - /** - * The default cassandra port for the native client protocol. - */ - public static final int DEFAULT_PORT = 9042; - final Manager manager; - private Cluster(List contactPoints, int port, Policies policies, AuthInfoProvider authProvider) throws NoHostAvailableException { - this.manager = new Manager(contactPoints, port, policies, authProvider); + private Cluster(List contactPoints, Configuration configuration) throws NoHostAvailableException { + this.manager = new Manager(contactPoints, configuration); this.manager.init(); } @@ -88,7 +83,7 @@ public static Cluster buildFrom(Initializer initializer) throws NoHostAvailableE if (contactPoints.isEmpty()) throw new IllegalArgumentException("Cannot build a cluster without contact points"); - return new Cluster(contactPoints, initializer.getPort(), initializer.getPolicies(), initializer.getAuthInfoProvider()); + return new Cluster(contactPoints, initializer.getConfiguration()); } /** @@ -136,7 +131,7 @@ public Session connect(String keyspace) throws NoHostAvailableException { * * @return the cluster metadata. */ - public ClusterMetadata getMetadata() { + public Metadata getMetadata() { return manager.metadata; } @@ -145,15 +140,27 @@ public ClusterMetadata getMetadata() { * * @return the cluster configuration. */ - public Cluster.Configuration getConfiguration() { + public Configuration getConfiguration() { return manager.configuration; } + /** + * The cluster metrics. + * + * @return the cluster metrics, or {@code null} if metrics collection has + * been disabled (see {@link Configuration#isMetricsEnabled}). + */ + public Metrics getMetrics() { + return manager.configuration.isMetricsEnabled() + ? manager.metrics + : null; + } + /** * Shutdown this cluster instance. * * This closes all connections from all the sessions of this {@code - * Cluster} instance and reclam all ressources used by it. + * Cluster} instance and reclaim all resources used by it. *

    * This method has no effect if the cluster was already shutdown. */ @@ -163,6 +170,14 @@ public void shutdown() { /** * Initializer for {@link Cluster} instances. + *

    + * If you want to create a new {@code Cluster} instance programmatically, + * then it is advised to use {@link Cluster.Builder} (obtained through the + * {@link Cluster#builder} method). + *

+     * But it is also possible to implement a custom {@code Initializer} that
+     * retrieves the initialization from a web service or from a configuration
+     * file, for instance.
      */
     public interface Initializer {
@@ -175,30 +190,21 @@ public interface Initializer {

         public List<InetAddress> getContactPoints();

         /**
-         * The port to use to connect to Cassandra hosts.
+         * The configuration to use for the new cluster.
          * <p>
-         * This port will be used to connect to all of the Cassandra cluster
-         * hosts, not only the contact points. This means that all Cassandra
-         * host must be configured to listen on the same port.
+         * Note that some configuration can be modified after the cluster
+         * initialization but some others cannot. In particular, the ones that
+         * cannot be changed afterwards include:
+         * <ul>
+         *   <li>the port used to connect to Cassandra nodes (see {@link ProtocolOptions}).</li>
+         *   <li>the policies used (see {@link Policies}).</li>
+         *   <li>the authentication info provided (see {@link Configuration}).</li>
+         *   <li>whether metrics are enabled (see {@link Configuration}).</li>
+         * </ul>
    * - * @return the port to use to connect to Cassandra hosts. + * @return the configuration to use for the new cluster. */ - public int getPort(); - - /** - * Returns the policies to use for this cluster. - * - * @return the policies to use for this cluster. - */ - public Policies getPolicies(); - - /** - * The authentication provider to use to connect to the Cassandra cluster. - * - * @return the authentication provider to use. Use - * AuthInfoProvider.NONE if authentication is not to be used. - */ - public AuthInfoProvider getAuthInfoProvider(); + public Configuration getConfiguration(); } /** @@ -207,13 +213,18 @@ public interface Initializer { public static class Builder implements Initializer { private final List addresses = new ArrayList(); - private int port = DEFAULT_PORT; + private int port = ProtocolOptions.DEFAULT_PORT; private AuthInfoProvider authProvider = AuthInfoProvider.NONE; private LoadBalancingPolicy loadBalancingPolicy; private ReconnectionPolicy reconnectionPolicy; private RetryPolicy retryPolicy; + private ProtocolOptions.Compression compression = ProtocolOptions.Compression.NONE; + private boolean metricsEnabled = true; + private final PoolingOptions poolingOptions = new PoolingOptions(); + private final SocketOptions socketOptions = new SocketOptions(); + public List getContactPoints() { return addresses; } @@ -232,15 +243,6 @@ public Builder withPort(int port) { return this; } - /** - * The port to use to connect to Cassandra hosts. - * - * @return the port to use to connect to Cassandra hosts. - */ - public int getPort() { - return port; - } - /** * Adds a contact point. * @@ -349,23 +351,6 @@ public Builder withRetryPolicy(RetryPolicy policy) { return this; } - /** - * Returns the policies to use for this cluster. - *

    - * The policies used are the one set by the {@code with*} methods of - * this builder, or the default ones defined in {@link Policies} for - * the policies that hasn't been explicitely set. - * - * @return the policies to use for this cluster. - */ - public Policies getPolicies() { - return new Policies( - loadBalancingPolicy == null ? Policies.DEFAULT_LOAD_BALANCING_POLICY : loadBalancingPolicy, - reconnectionPolicy == null ? Policies.DEFAULT_RECONNECTION_POLICY : reconnectionPolicy, - retryPolicy == null ? Policies.DEFAULT_RETRY_POLICY : retryPolicy - ); - } - /** * Use the provided {@code AuthInfoProvider} to connect to Cassandra hosts. *

    @@ -381,63 +366,89 @@ public Builder withAuthInfoProvider(AuthInfoProvider authInfoProvider) { } /** - * The authentication provider to use to connect to the Cassandra cluster. + * Sets the compression to use for the transport. + * + * @param compression the compression to set + * @return this Builder * - * @return the authentication provider set through {@link #withAuthInfoProvider} - * or AuthInfoProvider.NONE if nothing was set. + * @see ProtocolOptions.Compression */ - public AuthInfoProvider getAuthInfoProvider() { - return this.authProvider; + public Builder withCompression(ProtocolOptions.Compression compression) { + this.compression = compression; + return this; } /** - * Build the cluster with the configured set of initial contact points - * and policies. - * - * This is a shorthand for {@code Cluster.buildFrom(this)}. + * Disable metrics collection for the created cluster (metrics are + * enabled by default otherwise). * - * @return the newly build Cluster instance. - * - * @throws NoHostAvailableException if none of the contact points - * provided can be reached. - * @throws AuthenticationException if while contacting the initial - * contact points an authencation error occurs. + * @return this builder */ - public Cluster build() throws NoHostAvailableException { - return Cluster.buildFrom(this); + public Builder withoutMetrics() { + this.metricsEnabled = false; + return this; } - } - /** - * The configuration of the cluster. - */ - public static class Configuration { - - private final Policies policies; - private final ConnectionsConfiguration connections; + /** + * The pooling options used by this builder. + * + * @return the pooling options that will be used by this builder. You + * can use the returned object to define the initial pooling options + * for the built cluster. + */ + public PoolingOptions poolingOptions() { + return poolingOptions; + } - private Configuration(Cluster.Manager manager, Policies policies) { - this.policies = policies; - this.connections = new ConnectionsConfiguration(manager); + /** + * The socket options used by this builder. + * + * @return the socket options that will be used by this builder. You + * can use the returned object to define the initial socket options + * for the built cluster. + */ + public SocketOptions socketOptions() { + return socketOptions; } /** - * The policies set for the cluster. + * The configuration that will be used for the new cluster. + *

+         * You should not modify this object directly as changes made
+         * to the returned object may not be taken into account when building
+         * the cluster. Instead, you should use the other methods of this {@code Builder}.
          *
-         * @return the policies set for the cluster.
+         * @return the configuration to use for the new cluster.
          */
-        public Policies getPolicies() {
-            return policies;
+        public Configuration getConfiguration() {
+            Policies policies = new Policies(
+                loadBalancingPolicy == null ? Policies.DEFAULT_LOAD_BALANCING_POLICY : loadBalancingPolicy,
+                reconnectionPolicy == null ? Policies.DEFAULT_RECONNECTION_POLICY : reconnectionPolicy,
+                retryPolicy == null ? Policies.DEFAULT_RETRY_POLICY : retryPolicy
+            );
+            return new Configuration(policies,
+                                     new ProtocolOptions(port).setCompression(compression),
+                                     poolingOptions,
+                                     socketOptions,
+                                     authProvider,
+                                     metricsEnabled);
         }

         /**
-         * Configuration related to the connections the driver maintains to the
-         * Cassandra hosts.
+         * Build the cluster with the configured set of initial contact points
+         * and policies.
+         *
+         * This is a shorthand for {@code Cluster.buildFrom(this)}.
+         *
+         * @return the newly built Cluster instance.
          *
-         * @return the configuration of the connections to Cassandra hosts.
+         * @throws NoHostAvailableException if none of the contact points
+         * provided can be reached.
+         * @throws AuthenticationException if while contacting the initial
+         * contact points an authentication error occurs.
          */
-        public ConnectionsConfiguration getConnectionsConfiguration() {
-            return connections;
+        public Cluster build() throws NoHostAvailableException {
+            return Cluster.buildFrom(this);
         }
     }
@@ -452,14 +463,14 @@ class Manager implements Host.StateListener, Connection.DefaultResponseHandler {

         // Initial contact points
         final List<InetAddress> contactPoints;
-        final int port;
-        private final Set<Session> sessions = new CopyOnWriteArraySet<Session>();
+        final Set<Session> sessions = new CopyOnWriteArraySet<Session>();

-        final ClusterMetadata metadata;
+        final Metadata metadata;
         final Configuration configuration;
+        final Metrics metrics;
         final Connection.Factory connectionFactory;
-        private final ControlConnection controlConnection;
+        final ControlConnection controlConnection;

         final ConvictionPolicy.Factory convictionPolicyFactory = new ConvictionPolicy.Simple.Factory();
@@ -477,17 +488,20 @@ class Manager implements Host.StateListener, Connection.DefaultResponseHandler {
        // less clear behavior.
final Map preparedQueries = new ConcurrentHashMap(); - private Manager(List contactPoints, int port, Policies policies, AuthInfoProvider authProvider) throws NoHostAvailableException { - this.port = port; - this.configuration = new Configuration(this, policies); - this.metadata = new ClusterMetadata(this); + private Manager(List contactPoints, Configuration configuration) throws NoHostAvailableException { + this.configuration = configuration; + this.metadata = new Metadata(this); this.contactPoints = contactPoints; - this.connectionFactory = new Connection.Factory(this, authProvider); + this.connectionFactory = new Connection.Factory(this, configuration.getAuthInfoProvider()); for (InetAddress address : contactPoints) addHost(address, false); this.controlConnection = new ControlConnection(this, metadata); + + this.metrics = new Metrics(this); + this.configuration.register(this); + this.controlConnection.connect(); } diff --git a/driver-core/src/main/java/com/datastax/driver/core/Configuration.java b/driver-core/src/main/java/com/datastax/driver/core/Configuration.java new file mode 100644 index 00000000000..47e9c70d1f8 --- /dev/null +++ b/driver-core/src/main/java/com/datastax/driver/core/Configuration.java @@ -0,0 +1,109 @@ +package com.datastax.driver.core; + +import com.datastax.driver.core.policies.*; + +/** + * The configuration of the cluster. + * This handle setting: + *

+ * <ul>
+ *   <li>Cassandra binary protocol level configuration (compression).</li>
+ *   <li>Connection pooling configurations.</li>
+ *   <li>low-level tcp configuration options (tcpNoDelay, keepAlive, ...).</li>
+ * </ul>
    + */ +public class Configuration { + + private final Policies policies; + + private final ProtocolOptions protocolOptions; + private final PoolingOptions poolingOptions; + private final SocketOptions socketOptions; + + private final AuthInfoProvider authProvider; + private final boolean metricsEnabled; + + public Configuration() { + this(new Policies(), + new ProtocolOptions(), + new PoolingOptions(), + new SocketOptions(), + AuthInfoProvider.NONE, + true); + } + + public Configuration(Policies policies, + ProtocolOptions protocolOptions, + PoolingOptions poolingOptions, + SocketOptions socketOptions, + AuthInfoProvider authProvider, + boolean metricsEnabled) { + this.policies = policies; + this.protocolOptions = protocolOptions; + this.poolingOptions = poolingOptions; + this.socketOptions = socketOptions; + this.authProvider = authProvider; + this.metricsEnabled = metricsEnabled; + } + + void register(Cluster.Manager manager) { + protocolOptions.register(manager); + poolingOptions.register(manager); + } + + /** + * The policies set for the cluster. + * + * @return the policies set for the cluster. + */ + public Policies getPolicies() { + return policies; + } + + /** + * The low-level tcp configuration options used (tcpNoDelay, keepAlive, ...). + * + * @return the socket options. + */ + public SocketOptions getSocketOptions() { + return socketOptions; + } + + /** + * The Cassandra binary protocol level configuration (compression). + * + * @return the protocol options. + */ + public ProtocolOptions getProtocolOptions() { + return protocolOptions; + } + + /** + * The connection pooling configuration. + * + * @return the pooling options. + */ + public PoolingOptions getPoolingOptions() { + return poolingOptions; + } + + /** + * The authentication provider used to connect to the Cassandra cluster. + * + * @return the authentication provider in use. + */ + public AuthInfoProvider getAuthInfoProvider() { + return authProvider; + } + + /** + * Whether metrics collection is enabled for the cluster instance. + *

    + * Metrics collection is enabled by default but can be disabled at cluster + * construction time through {@link Cluster.Builder#withoutMetrics}. + * + * @return whether metrics collection is enabled for the cluster instance. + */ + public boolean isMetricsEnabled() { + return metricsEnabled; + } +} diff --git a/driver-core/src/main/java/com/datastax/driver/core/Connection.java b/driver-core/src/main/java/com/datastax/driver/core/Connection.java index 83241ad64e1..c8606e87597 100644 --- a/driver-core/src/main/java/com/datastax/driver/core/Connection.java +++ b/driver-core/src/main/java/com/datastax/driver/core/Connection.java @@ -80,7 +80,7 @@ private Connection(String name, InetAddress address, Factory factory) throws Con bootstrap.setPipelineFactory(new PipelineFactory(this)); - ChannelFuture future = bootstrap.connect(new InetSocketAddress(address, factory.port)); + ChannelFuture future = bootstrap.connect(new InetSocketAddress(address, factory.getPort())); writer.incrementAndGet(); try { @@ -303,27 +303,29 @@ public String toString() { public static class Factory { - public final int port; private final ExecutorService bossExecutor = Executors.newCachedThreadPool(); private final ExecutorService workerExecutor = Executors.newCachedThreadPool(); private final ConcurrentMap idGenerators = new ConcurrentHashMap(); public final DefaultResponseHandler defaultHandler; - public final ConnectionsConfiguration configuration; + public final Configuration configuration; public final AuthInfoProvider authProvider; public Factory(Cluster.Manager manager, AuthInfoProvider authProvider) { - this(manager.port, manager, manager.configuration.getConnectionsConfiguration(), authProvider); + this(manager, manager.configuration, authProvider); } - private Factory(int port, DefaultResponseHandler defaultHandler, ConnectionsConfiguration configuration, AuthInfoProvider authProvider) { - this.port = port; + private Factory(DefaultResponseHandler defaultHandler, Configuration configuration, AuthInfoProvider authProvider) { this.defaultHandler = defaultHandler; this.configuration = configuration; this.authProvider = authProvider; } + public int getPort() { + return configuration.getProtocolOptions().getPort(); + } + /** * Opens a new connection to the node this factory points to. * diff --git a/driver-core/src/main/java/com/datastax/driver/core/ConnectionsConfiguration.java b/driver-core/src/main/java/com/datastax/driver/core/ConnectionsConfiguration.java deleted file mode 100644 index 4d0f74db098..00000000000 --- a/driver-core/src/main/java/com/datastax/driver/core/ConnectionsConfiguration.java +++ /dev/null @@ -1,51 +0,0 @@ -package com.datastax.driver.core; - -/** - * Handle all configuration related of the connections to the Cassandra hosts. - * - * This handle setting: - *

- * <ul>
- *   <li>low-level tcp configuration options (tcpNoDelay, keepAlive, ...).</li>
- *   <li>Cassandra binary protocol level configuration (compression).</li>
- *   <li>Connection pooling configurations.</li>
- * </ul>
    - */ -public class ConnectionsConfiguration { - - private final SocketOptions socketOptions; - private final ProtocolOptions protocolOptions; - private final PoolingOptions poolingOptions; - - ConnectionsConfiguration(Cluster.Manager manager) { - this.socketOptions = new SocketOptions(); - this.protocolOptions = new ProtocolOptions(manager); - this.poolingOptions = new PoolingOptions(manager); - } - - /** - * The socket options. - * - * @return the socket options. - */ - public SocketOptions getSocketOptions() { - return socketOptions; - } - - /** - * The protocol options. - * - * @return the protocol options. - */ - public ProtocolOptions getProtocolOptions() { - return protocolOptions; - } - - /** - * The pooling options. - * - * @return the pooling options. - */ - public PoolingOptions getPoolingOptions() { - return poolingOptions; - } -} diff --git a/driver-core/src/main/java/com/datastax/driver/core/ControlConnection.java b/driver-core/src/main/java/com/datastax/driver/core/ControlConnection.java index c3f23189906..c4bb9cba8b8 100644 --- a/driver-core/src/main/java/com/datastax/driver/core/ControlConnection.java +++ b/driver-core/src/main/java/com/datastax/driver/core/ControlConnection.java @@ -47,7 +47,7 @@ class ControlConnection implements Host.StateListener { private volatile boolean isShutdown; - public ControlConnection(Cluster.Manager manager, ClusterMetadata metadata) { + public ControlConnection(Cluster.Manager manager, Metadata metadata) { this.cluster = manager; this.balancingPolicy = new RoundRobinPolicy(); this.balancingPolicy.init(manager.getCluster(), metadata.allHosts()); @@ -297,7 +297,7 @@ private void refreshNodeListAndTokenMap(Connection connection) throws Connection cluster.metadata.rebuildTokenMap(partitioner, tokenMap); } - static boolean waitForSchemaAgreement(Connection connection, ClusterMetadata metadata) throws ConnectionException, BusyConnectionException, ExecutionException, InterruptedException { + static boolean waitForSchemaAgreement(Connection connection, Metadata metadata) throws ConnectionException, BusyConnectionException, ExecutionException, InterruptedException { long start = System.currentTimeMillis(); long elapsed = 0; @@ -335,6 +335,11 @@ static boolean waitForSchemaAgreement(Connection connection, ClusterMetadata met return false; } + boolean isOpen() { + Connection c = connectionRef.get(); + return c != null && !c.isClosed(); + } + public void onUp(Host host) { balancingPolicy.onUp(host); } diff --git a/driver-core/src/main/java/com/datastax/driver/core/HostConnectionPool.java b/driver-core/src/main/java/com/datastax/driver/core/HostConnectionPool.java index 8ccbf6baeba..600d602ad4d 100644 --- a/driver-core/src/main/java/com/datastax/driver/core/HostConnectionPool.java +++ b/driver-core/src/main/java/com/datastax/driver/core/HostConnectionPool.java @@ -57,7 +57,7 @@ public void run() { } private PoolingOptions options() { - return manager.configuration().getConnectionsConfiguration().getPoolingOptions(); + return manager.configuration().getPoolingOptions(); } public Connection borrowConnection(long timeout, TimeUnit unit) throws ConnectionException, TimeoutException { @@ -303,6 +303,10 @@ public void shutdown() { discardAvailableConnections(); } + public int opened() { + return open.get(); + } + private void discardAvailableConnections() { for (Connection connection : connections) { connection.close(); diff --git a/driver-core/src/main/java/com/datastax/driver/core/ClusterMetadata.java 
b/driver-core/src/main/java/com/datastax/driver/core/Metadata.java similarity index 99% rename from driver-core/src/main/java/com/datastax/driver/core/ClusterMetadata.java rename to driver-core/src/main/java/com/datastax/driver/core/Metadata.java index bd46c73d25a..4b035d47f36 100644 --- a/driver-core/src/main/java/com/datastax/driver/core/ClusterMetadata.java +++ b/driver-core/src/main/java/com/datastax/driver/core/Metadata.java @@ -14,9 +14,9 @@ /** * Keeps metadata on the connected cluster, including known nodes and schema definitions. */ -public class ClusterMetadata { +public class Metadata { - private static final Logger logger = LoggerFactory.getLogger(ClusterMetadata.class); + private static final Logger logger = LoggerFactory.getLogger(Metadata.class); private final Cluster.Manager cluster; volatile String clusterName; @@ -25,7 +25,7 @@ public class ClusterMetadata { private volatile TokenMap tokenMap; - ClusterMetadata(Cluster.Manager cluster) { + Metadata(Cluster.Manager cluster) { this.cluster = cluster; } diff --git a/driver-core/src/main/java/com/datastax/driver/core/Metrics.java b/driver-core/src/main/java/com/datastax/driver/core/Metrics.java new file mode 100644 index 00000000000..79bc37eacb0 --- /dev/null +++ b/driver-core/src/main/java/com/datastax/driver/core/Metrics.java @@ -0,0 +1,230 @@ +package com.datastax.driver.core; + +import java.util.HashSet; +import java.util.Set; + +import com.yammer.metrics.core.Counter; +import com.yammer.metrics.core.Gauge; +import com.yammer.metrics.core.MetricsRegistry; +import com.yammer.metrics.core.Timer; +import com.yammer.metrics.reporting.JmxReporter; + +/** + * Metrics exposed by the driver. + *

+ * The metrics exposed by this class use the Metrics
+ * library and you should refer to its documentation
+ * for details on how to handle the exposed metric objects.
+ * <p>
+ * By default, metrics are exposed through JMX, which is very useful for
+ * development and browsing, but for production environments you may want to
+ * have a look at the reporters
+ * provided by the Metrics library, which could be more efficient or better adapted.
+ */
+public class Metrics {
+
+    private final Cluster.Manager manager;
+    private final MetricsRegistry registry = new MetricsRegistry();
+    private final JmxReporter jmxReporter = new JmxReporter(registry);
+    private final Errors errors = new Errors();
+
+    private final Timer requests = registry.newTimer(Metrics.class, "requests");
+
+    private final Gauge<Integer> knownHosts = registry.newGauge(Metrics.class, "known-hosts", new Gauge<Integer>() {
+        @Override
+        public Integer value() {
+            return manager.metadata.allHosts().size();
+        }
+    });
+    private final Gauge<Integer> connectedTo = registry.newGauge(Metrics.class, "connected-to", new Gauge<Integer>() {
+        @Override
+        public Integer value() {
+            Set<Host> s = new HashSet<Host>();
+            for (Session session : manager.sessions)
+                s.addAll(session.manager.pools.keySet());
+            return s.size();
+        }
+    });
+    private final Gauge<Integer> openConnections = registry.newGauge(Metrics.class, "open-connections", new Gauge<Integer>() {
+        @Override
+        public Integer value() {
+            int value = manager.controlConnection.isOpen() ? 1 : 0;
+            for (Session session : manager.sessions)
+                for (HostConnectionPool pool : session.manager.pools.values())
+                    value += pool.opened();
+            return value;
+        }
+    });
+
+    Metrics(Cluster.Manager manager) {
+        this.manager = manager;
+        this.jmxReporter.start();
+    }
+
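A consumption sketch for these metrics (assumes a built {@code Cluster} named {@code cluster} and the Yammer metrics-core 2.x {@code Timer} API; not part of the patch):

    Metrics metrics = cluster.getMetrics(); // null when built withoutMetrics()
    if (metrics != null) {
        com.yammer.metrics.core.Timer requests = metrics.getRequestsTimer();
        System.out.printf("%d requests, mean latency %.2f ms, 1-min rate %.1f/s%n",
                          requests.count(), requests.mean(), requests.oneMinuteRate());
    }
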
+    /**
+     * The registry containing all metrics.
+     * <p>
+     * The metrics registry allows you to easily use the reporters that ship
+     * with Metrics
+     * or a custom-written one. For instance, you can easily export metrics to
+     * csv files using:
+     * <pre>
+     *     com.yammer.metrics.reporting.CsvReporter.enable(new File("measurements/"), 1, TimeUnit.SECONDS);
+     * </pre>
    + * + * @return the registry containing all metrics. + */ + public MetricsRegistry getRegistry() { + return registry; + } + + /** + * Metrics on the user requests performed on the Cluster. + *

+     * This metric exposes:
+     * <ul>
+     *   <li>the total number of requests.</li>
+     *   <li>the requests rate (in requests per second), including 1, 5 and 15 minute rates.</li>
+     *   <li>the mean, min and max latencies, as well as the latency at a given percentile.</li>
+     * </ul>
    + * + * @return a {@code Timer} metric object exposing the rate and latency for + * user requests. + */ + public Timer getRequestsTimer() { + return requests; + } + + /** + * An object regrouping metrics related to the errors encountered. + * + * @return an object regrouping metrics related to the errors encountered. + */ + public Errors getErrorMetrics() { + return errors; + } + + /** + * The number of Cassandra hosts currently known by the driver (whether + * they are currently considered up or down). + * + * @return the number of Cassandra hosts currently known by the driver. + */ + public Gauge getKnownHosts() { + return knownHosts; + } + + /** + * The number of Cassandra hosts the driver is currently connected to (i.e. + * have at least one connection opened to). + * + * @return the number of Cassandra hosts the driver is currently connected to. + */ + public Gauge getConnectedToHosts() { + return connectedTo; + } + + /** + * The total number of currently opened connections to Cassandra hosts. + * + * @return The total number of currently opened connections to Cassandra hosts. + */ + public Gauge getOpenConnections() { + return openConnections; + } + + /** + * Metrics on errors encountered. + */ + public class Errors { + + private final Counter connectionErrors = registry.newCounter(Errors.class, "connection-errors"); + + private final Counter writeTimeouts = registry.newCounter(Errors.class, "write-timeouts"); + private final Counter readTimeouts = registry.newCounter(Errors.class, "read-timeouts"); + private final Counter unavailables = registry.newCounter(Errors.class, "unavailables"); + + private final Counter otherErrors = registry.newCounter(Errors.class, "other-errors"); + + private final Counter retries = registry.newCounter(Errors.class, "retries"); + private final Counter ignores = registry.newCounter(Errors.class, "ignores"); + + /** + * The number of connection to Cassandra nodes errors. + *

+         * This represents the number of times a request to a Cassandra node
+         * has failed due to a connection problem. This thus also corresponds to
+         * how often the driver had to pick a fallback host for a request.
+         * <p>
+         * It is expected to get a few connection errors when a Cassandra node dies
+         * (or is stopped) but if that value grows continuously you likely have
+         * a problem.
+         *
+         * @return the number of errors connecting to Cassandra nodes.
+         */
+        public Counter getConnectionErrors() {
+            return connectionErrors;
+        }
+
+        /**
+         * The number of write requests that returned a timeout (independently
+         * of the final decision taken by the {@link RetryPolicy}).
+         *
+         * @return the number of write timeouts.
+         */
+        public Counter getWriteTimeouts() {
+            return writeTimeouts;
+        }
+
+        /**
+         * The number of read requests that returned a timeout (independently
+         * of the final decision taken by the {@link RetryPolicy}).
+         *
+         * @return the number of read timeouts.
+         */
+        public Counter getReadTimeouts() {
+            return readTimeouts;
+        }
+
+        /**
+         * The number of requests that returned an unavailable exception
+         * (independently of the final decision taken by the {@link RetryPolicy}).
+         *
+         * @return the number of unavailable exceptions.
+         */
+        public Counter getUnavailables() {
+            return unavailables;
+        }
+
+        /**
+         * The number of requests that returned an error not accounted for by
+         * another metric. This includes all types of invalid requests.
+         *
+         * @return the number of request errors not accounted for by another
+         * metric.
+         */
+        public Counter getOthers() {
+            return otherErrors;
+        }
+
+        /**
+         * The number of times a request was retried due to the {@link RetryPolicy}.
+         *
+         * @return the number of times a request was retried due to the {@link RetryPolicy}.
+         */
+        public Counter getRetries() {
+            return retries;
+        }
+
+        /**
+         * The number of times a request's timeout/unavailability was ignored
+         * due to the {@link RetryPolicy}.
+         *
+         * @return the number of times a request's timeout/unavailability was
+         * ignored due to the {@link RetryPolicy}.
+ */ + public Counter getIgnores() { + return ignores; + } + } +} diff --git a/driver-core/src/main/java/com/datastax/driver/core/PoolingOptions.java b/driver-core/src/main/java/com/datastax/driver/core/PoolingOptions.java index 7f774e55b99..610702f2dcc 100644 --- a/driver-core/src/main/java/com/datastax/driver/core/PoolingOptions.java +++ b/driver-core/src/main/java/com/datastax/driver/core/PoolingOptions.java @@ -36,7 +36,7 @@ public class PoolingOptions { private static final int DEFAULT_MAX_POOL_LOCAL = 8; private static final int DEFAULT_MAX_POOL_REMOTE = 2; - private final Cluster.Manager manager; + private volatile Cluster.Manager manager; private volatile int minSimultaneousRequestsForLocal = DEFAULT_MIN_REQUESTS; private volatile int minSimultaneousRequestsForRemote = DEFAULT_MIN_REQUESTS; @@ -50,7 +50,9 @@ public class PoolingOptions { private volatile int maxConnectionsForLocal = DEFAULT_MAX_POOL_LOCAL; private volatile int maxConnectionsForRemote = DEFAULT_MAX_POOL_REMOTE; - PoolingOptions(Cluster.Manager manager) { + public PoolingOptions() {} + + void register(Cluster.Manager manager) { this.manager = manager; } @@ -193,13 +195,13 @@ public PoolingOptions setCoreConnectionsPerHost(HostDistance distance, int coreC case LOCAL: int oldLocalCore = coreConnectionsForLocal; coreConnectionsForLocal = coreConnections; - if (oldLocalCore < coreConnectionsForLocal) + if (oldLocalCore < coreConnectionsForLocal && manager != null) manager.ensurePoolsSizing(); break; case REMOTE: int oldRemoteCore = coreConnectionsForRemote; coreConnectionsForRemote = coreConnections; - if (oldRemoteCore < coreConnectionsForRemote) + if (oldRemoteCore < coreConnectionsForRemote && manager != null) manager.ensurePoolsSizing(); break; default: diff --git a/driver-core/src/main/java/com/datastax/driver/core/PreparedStatement.java b/driver-core/src/main/java/com/datastax/driver/core/PreparedStatement.java index 62aab1fbf20..ac681c68dca 100644 --- a/driver-core/src/main/java/com/datastax/driver/core/PreparedStatement.java +++ b/driver-core/src/main/java/com/datastax/driver/core/PreparedStatement.java @@ -31,7 +31,7 @@ private PreparedStatement(ColumnDefinitions metadata, MD5Digest id, int[] routin this.routingKeyIndexes = routingKeyIndexes; } - static PreparedStatement fromMessage(ResultMessage.Prepared msg, ClusterMetadata clusterMetadata) { + static PreparedStatement fromMessage(ResultMessage.Prepared msg, Metadata clusterMetadata) { switch (msg.kind) { case PREPARED: ResultMessage.Prepared pmsg = (ResultMessage.Prepared)msg; diff --git a/driver-core/src/main/java/com/datastax/driver/core/ProtocolOptions.java b/driver-core/src/main/java/com/datastax/driver/core/ProtocolOptions.java index f00861446d5..75b0410a10f 100644 --- a/driver-core/src/main/java/com/datastax/driver/core/ProtocolOptions.java +++ b/driver-core/src/main/java/com/datastax/driver/core/ProtocolOptions.java @@ -5,12 +5,6 @@ */ public class ProtocolOptions { - private final Cluster.Manager manager; - - ProtocolOptions(Cluster.Manager manager) { - this.manager = manager; - } - /** * Compression supported by the Cassandra binary protocol. */ @@ -32,18 +26,43 @@ public String toString() { } }; + /** + * The default port for Cassandra native binary protocol: 9042. + */ + public static final int DEFAULT_PORT = 9042; + + private final int port; private volatile Compression compression = Compression.NONE; + private volatile Cluster.Manager manager; + /** - * The port to use to connect to the Cassandra hosts. - *
diff --git a/driver-core/src/main/java/com/datastax/driver/core/PreparedStatement.java b/driver-core/src/main/java/com/datastax/driver/core/PreparedStatement.java
index 62aab1fbf20..ac681c68dca 100644
--- a/driver-core/src/main/java/com/datastax/driver/core/PreparedStatement.java
+++ b/driver-core/src/main/java/com/datastax/driver/core/PreparedStatement.java
@@ -31,7 +31,7 @@ private PreparedStatement(ColumnDefinitions metadata, MD5Digest id, int[] routin
         this.routingKeyIndexes = routingKeyIndexes;
     }
 
-    static PreparedStatement fromMessage(ResultMessage.Prepared msg, ClusterMetadata clusterMetadata) {
+    static PreparedStatement fromMessage(ResultMessage.Prepared msg, Metadata clusterMetadata) {
         switch (msg.kind) {
             case PREPARED:
                 ResultMessage.Prepared pmsg = (ResultMessage.Prepared)msg;
diff --git a/driver-core/src/main/java/com/datastax/driver/core/ProtocolOptions.java b/driver-core/src/main/java/com/datastax/driver/core/ProtocolOptions.java
index f00861446d5..75b0410a10f 100644
--- a/driver-core/src/main/java/com/datastax/driver/core/ProtocolOptions.java
+++ b/driver-core/src/main/java/com/datastax/driver/core/ProtocolOptions.java
@@ -5,12 +5,6 @@
  */
 public class ProtocolOptions {
 
-    private final Cluster.Manager manager;
-
-    ProtocolOptions(Cluster.Manager manager) {
-        this.manager = manager;
-    }
-
     /**
      * Compression supported by the Cassandra binary protocol.
      */
@@ -32,18 +26,43 @@ public String toString() {
         }
     };
 
+    /**
+     * The default port for Cassandra native binary protocol: 9042.
+     */
+    public static final int DEFAULT_PORT = 9042;
+
+    private final int port;
     private volatile Compression compression = Compression.NONE;
 
+    private volatile Cluster.Manager manager;
+
     /**
-     * The port to use to connect to the Cassandra hosts.
-     *
-     * The port must be set at cluster creation time (using {@link Cluster.Builder#withPort}
-     * for instance) and cannot be changed afterwards.
+     * Creates a new {@code ProtocolOptions} instance using the {@code DEFAULT_PORT}.
+     */
+    public ProtocolOptions() {
+        this(DEFAULT_PORT);
+    }
+
+    /**
+     * Creates a new {@code ProtocolOptions} instance using the provided port.
+     *
+     * @param port the port to use for the binary protocol.
+     */
+    public ProtocolOptions(int port) {
+        this.port = port;
+    }
+
+    void register(Cluster.Manager manager) {
+        this.manager = manager;
+    }
+
+    /**
+     * The port used to connect to the Cassandra hosts.
      *
-     * @return the port to use to connect to the Cassandra hosts.
+     * @return the port used to connect to the Cassandra hosts.
      */
     public int getPort() {
-        return manager.port;
+        return port;
     }
 
     /**
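With the port now owned by ProtocolOptions rather than read from the Cluster.Manager, choosing a port becomes a plain constructor argument, fixed for the lifetime of the instance. For example:

    // Default port (9042) versus an explicit one.
    ProtocolOptions byDefault = new ProtocolOptions();     // uses DEFAULT_PORT
    ProtocolOptions custom    = new ProtocolOptions(9142); // e.g. a test cluster port
    assert byDefault.getPort() == ProtocolOptions.DEFAULT_PORT;
    assert custom.getPort() == 9142;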
diff --git a/driver-core/src/main/java/com/datastax/driver/core/RetryingCallback.java b/driver-core/src/main/java/com/datastax/driver/core/RetryingCallback.java
index fa6f08a4206..a053a51927f 100644
--- a/driver-core/src/main/java/com/datastax/driver/core/RetryingCallback.java
+++ b/driver-core/src/main/java/com/datastax/driver/core/RetryingCallback.java
@@ -23,6 +23,8 @@
 import org.apache.cassandra.exceptions.ReadTimeoutException;
 import org.apache.cassandra.exceptions.WriteTimeoutException;
 
+import com.yammer.metrics.core.TimerContext;
+
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -37,6 +39,7 @@ class RetryingCallback implements Connection.ResponseCallback {
 
     private final Session.Manager manager;
     private final Connection.ResponseCallback callback;
+    private final TimerContext timerContext;
 
     private final Iterator<Host> queryPlan;
     private volatile Host current;
@@ -52,6 +55,14 @@ public RetryingCallback(Session.Manager manager, Connection.ResponseCallback cal
         this.callback = callback;
 
         this.queryPlan = manager.loadBalancer.newQueryPlan(query);
+
+        this.timerContext = manager.configuration().isMetricsEnabled()
+                          ? metrics().getRequestsTimer().time()
+                          : null;
+    }
+
+    private Metrics metrics() {
+        return manager.cluster.manager.metrics;
     }
 
     public void sendRequest() {
@@ -61,7 +72,7 @@ public void sendRequest() {
             if (query(host))
                 return;
         }
-        callback.onException(null, new NoHostAvailableException(errors == null ? Collections.emptyMap() : errors));
+        setFinalException(null, new NoHostAvailableException(errors == null ? Collections.emptyMap() : errors));
     }
 
     private boolean query(Host host) {
@@ -73,12 +84,14 @@ private boolean query(Host host) {
         try {
             // Note: it is not perfectly correct to use getConnectTimeoutMillis() here, but
             // until we provide a fancier way to control query timeouts, it's not a bad solution either
-            connection = currentPool.borrowConnection(manager.configuration().getConnectionsConfiguration().getSocketOptions().getConnectTimeoutMillis(), TimeUnit.MILLISECONDS);
+            connection = currentPool.borrowConnection(manager.configuration().getSocketOptions().getConnectTimeoutMillis(), TimeUnit.MILLISECONDS);
             current = host;
             connection.write(this);
             return true;
         } catch (ConnectionException e) {
             // If we have any problem with the connection, move to the next node.
+            if (manager.configuration().isMetricsEnabled())
+                metrics().getErrorMetrics().getConnectionErrors().inc();
             if (connection != null)
                 currentPool.returnConnection(connection);
             logError(host.getAddress(), e.getMessage());
@@ -144,6 +157,18 @@ else if (request instanceof ExecuteMessage) {
         return request;
     }
 
+    private void setFinalResult(Connection connection, Message.Response response) {
+        if (timerContext != null)
+            timerContext.stop();
+        callback.onSet(connection, response);
+    }
+
+    private void setFinalException(Connection connection, Exception exception) {
+        if (timerContext != null)
+            timerContext.stop();
+        callback.onException(connection, exception);
+    }
+
     public void onSet(Connection connection, Message.Response response) {
 
         if (currentPool == null) {
@@ -156,7 +181,7 @@ public void onSet(Connection connection, Message.Response response) {
         try {
             switch (response.type) {
                 case RESULT:
-                    callback.onSet(connection, response);
+                    setFinalResult(connection, response);
                     break;
                 case ERROR:
                     ErrorMessage err = (ErrorMessage)response;
@@ -165,29 +190,43 @@ public void onSet(Connection connection, Message.Response response) {
                     switch (err.error.code()) {
                         case READ_TIMEOUT:
                             assert err.error instanceof ReadTimeoutException;
+                            if (manager.configuration().isMetricsEnabled())
+                                metrics().getErrorMetrics().getReadTimeouts().inc();
+
                             ReadTimeoutException rte = (ReadTimeoutException)err.error;
                             ConsistencyLevel rcl = ConsistencyLevel.from(rte.consistency);
                             retry = retryPolicy.onReadTimeout(rcl, rte.blockFor, rte.received, rte.dataPresent, queryRetries);
                             break;
                         case WRITE_TIMEOUT:
                             assert err.error instanceof WriteTimeoutException;
+                            if (manager.configuration().isMetricsEnabled())
+                                metrics().getErrorMetrics().getWriteTimeouts().inc();
+
                             WriteTimeoutException wte = (WriteTimeoutException)err.error;
                             ConsistencyLevel wcl = ConsistencyLevel.from(wte.consistency);
                             retry = retryPolicy.onWriteTimeout(wcl, WriteType.from(wte.writeType), wte.blockFor, wte.received, queryRetries);
                             break;
                         case UNAVAILABLE:
                             assert err.error instanceof UnavailableException;
+                            if (manager.configuration().isMetricsEnabled())
+                                metrics().getErrorMetrics().getUnavailables().inc();
+
                             UnavailableException ue = (UnavailableException)err.error;
                             ConsistencyLevel ucl = ConsistencyLevel.from(ue.consistency);
                             retry = retryPolicy.onUnavailable(ucl, ue.required, ue.alive, queryRetries);
                             break;
                         case OVERLOADED:
                             // Try another node
+                            logger.warn("Host {} is overloaded, trying next host.", connection.address);
+                            if (manager.configuration().isMetricsEnabled())
+                                metrics().getErrorMetrics().getOthers().inc();
                             retry(false, null);
                             return;
                         case IS_BOOTSTRAPPING:
                             // Try another node
                             logger.error("Query sent to {} but it is bootstrapping. This shouldn't happen but trying next host.", connection.address);
+                            if (manager.configuration().isMetricsEnabled())
+                                metrics().getErrorMetrics().getOthers().inc();
                             retry(false, null);
                             return;
                         case UNPREPARED:
@@ -198,7 +237,7 @@ public void onSet(Connection connection, Message.Response response) {
                                 // This shouldn't happen
                                 String msg = String.format("Tried to execute unknown prepared query %s", pqnf.id);
                                 logger.error(msg);
-                                callback.onException(connection, new DriverInternalError(msg));
+                                setFinalException(connection, new DriverInternalError(msg));
                                 return;
                             }
 
@@ -220,31 +259,40 @@ public void onSet(Connection connection, Message.Response response) {
                                 retry(false, null);
                                 return;
                             }
+                            break;
+                        default:
+                            if (manager.configuration().isMetricsEnabled())
+                                metrics().getErrorMetrics().getOthers().inc();
+                            break;
                     }
 
                     if (retry == null)
-                        callback.onSet(connection, response);
+                        setFinalResult(connection, response);
                     else {
                         switch (retry.getType()) {
                             case RETRY:
                                 ++queryRetries;
+                                if (manager.configuration().isMetricsEnabled())
+                                    metrics().getErrorMetrics().getRetries().inc();
                                 retry(true, retry.getRetryConsistencyLevel());
                                 break;
                             case RETHROW:
-                                callback.onSet(connection, response);
+                                setFinalResult(connection, response);
                                 break;
                             case IGNORE:
-                                callback.onSet(connection, new ResultMessage.Void());
+                                if (manager.configuration().isMetricsEnabled())
+                                    metrics().getErrorMetrics().getIgnores().inc();
+                                setFinalResult(connection, new ResultMessage.Void());
                                 break;
                         }
                     }
                     break;
                 default:
-                    callback.onSet(connection, response);
+                    setFinalResult(connection, response);
                     break;
             }
         } catch (Exception e) {
-            callback.onException(connection, e);
+            setFinalException(connection, e);
         }
     }
 
@@ -260,13 +308,15 @@ public void onException(Connection connection, Exception exception) {
         }
 
         if (exception instanceof ConnectionException) {
+            if (manager.configuration().isMetricsEnabled())
+                metrics().getErrorMetrics().getConnectionErrors().inc();
             ConnectionException ce = (ConnectionException)exception;
            logError(ce.address, ce.getMessage());
            retry(false, null);
            return;
        }
 
-        callback.onException(connection, exception);
+        setFinalException(connection, exception);
    }
 }
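The retry decisions consumed above come from the RetryPolicy callbacks (onReadTimeout, onWriteTimeout, onUnavailable), whose argument lists can be read off the call sites in this hunk. As a rough sketch of a policy driving this code, assuming the RetryDecision factory methods implied by retry.getType() and retry.getRetryConsistencyLevel() (the exact interface and type names are not part of this patch):

    // Hedged sketch of a RetryPolicy: retry a read timeout once when enough
    // replicas answered but the data was missing; rethrow everything else.
    public class OneReadRetryPolicy implements RetryPolicy {
        public RetryDecision onReadTimeout(ConsistencyLevel cl, int blockFor,
                                           int received, boolean dataPresent, int nbRetry) {
            if (nbRetry == 0 && received >= blockFor && !dataPresent)
                return RetryDecision.retry(cl); // assumed factory method
            return RetryDecision.rethrow();     // assumed factory method
        }
        public RetryDecision onWriteTimeout(ConsistencyLevel cl, WriteType type,
                                            int blockFor, int received, int nbRetry) {
            return RetryDecision.rethrow();
        }
        public RetryDecision onUnavailable(ConsistencyLevel cl, int required,
                                           int alive, int nbRetry) {
            return RetryDecision.rethrow();
        }
    }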
diff --git a/driver-core/src/main/java/com/datastax/driver/core/Session.java b/driver-core/src/main/java/com/datastax/driver/core/Session.java
index 3575896093f..e87f67c66be 100644
--- a/driver-core/src/main/java/com/datastax/driver/core/Session.java
+++ b/driver-core/src/main/java/com/datastax/driver/core/Session.java
@@ -234,7 +234,7 @@ public Connection.Factory connectionFactory() {
             return cluster.manager.connectionFactory;
         }
 
-        public Cluster.Configuration configuration() {
+        public Configuration configuration() {
             return cluster.manager.configuration;
         }
 
diff --git a/driver-core/src/main/java/com/datastax/driver/core/SocketOptions.java b/driver-core/src/main/java/com/datastax/driver/core/SocketOptions.java
index 957a0762496..261ff511341 100644
--- a/driver-core/src/main/java/com/datastax/driver/core/SocketOptions.java
+++ b/driver-core/src/main/java/com/datastax/driver/core/SocketOptions.java
@@ -16,59 +16,71 @@ public class SocketOptions {
     private volatile Integer receiveBufferSize;
     private volatile Integer sendBufferSize;
 
+    /**
+     * Creates a new {@code SocketOptions} instance with default values.
+     */
+    public SocketOptions() {}
+
     public int getConnectTimeoutMillis() {
         return connectTimeoutMillis;
     }
 
-    public void setConnectTimeoutMillis(int connectTimeoutMillis) {
+    public SocketOptions setConnectTimeoutMillis(int connectTimeoutMillis) {
         this.connectTimeoutMillis = connectTimeoutMillis;
+        return this;
     }
 
     public Boolean getKeepAlive() {
         return keepAlive;
     }
 
-    public void setKeepAlive(boolean keepAlive) {
+    public SocketOptions setKeepAlive(boolean keepAlive) {
         this.keepAlive = keepAlive;
+        return this;
     }
 
     public Boolean getReuseAddress() {
         return reuseAddress;
     }
 
-    public void setReuseAddress(boolean reuseAddress) {
+    public SocketOptions setReuseAddress(boolean reuseAddress) {
         this.reuseAddress = reuseAddress;
+        return this;
     }
 
     public Integer getSoLinger() {
         return soLinger;
     }
 
-    public void setSoLinger(int soLinger) {
+    public SocketOptions setSoLinger(int soLinger) {
         this.soLinger = soLinger;
+        return this;
     }
 
     public Boolean getTcpNoDelay() {
         return tcpNoDelay;
     }
 
-    public void setTcpNoDelay(boolean tcpNoDelay) {
+    public SocketOptions setTcpNoDelay(boolean tcpNoDelay) {
         this.tcpNoDelay = tcpNoDelay;
+        return this;
     }
 
     public Integer getReceiveBufferSize() {
         return receiveBufferSize;
     }
 
-    public void setReceiveBufferSize(int receiveBufferSize) {
+    public SocketOptions setReceiveBufferSize(int receiveBufferSize) {
         this.receiveBufferSize = receiveBufferSize;
+        return this;
     }
 
     public Integer getSendBufferSize() {
         return sendBufferSize;
     }
 
-    public void setSendBufferSize(int sendBufferSize) {
+    public SocketOptions setSendBufferSize(int sendBufferSize) {
         this.sendBufferSize = sendBufferSize;
+        return this;
     }
 }
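Returning this from the setters turns SocketOptions into a small fluent builder, so several options can be set in one expression:

    // Fluent configuration enabled by the setters now returning `this`.
    SocketOptions socket = new SocketOptions()
        .setConnectTimeoutMillis(10000)
        .setKeepAlive(true)
        .setTcpNoDelay(true)
        .setReceiveBufferSize(64 * 1024);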
diff --git a/driver-core/src/main/java/com/datastax/driver/core/policies/Policies.java b/driver-core/src/main/java/com/datastax/driver/core/policies/Policies.java
index 026fe84b18a..96f5a5d9615 100644
--- a/driver-core/src/main/java/com/datastax/driver/core/policies/Policies.java
+++ b/driver-core/src/main/java/com/datastax/driver/core/policies/Policies.java
@@ -31,6 +31,10 @@ public class Policies {
     private final ReconnectionPolicy reconnectionPolicy;
     private final RetryPolicy retryPolicy;
 
+    public Policies() {
+        this(DEFAULT_LOAD_BALANCING_POLICY, DEFAULT_RECONNECTION_POLICY, DEFAULT_RETRY_POLICY);
+    }
+
     /**
      * Creates a new {@code Policies} object using the provided policies.
      *
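The new no-arg constructor is shorthand for passing the three defaults explicitly, keeping caller code short when no policy needs overriding:

    // Equivalent ways to build a Policies object after this change:
    Policies allDefaults = new Policies();
    Policies sameThing   = new Policies(Policies.DEFAULT_LOAD_BALANCING_POLICY,
                                        Policies.DEFAULT_RECONNECTION_POLICY,
                                        Policies.DEFAULT_RETRY_POLICY);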
diff --git a/driver-core/src/main/java/com/datastax/driver/core/policies/TokenAwarePolicy.java b/driver-core/src/main/java/com/datastax/driver/core/policies/TokenAwarePolicy.java
index a759aa1dff8..050c7e8da20 100644
--- a/driver-core/src/main/java/com/datastax/driver/core/policies/TokenAwarePolicy.java
+++ b/driver-core/src/main/java/com/datastax/driver/core/policies/TokenAwarePolicy.java
@@ -19,7 +19,7 @@
  *
  *   • the iterator returned by the {@code newQueryPlan} method will first
  * return the {@code LOCAL} replicas for the query (based on {@link Query#getRoutingKey})
  * if possible (i.e. if the query {@code getRoutingKey} method
- * doesn't return {@code null} and if {@link ClusterMetadata#getReplicas}
+ * doesn't return {@code null} and if {@link Metadata#getReplicas}
  * returns a non-empty set of replicas for that partition key). If no
  * local replica can be either found or successfully contacted, the rest
  * of the query plan will fall back to the plan of the child policy.
@@ -34,7 +34,7 @@ public class TokenAwarePolicy implements LoadBalancingPolicy {
 
     private final LoadBalancingPolicy childPolicy;
 
-    private ClusterMetadata clusterMetadata;
+    private Metadata clusterMetadata;
 
     /**
      * Creates a new {@code TokenAware} policy that wraps the provided child
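As a usage sketch, token awareness is layered on top of another policy by wrapping it. RoundRobinPolicy is assumed as the child here; any LoadBalancingPolicy would do:

    // Hedged sketch: wrap a child policy so replicas for the routing key
    // are tried first, falling back to the child's plan otherwise.
    LoadBalancingPolicy policy = new TokenAwarePolicy(new RoundRobinPolicy());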
diff --git a/driver-examples/stress/pom.xml b/driver-examples/stress/pom.xml
index e2c88baacd4..13623f326f3 100644
--- a/driver-examples/stress/pom.xml
+++ b/driver-examples/stress/pom.xml
@@ -21,7 +21,7 @@
             <groupId>com.yammer.metrics</groupId>
             <artifactId>metrics-core</artifactId>
-            <version>2.1.2</version>
+            <version>2.2.0</version>
diff --git a/driver-examples/stress/src/main/java/com/datastax/driver/stress/Stress.java b/driver-examples/stress/src/main/java/com/datastax/driver/stress/Stress.java
index 3294b0b922c..663fab0cf60 100644
--- a/driver-examples/stress/src/main/java/com/datastax/driver/stress/Stress.java
+++ b/driver-examples/stress/src/main/java/com/datastax/driver/stress/Stress.java
@@ -31,6 +31,7 @@ private static void printHelp(OptionParser parser, Collection generators
         System.out.println("Usage: stress [