aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorTschaen, Brendan <ctschaen@att.com>2018-10-16 15:13:32 -0400
committerTschaen, Brendan <ctschaen@att.com>2018-10-16 15:15:16 -0400
commit73f8de325b31d350883f6752fb04d63c41112e8f (patch)
treea786305e313d8cdc040b50f995ed9342173ee571
parent389265e6342302ce5a8db5d4a3cc215e2c24c97a (diff)
Initial commit
Change-Id: I510baf73e4d35b651fb04e5b8e038dacb6a5a130 Issue-ID: MUSIC-149 Signed-off-by: Tschaen, Brendan <ctschaen@att.com>
-rw-r--r--LICENSE201
-rwxr-xr-xREADME.md160
-rw-r--r--docs/histo_graph.svg2
-rw-r--r--docs/redo_arch.svg2
-rw-r--r--docs/specs.md676
-rw-r--r--docs/uml.svg2
-rwxr-xr-xpom.xml162
-rw-r--r--src/main/java/com/att/research/exceptions/MDBCServiceException.java88
-rw-r--r--src/main/java/com/att/research/exceptions/QueryException.java90
-rw-r--r--src/main/java/com/att/research/logging/EELFLoggerDelegate.java339
-rw-r--r--src/main/java/com/att/research/logging/format/AppMessages.java156
-rw-r--r--src/main/java/com/att/research/logging/format/ErrorSeverity.java37
-rw-r--r--src/main/java/com/att/research/logging/format/ErrorTypes.java44
-rw-r--r--src/main/java/com/att/research/mdbc/ArchiveProcess.java43
-rw-r--r--src/main/java/com/att/research/mdbc/Configuration.java18
-rw-r--r--src/main/java/com/att/research/mdbc/DatabaseOperations.java443
-rw-r--r--src/main/java/com/att/research/mdbc/DatabasePartition.java190
-rw-r--r--src/main/java/com/att/research/mdbc/LockId.java46
-rw-r--r--src/main/java/com/att/research/mdbc/MDBCUtils.java62
-rw-r--r--src/main/java/com/att/research/mdbc/MdbcCallableStatement.java738
-rw-r--r--src/main/java/com/att/research/mdbc/MdbcConnection.java419
-rw-r--r--src/main/java/com/att/research/mdbc/MdbcPreparedStatement.java743
-rw-r--r--src/main/java/com/att/research/mdbc/MdbcServer.java162
-rw-r--r--src/main/java/com/att/research/mdbc/MdbcServerLogic.java312
-rw-r--r--src/main/java/com/att/research/mdbc/MdbcStatement.java416
-rwxr-xr-xsrc/main/java/com/att/research/mdbc/MusicSqlManager.java300
-rwxr-xr-xsrc/main/java/com/att/research/mdbc/ProxyStatement.java1262
-rw-r--r--src/main/java/com/att/research/mdbc/Range.java34
-rw-r--r--src/main/java/com/att/research/mdbc/RedoRow.java29
-rw-r--r--src/main/java/com/att/research/mdbc/StateManager.java205
-rwxr-xr-xsrc/main/java/com/att/research/mdbc/TableInfo.java75
-rw-r--r--src/main/java/com/att/research/mdbc/configurations/NodeConfiguration.java71
-rw-r--r--src/main/java/com/att/research/mdbc/configurations/TablesConfiguration.java180
-rw-r--r--src/main/java/com/att/research/mdbc/configurations/config-0.json16
-rw-r--r--src/main/java/com/att/research/mdbc/configurations/ranges.json14
-rw-r--r--src/main/java/com/att/research/mdbc/configurations/tableConfiguration.json19
-rw-r--r--src/main/java/com/att/research/mdbc/examples/EtdbTestClient.java125
-rwxr-xr-xsrc/main/java/com/att/research/mdbc/mixins/Cassandra2Mixin.java287
-rwxr-xr-xsrc/main/java/com/att/research/mdbc/mixins/CassandraMixin.java1288
-rwxr-xr-xsrc/main/java/com/att/research/mdbc/mixins/DBInterface.java91
-rwxr-xr-xsrc/main/java/com/att/research/mdbc/mixins/MixinFactory.java125
-rwxr-xr-xsrc/main/java/com/att/research/mdbc/mixins/MusicConnector.java124
-rwxr-xr-xsrc/main/java/com/att/research/mdbc/mixins/MusicInterface.java178
-rw-r--r--src/main/java/com/att/research/mdbc/mixins/MusicMixin.java249
-rwxr-xr-xsrc/main/java/com/att/research/mdbc/mixins/MySQLMixin.java784
-rw-r--r--src/main/java/com/att/research/mdbc/mixins/Operation.java31
-rw-r--r--src/main/java/com/att/research/mdbc/mixins/OperationType.java5
-rw-r--r--src/main/java/com/att/research/mdbc/mixins/PartitionInformation.java19
-rw-r--r--src/main/java/com/att/research/mdbc/mixins/RedoHistoryElement.java15
-rw-r--r--src/main/java/com/att/research/mdbc/mixins/RedoRecordId.java15
-rw-r--r--src/main/java/com/att/research/mdbc/mixins/StagingTable.java50
-rw-r--r--src/main/java/com/att/research/mdbc/mixins/TablePartitionInformation.java15
-rw-r--r--src/main/java/com/att/research/mdbc/mixins/TitReference.java12
-rw-r--r--src/main/java/com/att/research/mdbc/mixins/TransactionInformationElement.java19
-rw-r--r--src/main/java/com/att/research/mdbc/mixins/TxCommitProgress.java206
-rwxr-xr-xsrc/main/java/com/att/research/mdbc/mixins/Utils.java220
-rwxr-xr-xsrc/main/java/com/att/research/mdbc/mixins/package-info.java47
-rwxr-xr-xsrc/main/java/com/att/research/mdbc/package-info.java87
-rw-r--r--src/main/java/com/att/research/mdbc/tests/ConnectionTest.java419
-rwxr-xr-xsrc/main/java/com/att/research/mdbc/tests/MAIN.java106
-rwxr-xr-xsrc/main/java/com/att/research/mdbc/tests/Test.java105
-rwxr-xr-xsrc/main/java/com/att/research/mdbc/tests/Test_Delete.java70
-rwxr-xr-xsrc/main/java/com/att/research/mdbc/tests/Test_Insert.java94
-rwxr-xr-xsrc/main/java/com/att/research/mdbc/tests/Test_Transactions.java74
-rwxr-xr-xsrc/main/java/com/att/research/mdbc/tests/package-info.java165
-rw-r--r--src/main/java/com/att/research/mdbc/tools/CreateNodeConfigurations.java71
-rw-r--r--src/main/java/com/att/research/mdbc/tools/CreatePartition.java66
-rwxr-xr-xsrc/main/javadoc/overview.html37
-rwxr-xr-xsrc/main/resources/META-INF/services/java.sql.Driver1
-rwxr-xr-xsrc/main/resources/log4j.properties14
-rw-r--r--src/main/resources/logback.xml370
-rwxr-xr-xsrc/main/resources/mdbc.properties12
-rw-r--r--src/main/resources/mdbc_driver.properties13
-rw-r--r--src/main/resources/music.properties8
-rwxr-xr-xsrc/main/resources/tests.json163
-rwxr-xr-xsrc/main/shell/mk_jboss_module57
-rwxr-xr-xsrc/main/shell/run_h2_server27
-rw-r--r--src/test/java/com/att/research/mdbc/MDBCUtilsTest.java71
-rwxr-xr-xsrc/test/java/com/att/research/mdbc/test/ALLTESTS.java14
-rwxr-xr-xsrc/test/java/com/att/research/mdbc/test/BasicTest.java77
-rwxr-xr-xsrc/test/java/com/att/research/mdbc/test/CrossSiteTest.java447
-rwxr-xr-xsrc/test/java/com/att/research/mdbc/test/TestCommon.java25
-rwxr-xr-xsrc/test/java/com/att/research/mdbc/test/TransactionTest.java164
-rwxr-xr-xsrc/test/java/org/openecomp/sdnc/sli/resource/dblib/CachedDataSource.java7
-rwxr-xr-xsrc/test/java/org/openecomp/sdnc/sli/resource/dblib/DBResourceManager.java87
-rwxr-xr-xsrc/test/java/org/openecomp/sdnc/sli/resource/dblib/StressTest.java225
-rwxr-xr-xsrc/test/resources/dblib.properties9
-rwxr-xr-xsrc/test/resources/simplelogger.properties34
88 files changed, 14780 insertions, 0 deletions
diff --git a/LICENSE b/LICENSE
new file mode 100644
index 0000000..8dada3e
--- /dev/null
+++ b/LICENSE
@@ -0,0 +1,201 @@
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "{}"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright {yyyy} {name of copyright owner}
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/README.md b/README.md
new file mode 100755
index 0000000..49f750d
--- /dev/null
+++ b/README.md
@@ -0,0 +1,160 @@
+ETDB
+====
+
+To enable edge computing in its full capacity, a crucial requirement is to manage the state of edge applications, preferably in database that provides the full features of SQL including joins and transactions. The key challenge here is to provide a replicated database for the edge that can scale to thousands of geo-distributed nodes. Existing solutions either provide semantics that are too weak (PostgreSQL replicates asynchronously) or too strong and hence expensive to realize in a geo-distributed network with its WAN latencies and complex failure modes (MariaDb, Spanner, provide full transactionality). Inspired by entry consistency in shared memory systems, wherein only the lock holder for an object obtains sequential consistency for the object, we define the notion of an entry transactional database, which is a novel partitioned database in which only the “owner” of a partition obtains full ACID transactionality. In this work, we define the semantics of an entry transactional database, describe the hard challenges faced in building it and present a novel middleware called mdbc that combines existing SQL databases with an underlying geo-distributed entry consistent store to provide entry transactional semantics. Further, we present crucial use cases such as a federated regional controller for the network control plane and a state management service for edge mobility enabled by entry transactionality.
+
+## Building ETDB
+
+ETDB is built with Maven. This directory contains two pom.xml files.
+The first (*pom.xml*) will build a jar file to be used by applications wishing to use the
+ETDB JDBC driver.
+The second (*pom-h2server.xml*) is used to build the special code that needs to be loaded
+into an H2 server, when running ETDB against a copy of H2 running as a server.
+
+### Building the JBoss ETDB Module
+
+There is a shell script (located in `src/main/shell/mk_jboss_module`) which, when run in
+the mdbc source directory, will create a tar file `target/mdbc-jboss-module.tar` which can
+be used as a JBoss module. This tar file should be installed by un-taring it in the
+$JBOSS_DIR/modules directory on the JBoss server.
+
+## Using ETDB
+
+This package provides a JDBC driver that can be used to mirror the contents of a database
+to and from Cassandra. The mirroring occurs as a side effect of execute() statements against
+a JDBC connection, and triggers placed in the database to catch database modifications.
+The initial implementation is written to support H2, MySQL, and MariaDB databases.
+
+This JDBC driver will intercept all table creations, SELECTs, INSERTs, DELETEs, and UPDATEs
+made to the underlying database, and make sure they are copied to Cassandra.
+In addition, for every table XX that is created, another table DIRTY\_XX will be created to
+communicate the existence of dirty rows to other Cassandra replicas (with the Cassandra2
+Mixin, the table is called DIRTY\_\_\_\_ and there is only one table).
+Dirty rows will be copied, as needed back into the database from Cassandra before any SELECT.
+
+### To use directly with JDBC
+
+1. Add this jar, and all dependent jars to your CLASSPATH.
+2. Rewrite your JDBC URLs from jdbc:_yourdb_:... to jdbc:mdbc:....
+3. If you supply properties to the DriverManager.getConnection(String, Properties) call,
+ use the properties defined below to control behavior of the proxy.
+4. Load the driver using the following call:
+ Class.forName("com.att.research.mdbc.ProxyDriver");
+
+The following properties can be passed to the JDBC DriverManager.getConnection(String, Properties)
+call to influence how ETDB works.
+
+| Property Name | Property Value | Default Value |
+|--------------------|--------------------------------------------------------------------------------|---------------|
+| MDBC\_DB\_MIXIN | The mixin name to use to select the database mixin to use for this connection. | mysql |
+| MDBC\_MUSIC\_MIXIN | The mixin name to use to select the MUSIC mixin to use for this connection. | cassandra2 |
+| myid | The ID of this replica in the collection of replicas sharing the same tables. | 0 |
+| replicas | A comma-separated list of replica names for the collection of replicas sharing the same tables. | the value of myid |
+| music\_keyspace | The keyspace name to use in Cassandra for all tables created by this instance of ETDB. | mdbc |
+| music\_address | The IP address to use to connect to Cassandra. | localhost |
+| music\_rfactor | The replication factor to use for the new keyspace that is created. | 2 |
+| disabled | If set to true the mirroring is completely disabled; this is the equivalent of using the database driver directly. | false |
+
+The values of the mixin properties may be:
+
+| Property Name | Property Value | Purpose |
+|--------------------|----------------|---------------|
+| MDBC\_DB\_MIXIN | h2 | This mixin provides access to either an in-memory, or a local (file-based) version of the H2 database. |
+| MDBC\_DB\_MIXIN | h2server | This mixin provides access to a copy of the H2 database running as a server. Because the server needs special Java classes in order to handle certain TRIGGER actions, the server must be set up in a special way (see below). |
+| MDBC\_DB\_MIXIN | mysql | This mixin provides access to MySQL or MariaDB (10.2+) running on a remote server. |
+| MDBC\_MUSIC\_MIXIN | cassandra | A Cassandra based persistence layer (without any of the table locking that MUSIC normally provides). |
+| MDBC\_MUSIC\_MIXIN | cassandra2 | Similar to the _cassandra_ mixin, but stores all dirty row information in one table, rather than one table per real table. |
+
+### To Define a JBoss DataSource
+
+The following code snippet can be used as a guide when setting up a JBoss DataSource.
+This snippet goes in the JBoss *service.xml* file. The connection-property tags may
+need to be added/modified for your purposes. See the table above for names and values for
+these tags.
+
+```
+<datasources>
+ <datasource jta="true" jndi-name="java:jboss/datasources/ProcessEngine" pool-name="ProcessEngine" enabled="true" use-java-context="true" use-ccm="true">
+ <connection-url>jdbc:mdbc:/opt/jboss-eap-6.2.4/standalone/camunda-h2-dbs/process-engine;DB_CLOSE_DELAY=-1;MVCC=TRUE;DB_CLOSE_ON_EXIT=FALSE</connection-url>
+ <connection-property name="music_keyspace">
+ camunda
+ </connection-property>
+ <driver>mdbc</driver>
+ <security>
+ <user-name>sa</user-name>
+ <password>sa</password>
+ </security>
+ </datasource>
+ <drivers>
+ <driver name="mdbc" module="com.att.research.mdbc">
+ <driver-class>com.att.research.mdbc.ProxyDriver</driver-class>
+ </driver>
+ </drivers>
+</datasources>
+```
+
+Note: This assumes that you have built and installed the com.att.research.mdbc module within JBoss.
+
+### To Define a Tomcat DataSource Resource
+
+The following code snippet can be used as a guide when setting up a Tomcat DataSource resource.
+This snippet goes in the Tomcat *server.xml* file. As with the JBoss DataSource, you will
+probably need to make changes to the _connectionProperties_ attribute.
+
+```
+<Resource name="jdbc/ProcessEngine"
+ auth="Container"
+ type="javax.sql.DataSource"
+ factory="org.apache.tomcat.jdbc.pool.DataSourceFactory"
+ uniqueResourceName="process-engine"
+ driverClassName="com.att.research.mdbc.ProxyDriver"
+ url="jdbc:mdbc:./camunda-h2-dbs/process-engine;MVCC=TRUE;TRACE_LEVEL_FILE=0;DB_CLOSE_ON_EXIT=FALSE"
+ connectionProperties="myid=0;replicas=0,1,2;music_keyspace=camunda;music_address=localhost"
+ username="sa"
+ password="sa"
+ maxActive="20"
+ minIdle="5" />
+```
+
+## Databases Supported
+
+Currently, the following databases are supported with ETDB:
+
+* H2: The `H2Mixin` mixin is used when H2 is used with an in-memory (`jdbc:h2:mem:...`)
+or local file based (`jdbc:h2:path_to_file`) database.
+
+* H2 (Server): The `H2ServerMixin` mixin is used when H2 is used with an H2 server (`jdbc:h2:tcp:...`).
+
+* MySQL: The `MySQLMixin` mixin is used for MySQL.
+
+* MariaDB: The `MySQLMixin` mixin is used for MariaDB, which is functionally identical to MySQL.
+
+## Testing Mixin Combinations
+
+The files under `src/main/java/com/att/research/mdbc/tests` can be used to test various ETDB
+operations with various combinations of Mixins. The tests are controlled via the file
+`src/main/resources/tests.json`. More details are available in the javadoc for this package.
+
+## Limitations of ETDB
+
+* The `java.sql.Statement.executeBatch()` method is not supported by ETDB.
+It is not prohibited either; if you use this, your results will be unpredictable (and probably wrong).
+
+* When used with a DB server, there is some delay as dirty row information is copied
+from a table in the database, to the dirty table in Cassandra. This opens a window
+during which all sorts of mischief may occur.
+
+* ETDB *only* copies the results of SELECTs, INSERTs, DELETEs, and UPDATEs. Other database
+operations must be performed outside of the purview of ETDB. In particular, CREATE-ing or
+DROP-ing tables or databases must be done individually on each database instance.
+
+* Some of the table definitions may need adjusting depending upon the variables of your use
+of ETDB. For example, the MySQL mixin assumes (in its definition of the ETDB_TRANSLOG table)
+that all table names will be no more than 255 bytes, and that tables rows (expressed in JSON)
+will be no longer than 512 bytes. If this is not true, you should adjust, edit, and recompile.
+
+* ETDB is limited to only data types that can be easily translated to a Cassandra equivalent;
+e.g. BIGINT, BOOLEAN, BLOB, DOUBLE, INT, TIMESTAMP, VARCHAR
+
+* To find the data types that your database is currently using run the following command:
+SELECT DISTINCT DATA_TYPE FROM INFORMATION_SCHEMA.COLUMNS WHERE TABLE_SCHEMA='<your db name here>';
diff --git a/docs/histo_graph.svg b/docs/histo_graph.svg
new file mode 100644
index 0000000..e664aaf
--- /dev/null
+++ b/docs/histo_graph.svg
@@ -0,0 +1,2 @@
+<!DOCTYPE svg PUBLIC "-//W3C//DTD SVG 1.1//EN" "http://www.w3.org/Graphics/SVG/1.1/DTD/svg11.dtd">
+<svg xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" width="542px" height="271px" version="1.1"><defs/><g transform="translate(0.5,0.5)"><ellipse cx="22" cy="131" rx="20" ry="20" fill="#496e7a" stroke="#ffffff" stroke-width="4" pointer-events="none"/><ellipse cx="280" cy="131" rx="20" ry="20" fill="#496e7a" stroke="#ffffff" stroke-width="4" pointer-events="none"/><path d="M 42 131 L 42 131" fill="none" stroke="#00a8e0" stroke-width="3" stroke-miterlimit="10" pointer-events="none"/><path d="M 42 131 L 42 131 L 42 131 L 42 131 Z" fill="#00a8e0" stroke="#00a8e0" stroke-width="3" stroke-miterlimit="10" pointer-events="none"/><ellipse cx="110" cy="72" rx="20" ry="20" fill="#496e7a" stroke="#ffffff" stroke-width="4" pointer-events="none"/><ellipse cx="110" cy="187" rx="20" ry="20" fill="#496e7a" stroke="#ffffff" stroke-width="4" pointer-events="none"/><path d="M 42 131 L 56 131 Q 66 131 66 121 L 66 82 Q 66 72 72.95 72 L 79.9 72" fill="none" stroke="#00a8e0" stroke-width="3" stroke-miterlimit="10" pointer-events="none"/><path d="M 86.65 72 L 77.65 75 L 79.9 72 L 77.65 69 Z" fill="#00a8e0" stroke="#00a8e0" stroke-width="3" stroke-miterlimit="10" pointer-events="none"/><path d="M 42 131 L 56 131 Q 66 131 66 141 L 66 177 Q 66 187 72.95 187 L 79.9 187" fill="none" stroke="#00a8e0" stroke-width="3" stroke-miterlimit="10" pointer-events="none"/><path d="M 86.65 187 L 77.65 190 L 79.9 187 L 77.65 184 Z" fill="#00a8e0" stroke="#00a8e0" stroke-width="3" stroke-miterlimit="10" pointer-events="none"/><ellipse cx="280" cy="22" rx="20" ry="20" fill="#496e7a" stroke="#ffffff" stroke-width="4" pointer-events="none"/><ellipse cx="280" cy="67" rx="20" ry="20" fill="#496e7a" stroke="#ffffff" stroke-width="4" pointer-events="none"/><ellipse cx="360" cy="101" rx="20" ry="20" fill="#496e7a" stroke="#ffffff" stroke-width="4" pointer-events="none"/><path d="M 300 131 L 310 131 Q 320 131 320 121 L 320 111 Q 320 101 324.95 101 L 329.9 101" fill="none" 
stroke="#00a8e0" stroke-width="3" stroke-miterlimit="10" pointer-events="none"/><path d="M 336.65 101 L 327.65 104 L 329.9 101 L 327.65 98 Z" fill="#00a8e0" stroke="#00a8e0" stroke-width="3" stroke-miterlimit="10" pointer-events="none"/><path d="M 300 67 L 310 67 Q 320 67 320 77 L 320 91 Q 320 101 324.95 101 L 329.9 101" fill="none" stroke="#00a8e0" stroke-width="3" stroke-miterlimit="10" pointer-events="none"/><path d="M 336.65 101 L 327.65 104 L 329.9 101 L 327.65 98 Z" fill="#00a8e0" stroke="#00a8e0" stroke-width="3" stroke-miterlimit="10" pointer-events="none"/><ellipse cx="440" cy="159" rx="20" ry="20" fill="#496e7a" stroke="#ffffff" stroke-width="4" pointer-events="none"/><path d="M 380 101 L 390 101 Q 400 101 400 111 L 400 149 Q 400 159 404.95 159 L 409.9 159" fill="none" stroke="#00a8e0" stroke-width="3" stroke-miterlimit="10" pointer-events="none"/><path d="M 416.65 159 L 407.65 162 L 409.9 159 L 407.65 156 Z" fill="#00a8e0" stroke="#00a8e0" stroke-width="3" stroke-miterlimit="10" pointer-events="none"/><path d="M 220 217 L 310 217 Q 320 217 320 207 L 320 169 Q 320 159 330 159 L 409.9 159" fill="none" stroke="#00a8e0" stroke-width="3" stroke-miterlimit="10" pointer-events="none"/><path d="M 416.65 159 L 407.65 162 L 409.9 159 L 407.65 156 Z" fill="#00a8e0" stroke="#00a8e0" stroke-width="3" stroke-miterlimit="10" pointer-events="none"/><ellipse cx="200" cy="44" rx="20" ry="20" fill="#496e7a" stroke="#ffffff" stroke-width="4" pointer-events="none"/><ellipse cx="200" cy="102" rx="20" ry="20" fill="#496e7a" stroke="#ffffff" stroke-width="4" pointer-events="none"/><ellipse cx="200" cy="159" rx="20" ry="20" fill="#496e7a" stroke="#ffffff" stroke-width="4" pointer-events="none"/><ellipse cx="200" cy="217" rx="20" ry="20" fill="#496e7a" stroke="#ffffff" stroke-width="4" pointer-events="none"/><path d="M 130 72 L 145 72 Q 155 72 155 62 L 155 53 Q 155 44 162.45 44 L 169.9 44" fill="none" stroke="#00a8e0" stroke-width="3" stroke-miterlimit="10" 
pointer-events="none"/><path d="M 176.65 44 L 167.65 47 L 169.9 44 L 167.65 41 Z" fill="#00a8e0" stroke="#00a8e0" stroke-width="3" stroke-miterlimit="10" pointer-events="none"/><path d="M 220 44 L 230 44 Q 240 44 240 34 L 240 28 Q 240 22 244.95 22 L 249.9 22" fill="none" stroke="#00a8e0" stroke-width="3" stroke-miterlimit="10" pointer-events="none"/><path d="M 256.65 22 L 247.65 25 L 249.9 22 L 247.65 19 Z" fill="#00a8e0" stroke="#00a8e0" stroke-width="3" stroke-miterlimit="10" pointer-events="none"/><path d="M 220 44 L 230 44 Q 240 44 240 54 L 240 60.5 Q 240 67 244.95 67 L 249.9 67" fill="none" stroke="#00a8e0" stroke-width="3" stroke-miterlimit="10" pointer-events="none"/><path d="M 256.65 67 L 247.65 70 L 249.9 67 L 247.65 64 Z" fill="#00a8e0" stroke="#00a8e0" stroke-width="3" stroke-miterlimit="10" pointer-events="none"/><path d="M 220 102 L 230 102 Q 240 102 240 112 L 240 121.5 Q 240 131 244.95 131 L 249.9 131" fill="none" stroke="#00a8e0" stroke-width="3" stroke-miterlimit="10" pointer-events="none"/><path d="M 256.65 131 L 247.65 134 L 249.9 131 L 247.65 128 Z" fill="#00a8e0" stroke="#00a8e0" stroke-width="3" stroke-miterlimit="10" pointer-events="none"/><path d="M 130 72 L 145 72 Q 155 72 155 82 L 155 92 Q 155 102 162.45 102 L 169.9 102" fill="none" stroke="#00a8e0" stroke-width="3" stroke-miterlimit="10" pointer-events="none"/><path d="M 176.65 102 L 167.65 105 L 169.9 102 L 167.65 99 Z" fill="#00a8e0" stroke="#00a8e0" stroke-width="3" stroke-miterlimit="10" pointer-events="none"/><path d="M 220 159 L 230 159 Q 240 159 240 149 L 240 140 Q 240 131 244.95 131 L 249.9 131" fill="none" stroke="#00a8e0" stroke-width="3" stroke-miterlimit="10" pointer-events="none"/><path d="M 256.65 131 L 247.65 134 L 249.9 131 L 247.65 128 Z" fill="#00a8e0" stroke="#00a8e0" stroke-width="3" stroke-miterlimit="10" pointer-events="none"/><path d="M 130 187 L 145 187 Q 155 187 155 177 L 155 168 Q 155 159 162.45 159 L 169.9 159" fill="none" stroke="#00a8e0" stroke-width="3" 
stroke-miterlimit="10" pointer-events="none"/><path d="M 176.65 159 L 167.65 162 L 169.9 159 L 167.65 156 Z" fill="#00a8e0" stroke="#00a8e0" stroke-width="3" stroke-miterlimit="10" pointer-events="none"/><ellipse cx="520" cy="131" rx="20" ry="20" fill="#496e7a" stroke="#ffffff" stroke-width="4" pointer-events="none"/><path d="M 300 22 L 430 22 Q 440 22 440 32 L 440 121 Q 440 131 450 131 L 489.9 131" fill="none" stroke="#00a8e0" stroke-width="3" stroke-miterlimit="10" pointer-events="none"/><path d="M 496.65 131 L 487.65 134 L 489.9 131 L 487.65 128 Z" fill="#00a8e0" stroke="#00a8e0" stroke-width="3" stroke-miterlimit="10" pointer-events="none"/><g transform="translate(34.5,233.5)"><switch><foreignObject style="overflow:visible;" pointer-events="all" width="63" height="36" requiredFeatures="http://www.w3.org/TR/SVG11/feature#Extensibility"><div xmlns="http://www.w3.org/1999/xhtml" style="display: inline-block; font-size: 32px; font-family: Helvetica; color: rgb(0, 0, 0); line-height: 1.2; vertical-align: top; width: 63px; white-space: nowrap; word-wrap: normal; text-align: center;"><div xmlns="http://www.w3.org/1999/xhtml" style="display:inline-block;text-align:inherit;text-decoration:inherit;">Split</div></div></foreignObject><text x="32" y="34" fill="#000000" text-anchor="middle" font-size="32px" font-family="Helvetica">Split</text></switch></g><g transform="translate(354.5,233.5)"><switch><foreignObject style="overflow:visible;" pointer-events="all" width="90" height="36" requiredFeatures="http://www.w3.org/TR/SVG11/feature#Extensibility"><div xmlns="http://www.w3.org/1999/xhtml" style="display: inline-block; font-size: 32px; font-family: Helvetica; color: rgb(0, 0, 0); line-height: 1.2; vertical-align: top; width: 92px; white-space: nowrap; word-wrap: normal; text-align: center;"><div xmlns="http://www.w3.org/1999/xhtml" style="display:inline-block;text-align:inherit;text-decoration:inherit;">Merge</div></div></foreignObject><text x="45" y="34" fill="#000000" 
text-anchor="middle" font-size="32px" font-family="Helvetica">Merge</text></switch></g><path d="M 130 187 L 145 187 Q 155 187 155 197 L 155 207 Q 155 217 162.45 217 L 169.9 217" fill="none" stroke="#00a8e0" stroke-width="3" stroke-miterlimit="10" pointer-events="none"/><path d="M 176.65 217 L 167.65 220 L 169.9 217 L 167.65 214 Z" fill="#00a8e0" stroke="#00a8e0" stroke-width="3" stroke-miterlimit="10" pointer-events="none"/><path d="M 460 159 L 470 159 Q 480 159 480 149 L 480 140 Q 480 131 484.95 131 L 489.9 131" fill="none" stroke="#00a8e0" stroke-width="3" stroke-miterlimit="10" pointer-events="none"/><path d="M 496.65 131 L 487.65 134 L 489.9 131 L 487.65 128 Z" fill="#00a8e0" stroke="#00a8e0" stroke-width="3" stroke-miterlimit="10" pointer-events="none"/></g></svg> \ No newline at end of file
diff --git a/docs/redo_arch.svg b/docs/redo_arch.svg
new file mode 100644
index 0000000..680c5ba
--- /dev/null
+++ b/docs/redo_arch.svg
@@ -0,0 +1,2 @@
+<!DOCTYPE svg PUBLIC "-//W3C//DTD SVG 1.1//EN" "http://www.w3.org/Graphics/SVG/1.1/DTD/svg11.dtd">
+<svg xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" width="511px" height="333px" version="1.1"><defs/><g transform="translate(0.5,0.5)"><rect x="36" y="1" width="110" height="50" fill="none" stroke="#468e9f" stroke-width="3" pointer-events="none"/><g transform="translate(59.5,19.5)"><switch><foreignObject style="overflow:visible;" pointer-events="all" width="62" height="12" requiredFeatures="http://www.w3.org/TR/SVG11/feature#Extensibility"><div xmlns="http://www.w3.org/1999/xhtml" style="display: inline-block; font-size: 12px; font-family: Helvetica; color: rgb(0, 0, 0); line-height: 1.2; vertical-align: top; white-space: nowrap; text-align: center;"><div xmlns="http://www.w3.org/1999/xhtml" style="display:inline-block;text-align:inherit;text-decoration:inherit;">ThreadPool</div></div></foreignObject><text x="31" y="12" fill="#000000" text-anchor="middle" font-size="12px" font-family="Helvetica">ThreadPool</text></switch></g><rect x="191" y="11" width="110" height="30" fill="none" stroke="#468e9f" stroke-width="3" pointer-events="none"/><g transform="translate(204.5,19.5)"><switch><foreignObject style="overflow:visible;" pointer-events="all" width="83" height="12" requiredFeatures="http://www.w3.org/TR/SVG11/feature#Extensibility"><div xmlns="http://www.w3.org/1999/xhtml" style="display: inline-block; font-size: 12px; font-family: Helvetica; color: rgb(0, 0, 0); line-height: 1.2; vertical-align: top; white-space: nowrap; text-align: center;"><div xmlns="http://www.w3.org/1999/xhtml" style="display:inline-block;text-align:inherit;text-decoration:inherit;">Task: Runnable</div></div></foreignObject><text x="42" y="12" fill="#000000" text-anchor="middle" font-size="12px" font-family="Helvetica">Task: Runnable</text></switch></g><rect x="36" y="201" width="110" height="30" fill="none" stroke="#468e9f" stroke-width="3" pointer-events="none"/><g transform="translate(52.5,209.5)"><switch><foreignObject style="overflow:visible;" 
pointer-events="all" width="76" height="12" requiredFeatures="http://www.w3.org/TR/SVG11/feature#Extensibility"><div xmlns="http://www.w3.org/1999/xhtml" style="display: inline-block; font-size: 12px; font-family: Helvetica; color: rgb(0, 0, 0); line-height: 1.2; vertical-align: top; white-space: nowrap; text-align: center;"><div xmlns="http://www.w3.org/1999/xhtml" style="display:inline-block;text-align:inherit;text-decoration:inherit;">RedoManager<br /></div></div></foreignObject><text x="38" y="12" fill="#000000" text-anchor="middle" font-size="12px" font-family="Helvetica">RedoManager&lt;br&gt;</text></switch></g><rect x="1" y="101" width="180" height="50" fill="none" stroke="#468e9f" stroke-width="3" pointer-events="none"/><g transform="translate(17.5,112.5)"><switch><foreignObject style="overflow:visible;" pointer-events="all" width="147" height="26" requiredFeatures="http://www.w3.org/TR/SVG11/feature#Extensibility"><div xmlns="http://www.w3.org/1999/xhtml" style="display: inline-block; font-size: 12px; font-family: Helvetica; color: rgb(0, 0, 0); line-height: 1.2; vertical-align: top; white-space: nowrap; text-align: center;"><div xmlns="http://www.w3.org/1999/xhtml" style="display:inline-block;text-align:inherit;text-decoration:inherit;">TaskPriorityQueue: <br />BlockingQueue&lt;Runnable&gt;<br /></div></div></foreignObject><text x="74" y="19" fill="#000000" text-anchor="middle" font-size="12px" font-family="Helvetica">TaskPriorityQueue: &lt;br&gt;BlockingQueue&lt;Runnable&gt;&lt;br&gt;</text></switch></g><rect x="267" y="110" width="158" height="30" fill="none" stroke="#468e9f" stroke-width="3" pointer-events="none"/><g transform="translate(281.5,118.5)"><switch><foreignObject style="overflow:visible;" pointer-events="all" width="128" height="12" requiredFeatures="http://www.w3.org/TR/SVG11/feature#Extensibility"><div xmlns="http://www.w3.org/1999/xhtml" style="display: inline-block; font-size: 12px; font-family: Helvetica; color: rgb(0, 0, 0); 
line-height: 1.2; vertical-align: top; white-space: nowrap; text-align: center;"><div xmlns="http://www.w3.org/1999/xhtml" style="display:inline-block;text-align:inherit;text-decoration:inherit;">PrefetchedData:  Cache<br /></div></div></foreignObject><text x="64" y="12" fill="#000000" text-anchor="middle" font-size="12px" font-family="Helvetica">PrefetchedData:  Cache&lt;br&gt;</text></switch></g><rect x="371" y="204" width="138" height="40" fill="none" stroke="#468e9f" stroke-width="3" pointer-events="none"/><g transform="translate(384.5,210.5)"><switch><foreignObject style="overflow:visible;" pointer-events="all" width="111" height="26" requiredFeatures="http://www.w3.org/TR/SVG11/feature#Extensibility"><div xmlns="http://www.w3.org/1999/xhtml" style="display: inline-block; font-size: 12px; font-family: Helvetica; color: rgb(0, 0, 0); line-height: 1.2; vertical-align: top; white-space: nowrap; text-align: center;"><div xmlns="http://www.w3.org/1999/xhtml" style="display:inline-block;text-align:inherit;text-decoration:inherit;">«interface»<br /><b>RedoManagerLogic</b></div></div></foreignObject><text x="56" y="19" fill="#000000" text-anchor="middle" font-size="12px" font-family="Helvetica">«interface»&lt;br&gt;&lt;b&gt;RedoManagerLogic&lt;/b&gt;</text></switch></g><path d="M 163.97 26 L 186.53 26" fill="none" stroke="#000000" stroke-width="2" stroke-miterlimit="10" pointer-events="none"/><path d="M 147.97 26 L 155.97 21.29 L 163.97 26 L 155.97 30.71 Z" fill="none" stroke="#000000" stroke-width="2" stroke-miterlimit="10" pointer-events="none"/><path d="M 174.76 33 L 188.76 26 L 174.76 19" fill="none" stroke="#000000" stroke-width="2" stroke-miterlimit="10" pointer-events="none"/><g transform="translate(147.5,8.5)"><switch><foreignObject style="overflow:visible;" pointer-events="all" width="6" height="11" requiredFeatures="http://www.w3.org/TR/SVG11/feature#Extensibility"><div xmlns="http://www.w3.org/1999/xhtml" style="display: inline-block; font-size: 11px; 
font-family: Helvetica; color: rgb(0, 0, 0); line-height: 1.2; vertical-align: top; white-space: nowrap;"><div xmlns="http://www.w3.org/1999/xhtml" style="display:inline-block;text-align:inherit;text-decoration:inherit;background-color:#ffffff;">1</div></div></foreignObject><text x="3" y="11" fill="#000000" text-anchor="middle" font-size="11px" font-family="Helvetica">1</text></switch></g><rect x="23.5" y="281" width="135" height="50" fill="none" stroke="#468e9f" stroke-width="3" pointer-events="none"/><g transform="translate(39.5,299.5)"><switch><foreignObject style="overflow:visible;" pointer-events="all" width="103" height="12" requiredFeatures="http://www.w3.org/TR/SVG11/feature#Extensibility"><div xmlns="http://www.w3.org/1999/xhtml" style="display: inline-block; font-size: 12px; font-family: Helvetica; color: rgb(0, 0, 0); line-height: 1.2; vertical-align: top; white-space: nowrap; text-align: center;"><div xmlns="http://www.w3.org/1999/xhtml" style="display:inline-block;text-align:inherit;text-decoration:inherit;">HistoryGraph: DFG<br /></div></div></foreignObject><text x="52" y="12" fill="#000000" text-anchor="middle" font-size="12px" font-family="Helvetica">HistoryGraph: DFG&lt;br&gt;</text></switch></g><path d="M 91 201 L 91 155.47" fill="none" stroke="#000000" stroke-width="2" stroke-miterlimit="10" stroke-dasharray="6 6" pointer-events="none"/><path d="M 98 167.24 L 91 153.24 L 84 167.24" fill="none" stroke="#000000" stroke-width="2" stroke-miterlimit="10" pointer-events="none"/><g transform="translate(80.5,170.5)"><switch><foreignObject style="overflow:visible;" pointer-events="all" width="20" height="11" requiredFeatures="http://www.w3.org/TR/SVG11/feature#Extensibility"><div xmlns="http://www.w3.org/1999/xhtml" style="display: inline-block; font-size: 11px; font-family: Helvetica; color: rgb(0, 0, 0); line-height: 1.2; vertical-align: top; white-space: nowrap; text-align: center;"><div xmlns="http://www.w3.org/1999/xhtml" 
style="display:inline-block;text-align:inherit;text-decoration:inherit;">Use</div></div></foreignObject><text x="10" y="11" fill="#000000" text-anchor="middle" font-size="11px" font-family="Helvetica">Use</text></switch></g><path d="M 91 51 L 91 96.53" fill="none" stroke="#000000" stroke-width="2" stroke-miterlimit="10" stroke-dasharray="6 6" pointer-events="none"/><path d="M 84 84.76 L 91 98.76 L 98 84.76" fill="none" stroke="#000000" stroke-width="2" stroke-miterlimit="10" pointer-events="none"/><g transform="translate(80.5,70.5)"><switch><foreignObject style="overflow:visible;" pointer-events="all" width="20" height="11" requiredFeatures="http://www.w3.org/TR/SVG11/feature#Extensibility"><div xmlns="http://www.w3.org/1999/xhtml" style="display: inline-block; font-size: 11px; font-family: Helvetica; color: rgb(0, 0, 0); line-height: 1.2; vertical-align: top; white-space: nowrap; text-align: center;"><div xmlns="http://www.w3.org/1999/xhtml" style="display:inline-block;text-align:inherit;text-decoration:inherit;">Use</div></div></foreignObject><text x="10" y="11" fill="#000000" text-anchor="middle" font-size="11px" font-family="Helvetica">Use</text></switch></g><path d="M 301 26 L 336 26 Q 346 26 346 36 L 346 105.53" fill="none" stroke="#000000" stroke-width="2" stroke-miterlimit="10" stroke-dasharray="6 6" pointer-events="none"/><path d="M 339 93.76 L 346 107.76 L 353 93.76" fill="none" stroke="#000000" stroke-width="2" stroke-miterlimit="10" pointer-events="none"/><g transform="translate(335.5,40.5)"><switch><foreignObject style="overflow:visible;" pointer-events="all" width="20" height="11" requiredFeatures="http://www.w3.org/TR/SVG11/feature#Extensibility"><div xmlns="http://www.w3.org/1999/xhtml" style="display: inline-block; font-size: 11px; font-family: Helvetica; color: rgb(0, 0, 0); line-height: 1.2; vertical-align: top; white-space: nowrap; text-align: center;"><div xmlns="http://www.w3.org/1999/xhtml" 
style="display:inline-block;text-align:inherit;text-decoration:inherit;">Use</div></div></foreignObject><text x="10" y="11" fill="#000000" text-anchor="middle" font-size="11px" font-family="Helvetica">Use</text></switch></g><path d="M 146 209 L 336 209 Q 346 209 346 199 L 346 144.47" fill="none" stroke="#000000" stroke-width="2" stroke-miterlimit="10" stroke-dasharray="6 6" pointer-events="none"/><path d="M 353 156.24 L 346 142.24 L 339 156.24" fill="none" stroke="#000000" stroke-width="2" stroke-miterlimit="10" pointer-events="none"/><g transform="translate(270.5,203.5)"><switch><foreignObject style="overflow:visible;" pointer-events="all" width="20" height="11" requiredFeatures="http://www.w3.org/TR/SVG11/feature#Extensibility"><div xmlns="http://www.w3.org/1999/xhtml" style="display: inline-block; font-size: 11px; font-family: Helvetica; color: rgb(0, 0, 0); line-height: 1.2; vertical-align: top; white-space: nowrap; text-align: center;"><div xmlns="http://www.w3.org/1999/xhtml" style="display:inline-block;text-align:inherit;text-decoration:inherit;">Use</div></div></foreignObject><text x="10" y="11" fill="#000000" text-anchor="middle" font-size="11px" font-family="Helvetica">Use</text></switch></g><path d="M 163.97 224 L 366.53 224" fill="none" stroke="#000000" stroke-width="2" stroke-miterlimit="10" pointer-events="none"/><path d="M 147.97 224 L 155.97 219.29 L 163.97 224 L 155.97 228.71 Z" fill="#000000" stroke="#000000" stroke-width="2" stroke-miterlimit="10" pointer-events="none"/><path d="M 354.76 231 L 368.76 224 L 354.76 217" fill="none" stroke="#000000" stroke-width="2" stroke-miterlimit="10" pointer-events="none"/><g transform="translate(175.5,209.5)"><switch><foreignObject style="overflow:visible;" pointer-events="all" width="6" height="11" requiredFeatures="http://www.w3.org/TR/SVG11/feature#Extensibility"><div xmlns="http://www.w3.org/1999/xhtml" style="display: inline-block; font-size: 11px; font-family: Helvetica; color: rgb(0, 0, 0); line-height: 
1.2; vertical-align: top; white-space: nowrap;"><div xmlns="http://www.w3.org/1999/xhtml" style="display:inline-block;text-align:inherit;text-decoration:inherit;">1</div></div></foreignObject><text x="3" y="11" fill="#000000" text-anchor="middle" font-size="11px" font-family="Helvetica">1</text></switch></g><path d="M 91 248.97 L 91 276.53" fill="none" stroke="#000000" stroke-width="2" stroke-miterlimit="10" pointer-events="none"/><path d="M 91 232.97 L 95.71 240.97 L 91 248.97 L 86.29 240.97 Z" fill="#000000" stroke="#000000" stroke-width="2" stroke-miterlimit="10" pointer-events="none"/><path d="M 84 264.76 L 91 278.76 L 98 264.76" fill="none" stroke="#000000" stroke-width="2" stroke-miterlimit="10" pointer-events="none"/><g transform="translate(92.5,240.5)"><switch><foreignObject style="overflow:visible;" pointer-events="all" width="6" height="11" requiredFeatures="http://www.w3.org/TR/SVG11/feature#Extensibility"><div xmlns="http://www.w3.org/1999/xhtml" style="display: inline-block; font-size: 11px; font-family: Helvetica; color: rgb(0, 0, 0); line-height: 1.2; vertical-align: top; white-space: nowrap;"><div xmlns="http://www.w3.org/1999/xhtml" style="display:inline-block;text-align:inherit;text-decoration:inherit;">1</div></div></foreignObject><text x="3" y="11" fill="#000000" text-anchor="middle" font-size="11px" font-family="Helvetica">1</text></switch></g><path d="M 219 41 L 228.5 41 Q 238 41 238 51 L 238 161 Q 238 171 228 171 L 129 171 Q 119 171 119 181 L 119 194.29" fill="none" stroke="#00a8e0" stroke-width="3" stroke-miterlimit="10" stroke-dasharray="9 9" pointer-events="none"/><path d="M 113.5 186.65 L 119 197.65 L 124.5 186.65" fill="none" stroke="#00a8e0" stroke-width="3" stroke-miterlimit="10" pointer-events="none"/><g transform="translate(222.5,131.5)"><switch><foreignObject style="overflow:visible;" pointer-events="all" width="31" height="36" requiredFeatures="http://www.w3.org/TR/SVG11/feature#Extensibility"><div 
xmlns="http://www.w3.org/1999/xhtml" style="display: inline-block; font-size: 32px; font-family: Helvetica; color: black; line-height: 1.2; vertical-align: top; white-space: nowrap; text-align: center;"><div xmlns="http://www.w3.org/1999/xhtml" style="display:inline-block;text-align:inherit;text-decoration:inherit;"><font style="font-size: 12px">return</font></div></div></foreignObject><text x="16" y="34" fill="black" text-anchor="middle" font-size="32px" font-family="Helvetica">[Not supported by viewer]</text></switch></g></g></svg> \ No newline at end of file
diff --git a/docs/specs.md b/docs/specs.md
new file mode 100644
index 0000000..656f045
--- /dev/null
+++ b/docs/specs.md
@@ -0,0 +1,676 @@
+# Architecture
+
+## Music/Cassandra Tables
+
+### Transaction Information Table (TIT)
+
+This table contains the entities that are locked to perform a transaction on a partition; it contains an ordered array holding information about the transactions.
+
+A row is associated with a specific partition.
+
+There can be more than one row associated with a partition, but only one is active at any given time. There are two main reasons for creating a new TIT row:
+* Partition merge/split
+* The REDO array is too big, and it's better to start a new row
+
+This is a type of table, and there can be many instances of this table in Music. Reasons to have more than one table:
+* One table per replication policy, for example, there could be one TIT table with a replication factor of 3 and one TIT table with a replication factor of 5.
+* Other policy requirements
+
+#### Columns
+
+* **id**: UUID
+ - The id of this row
+ - **Note**: Later we could force the index to follow a certain pattern, such that the rows of a given MDBC Server are located as close as possible to that server, without violating any other policy associated with the data.
+* **redo**: Array&lt;Tuple&lt;Text,Tuple&lt;Text,varint>>
+ - Array (order-matters) of &lt;TableName,Index> associated with the Redo Records that were applied to this partition
+* **partition**: UUID
+ - The id of the partition associated with this transaction
+* **latestapplied**: Int
+ - Integer associated with the latest RedoRecord applied from the Redo column into the data tables in Music.
+* **applied**: boolean
+ - Flag that indicates that this row Tx's were already committed to the data tables
+
+#### Primary
+
+(Partition)
+
+#### Clustering
+
+(Index)
+
+### Redo Records Table (RRT)
+
+This table is the one that contains the TransactionsDigests.
+
+There is one row per transaction.
+
+There is no need to lock on this table.
+
+This is an append/remove only table, no updates are going to be performed.
+* Removes are an optimization, not a correctness requirement
+* Removes are only executed when the transaction was completely applied to the data tables and all tables that are pointing to this row are already removed.
+
+#### Columns
+
+* **leaseid**: text
+ * Id of the lease that was used to process the transaction associated with the row in TIT
+* **leasecounter**: varint
+ * Transaction number (counter of the transactions performed so far using the lock in leaseid)
+* **transactiondigest**: text
+ * Serialized transaction digest, can be considered a blob
+
+#### Primary
+
+(leaseid,leasecounter)
+
+#### Clustering
+
+None
+
+### TableToPartition Table (TTP)
+
+This table maps each table to the current (and previous) partitions.
+
+#### Columns
+
+* **TableName**: Text
+ - Name of the table to which partitions are being associated
+* **Partition**: UUID
+ - Current partition that holds this table
+* **PreviousPartitions**: Set&lt;UUID>
+ - Name of all the previous partitions (inclusive of the current partition)
+
+#### Primary
+
+(Table)
+
+#### Clustering
+
+None
+
+### PartitionInfo Table (PI)
+
+This table contains information about a partition: the latest TIT row to be locked if this partition is to be held, the tables associated with this partition, and other partition metadata.
+
+#### Columns
+
+* **Partition**: UUID
+ - Name of the partition that this row describes
+* **LatestTitTable**: Text
+ - Name of the table that contains the latest TransactionInformation row associated with this partition
+* **LatestTitIndex**: UUID
+ - Latest index (row) in the previous table that is currently being updated with transactions in this partition
+* **Tables**: Set&lt;Text>
+ - All tables that are contained within this partition
+* **ReplicationFactor**: Int
+* **CurrentOwner**: Text
+ - URL address associated with the current owner
+
+#### Primary
+
+(Partition)
+
+#### Clustering
+
+None
+
+### RedoHistory Table (RH)
+
+This table represents the Directed Graph that forms the history of REDO logs. Given that we create new REDO logs on each new repartition (or due to other reasons), we need to keep track of the changes and the order of the REDO logs. An example of the repartitions can be seen in [figure 3](#figure-3).
+
+#### Columns
+* **Partition**: uuid
+ - Partition
+* **RedoTable**:Text
+ - TIT table at this point in time for the Partition
+* **RedoIndex**:UUID
+ - TIT Index for the previous Table
+* **PreviousRedo**: Set&lt;Tuple&lt;Text,UUID>>
+ - Contains the set of &lt;RedoTable,RedoIndex> tuples identifying the Redo rows that immediately precede this one in the history
+
+#### Primary
+(Partition)
+
+#### Clustering
+(RedoTable,RedoIndex)
+
+## Server Architecture
+
+As shown in [figure 1](#figure-1), the MDBC server is composed of the following components:
+
+* **RunningQueries**
+* **LocalStagingTable**
+* **MusicInterface**
+* **MusicSQLManager**
+* **SQL**
+
+### Figure 1
+![Server Architecture](uml.svg "MDBC Server UML")
+
+**Server Architecture**
+
+### RunningQueries
+This is an in-memory data structure that contains the progress of each transaction being executed. Each transaction holds information about:
+
+* **LTxId:** local transaction id, unsigned integer
+* **CommitRequested:** bool indicating whether the user has already requested a commit.
+* **SQLDone:** bool indicating if SQL was already committed, atomic bool
+* **MusicDone:** bool indicating if music commit was already performed, atomic bool
+* **Connection:** reference to a connection object. This is used to complete a commit if it failed in the original thread.
+* **Timestamp:** last time this data structure was updated
+* **RedoRecordId:** id of the redo record when it was already added to the RRT (it contains the info to the table and index)
+
+### LocalStagingTable
+
+This is a serializable in-memory data-structure that contains the information about the changes made in the SQL database for a given transaction. When the transaction is committed, this staging table is frozen, serialized to a string, and committed to Music. See the [algorithms section](#transaction-commit).
+
+There is one table per client connection.
+
+It has one main operation:
+
+```Java
+void addOperation(String key, OperationType op, JSONObject oldValue, JSONObject newValue);
+```
+
+### MusicInterface
+
+This is the layer that interacts with Music. There is only one instance per MDBC server. It is in charge of holding locks and executing get/puts to Music, following the corresponding locking mechanisms. This object is also used in the MusicSQLManager, so a reference to it is passed when a MusicSQLManager is created.
+
+### MusicSQLManager
+
+When a connection is created from a new MDBC client, a Connection object is created. MusicSQLManager is the object that handles the main operations of a connection, translating operations between SQL and Music.
+
+There is one object of this type for each client connection.
+
+### SQL or DBInterface
+
+This interface defines the main operations that the MusicSQLManager performs with the SQL layer.
+
+## REDO Recovery Architecture
+
+[Figure 2](#figure-2) shows the main components used to recover using the REDO log. First, we analyze the REDO history associated with a given node, and then describe each of the components in figure 2.
+
+### Figure 2
+![REDO Architecture](redo_arch.svg "REDO Spec UML")
+
+**Redo Spec Architecture**
+
+### REDO History
+
+Given that new Redo log rows are created in TIT each time the system is repartitioned, a history is created as shown in [figure 3](#figure-3). Each node represents a given TIT row, and the graph is a directed acyclic graph.
+
+#### Figure 3
+![REDO History Directed Graph](histo_graph.svg "REDO History Directed Graph")
+
+**Redo History Directed Graph**
+
+#### Properties
+
+* There are only two types of repartition: splitting a partition in two, or merging two partitions.
+* Each partition can only be split into two partitions
+* Only two partitions can be merged into a new partition
+* Sometimes one node is transformed into a new node, without split or merge. This can happen due to the reasons explained in previous sections, such as a TIT row being too big.
+* Each partition is going to have a new different name (e.g. use UUID to name them)
+* On each repartition, a new TIT row is created
+* The replication policy used is the max of all the tables that are contained within the partition.
+
+#### Node
+
+Each node in the graph contains the following information
+
+* **RedoTable**: info from RH table
+* **RedoIndex**: info from RH table
+* **Tables**: tables that are going to be recovered in that node, obtained from the partition in RH table.
+* **TIT Metadata Read**: it is a boolean flag that indicates that the associated metadata was already downloaded.
+* **ParentsDoneCounter**: it is a counter that is increased each time one of the parents of the node was successfully applied.
+* **LastAppliedChange**: last change applied from this Redo Index to this local MDBC server.
+* **TxDigestDownloaded**: a boolean flag that indicates that the TxDigest download task was already completed
+
+All this information is used to implement the algorithms to parallelize the REDO operations.
+
+### Redo Recovery Execution
+
+We decomposed the recovery algorithm into multiple independent tasks that are going to be executed by a pool of threads. There are three main types of tasks:
+
+1. Download Metadata (embarrassingly parallel)
+2. Download Transaction Digests (embarrassingly parallel)
+3. Apply Digests (Sequential with concurrency)
+
+To save memory usage, and avoid pulling too much data before it is actually consumed, we cannot perform these tasks in this same order. Additionally, prefetched data can be erased, if there is too much data downloaded that was not committed in time.
+
+### RedoManager
+
+This is the entity in charge of defining the next tasks to execute and to initialize the REDO recovery and all the required data structures, including the thread pool.
+
+### RedoManagerLogic
+
+This is the logic within the redo manager. It is created to allow extension of the algorithms for selecting the tasks.
+
+#### Interface
+```java
+void init(HistoryGraph history);
+List<Task> getInitialTasks();
+List<Task> setCompleted(Task completedTask);
+boolean isDone();
+```
+
+### PrefetchedData
+
+This is going to contain the results of tasks of type 1. and 2. If the recovery algorithm overprovisioned memory, the RedoManager can request to delete the least important data (further to the right in [figure 3](#figure-3)), and it would return the task that was used to generate it, such that the task can be re-added to the TaskPriorityQueue.
+
+
+#### Interface
+```java
+Metadata getMetadata(int NodeIdx);
+Digest getTransaction(int NodeIdx, String table, UUID row);
+Task deleteLessImportant();
+void addMetadata(int NodeIdx, Metadata meta, Task task);
+void addDigest(int NodeIdx, String table, UUID row, TxDigest tx, Task task);
+long getSize();
+```
+
+### HistoryGraph
+
+This is the object that is going to model the REDO history and will be used to create the plan for recovery. Additionally, it would hold the required data to select the next tasks to perform by the RedoManager. It contains basic graph operations used by the RedoManager.
+
+#### Interface
+```java
+long getSize();
+void addNode(String redoTable, String redoIndex);
+void addFutureLink(String redoTable, String redoIndex, String previousRedoTable, String previousRedoIndex);
+List<Nodes> getRoots();
+List<Nodes> getParents(Node node);
+List<Nodes> getChildren(Node node);
+List<Nodes> getChildren(List<Node> node);
+Iterator<Nodes> getAll();
+Node getNode(String id);
+void deleteTableFromPredecessor(String id, String table);
+void increaseParentCompleted(String id);
+void readyToRun(String id, PrefetchedData data);
+void setMetadataDownloaded(String id);
+void setDigestDownloaded(String id);
+void setRestored(String id);
+```
+
+### TaskPriorityQueue
+This is a special type of priority queue, used to hold the following tasks to be executed by the thread pool. The priority is defined by a combination of operation and the node that is associated.
+
+The tasks have the following priorities:
+1. DownloadMetadata: 3
+2. DownloadDigests: 2
+3. ApplyDigest: 4 (highest)
+4. DownloadHistory: 1 (lowest)
+
+These priorities mean that we focus on applying the digests more than downloading; tasks of type 3 are only created when all the related data was already downloaded, and the predecessors were already applied.
+
+The nodes have the following priorities. We enumerate the nodes in the HistoryGraph using Breadth-first search, the lower the number, the higher the priority. Intuitively, what this means is that nodes closer to the root, e.g. that are going to be executed first in the recovery, are downloaded first.
+
+#### Interface
+```java
+void AddOp(Task task, PriorityLevel prio, String nodeId);
+void AddOps(List<Task> tasks);
+Task GetNextTask();
+boolean freezeBelowLevel(PriorityLevel prio);// This function is only used when the memory is overprovisioned, and we don't want to allow more tasks of type 1 and 2 but is left generic, for future use
+boolean restart();// To be used after a freeze was applied
+```
+
+### ThreadPool
+
+Normal Java ThreadPool. It's composed of a set of available threads that will run the tasks that are stored in the TaskPriorityQueue.
+
+### Task
+
+This inherits from Runnable. It executes one of the three types of tasks that were presented in the section [Redo Recovery Execution](#redo-recovery-execution). When this operation is over, it notifies the RedoManager that the task was successfully completed. Additionally, it calls the corresponding functions in the HistoryGraph.
+
+## Algorithms
+
+This section describes the main operations that are performed in an ETDB system.
+
+### Bootup
+```python
+def boot(tables):
+ # This function get the set of partitions that are currently associated with the tables
+    # and also the set of all the partitions that have been associated with those tables
+ P,oldP = getAllPartitionsFromTTP(tables)
+ # Lock all partitions, this is done using the Music Interface
+ for p in P:
+ locked = lockPartition(p)
+        # If lock was not successful
+ if not locked:
+ # Explained in another section
+ result = requestLockRelease(p,"FullOwnership",Max)
+ if not result:
+ raise exception
+ else:
+ locked = lockPartition(p)
+ if not locked:
+ raise exception
+ # Merge all partitions, explained in another section, using MusicInterface
+ mergePartitions(tables,P,this.address)
+ # Pull data from Music data tables, using the MusicSQLManager
+ pullDataFromMusic(tables,P)
+ # Apply Redo, using the Redo Recovery
+ RedoRecovery(tables,P,oldP)
+```
+
+#### Request Lock Release
+```python
+def requestLockRelease(p,ownershipType,priority):
+ # Obtain the url of the owner from the PI table
+ owner = getCurrentOwnerFromPI(p)
+ # Request ownership using GRPC
+    # Current owner receives the query and, using the ownership type and priority, it decides if it should release it
+ # Releases the lock if required
+ # Replies with decision
+    result = remoteOwnershipRequest(owner,p,ownershipType,priority)
+ return result
+```
+
+#### Partitions Merge
+```python
+def mergePartitions(tables,partitions,newOwner):
+ # Create a new name using cassandra UUID
+ newPartitionName = createNewPartitionName()
+ # Assumption: locks are already held by the local ET node
+ replicationPolicy = getMaxReplicationFactor(tables)
+ # Reuse Tit Table if possible, if not create a new one with the corresponding policy
+ # Why this? The TIT table should be close to this ETDB node
+ titTableName = getTitTableWithReplicationFactor(replicationPolicy)
+ # This function creates new row in the table titTableName
+ # The row has the following values: an empty redo list, lastApplied = -1
+ # and applied = false
+ titIndex = musicInterface.lockAndCreateNewRow(titTableName,newPartitionName)
+ # this function should change the RedoRecordsTable used for transactions in partition newPartitionName, this is changed to the local state, not in Music
+ # this is going to create a new records table if required
+ # Again the redo records table should be close to this ETDB node
+ changeRedoRecordsTable(newPartitionName,replicationPolicy)
+ for table in tables:
+ # This function changes the TTP table
+ # It appends newPartitionName to PreviousPartitions and change Partition to newPartitionName
+ # into the row with table "table"
+ # This needs to be performed with an atomicPut
+ musicInterface.appendNewPartitionToTTP(table,newPartitionName)
+ # Create a new row in PI with the corresponding information
+ musicInterface.addPartitionToPI(newPartitionName,titTableName,titIndex,tables,replicationPolicy.factor,newOwner)
+ previousRedo = []
+ # Get the previous redo locations to create the redohistory node
+ for partition in partitions:
+ # This is an atomic get to the table Partition Info
+        partitionInfo = musicInterface.getPartitionInfo(partition)
+ previousRedo.append([partitionInfo.LatestTitTable,partitionInfo.LatestTitIndex])
+ # Add row to RH table
+ musicInterface.addRedoHistoryRow(newPartitionName,titTableName,titIndex,previousRedo)
+```
+
+### Transaction Commit
+
+#### Steps for a query in which ETDB node already owns the tables
+1. The query is submitted to the local MDBC client
+2. Local transaction Id is generated for the query submitted (LTxId)
+3. An element is appended to local data structure [RunningQueries](#runningqueries) using LTxId as the index.
+4. If required data is fetched from MUSIC
+ * Pull the set of keys
+ * Do an atomic get on each of the keys, currently it pulls the whole table
+ * **Why atomic gets and not locking the key**
+		* If the MDBC node can hold all the required data for a node, then it would need to pull the data only once from MUSIC, given that no outside entity could have a lock for the range
+ * Redo Recovery, see [Redo](#redo-recovery-to-mdbc-server) for the corresponding variables
+5. The query is executed in the local MDBC SQL Tx Database
+ * Each new database change (insert,write,delete) is saved into the staging table (LST), see [Staging Table](#localstagingtable)
+6. The client sends a commit request to the MDBC driver, RunningQueries (RQ) data structure is updated.
+7. Generate a local Commit Id (CommitId), an unsigned integer that is monotonically increasing.
+8. Commit transaction to Music
+ * Push new row to RRT (Quorum Write), no lock required
+ * The submission contains the following components:
+ * **LeaseId**: id associated with the lease related to the transaction table;
+ * **Counter**: is the CommitId generated
+ * **Operations**: a serialized digest of staging table
+ * Save RRT index to RQ
+	* Append (LeaseId, Counter) to Redo Array in the corresponding TIT table and row (the lock has to already be held)
+ * Update RQ progress
+9. Mdbc commits to local SQL database
+ * Update RQ when local SQL is completed
+10. SQL DB returns the commit result
+
+#### Assumptions in this transaction commit
+* Locks are already held for the partition
+* All tables are within that partition
+* Each owner only owns one partition at any time (**This is a really important assumption**)
+ * If a transaction is executed over more than one partition at any time. There is a requirement to do an additional operation, that indicates if the whole transaction was completed. If we are not smart, then a transaction would take at least 4 quorum operations.
+
+### Redo Recovery to MDBC Server
+
+```python
+ def RedoRecovery(tables,cachedTables,numOfThreads=numCores):
+ # Initialize Redo data structures
+ logic = initRedoManagerLogic(historyGraph)
+ restoreTasks = initTaskPriorityQueue()
+ pool = initThreadPool(numOfThreads,restoreTasks)
+ prefetchData = initPrefetchData()
+ # Obtain all the partitions from TTP table
+ P,oldP = getAllPartitionsFromTTP(tables)
+ # Data structure that contains the partitions to which the history was already downloaded
+ historyDownloadedSet=initEmptySet()
+ # Get the set of all partitions
+ allP = union(P,oldP)
+ # Fetch the history associated with each partition
+ # This can be done in parallel for each partition
+ # This is a blocking call
+ # Implementation: it adds all the fetch operations to the task
+ H = parallelFetchHistory(allP)
+ # Generate the history graph based on this information. As explained in previous sections
+ historyGraph = createHistoryGraph(tables,allP,H)
+ # Number the nodes in the graph using BFS, this returns a map of nodeNumber to nodeId
+ numberToIdMap = numberNodes(historyGraph)
+ # If the local node already hold information about this partition, due to pre warmup (e.g. secondary backup)
+ if cachedTables:
+ cleanHistoryGraph(cachedTables)
+ # This is a blocking call that downloads all metadata
+ parallelFetchMetadata(historyGraph.getAll())
+
+ # There are multiple of ways to order the following operations
+    # We choose this order to reduce the possibility of overcommitting memory
+ tasks = logic.getInitialTasks()
+ restoreTasks.addOps(tasks)
+
+```
+
+#### Implementation notes
+* Before doing a Redo Recovery, we assumed that we already pull all the data from the data tables, either directly or using the dirty tables.
+* When using CacheWarming (e.g. secondary backup), we keep a per-table Latest Redo Applied. This means that all parents of this history node were already applied to this table, and they can be erased from the historyGraph.
+* Each time a task completes, it calls the function setCompleted(TaskInfo) in the RedoManager
+* The Archive Process should be stopped completely when a Redo recovery is being performed.
+
+#### parallelFetchMetadata
+```python
+ def parallelFetchMetadata(historyNodes):
+ this.metadataReadyCounter = 0
+ # initialize the tasks
+ tasks=[]
+ # Create the tasks to download all the metadata for nodes that
+ # still have tables to be recovered completely
+ for node in historyNodes:
+ restoreTasks.addOp(createMetadataDownloadTask(node),MetadataPriority,node.id)
+ metadataReady.wait()
+```
+
+#### getInitialTasks
+
+```python
+ def getInitialTasks():
+ # We start downloading the digests for the first two levels
+ rootNodes = historyGraph.getRoots()
+ rootChildren = historyGraph.getChildren(rootNodes)
+ firstTwoLevelNodes = union(rootNodes,rootChildren)
+ firstTwoLevelNodes = deleteAlreadyApplied(firstTwoLevelNodes)
+ tasks.append(createTxDigestDownloadForNodes(firstTwoLevelNodes))
+ return tasks
+```
+
+#### setCompleted: RedoManager
+
+```python
+ def setCompleted(taskInfo):
+ # Complete the tasks associated, and return the new tasks to execute
+ newTasks = logic.setCompleted(taskInfo)
+ # If new tasks added
+ if len(newTasks) != 0:
+ # Add them to run in the thread pool
+ restoreTasks.addOps(newTasks)
+ else:
+ # If not more tasks, check if Redo recovery is over
+ if(logic.isDone())
+ # If over, signal that was completed
+ complete.signal()
+```
+
+#### setCompleted: RedoManagerLogic
+
+This is one of the possible implementations of this function for the redo manager logic.
+
+```python
+ def setCompleted(taskInfo):
+ tasks=[]
+ # If task completed is of Apply digest
+ if taskInfo.type == HistoryDownload:
+ historyDownloadedSet.add(taskInfo.partition)
+ if len(allP) == len(historyDownloadedSet):
+ # Signal that the history is complete
+ historyReady.signal()
+ elif taskInfo.type == ApplyDigest:
+ # Set node as completed
+ historyGraph.setRestored(taskInfo.node)
+ # Start downloading the tx digest for the grandchildren of this node
+ nodeChildren = historyGraph.getChildren(taskInfo.node)
+ nodeGrandchildren = historyGraph.getChildren(nodeChildren)
+ # This function uses the metadata and information about the tables that were already applied locally, to only fetch data for useful transactions
+        nodeGrandchildren = deleteAlreadyApplied(nodeGrandchildren)
+ tasks.append(createTxDigestDownloadForNodes(nodeGrandchildren))
+ # For each children, increase the parent completed and
+ # check if they are now able to run
+ for node in nodeChildren:
+ historyGraph.increaseParentCompleted(node)
+ if historyGraph.readyToRun(node,prefetchData):
+ tasks.append(newApplyDigestTask(node))
+ else:
+ # If task completed is metadata download
+ if taskInfo.type == MetadataDownload:
+ ## Atomic increase on counter
+ metadataReadyCounter+=1
+ # Record this in the historygraph
+ historyGraph.setMetadataDownloaded(taskInfo.node)
+ if metadataReadyCounter == historyGraph.getSize():
+ metadataReady.signal()
+ # If task completed is digest download
+ elif taskInfo.type == DigestDownload:
+ # Record this in the historygraph
+ historyGraph.setDigestDownloaded(taskInfo.node,taskInfo.table,taskInfo.tableId)
+ # Check if node is now able to apply the digest
+ if historyGraph.readyToRun(taskInfo.node,prefetchData):
+ # Add task if ready
+ tasks.append(newApplyDigestTask(taskInfo.node))
+ return tasks
+```
+
+In the previous pseudocode:
+* **createTxDigestDownloadForNodes**: iterates over the nodes, and for each node, it reads the corresponding metadata in prefetchData, and create a download transaction for each of the elements in the Redo Array
+
+#### Metadata Download Task
+
+This is the runnable that downloads the metadata associated with a specific node in the history graph.
+
+Each runnable is assigned a specific node in the history graph, which in the pseudocode is called "node". Additionally, it holds information about the task, and pointers to the shared data structures (e.g. the prefetchData object and the resourceManager).
+
+```python
+ def run():
+ # This is an atomic get to the corresponding row in the TIT
+ # At this point nobody else should hold a lock to this row
+    # Optimization: perform this async and return the thread to the thread pool
+ # -> Problem: completed logic to know when to finish this thread
+ txInformation = musicInterface.downloadTITInformation(node.partition,node.index)
+ # Add the metadata to the prefetch data object
+ prefetchData.addMetadata(node.id,txInformation,this.task)
+    # Let the resource manager know that the task was completed successfully
+ resourceManager.setCompleted(this.task)
+```
+
+#### Transaction Digest Download Task
+
+```python
+ def run():
+ # For each of the transactions executed in this TIT row
+ for redoRecordTable,redoRecordIndex in this.task.transactions:
+ # This is a quorum read that download the digest and associated data from RRT
+ txDigest = musicInterface.downloadTxDigestFromRRT(redoRecordTable,redoRecordIndex)
+ # Add digest to the prefetch data
+        prefetchData.addDigest(node.id,redoRecordTable,redoRecordIndex,txDigest,this.task)
+    # Let the resource manager know that the task was completed successfully
+ resourceManager.setCompleted(this.task)
+```
+
+**Assumption**:
+* Metadata was already downloaded in the first stage
+
+#### Apply Transaction Task
+
+```python
+ def run():
+ node = historyGraph.getNode(task.id)
+ metadata = prefetchData.getMetadata(node)
+ for index in range(max(0,metadata.latestApplied),len(metadata.redo)):
+ redoRecordTable,redoRecordIndex = metadata.redo[index]
+ # Get the tx digest from the prefetched data
+ txDigest = prefetchData.getTransaction(this.task.id,redoRecordTable,redoRecordIndex)
+ # Deserialize the tx digest
+ stagingTable = deserializeDigest(txDigest)
+ # For each table in the staging table
+ for table in stagingTable:
+ # For each row in the staging table
+ for row in stagingTable[table]:
+            # For each operation that was applied to that row during the transaction
+ for op in row:
+                # Apply the operation in order
+ MusicSQLManager.applyOperation(op.type,op.old,op.new)
+ resourceManager.setCompleted(this.task)
+
+
+```
+
+**Assumptions**
+* All data is already prefetched to be applied by the apply digest task
+
+#### Download History Task
+
+```python
+ def run():
+ # Download all the rows that are associated with this partition
+ history = musicInterface.downloadPartitionRedoHistory(task.partition);
+ # For each row
+ for row in history:
+ # Create a node and corresponding link in the historyGraph
+ historyGraph.addNode(row.redoTable,row.redoIndex)
+ # It is a future link because the next node was probably not downloaded yet
+ historyGraph.addFutureLink(row.redoTable,row.redoIndex,row.previousRedoTable,row.previousRedoIndex)
+    # Let the resource manager know that the task was completed successfully
+ resourceManager.setCompleted(this.task)
+```
+
+### Redo Commit to Data Table
+
+We do the same as the Redo Recovery to MDBC server, but instead of pushing to the local SQL database, we push towards the Music data tables. Additionally, we need to update all the corresponding tables.
+
+The specific differences are:
+* For each redo record applied, we need to increase the counter in LatestApplied for the corresponding row in TIT.
+* Once all the Redo Records in a TIT row are applied, then we need to change the applied flag in the corresponding TIT row.
+
+
+## TODO
+* Analyze the possible states during a failure in partition merging. Some ideas:
+ - We probably need to create a new table that keeps the id of the partition that was created
+ and then we need to clean up all the changes that were performed due to this new partition operation (split/merge)
+  - If the partition id is still in this table and there is no lock on the corresponding TIT table (if it was even created), then we need to clean all the changes, and restart the partition operation (if required)
+* Analyze the garbage collection of table information after commiting to the data tables. Some ideas:
+ - The rows in RRT can be erased after being applied completely into Music data tables
+  - A TIT row can be erased when all the elements in the Redo Array were already applied to the data tables. When a TIT row is erased, all the children of that node need to be changed to point to null
+  - A PI row can be erased when there are no more TIT rows or RH rows that point to it. When a PI row is deleted, the corresponding value needs to be erased from the TTP Previous Partitions for all the tables that are part of that PI row.
diff --git a/docs/uml.svg b/docs/uml.svg
new file mode 100644
index 0000000..8d6dd52
--- /dev/null
+++ b/docs/uml.svg
@@ -0,0 +1,2 @@
+<!DOCTYPE svg PUBLIC "-//W3C//DTD SVG 1.1//EN" "http://www.w3.org/Graphics/SVG/1.1/DTD/svg11.dtd">
+<svg xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" width="598px" height="263px" version="1.1"><defs/><g transform="translate(0.5,0.5)"><rect x="1" y="106" width="110" height="50" fill="none" stroke="#468e9f" stroke-width="3" pointer-events="none"/><g transform="translate(18.5,124.5)"><switch><foreignObject style="overflow:visible;" pointer-events="all" width="74" height="12" requiredFeatures="http://www.w3.org/TR/SVG11/feature#Extensibility"><div xmlns="http://www.w3.org/1999/xhtml" style="display: inline-block; font-size: 12px; font-family: Helvetica; color: rgb(0, 0, 0); line-height: 1.2; vertical-align: top; white-space: nowrap; text-align: center;"><div xmlns="http://www.w3.org/1999/xhtml" style="display:inline-block;text-align:inherit;text-decoration:inherit;">MDBC Server</div></div></foreignObject><text x="37" y="12" fill="#000000" text-anchor="middle" font-size="12px" font-family="Helvetica">MDBC Server</text></switch></g><rect x="206" y="106" width="120" height="50" fill="none" stroke="#468e9f" stroke-width="3" transform="translate(266,0)scale(-1,1)translate(-266,0)" pointer-events="none"/><g transform="translate(214.5,124.5)"><switch><foreignObject style="overflow:visible;" pointer-events="all" width="103" height="12" requiredFeatures="http://www.w3.org/TR/SVG11/feature#Extensibility"><div xmlns="http://www.w3.org/1999/xhtml" style="display: inline-block; font-size: 12px; font-family: Helvetica; color: rgb(0, 0, 0); line-height: 1.2; vertical-align: top; white-space: nowrap; text-align: center;"><div xmlns="http://www.w3.org/1999/xhtml" style="display:inline-block;text-align:inherit;text-decoration:inherit;">MusicSQLManager</div></div></foreignObject><text x="52" y="12" fill="#000000" text-anchor="middle" font-size="12px" font-family="Helvetica">MusicSQLManager</text></switch></g><g transform="translate(178.5,116.5)"><switch><foreignObject style="overflow:visible;" pointer-events="all" width="18" height="12" 
requiredFeatures="http://www.w3.org/TR/SVG11/feature#Extensibility"><div xmlns="http://www.w3.org/1999/xhtml" style="display: inline-block; font-size: 12px; font-family: Helvetica; color: rgb(0, 0, 0); line-height: 1.2; vertical-align: top; width: 20px; white-space: nowrap; word-wrap: normal; text-align: center;"><div xmlns="http://www.w3.org/1999/xhtml" style="display:inline-block;text-align:inherit;text-decoration:inherit;">0..*</div></div></foreignObject><text x="9" y="12" fill="#000000" text-anchor="middle" font-size="12px" font-family="Helvetica">0..*</text></switch></g><rect x="206" y="211" width="120" height="50" fill="none" stroke="#468e9f" stroke-width="3" pointer-events="none"/><g transform="translate(236.5,222.5)"><switch><foreignObject style="overflow:visible;" pointer-events="all" width="59" height="26" requiredFeatures="http://www.w3.org/TR/SVG11/feature#Extensibility"><div xmlns="http://www.w3.org/1999/xhtml" style="display: inline-block; font-size: 12px; font-family: Helvetica; color: rgb(0, 0, 0); line-height: 1.2; vertical-align: top; white-space: nowrap; text-align: center;"><div xmlns="http://www.w3.org/1999/xhtml" style="display:inline-block;text-align:inherit;text-decoration:inherit;">«interface»<br /><b>Music</b></div></div></foreignObject><text x="30" y="19" fill="#000000" text-anchor="middle" font-size="12px" font-family="Helvetica">[Not supported by viewer]</text></switch></g><rect x="466" y="76" width="130" height="50" fill="none" stroke="#468e9f" stroke-width="3" transform="translate(531,0)scale(-1,1)translate(-531,0)" pointer-events="none"/><g transform="translate(501.5,87.5)"><switch><foreignObject style="overflow:visible;" pointer-events="all" width="59" height="26" requiredFeatures="http://www.w3.org/TR/SVG11/feature#Extensibility"><div xmlns="http://www.w3.org/1999/xhtml" style="display: inline-block; font-size: 12px; font-family: Helvetica; color: rgb(0, 0, 0); line-height: 1.2; vertical-align: top; white-space: nowrap; text-align: 
center;"><div xmlns="http://www.w3.org/1999/xhtml" style="display:inline-block;text-align:inherit;text-decoration:inherit;">«interface»<br /><b>SQL</b></div></div></foreignObject><text x="30" y="19" fill="#000000" text-anchor="middle" font-size="12px" font-family="Helvetica">[Not supported by viewer]</text></switch></g><path d="M 343.97 119 L 386 119 Q 396 119 396 110 L 396 105.5 Q 396 101 406 101 L 461.53 101" fill="none" stroke="#000000" stroke-width="2" stroke-miterlimit="10" pointer-events="none"/><path d="M 327.97 119 L 335.97 114.29 L 343.97 119 L 335.97 123.71 Z" fill="#000000" stroke="#000000" stroke-width="2" stroke-miterlimit="10" pointer-events="none"/><path d="M 449.76 108 L 463.76 101 L 449.76 94" fill="none" stroke="#000000" stroke-width="2" stroke-miterlimit="10" pointer-events="none"/><g transform="translate(327.5,101.5)"><switch><foreignObject style="overflow:visible;" pointer-events="all" width="6" height="11" requiredFeatures="http://www.w3.org/TR/SVG11/feature#Extensibility"><div xmlns="http://www.w3.org/1999/xhtml" style="display: inline-block; font-size: 11px; font-family: Helvetica; color: rgb(0, 0, 0); line-height: 1.2; vertical-align: top; white-space: nowrap;"><div xmlns="http://www.w3.org/1999/xhtml" style="display:inline-block;text-align:inherit;text-decoration:inherit;">1</div></div></foreignObject><text x="3" y="11" fill="#000000" text-anchor="middle" font-size="11px" font-family="Helvetica">1</text></switch></g><g transform="translate(439.5,101.5)"><switch><foreignObject style="overflow:visible;" pointer-events="all" width="6" height="12" requiredFeatures="http://www.w3.org/TR/SVG11/feature#Extensibility"><div xmlns="http://www.w3.org/1999/xhtml" style="display: inline-block; font-size: 12px; font-family: Helvetica; color: rgb(0, 0, 0); line-height: 1.2; vertical-align: top; width: 8px; white-space: nowrap; word-wrap: normal; text-align: center;"><div xmlns="http://www.w3.org/1999/xhtml" 
style="display:inline-block;text-align:inherit;text-decoration:inherit;">1</div></div></foreignObject><text x="3" y="12" fill="#000000" text-anchor="middle" font-size="12px" font-family="Helvetica">1</text></switch></g><rect x="206" y="1" width="120" height="50" fill="none" stroke="#468e9f" stroke-width="3" pointer-events="none"/><g transform="translate(222.5,19.5)"><switch><foreignObject style="overflow:visible;" pointer-events="all" width="87" height="12" requiredFeatures="http://www.w3.org/TR/SVG11/feature#Extensibility"><div xmlns="http://www.w3.org/1999/xhtml" style="display: inline-block; font-size: 12px; font-family: Helvetica; color: rgb(0, 0, 0); line-height: 1.2; vertical-align: top; white-space: nowrap; text-align: center;"><div xmlns="http://www.w3.org/1999/xhtml" style="display:inline-block;text-align:inherit;text-decoration:inherit;">RunningQueries</div></div></foreignObject><text x="44" y="12" fill="#000000" text-anchor="middle" font-size="12px" font-family="Helvetica">RunningQueries</text></switch></g><rect x="466" y="135" width="130" height="50" fill="none" stroke="#468e9f" stroke-width="3" transform="translate(531,0)scale(-1,1)translate(-531,0)" pointer-events="none"/><g transform="translate(478.5,153.5)"><switch><foreignObject style="overflow:visible;" pointer-events="all" width="105" height="12" requiredFeatures="http://www.w3.org/TR/SVG11/feature#Extensibility"><div xmlns="http://www.w3.org/1999/xhtml" style="display: inline-block; font-size: 12px; font-family: Helvetica; color: rgb(0, 0, 0); line-height: 1.2; vertical-align: top; white-space: nowrap; text-align: center;"><div xmlns="http://www.w3.org/1999/xhtml" style="display:inline-block;text-align:inherit;text-decoration:inherit;">Local Staging Table</div></div></foreignObject><text x="53" y="12" fill="#000000" text-anchor="middle" font-size="12px" font-family="Helvetica">Local Staging Table</text></switch></g><path d="M 343.97 144 L 386 144 Q 396 144 396 152 L 396 156 Q 396 160 406 160 L 
461.53 160" fill="none" stroke="#000000" stroke-width="2" stroke-miterlimit="10" pointer-events="none"/><path d="M 327.97 144 L 335.97 139.29 L 343.97 144 L 335.97 148.71 Z" fill="#000000" stroke="#000000" stroke-width="2" stroke-miterlimit="10" pointer-events="none"/><path d="M 449.76 167 L 463.76 160 L 449.76 153" fill="none" stroke="#000000" stroke-width="2" stroke-miterlimit="10" pointer-events="none"/><g transform="translate(327.5,126.5)"><switch><foreignObject style="overflow:visible;" pointer-events="all" width="6" height="11" requiredFeatures="http://www.w3.org/TR/SVG11/feature#Extensibility"><div xmlns="http://www.w3.org/1999/xhtml" style="display: inline-block; font-size: 11px; font-family: Helvetica; color: rgb(0, 0, 0); line-height: 1.2; vertical-align: top; white-space: nowrap;"><div xmlns="http://www.w3.org/1999/xhtml" style="display:inline-block;text-align:inherit;text-decoration:inherit;">1</div></div></foreignObject><text x="3" y="11" fill="#000000" text-anchor="middle" font-size="11px" font-family="Helvetica">1</text></switch></g><g transform="translate(439.5,146.5)"><switch><foreignObject style="overflow:visible;" pointer-events="all" width="6" height="12" requiredFeatures="http://www.w3.org/TR/SVG11/feature#Extensibility"><div xmlns="http://www.w3.org/1999/xhtml" style="display: inline-block; font-size: 12px; font-family: Helvetica; color: rgb(0, 0, 0); line-height: 1.2; vertical-align: top; width: 8px; white-space: nowrap; word-wrap: normal; text-align: center;"><div xmlns="http://www.w3.org/1999/xhtml" style="display:inline-block;text-align:inherit;text-decoration:inherit;">1</div></div></foreignObject><text x="3" y="12" fill="#000000" text-anchor="middle" font-size="12px" font-family="Helvetica">1</text></switch></g><path d="M 56 88.03 L 56 36 Q 56 26 66 26 L 201.53 26" fill="none" stroke="#000000" stroke-width="2" stroke-miterlimit="10" pointer-events="none"/><path d="M 56 104.03 L 51.29 96.03 L 56 88.03 L 60.71 96.03 Z" fill="#000000" 
stroke="#000000" stroke-width="2" stroke-miterlimit="10" pointer-events="none"/><path d="M 189.76 33 L 203.76 26 L 189.76 19" fill="none" stroke="#000000" stroke-width="2" stroke-miterlimit="10" pointer-events="none"/><g transform="translate(57.5,72.5)"><switch><foreignObject style="overflow:visible;" pointer-events="all" width="6" height="11" requiredFeatures="http://www.w3.org/TR/SVG11/feature#Extensibility"><div xmlns="http://www.w3.org/1999/xhtml" style="display: inline-block; font-size: 11px; font-family: Helvetica; color: rgb(0, 0, 0); line-height: 1.2; vertical-align: top; white-space: nowrap;"><div xmlns="http://www.w3.org/1999/xhtml" style="display:inline-block;text-align:inherit;text-decoration:inherit;">1</div></div></foreignObject><text x="3" y="11" fill="#000000" text-anchor="middle" font-size="11px" font-family="Helvetica">1</text></switch></g><path d="M 56 173.97 L 56 226 Q 56 236 66 236 L 201.53 236" fill="none" stroke="#000000" stroke-width="2" stroke-miterlimit="10" pointer-events="none"/><path d="M 56 157.97 L 60.71 165.97 L 56 173.97 L 51.29 165.97 Z" fill="#000000" stroke="#000000" stroke-width="2" stroke-miterlimit="10" pointer-events="none"/><path d="M 189.76 243 L 203.76 236 L 189.76 229" fill="none" stroke="#000000" stroke-width="2" stroke-miterlimit="10" pointer-events="none"/><g transform="translate(57.5,170.5)"><switch><foreignObject style="overflow:visible;" pointer-events="all" width="6" height="11" requiredFeatures="http://www.w3.org/TR/SVG11/feature#Extensibility"><div xmlns="http://www.w3.org/1999/xhtml" style="display: inline-block; font-size: 11px; font-family: Helvetica; color: rgb(0, 0, 0); line-height: 1.2; vertical-align: top; white-space: nowrap;"><div xmlns="http://www.w3.org/1999/xhtml" style="display:inline-block;text-align:inherit;text-decoration:inherit;">1</div></div></foreignObject><text x="3" y="11" fill="#000000" text-anchor="middle" font-size="11px" font-family="Helvetica">1</text></switch></g><path d="M 266 156 L 
266 206.53" fill="none" stroke="#000000" stroke-width="2" stroke-miterlimit="10" stroke-dasharray="6 6" pointer-events="none"/><path d="M 259 194.76 L 266 208.76 L 273 194.76" fill="none" stroke="#000000" stroke-width="2" stroke-miterlimit="10" pointer-events="none"/><g transform="translate(255.5,178.5)"><switch><foreignObject style="overflow:visible;" pointer-events="all" width="20" height="11" requiredFeatures="http://www.w3.org/TR/SVG11/feature#Extensibility"><div xmlns="http://www.w3.org/1999/xhtml" style="display: inline-block; font-size: 11px; font-family: Helvetica; color: rgb(0, 0, 0); line-height: 1.2; vertical-align: top; white-space: nowrap; text-align: center;"><div xmlns="http://www.w3.org/1999/xhtml" style="display:inline-block;text-align:inherit;text-decoration:inherit;">Use</div></div></foreignObject><text x="10" y="11" fill="#000000" text-anchor="middle" font-size="11px" font-family="Helvetica">Use</text></switch></g><path d="M 266 106 L 266 55.47" fill="none" stroke="#000000" stroke-width="2" stroke-miterlimit="10" stroke-dasharray="6 6" pointer-events="none"/><path d="M 273 67.24 L 266 53.24 L 259 67.24" fill="none" stroke="#000000" stroke-width="2" stroke-miterlimit="10" pointer-events="none"/><g transform="translate(255.5,72.5)"><switch><foreignObject style="overflow:visible;" pointer-events="all" width="20" height="11" requiredFeatures="http://www.w3.org/TR/SVG11/feature#Extensibility"><div xmlns="http://www.w3.org/1999/xhtml" style="display: inline-block; font-size: 11px; font-family: Helvetica; color: rgb(0, 0, 0); line-height: 1.2; vertical-align: top; white-space: nowrap; text-align: center;"><div xmlns="http://www.w3.org/1999/xhtml" style="display:inline-block;text-align:inherit;text-decoration:inherit;">Use</div></div></foreignObject><text x="10" y="11" fill="#000000" text-anchor="middle" font-size="11px" font-family="Helvetica">Use</text></switch></g><path d="M 128.97 131 L 201.53 131" fill="none" stroke="#000000" stroke-width="2" 
stroke-miterlimit="10" pointer-events="none"/><path d="M 112.97 131 L 120.97 126.29 L 128.97 131 L 120.97 135.71 Z" fill="none" stroke="#000000" stroke-width="2" stroke-miterlimit="10" pointer-events="none"/><path d="M 189.76 138 L 203.76 131 L 189.76 124" fill="none" stroke="#000000" stroke-width="2" stroke-miterlimit="10" pointer-events="none"/><g transform="translate(112.5,113.5)"><switch><foreignObject style="overflow:visible;" pointer-events="all" width="6" height="11" requiredFeatures="http://www.w3.org/TR/SVG11/feature#Extensibility"><div xmlns="http://www.w3.org/1999/xhtml" style="display: inline-block; font-size: 11px; font-family: Helvetica; color: rgb(0, 0, 0); line-height: 1.2; vertical-align: top; white-space: nowrap;"><div xmlns="http://www.w3.org/1999/xhtml" style="display:inline-block;text-align:inherit;text-decoration:inherit;">1</div></div></foreignObject><text x="3" y="11" fill="#000000" text-anchor="middle" font-size="11px" font-family="Helvetica">1</text></switch></g></g></svg> \ No newline at end of file
diff --git a/pom.xml b/pom.xml
new file mode 100755
index 0000000..8ef98e3
--- /dev/null
+++ b/pom.xml
@@ -0,0 +1,162 @@
+<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
+ <modelVersion>4.0.0</modelVersion>
+ <groupId>com.att.research.mdbc</groupId>
+ <artifactId>mdbc</artifactId>
+ <version>0.0.1-SNAPSHOT</version>
+ <name>mdbc</name>
+ <description>
+ A JDBC implementation that proxies between a primary database (defined by DBInterface.java)
+ and MUSIC (defined by MusicInterface.java).
+
+ The primary database is normally H2, and MUSIC is normally Cassandra.
+ </description>
+ <properties>
+ <project.build.sourceEncoding>UTF-8</project.build.sourceEncoding>
+ </properties>
+
+ <dependencies>
+ <dependency>
+ <groupId>com.datastax.cassandra</groupId>
+ <artifactId>cassandra-driver-core</artifactId>
+ <version>3.3.0</version>
+ </dependency>
+ <dependency>
+ <groupId>com.h2database</groupId>
+ <artifactId>h2</artifactId>
+ <version>1.4.195</version>
+ </dependency>
+ <dependency>
+ <groupId>org.slf4j</groupId>
+ <artifactId>slf4j-log4j12</artifactId>
+ <version>1.6.1</version>
+ </dependency>
+ <dependency>
+ <groupId>org.json</groupId>
+ <artifactId>json</artifactId>
+ <version>20160810</version>
+ </dependency>
+ <dependency>
+ <groupId>mysql</groupId>
+ <artifactId>mysql-connector-java</artifactId>
+ <version>5.1.32</version>
+ </dependency>
+ <!-- https://mvnrepository.com/artifact/org.apache.commons/commons-lang3 -->
+ <dependency>
+ <groupId>org.apache.commons</groupId>
+ <artifactId>commons-lang3</artifactId>
+ <version>3.7</version>
+ </dependency>
+ <!-- These two dependencies pull in optional libraries for Cassandra -->
+ <dependency>
+ <groupId>net.jpountz.lz4</groupId>
+ <artifactId>lz4</artifactId>
+ <version>1.3.0</version>
+ </dependency>
+ <dependency>
+ <groupId>org.xerial.snappy</groupId>
+ <artifactId>snappy-java</artifactId>
+ <version>1.1.2.6</version>
+ </dependency>
+ <dependency>
+ <groupId>junit</groupId>
+ <artifactId>junit</artifactId>
+ <version>4.12</version>
+ <scope>test</scope>
+ </dependency>
+ <dependency>
+ <groupId>com.vmlens</groupId>
+ <artifactId>concurrent-junit</artifactId>
+ <version>1.0.0</version>
+ <scope>test</scope>
+ </dependency>
+ <dependency>
+ <groupId>com.github.jsqlparser</groupId>
+ <artifactId>jsqlparser</artifactId>
+ <version>1.1</version>
+ </dependency>
+ <dependency>
+ <groupId>org.apache.commons</groupId>
+ <artifactId>commons-lang3</artifactId>
+ <version>3.1</version>
+ </dependency>
+ <dependency>
+ <groupId>com.att.eelf</groupId>
+ <artifactId>eelf-core</artifactId>
+ <version>1.0.0</version>
+ </dependency>
+
+ <!-- https://mvnrepository.com/artifact/javax.servlet/servlet-api -->
+ <dependency>
+ <groupId>javax.servlet</groupId>
+ <artifactId>servlet-api</artifactId>
+ <version>2.5</version>
+ <scope>provided</scope>
+ </dependency>
+ <!-- Music jar from onap -->
+ <dependency>
+ <groupId>org.onap.music</groupId>
+ <artifactId>MUSIC</artifactId>
+ <version>3.0.2</version>
+ </dependency>
+ <!-- https://mvnrepository.com/artifact/org.apache.calcite.avatica/avatica-server -->
+ <dependency>
+ <groupId>org.apache.calcite.avatica</groupId>
+ <artifactId>avatica-server</artifactId>
+ <version>1.12.0</version>
+ </dependency>
+ <!-- https://mvnrepository.com/artifact/org.apache.calcite/calcite-plus -->
+ <dependency>
+ <groupId>org.apache.calcite</groupId>
+ <artifactId>calcite-plus</artifactId>
+ <version>1.12.0</version>
+ </dependency>
+ <dependency>
+ <groupId>com.beust</groupId>
+ <artifactId>jcommander</artifactId>
+ <version>1.72</version>
+ </dependency>
+ <dependency>
+ <groupId>com.google.code.gson</groupId>
+ <artifactId>gson</artifactId>
+ <version>2.8.5</version>
+ </dependency>
+ <dependency>
+ <groupId>org.mariadb.jdbc</groupId>
+ <artifactId>mariadb-java-client</artifactId>
+ <version>1.1.7</version>
+ </dependency>
+ </dependencies>
+
+ <build>
+ <plugins>
+ <plugin>
+ <groupId>org.apache.maven.plugins</groupId>
+ <artifactId>maven-compiler-plugin</artifactId>
+ <version>3.5.1</version>
+ <configuration>
+ <source>8</source>
+ <target>8</target>
+ </configuration>
+ </plugin>
+ </plugins>
+ </build>
+
+ <reporting>
+ <plugins>
+ <plugin>
+ <groupId>org.apache.maven.plugins</groupId>
+ <artifactId>maven-javadoc-plugin</artifactId>
+ <version>2.10.3</version>
+ <configuration>
+ <author>true</author>
+ <breakiterator>true</breakiterator>
+ <version>true</version>
+ <links>
+ <link>https://docs.oracle.com/javase/7/docs/api/</link>
+ <link>https://docs.oracle.com/javaee/7/api/</link>
+ </links>
+ </configuration>
+ </plugin>
+ </plugins>
+ </reporting>
+</project> \ No newline at end of file
diff --git a/src/main/java/com/att/research/exceptions/MDBCServiceException.java b/src/main/java/com/att/research/exceptions/MDBCServiceException.java
new file mode 100644
index 0000000..46cc1f7
--- /dev/null
+++ b/src/main/java/com/att/research/exceptions/MDBCServiceException.java
@@ -0,0 +1,88 @@
+/*
+ * ============LICENSE_START==========================================
+ * org.onap.music
+ * ===================================================================
+ * Copyright (c) 2017 AT&T Intellectual Property
+ * ===================================================================
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ * ============LICENSE_END=============================================
+ * ====================================================================
+ */
+
+package com.att.research.exceptions;
+
+/**
+ * @author inam
+ *
+ */
/**
 * Exception thrown by the MDBC service layer.
 *
 * <p>In addition to the standard {@link Exception} message/cause, callers may
 * attach a numeric error code and a separate error message via the setters.
 * Both start unset (code 0, message {@code null}); the constructors do not
 * populate them.
 *
 * @author inam
 */
public class MDBCServiceException extends Exception {

    private static final long serialVersionUID = 1L;

    // Optional extra diagnostics; independent of Throwable's own message.
    private int errorCode;
    private String errorMessage;

    /** Creates an exception with no message or cause. */
    public MDBCServiceException() {
        super();
    }

    /** Creates an exception with the given message. */
    public MDBCServiceException(String message) {
        super(message);
    }

    /** Creates an exception wrapping the given cause. */
    public MDBCServiceException(Throwable cause) {
        super(cause);
    }

    /** Creates an exception with the given message and cause. */
    public MDBCServiceException(String message, Throwable cause) {
        super(message, cause);
    }

    /** Full-control constructor mirroring {@link Exception}'s four-argument form. */
    public MDBCServiceException(String message, Throwable cause, boolean enableSuppression,
            boolean writableStackTrace) {
        super(message, cause, enableSuppression, writableStackTrace);
    }

    /** @return the numeric error code, or 0 if never set */
    public int getErrorCode() {
        return errorCode;
    }

    /** Sets the numeric error code. */
    public void setErrorCode(int errorCode) {
        this.errorCode = errorCode;
    }

    /** @return the auxiliary error message, or {@code null} if never set
     *          (note: distinct from {@link #getMessage()}) */
    public String getErrorMessage() {
        return errorMessage;
    }

    /** Sets the auxiliary error message. */
    public void setErrorMessage(String errorMessage) {
        this.errorMessage = errorMessage;
    }
}
diff --git a/src/main/java/com/att/research/exceptions/QueryException.java b/src/main/java/com/att/research/exceptions/QueryException.java
new file mode 100644
index 0000000..77445e5
--- /dev/null
+++ b/src/main/java/com/att/research/exceptions/QueryException.java
@@ -0,0 +1,90 @@
+/*
+ * ============LICENSE_START==========================================
+ * org.onap.music
+ * ===================================================================
+ * Copyright (c) 2017 AT&T Intellectual Property
+ * ===================================================================
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ * ============LICENSE_END=============================================
+ * ====================================================================
+ */
+package com.att.research.exceptions;
+
+
+
+/**
+ * @author inam
+ *
+ */
/**
 * Exception raised while parsing, building, or executing a query.
 *
 * <p>Callers may optionally supply a numeric error code; it is currently
 * stored but not exposed through any accessor.
 *
 * @author inam
 */
public class QueryException extends Exception {

    private static final long serialVersionUID = 1L;

    // Stored for future use; no getter exists yet.
    @SuppressWarnings("unused")
    private int errorCode;

    /** Creates an exception with no message or cause. */
    public QueryException() {
        super();
    }

    /**
     * Creates an exception with the given message.
     *
     * @param message description of the query failure
     */
    public QueryException(String message) {
        super(message);
    }

    /**
     * Creates an exception with the given message and numeric error code.
     *
     * @param message   description of the query failure
     * @param errorCode caller-defined numeric code
     */
    public QueryException(String message, int errorCode) {
        super(message);
        this.errorCode = errorCode;
    }

    /**
     * Creates an exception wrapping the given cause.
     *
     * @param cause underlying failure
     */
    public QueryException(Throwable cause) {
        super(cause);
    }

    /**
     * Creates an exception with the given message and cause.
     *
     * @param message description of the query failure
     * @param cause   underlying failure
     */
    public QueryException(String message, Throwable cause) {
        super(message, cause);
    }

    /**
     * Full-control constructor mirroring {@link Exception}'s four-argument form.
     *
     * @param message            description of the query failure
     * @param cause              underlying failure
     * @param enableSuppression  whether suppression is enabled
     * @param writableStackTrace whether the stack trace should be writable
     */
    public QueryException(String message, Throwable cause, boolean enableSuppression,
            boolean writableStackTrace) {
        super(message, cause, enableSuppression, writableStackTrace);
    }
}
diff --git a/src/main/java/com/att/research/logging/EELFLoggerDelegate.java b/src/main/java/com/att/research/logging/EELFLoggerDelegate.java
new file mode 100644
index 0000000..4e29a75
--- /dev/null
+++ b/src/main/java/com/att/research/logging/EELFLoggerDelegate.java
@@ -0,0 +1,339 @@
+
+package com.att.research.logging;
+
+import static com.att.eelf.configuration.Configuration.MDC_SERVER_FQDN;
+import static com.att.eelf.configuration.Configuration.MDC_SERVER_IP_ADDRESS;
+import static com.att.eelf.configuration.Configuration.MDC_SERVICE_INSTANCE_ID;
+import static com.att.eelf.configuration.Configuration.MDC_SERVICE_NAME;
+
+import java.net.InetAddress;
+import java.text.MessageFormat;
+import java.util.concurrent.ConcurrentHashMap;
+import java.util.concurrent.ConcurrentMap;
+
+import javax.servlet.http.HttpServletRequest;
+
+import org.slf4j.MDC;
+
+import com.att.eelf.configuration.EELFLogger;
+import com.att.eelf.configuration.EELFManager;
+import com.att.eelf.configuration.SLF4jWrapper;
+
+public class EELFLoggerDelegate extends SLF4jWrapper implements EELFLogger {
+
+ public static final EELFLogger errorLogger = EELFManager.getInstance().getErrorLogger();
+ public static final EELFLogger applicationLogger = EELFManager.getInstance().getApplicationLogger();
+ public static final EELFLogger auditLogger = EELFManager.getInstance().getAuditLogger();
+ public static final EELFLogger metricsLogger = EELFManager.getInstance().getMetricsLogger();
+ public static final EELFLogger debugLogger = EELFManager.getInstance().getDebugLogger();
+
+ private String className;
+ private static ConcurrentMap<String, EELFLoggerDelegate> classMap = new ConcurrentHashMap<>();
+
+ public EELFLoggerDelegate(final String className) {
+ super(className);
+ this.className = className;
+ }
+
+ /**
+ * Convenience method that gets a logger for the specified class.
+ *
+ * @see #getLogger(String)
+ *
+ * @param clazz
+ * @return Instance of EELFLoggerDelegate
+ */
+ public static EELFLoggerDelegate getLogger(Class<?> clazz) {
+ return getLogger(clazz.getName());
+ }
+
+ /**
+ * Gets a logger for the specified class name. If the logger does not already
+ * exist in the map, this creates a new logger.
+ *
+ * @param className
+ * If null or empty, uses EELFLoggerDelegate as the class name.
+ * @return Instance of EELFLoggerDelegate
+ */
+ public static EELFLoggerDelegate getLogger(final String className) {
+ String classNameNeverNull = className == null || "".equals(className) ? EELFLoggerDelegate.class.getName()
+ : className;
+ EELFLoggerDelegate delegate = classMap.get(classNameNeverNull);
+ if (delegate == null) {
+ delegate = new EELFLoggerDelegate(className);
+ classMap.put(className, delegate);
+ }
+ return delegate;
+ }
+
+ /**
+ * Logs a message at the lowest level: trace.
+ *
+ * @param logger
+ * @param msg
+ */
+ public void trace(EELFLogger logger, String msg) {
+ if (logger.isTraceEnabled()) {
+ logger.trace(msg);
+ }
+ }
+
+ /**
+ * Logs a message with parameters at the lowest level: trace.
+ *
+ * @param logger
+ * @param msg
+ * @param arguments
+ */
+ public void trace(EELFLogger logger, String msg, Object... arguments) {
+ if (logger.isTraceEnabled()) {
+ logger.trace(msg, arguments);
+ }
+ }
+
+ /**
+ * Logs a message and throwable at the lowest level: trace.
+ *
+ * @param logger
+ * @param msg
+ * @param th
+ */
+ public void trace(EELFLogger logger, String msg, Throwable th) {
+ if (logger.isTraceEnabled()) {
+ logger.trace(msg, th);
+ }
+ }
+
+ /**
+ * Logs a message at the second-lowest level: debug.
+ *
+ * @param logger
+ * @param msg
+ */
+ public void debug(EELFLogger logger, String msg) {
+ if (logger.isDebugEnabled()) {
+ logger.debug(msg);
+ }
+ }
+
+ /**
+ * Logs a message with parameters at the second-lowest level: debug.
+ *
+ * @param logger
+ * @param msg
+ * @param arguments
+ */
+ public void debug(EELFLogger logger, String msg, Object... arguments) {
+ if (logger.isDebugEnabled()) {
+ logger.debug(msg, arguments);
+ }
+ }
+
+ /**
+ * Logs a message and throwable at the second-lowest level: debug.
+ *
+ * @param logger
+ * @param msg
+ * @param th
+ */
+ public void debug(EELFLogger logger, String msg, Throwable th) {
+ if (logger.isDebugEnabled()) {
+ logger.debug(msg, th);
+ }
+ }
+
+ /**
+ * Logs a message at info level.
+ *
+ * @param logger
+ * @param msg
+ */
+ public void info(EELFLogger logger, String msg) {
+ logger.info(className + " - "+msg);
+ }
+
+ /**
+ * Logs a message with parameters at info level.
+ *
+ * @param logger
+ * @param msg
+ * @param arguments
+ */
+ public void info(EELFLogger logger, String msg, Object... arguments) {
+ logger.info(msg, arguments);
+ }
+
+ /**
+ * Logs a message and throwable at info level.
+ *
+ * @param logger
+ * @param msg
+ * @param th
+ */
+ public void info(EELFLogger logger, String msg, Throwable th) {
+ logger.info(msg, th);
+ }
+
+ /**
+ * Logs a message at warn level.
+ *
+ * @param logger
+ * @param msg
+ */
+ public void warn(EELFLogger logger, String msg) {
+ logger.warn(msg);
+ }
+
+ /**
+ * Logs a message with parameters at warn level.
+ *
+ * @param logger
+ * @param msg
+ * @param arguments
+ */
+ public void warn(EELFLogger logger, String msg, Object... arguments) {
+ logger.warn(msg, arguments);
+ }
+
+ /**
+ * Logs a message and throwable at warn level.
+ *
+ * @param logger
+ * @param msg
+ * @param th
+ */
+ public void warn(EELFLogger logger, String msg, Throwable th) {
+ logger.warn(msg, th);
+ }
+
+ /**
+ * Logs a message at error level.
+ *
+ * @param logger
+ * @param msg
+ */
+ public void error(EELFLogger logger, String msg) {
+ logger.error(className+ " - " + msg);
+ }
+
+ /**
+ * Logs a message with parameters at error level.
+ *
+ * @param logger
+ * @param msg
+ * @param arguments
+ */
+ public void error(EELFLogger logger, String msg, Object... arguments) {
+ logger.error(msg, arguments);
+ }
+
+ /**
+ * Logs a message and throwable at error level.
+ *
+ * @param logger
+ * @param msg
+ * @param th
+ */
+ public void error(EELFLogger logger, String msg, Throwable th) {
+ logger.error(msg, th);
+ }
+
+ /**
+ * Logs a message with the associated alarm severity at error level.
+ *
+ * @param logger
+ * @param msg
+ * @param severtiy
+ */
+ public void error(EELFLogger logger, String msg, Object /*AlarmSeverityEnum*/ severtiy) {
+ logger.error(msg);
+ }
+
+ /**
+ * Initializes the logger context.
+ */
+ public void init() {
+ setGlobalLoggingContext();
+ final String msg = "############################ Logging is started. ############################";
+ // These loggers emit the current date-time without being told.
+ info(applicationLogger, msg);
+ error(errorLogger, msg);
+ debug(debugLogger, msg);
+ info(auditLogger, msg);
+ info(metricsLogger, msg);
+ }
+
+
+ /**
+ * Builds a message using a template string and the arguments.
+ *
+ * @param message
+ * @param args
+ * @return
+ */
+ @SuppressWarnings("unused")
+ private String formatMessage(String message, Object... args) {
+ StringBuilder sbFormattedMessage = new StringBuilder();
+ if (args != null && args.length > 0 && message != null && message != "") {
+ MessageFormat mf = new MessageFormat(message);
+ sbFormattedMessage.append(mf.format(args));
+ } else {
+ sbFormattedMessage.append(message);
+ }
+
+ return sbFormattedMessage.toString();
+ }
+
+ /**
+ * Loads all the default logging fields into the MDC context.
+ */
+ private void setGlobalLoggingContext() {
+ MDC.put(MDC_SERVICE_INSTANCE_ID, "");
+ try {
+ MDC.put(MDC_SERVER_FQDN, InetAddress.getLocalHost().getHostName());
+ MDC.put(MDC_SERVER_IP_ADDRESS, InetAddress.getLocalHost().getHostAddress());
+ } catch (Exception e) {
+ errorLogger.error("setGlobalLoggingContext failed", e);
+ }
+ }
+
+ public static void mdcPut(String key, String value) {
+ MDC.put(key, value);
+ }
+
+ public static String mdcGet(String key) {
+ return MDC.get(key);
+ }
+
+ public static void mdcRemove(String key) {
+ MDC.remove(key);
+ }
+
+ /**
+ * Loads the RequestId/TransactionId into the MDC which it should be receiving
+ * with an each incoming REST API request. Also, configures few other request
+ * based logging fields into the MDC context.
+ *
+ * @param req
+ * @param appName
+ */
+ public void setRequestBasedDefaultsIntoGlobalLoggingContext(HttpServletRequest req, String appName) {
+ // Load the default fields
+ setGlobalLoggingContext();
+
+ // Load the request based fields
+ if (req != null) {
+
+
+ // Rest Path
+ MDC.put(MDC_SERVICE_NAME, req.getServletPath());
+
+ // Client IPAddress i.e. IPAddress of the remote host who is making
+ // this request.
+ String clientIPAddress = req.getHeader("X-FORWARDED-FOR");
+ if (clientIPAddress == null) {
+ clientIPAddress = req.getRemoteAddr();
+ }
+ }
+ }
+}
diff --git a/src/main/java/com/att/research/logging/format/AppMessages.java b/src/main/java/com/att/research/logging/format/AppMessages.java
new file mode 100644
index 0000000..a5de413
--- /dev/null
+++ b/src/main/java/com/att/research/logging/format/AppMessages.java
@@ -0,0 +1,156 @@
+/*
+ * ============LICENSE_START==========================================
+ * org.onap.music
+ * ===================================================================
+ * Copyright (c) 2017 AT&T Intellectual Property
+ * ===================================================================
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ * ============LICENSE_END=============================================
+ * ====================================================================
+ */
+
+package com.att.research.logging.format;
+
+/**
+ * @author inam
+ *
+ */
/**
 * Catalog of application error messages.
 *
 * <p>Each constant carries an error code string (e.g. {@code [ERR200E]}),
 * a short description, optional details, and a suggested resolution.
 *
 * @author inam
 */
public enum AppMessages {

    /*
     * Error-code ranges:
     *
     * 100-199 Security/Permission Related - Authentication problems
     * [ERR100E] Missing Information
     * [ERR101E] Authentication error occured
     *
     * 200-299 Availability/Timeout Related/IO - connectivity error - connection timeout
     * [ERR200E] Connectivity
     * [ERR201E] Host not available
     * [ERR202E] Error while connecting
     * [ERR203E] IO Error has occured
     * [ERR204E] Execution Interrupted
     * [ERR205E] Session Expired
     *
     * 300-399 Data Access/Integrity Related
     * [ERR300E] Incorrect data
     *
     * 400-499 - Cassandra Query Related
     *
     * 500-599 - Zookeepr/Locking Related
     *
     * 600-699 - MDBC Service Errors
     * [ERR600E] Error initializing the MDBC
     *
     * 700-799 Schema Interface Type/Validation - received Pay-load checksum is
     * invalid - received JSON is not valid
     *
     * 800-899 Business/Flow Processing Related - check out to service is not
     * allowed - Roll-back is done - failed to generate heat file
     *
     * 900-999 Unknown Errors - Unexpected exception
     * [ERR900E] Unexpected error occured
     * [ERR901E] Number format exception
     *
     * 1000-1099 Reserved - do not use
     */

    MISSINGINFO("[ERR100E]", "Missing Information ","Details: NA", "Please check application credentials and/or headers"),
    AUTHENTICATIONERROR("[ERR101E]", "Authentication error occured ","Details: NA", "Please verify application credentials"),

    // NOTE(review): constant name "CONNCECTIVITYERROR" is misspelled
    // ("CONNECTIVITYERROR"); it is public API, so renaming would break callers.
    CONNCECTIVITYERROR("[ERR200E]"," Connectivity error","Details: NA ","Please check connectivity to external resources"),
    HOSTUNAVAILABLE("[ERR201E]","Host not available","Details: NA","Please verify the host details"),
    IOERROR("[ERR203E]","IO Error has occured","","Please check IO"),
    EXECUTIONINTERRUPTED("[ERR204E]"," Execution Interrupted","",""),

    INCORRECTDATA("[ERR300E]"," Incorrect data",""," Please verify the request payload and try again"),
    MULTIPLERECORDS("[ERR301E]"," Multiple records found",""," Please verify the request payload and try again"),
    ALREADYEXIST("[ERR302E]"," Record already exist",""," Please verify the request payload and try again"),
    // NOTE(review): MISSINGDATA reuses code [ERR300E] and the description of
    // INCORRECTDATA — looks like a copy-paste slip (perhaps [ERR303E] was
    // intended); confirm the intended code before changing it.
    MISSINGDATA("[ERR300E]"," Incorrect data",""," Please verify the request payload and try again"),

    QUERYERROR("[ERR400E]","Error while processing query",""," Please verify the query"),

    UNKNOWNERROR("[ERR900E]"," Unexpected error occured",""," Please check logs for details");

    // Classification fields; only populated by the seven-argument constructor,
    // which no constant currently uses, so they remain null for all constants.
    ErrorTypes eType;
    ErrorSeverity alarmSeverity;
    ErrorSeverity errorSeverity;
    // Fields populated by the four-argument constructor used by every constant.
    String errorCode;
    String errorDescription;
    String details;
    String resolution;

    /**
     * Constructor used by all current constants.
     *
     * @param errorCode        bracketed code, e.g. "[ERR100E]"
     * @param errorDescription short human-readable description
     * @param details          extra details (may be empty)
     * @param resolution       suggested remediation (may be empty)
     */
    AppMessages(String errorCode, String errorDescription, String details,String resolution) {

        this.errorCode = errorCode;
        this.errorDescription = errorDescription;
        this.details = details;
        this.resolution = resolution;
    }

    /**
     * Extended constructor that also records type and severities.
     * Currently unused by any constant.
     *
     * @param eType            error classification
     * @param alarmSeverity    severity for alarming
     * @param errorSeverity    severity for logging
     * @param errorCode        bracketed code, e.g. "[ERR100E]"
     * @param errorDescription short human-readable description
     * @param details          extra details (may be empty)
     * @param resolution       suggested remediation (may be empty)
     */
    AppMessages(ErrorTypes eType, ErrorSeverity alarmSeverity,
            ErrorSeverity errorSeverity, String errorCode, String errorDescription, String details,
            String resolution) {

        this.eType = eType;
        this.alarmSeverity = alarmSeverity;
        this.errorSeverity = errorSeverity;
        this.errorCode = errorCode;
        this.errorDescription = errorDescription;
        this.details = details;
        this.resolution = resolution;
    }

    /** @return the details string (may be empty) */
    public String getDetails() {
        return this.details;
    }

    /** @return the suggested resolution (may be empty) */
    public String getResolution() {
        return this.resolution;
    }

    /** @return the bracketed error code, e.g. "[ERR100E]" */
    public String getErrorCode() {
        return this.errorCode;
    }

    /** @return the short error description */
    public String getErrorDescription() {
        return this.errorDescription;
    }

}
diff --git a/src/main/java/com/att/research/logging/format/ErrorSeverity.java b/src/main/java/com/att/research/logging/format/ErrorSeverity.java
new file mode 100644
index 0000000..dbe3e54
--- /dev/null
+++ b/src/main/java/com/att/research/logging/format/ErrorSeverity.java
@@ -0,0 +1,37 @@
+/*
+ * ============LICENSE_START==========================================
+ * org.onap.music
+ * ===================================================================
+ * Copyright (c) 2017 AT&T Intellectual Property
+ * ===================================================================
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ * ============LICENSE_END=============================================
+ * ====================================================================
+ */
+package com.att.research.logging.format;
+
+/**
+ * @author inam
+ *
+ */
/**
 * Severity levels attached to application error messages (see
 * {@code AppMessages}, which declares alarm and error severities of this type).
 *
 * <p>Note: constant order is part of the API (ordinal values); do not reorder.
 *
 * @author inam
 */
public enum ErrorSeverity {
    INFO,
    WARN,
    ERROR,
    FATAL,
    CRITICAL,
    MAJOR,
    MINOR,
    NONE,
}
diff --git a/src/main/java/com/att/research/logging/format/ErrorTypes.java b/src/main/java/com/att/research/logging/format/ErrorTypes.java
new file mode 100644
index 0000000..620528d
--- /dev/null
+++ b/src/main/java/com/att/research/logging/format/ErrorTypes.java
@@ -0,0 +1,44 @@
+/*
+ * ============LICENSE_START==========================================
+ * org.onap.music
+ * ===================================================================
+ * Copyright (c) 2017 AT&T Intellectual Property
+ * ===================================================================
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ * ============LICENSE_END=============================================
+ * ====================================================================
+ */
+package com.att.research.logging.format;
+
+import com.att.eelf.i18n.EELFResolvableErrorEnum;
+
+/**
+ * @author inam
+ *
+ */
/**
 * Broad classification of application errors, used together with
 * {@code ErrorSeverity} when describing {@code AppMessages} entries.
 *
 * <p>Implements the EELF marker interface so values can be resolved by the
 * EELF i18n machinery.
 *
 * @author inam
 */
public enum ErrorTypes implements EELFResolvableErrorEnum {


    CONNECTIONERROR,
    SESSIONEXPIRED,
    AUTHENTICATIONERROR,
    SERVICEUNAVAILABLE,
    QUERYERROR,
    DATAERROR,
    GENERALSERVICEERROR,
    MUSICSERVICEERROR,
    LOCKINGERROR,
    UNKNOWN,

}
diff --git a/src/main/java/com/att/research/mdbc/ArchiveProcess.java b/src/main/java/com/att/research/mdbc/ArchiveProcess.java
new file mode 100644
index 0000000..f192430
--- /dev/null
+++ b/src/main/java/com/att/research/mdbc/ArchiveProcess.java
@@ -0,0 +1,43 @@
+package com.att.research.mdbc;
+
+import org.json.JSONObject;
+
+import com.att.research.mdbc.mixins.DBInterface;
+import com.att.research.mdbc.mixins.MusicInterface;
+
+public class ArchiveProcess {
+ protected MusicInterface mi;
+ protected DBInterface dbi;
+
+ //TODO: This is a place holder for taking snapshots and moving data from redo record into actual tables
+
+ /**
+ * This method is called whenever there is a DELETE on a local SQL table, and should be called by the underlying databases
+ * triggering mechanism. It updates the MUSIC/Cassandra tables (both dirty bits and actual data) corresponding to the SQL DELETE.
+ * Music propagates it to the other replicas. If the local database is in the middle of a transaction, the DELETEs to MUSIC are
+ * delayed until the transaction is either committed or rolled back.
+ * @param tableName This is the table on which the select is being performed
+ * @param oldRow This is information about the row that is being deleted
+ */
+ @SuppressWarnings("unused")
+ private void deleteFromEntityTableInMusic(String tableName, JSONObject oldRow) {
+ TableInfo ti = dbi.getTableInfo(tableName);
+ mi.deleteFromEntityTableInMusic(ti,tableName, oldRow);
+ }
+
+ /**
+ * This method is called whenever there is an INSERT or UPDATE to a local SQL table, and should be called by the underlying databases
+ * triggering mechanism. It updates the MUSIC/Cassandra tables (both dirty bits and actual data) corresponding to the SQL write.
+ * Music propagates it to the other replicas. If the local database is in the middle of a transaction, the updates to MUSIC are
+ * delayed until the transaction is either committed or rolled back.
+ *
+ * @param tableName This is the table that has changed.
+ * @param changedRow This is information about the row that has changed, an array of objects representing the data being inserted/updated
+ */
+ @SuppressWarnings("unused")
+ private void updateDirtyRowAndEntityTableInMusic(String tableName, JSONObject changedRow) {
+ //TODO: is this right? should we be saving updates at the client? we should leverage JDBC to handle this
+ TableInfo ti = dbi.getTableInfo(tableName);
+ mi.updateDirtyRowAndEntityTableInMusic(ti,tableName, changedRow);
+ }
+}
diff --git a/src/main/java/com/att/research/mdbc/Configuration.java b/src/main/java/com/att/research/mdbc/Configuration.java
new file mode 100644
index 0000000..23aa6af
--- /dev/null
+++ b/src/main/java/com/att/research/mdbc/Configuration.java
@@ -0,0 +1,18 @@
+package com.att.research.mdbc;
+
/**
 * Holds the configuration property names and default values used by MDBC.
 * This is a pure constants holder; it is never instantiated.
 */
public class Configuration {
	/** The property name to use to connect to cassandra. */
	public static final String KEY_CASSANDRA_URL = "CASSANDRA_URL";
	/** The property name to use to enable/disable the MusicSqlManager entirely. */
	public static final String KEY_DISABLED = "disabled";
	/** The property name to use to select the DB 'mixin'. */
	public static final String KEY_DB_MIXIN_NAME = "MDBC_DB_MIXIN";
	/** The property name to use to select the MUSIC 'mixin'. */
	public static final String KEY_MUSIC_MIXIN_NAME = "MDBC_MUSIC_MIXIN";
	/** The name of the default mixin to use for the DBInterface (alternative: "h2"). */
	public static final String DB_MIXIN_DEFAULT = "mysql";
	/** The name of the default mixin to use for the MusicInterface. */
	public static final String MUSIC_MIXIN_DEFAULT = "cassandra2";
	/** Default cassandra url. */
	public static final String CASSANDRA_URL_DEFAULT = "localhost";

	/** Utility class; not meant to be instantiated. */
	private Configuration() {
	}
}
diff --git a/src/main/java/com/att/research/mdbc/DatabaseOperations.java b/src/main/java/com/att/research/mdbc/DatabaseOperations.java
new file mode 100644
index 0000000..406152e
--- /dev/null
+++ b/src/main/java/com/att/research/mdbc/DatabaseOperations.java
@@ -0,0 +1,443 @@
+package com.att.research.mdbc;
+
+import com.att.research.exceptions.MDBCServiceException;
+import com.att.research.logging.EELFLoggerDelegate;
+import org.onap.music.datastore.PreparedQueryObject;
+import org.onap.music.exceptions.MusicLockingException;
+import org.onap.music.exceptions.MusicQueryException;
+import org.onap.music.exceptions.MusicServiceException;
+import org.onap.music.main.MusicPureCassaCore;
+import org.onap.music.main.ResultType;
+import org.onap.music.main.ReturnType;
+
+import java.util.*;
+
+public class DatabaseOperations {
+ private static EELFLoggerDelegate logger = EELFLoggerDelegate.getLogger(DatabaseOperations.class);
+ /**
+ * This functions is used to generate cassandra uuid
+ * @return a random UUID that can be used for fields of type uuid
+ */
+ public static String generateUniqueKey() {
+ return UUID.randomUUID().toString();
+ }
+
+ /**
+ * This functions returns the primary key used to managed a specific row in the TableToPartition tables in Music
+ * @param namespace namespace where the TableToPartition resides
+ * @param tableToPartitionTableName name of the tableToPartition table
+ * @param tableName name of the application table that is being added to the system
+ * @return primary key to be used with MUSIC
+ */
+ public static String getTableToPartitionPrimaryKey(String namespace, String tableToPartitionTableName, String tableName){
+ return namespace+"."+tableToPartitionTableName+"."+tableName;
+ }
+
+ /**
+ * Create a new row for a table, with not assigned partition
+ * @param namespace namespace where the TableToPartition resides
+ * @param tableToPartitionTableName name of the tableToPartition table
+ * @param tableName name of the application table that is being added to the system
+ * @param lockId if the lock for this key is already hold, this is the id of that lock. May be <code>null</code> if lock is not hold for the corresponding key
+ */
+ public static void createNewTableToPartitionRow(String namespace, String tableToPartitionTableName, String tableName,String lockId) throws MDBCServiceException {
+ final String primaryKey = getTableToPartitionPrimaryKey(namespace,tableToPartitionTableName,tableName);
+ StringBuilder insert = new StringBuilder("INSERT INTO ")
+ .append(namespace)
+ .append('.')
+ .append(tableToPartitionTableName)
+ .append(" (tablename) VALUES ")
+ .append("('")
+ .append(tableName)
+ .append("');");
+ PreparedQueryObject query = new PreparedQueryObject();
+ query.appendQueryString(insert.toString());
+ try {
+ executedLockedPut(namespace,tableToPartitionTableName,tableName,query,lockId,null);
+ } catch (MDBCServiceException e) {
+ logger.error("Initialization error: Failure to create new row table to partition table ");
+ throw new MDBCServiceException("Initialization error: Failure to create new row table to partition table");
+ }
+ }
+
+ /**
+ * Update the partition to which a table belongs
+ * @param namespace namespace where the TableToPartition resides
+ * @param tableToPartitionTableName name of the tableToPartition table
+ * @param table name of the application table that is being added to the system
+ * @param newPartition partition to which the application table is assigned
+ * @param lockId if the lock for this key is already hold, this is the id of that lock. May be <code>null</code> if lock is not hold for the corresponding key
+ */
+ public static void updateTableToPartition(String namespace, String tableToPartitionTableName, String table, String newPartition, String lockId) throws MDBCServiceException {
+ final String primaryKey = getTableToPartitionPrimaryKey(namespace,tableToPartitionTableName,table);
+ PreparedQueryObject query = new PreparedQueryObject();
+ StringBuilder update = new StringBuilder("UPDATE ")
+ .append(namespace)
+ .append('.')
+ .append(tableToPartitionTableName)
+ .append(" SET previouspartitions = previouspartitions + {")
+ .append(newPartition)
+ .append("}, partition = " )
+ .append(newPartition)
+ .append(" WHERE tablename = '")
+ .append(table)
+ .append("';");
+ query.appendQueryString(update.toString());
+ try {
+ executedLockedPut(namespace,tableToPartitionTableName,table,query,lockId,null);
+ } catch (MDBCServiceException e) {
+ logger.error("Initialization error: Failure to update a row in table to partition table ");
+ throw new MDBCServiceException("Initialization error: Failure to update a row in table to partition table");
+ }
+ }
+
+
+ public static String getPartitionInformationPrimaryKey(String namespace, String partitionInformationTable, String partition){
+ return namespace+"."+partitionInformationTable+"."+partition;
+ }
+
+ /**
+ * Create a new row, when a new partition is initialized
+ * @param namespace namespace to which the partition info table resides in Cassandra
+ * @param partitionInfoTableName name of the partition information table
+ * @param replicationFactor associated replicated factor for the partition (max of all the tables)
+ * @param tables list of tables that are within this partitoin
+ * @param lockId if the lock for this key is already hold, this is the id of that lock. May be <code>null</code> if lock is not hold for the corresponding key
+ * @return the partition uuid associated to the new row
+ */
+ public static String createPartitionInfoRow(String namespace, String partitionInfoTableName, int replicationFactor, List<String> tables, String lockId) throws MDBCServiceException {
+ String id = generateUniqueKey();
+ final String primaryKey = getPartitionInformationPrimaryKey(namespace,partitionInfoTableName,id);
+ StringBuilder insert = new StringBuilder("INSERT INTO ")
+ .append(namespace)
+ .append('.')
+ .append(partitionInfoTableName)
+ .append(" (partition,replicationfactor,tables) VALUES ")
+ .append("(")
+ .append(id)
+ .append(",")
+ .append(replicationFactor)
+ .append(",{");
+ boolean first = true;
+ for(String table: tables){
+ if(!first){
+ insert.append(",");
+ }
+ first = false;
+ insert.append("'")
+ .append(table)
+ .append("'");
+ }
+ insert.append("});");
+ PreparedQueryObject query = new PreparedQueryObject();
+ query.appendQueryString(insert.toString());
+ try {
+ executedLockedPut(namespace,partitionInfoTableName,id,query,lockId,null);
+ } catch (MDBCServiceException e) {
+ logger.error("Initialization error: Failure to create new row in partition information table ");
+ throw new MDBCServiceException("Initialization error: Failure to create new row in partition information table");
+ }
+ return id;
+ }
+
+ /**
+ * Update the TIT row and table that currently handles the partition
+ * @param namespace namespace to which the partition info table resides in Cassandra
+ * @param partitionInfoTableName name of the partition information table
+ * @param partitionId row identifier for the partition being modiefd
+ * @param newTitRow new TIT row and table that are handling this partition
+ * @param owner owner that is handling the new tit row (url to the corresponding etdb nodej
+ * @param lockId if the lock for this key is already hold, this is the id of that lock. May be <code>null</code> if lock is not hold for the corresponding key
+ */
+ public static void updateRedoRow(String namespace, String partitionInfoTableName, String partitionId, RedoRow newTitRow, String owner, String lockId) throws MDBCServiceException {
+ final String primaryKey = getTableToPartitionPrimaryKey(namespace,partitionInfoTableName,partitionId);
+ PreparedQueryObject query = new PreparedQueryObject();
+ String newOwner = (owner==null)?"":owner;
+ StringBuilder update = new StringBuilder("UPDATE ")
+ .append(namespace)
+ .append('.')
+ .append(partitionInfoTableName)
+ .append(" SET currentowner='")
+ .append(newOwner)
+ .append("', latesttitindex=")
+ .append(newTitRow.getRedoRowIndex())
+ .append(", latesttittable='")
+ .append(newTitRow.getRedoTableName())
+ .append("' WHERE partition = ")
+ .append(partitionId)
+ .append(";");
+ query.appendQueryString(update.toString());
+ try {
+ executedLockedPut(namespace,partitionInfoTableName,partitionId,query,lockId,null);
+ } catch (MDBCServiceException e) {
+ logger.error("Initialization error: Failure to add new owner to partition in music table ");
+ throw new MDBCServiceException("Initialization error:Failure to add new owner to partition in music table ");
+ }
+ }
+
+ /**
+ * Create the first row in the history of the redo history table for a given partition
+ * @param namespace namespace to which the redo history table resides in Cassandra
+ * @param redoHistoryTableName name of the table where the row is being created
+ * @param firstTitRow first tit associated to the partition
+ * @param partitionId partition for which a history is created
+ */
+ public static void createRedoHistoryBeginRow(String namespace, String redoHistoryTableName, RedoRow firstTitRow, String partitionId, String lockId) throws MDBCServiceException {
+ createRedoHistoryRow(namespace,redoHistoryTableName,firstTitRow,partitionId, new ArrayList<>(),lockId);
+ }
+
+ /**
+ * Create a new row on the history for a given partition
+ * @param namespace namespace to which the redo history table resides in Cassandra
+ * @param redoHistoryTableName name of the table where the row is being created
+ * @param currentRow new tit row associated to the partition
+ * @param partitionId partition for which a history is created
+ * @param parentsRows parent tit rows associated to this partition
+ */
+ public static void createRedoHistoryRow(String namespace, String redoHistoryTableName, RedoRow currentRow, String partitionId, List<RedoRow> parentsRows, String lockId) throws MDBCServiceException {
+ final String primaryKey = partitionId+"-"+currentRow.getRedoTableName()+"-"+currentRow.getRedoRowIndex();
+ StringBuilder insert = new StringBuilder("INSERT INTO ")
+ .append(namespace)
+ .append('.')
+ .append(redoHistoryTableName)
+ .append(" (partition,redotable,redoindex,previousredo) VALUES ")
+ .append("(")
+ .append(partitionId)
+ .append(",'")
+ .append(currentRow.getRedoTableName())
+ .append("',")
+ .append(currentRow.getRedoRowIndex())
+ .append(",{");
+ boolean first = true;
+ for(RedoRow parent: parentsRows){
+ if(!first){
+ insert.append(",");
+ }
+ else{
+ first = false;
+ }
+ insert.append("('")
+ .append(parent.getRedoTableName())
+ .append("',")
+ .append(parent.getRedoRowIndex())
+ .append("),");
+ }
+ insert.append("});");
+ PreparedQueryObject query = new PreparedQueryObject();
+ query.appendQueryString(insert.toString());
+ try {
+ executedLockedPut(namespace,redoHistoryTableName,primaryKey,query,lockId,null);
+ } catch (MDBCServiceException e) {
+ logger.error("Initialization error: Failure to add new row to redo history");
+ throw new MDBCServiceException("Initialization error:Failure to add new row to redo history");
+ }
+ }
+
+ /**
+ * Creates a new empty tit row
+ * @param namespace namespace where the tit table is located
+ * @param titTableName name of the corresponding tit table where the new row is added
+ * @param partitionId partition to which the redo log is hold
+ * @return uuid associated to the new row
+ */
+ public static String CreateEmptyTitRow(String namespace, String titTableName, String partitionId, String lockId) throws MDBCServiceException {
+ String id = generateUniqueKey();
+ StringBuilder insert = new StringBuilder("INSERT INTO ")
+ .append(namespace)
+ .append('.')
+ .append(titTableName)
+ .append(" (id,applied,latestapplied,partition,redo) VALUES ")
+ .append("(")
+ .append(id)
+ .append(",false,-1,")
+ .append(partitionId)
+ .append(",[]);");
+ PreparedQueryObject query = new PreparedQueryObject();
+ query.appendQueryString(insert.toString());
+ try {
+ executedLockedPut(namespace,titTableName,id,query,lockId,null);
+ } catch (MDBCServiceException e) {
+ logger.error("Initialization error: Failure to add new row to transaction information");
+ throw new MDBCServiceException("Initialization error:Failure to add new row to transaction information");
+ }
+ return id;
+ }
+
+ /**
+ * This function creates the TransactionInformation table. It contain information related
+ * to the transactions happening in a given partition.
+ * * The schema of the table is
+ * * Id, uiid.
+ * * Partition, uuid id of the partition
+ * * LatestApplied, int indicates which values from the redologtable wast the last to be applied to the data tables
+ * * Applied: boolean, indicates if all the values in this redo log table where already applied to data tables
+ * * Redo: list of uiids associated to the Redo Records Table
+ *
+ */
+ public static void CreateTransactionInformationTable( String musicNamespace, String transactionInformationTableName) throws MDBCServiceException {
+ String tableName = transactionInformationTableName;
+ String priKey = "id";
+ StringBuilder fields = new StringBuilder();
+ fields.append("id uuid, ");
+ fields.append("partition uuid, ");
+ fields.append("latestapplied int, ");
+ fields.append("applied boolean, ");
+ //TODO: Frozen is only needed for old versions of cassandra, please update correspondingly
+ fields.append("redo list<frozen<tuple<text,tuple<text,varint>>>> ");
+ String cql = String.format("CREATE TABLE IF NOT EXISTS %s.%s (%s, PRIMARY KEY (%s));", musicNamespace, tableName, fields, priKey);
+ try {
+ executeMusicWriteQuery(musicNamespace,tableName,cql);
+ } catch (MDBCServiceException e) {
+ logger.error("Initialization error: Failure to create transaction information table");
+ throw(e);
+ }
+ }
+
+ /**
+ * This function creates the RedoRecords table. It contain information related to each transaction committed
+ * * LeaseId: id associated with the lease, text
+ * * LeaseCounter: transaction number under this lease, bigint \TODO this may need to be a varint later
+ * * TransactionDigest: text that contains all the changes in the transaction
+ */
+ public static void CreateRedoRecordsTable(int redoTableNumber, String musicNamespace, String redoRecordTableName) throws MDBCServiceException {
+ String tableName = redoRecordTableName;
+ if(redoTableNumber >= 0) {
+ StringBuilder table = new StringBuilder();
+ table.append(tableName);
+ table.append("-");
+ table.append(Integer.toString(redoTableNumber));
+ tableName=table.toString();
+ }
+ String priKey = "leaseid,leasecounter";
+ StringBuilder fields = new StringBuilder();
+ fields.append("leaseid text, ");
+ fields.append("leasecounter varint, ");
+ fields.append("transactiondigest text ");//notice lack of ','
+ String cql = String.format("CREATE TABLE IF NOT EXISTS %s.%s (%s, PRIMARY KEY (%s));", musicNamespace, tableName, fields, priKey);
+ try {
+ executeMusicWriteQuery(musicNamespace,tableName,cql);
+ } catch (MDBCServiceException e) {
+ logger.error("Initialization error: Failure to create redo records table");
+ throw(e);
+ }
+ }
+
+ /**
+ * This function creates the Table To Partition table. It contain information related to
+ */
+ public static void CreateTableToPartitionTable(String musicNamespace, String tableToPartitionTableName) throws MDBCServiceException {
+ String tableName = tableToPartitionTableName;
+ String priKey = "tablename";
+ StringBuilder fields = new StringBuilder();
+ fields.append("tablename text, ");
+ fields.append("partition uuid, ");
+ fields.append("previouspartitions set<uuid> ");
+ String cql = String.format("CREATE TABLE IF NOT EXISTS %s.%s (%s, PRIMARY KEY (%s));", musicNamespace, tableName, fields, priKey);
+ try {
+ executeMusicWriteQuery(musicNamespace,tableName,cql);
+ } catch (MDBCServiceException e) {
+ logger.error("Initialization error: Failure to create table to partition table");
+ throw(e);
+ }
+ }
+
+ public static void CreatePartitionInfoTable(String musicNamespace, String partitionInformationTableName) throws MDBCServiceException {
+ String tableName = partitionInformationTableName;
+ String priKey = "partition";
+ StringBuilder fields = new StringBuilder();
+ fields.append("partition uuid, ");
+ fields.append("latesttittable text, ");
+ fields.append("latesttitindex uuid, ");
+ fields.append("tables set<text>, ");
+ fields.append("replicationfactor int, ");
+ fields.append("currentowner text");
+ String cql = String.format("CREATE TABLE IF NOT EXISTS %s.%s (%s, PRIMARY KEY (%s));", musicNamespace, tableName, fields, priKey);
+ try {
+ executeMusicWriteQuery(musicNamespace,tableName,cql);
+ } catch (MDBCServiceException e) {
+ logger.error("Initialization error: Failure to create partition information table");
+ throw(e);
+ }
+ }
+
+ public static void CreateRedoHistoryTable(String musicNamespace, String redoHistoryTableName) throws MDBCServiceException {
+ String tableName = redoHistoryTableName;
+ String priKey = "partition,redotable,redoindex";
+ StringBuilder fields = new StringBuilder();
+ fields.append("partition uuid, ");
+ fields.append("redotable text, ");
+ fields.append("redoindex uuid, ");
+ //TODO: Frozen is only needed for old versions of cassandra, please update correspondingly
+ fields.append("previousredo set<frozen<tuple<text,uuid>>>");
+ String cql = String.format("CREATE TABLE IF NOT EXISTS %s.%s (%s, PRIMARY KEY (%s));", musicNamespace, tableName, fields, priKey);
+ try {
+ executeMusicWriteQuery(musicNamespace,tableName,cql);
+ } catch (MDBCServiceException e) {
+ logger.error("Initialization error: Failure to create redo history table");
+ throw(e);
+ }
+ }
+
+ /**
+ * This method executes a write query in Music
+ * @param cql the CQL to be sent to Cassandra
+ */
+ protected static void executeMusicWriteQuery(String keyspace, String table, String cql) throws MDBCServiceException {
+ PreparedQueryObject pQueryObject = new PreparedQueryObject();
+ pQueryObject.appendQueryString(cql);
+ ResultType rt = null;
+ try {
+ rt = MusicPureCassaCore.createTable(keyspace,table,pQueryObject,"critical");
+ } catch (MusicServiceException e) {
+ e.printStackTrace();
+ }
+ if (rt.getResult().toLowerCase().equals("failure")) {
+ throw new MDBCServiceException("Music eventual put failed");
+ }
+ }
+
+ protected static void executedLockedPut(String namespace, String tableName, String primaryKeyWithoutDomain, PreparedQueryObject queryObject, String lockId, MusicPureCassaCore.Condition conditionInfo) throws MDBCServiceException {
+ ReturnType rt ;
+ if(lockId==null) {
+ try {
+ rt = MusicPureCassaCore.atomicPut(namespace, tableName, primaryKeyWithoutDomain, queryObject, conditionInfo);
+ } catch (MusicLockingException e) {
+ logger.error("Music locked put failed");
+ throw new MDBCServiceException("Music locked put failed");
+ } catch (MusicServiceException e) {
+ logger.error("Music service fail: Music locked put failed");
+ throw new MDBCServiceException("Music service fail: Music locked put failed");
+ } catch (MusicQueryException e) {
+ logger.error("Music query fail: locked put failed");
+ throw new MDBCServiceException("Music query fail: Music locked put failed");
+ }
+ }
+ else {
+ rt = MusicPureCassaCore.criticalPut(namespace, tableName, primaryKeyWithoutDomain, queryObject, lockId, conditionInfo);
+ }
+ if (rt.getResult().getResult().toLowerCase().equals("failure")) {
+ throw new MDBCServiceException("Music locked put failed");
+ }
+ }
+
+ public static void createNamespace(String namespace, int replicationFactor) throws MDBCServiceException {
+ Map<String,Object> replicationInfo = new HashMap<String, Object>();
+ replicationInfo.put("'class'", "'SimpleStrategy'");
+ replicationInfo.put("'replication_factor'", replicationFactor);
+
+ PreparedQueryObject queryObject = new PreparedQueryObject();
+ queryObject.appendQueryString(
+ "CREATE KEYSPACE " + namespace + " WITH REPLICATION = " + replicationInfo.toString().replaceAll("=", ":"));
+
+ try {
+ MusicPureCassaCore.nonKeyRelatedPut(queryObject, "critical");
+ } catch (MusicServiceException e) {
+ if (e.getMessage().equals("Keyspace "+namespace+" already exists")) {
+ // ignore
+ } else {
+ logger.error("Error creating namespace: "+namespace);
+ throw new MDBCServiceException("Error creating namespace: "+namespace+". Internal error:"+e.getErrorMessage());
+ }
+ }
+ }
+}
diff --git a/src/main/java/com/att/research/mdbc/DatabasePartition.java b/src/main/java/com/att/research/mdbc/DatabasePartition.java
new file mode 100644
index 0000000..6046801
--- /dev/null
+++ b/src/main/java/com/att/research/mdbc/DatabasePartition.java
@@ -0,0 +1,190 @@
+package com.att.research.mdbc;
+
+import java.io.BufferedReader;
+import java.io.FileNotFoundException;
+import java.io.FileReader;
+import java.util.HashSet;
+import java.util.Set;
+
+import com.att.research.logging.EELFLoggerDelegate;
+import com.att.research.mdbc.mixins.CassandraMixin;
+import com.google.gson.Gson;
+import com.google.gson.GsonBuilder;
+
+/**
+ * A database range contain information about what ranges should be hosted in the current MDBC instance
+ * A database range with an empty map, is supposed to contain all the tables in Music.
+ * @author Enrique Saurez
+ */
+public class DatabasePartition {
+ private transient static EELFLoggerDelegate logger = EELFLoggerDelegate.getLogger(DatabasePartition.class);
+
+ private String transactionInformationTable;//Table that currently contains the REDO log for this partition
+ private String transactionInformationIndex;//Index that can be obtained either from
+ private String redoRecordsTable;
+ private String partitionId;
+ private String lockId;
+ protected Set<Range> ranges;
+
+ /**
+ * Each range represents a partition of the database, a database partition is a union of this partitions.
+ * The only requirement is that the ranges are not overlapping.
+ */
+
+ public DatabasePartition() {
+ ranges = new HashSet<>();
+ }
+
+ public DatabasePartition(Set<Range> knownRanges, String titIndex, String titTable, String partitionId, String lockId, String redoRecordsTable) {
+ if(knownRanges != null) {
+ ranges = knownRanges;
+ }
+ else {
+ ranges = new HashSet<>();
+ }
+
+ if(redoRecordsTable != null) {
+ this.setRedoRecordsTable(redoRecordsTable);
+ }
+ else{
+ this.setRedoRecordsTable("");
+ }
+
+ if(titIndex != null) {
+ this.setTransactionInformationIndex(titIndex);
+ }
+ else {
+ this.setTransactionInformationIndex("");
+ }
+
+ if(titTable != null) {
+ this.setTransactionInformationTable(titTable);
+ }
+ else {
+ this.setTransactionInformationTable("");
+ }
+
+ if(partitionId != null) {
+ this.setPartitionId(partitionId);
+ }
+ else {
+ this.setPartitionId("");
+ }
+
+ if(lockId != null) {
+ this.setLockId(lockId);
+ }
+ else {
+ this.setLockId("");
+ }
+ }
+
+ public String getTransactionInformationTable() {
+ return transactionInformationTable;
+ }
+
+ public void setTransactionInformationTable(String transactionInformationTable) {
+ this.transactionInformationTable = transactionInformationTable;
+ }
+
+ public String getTransactionInformationIndex() {
+ return transactionInformationIndex;
+ }
+
+ public void setTransactionInformationIndex(String transactionInformationIndex) {
+ this.transactionInformationIndex = transactionInformationIndex;
+ }
+
+ /**
+ * Add a new range to the ones own by the local MDBC
+ * @param newRange range that is being added
+ * @throws IllegalArgumentException
+ */
+ public synchronized void addNewRange(Range newRange) {
+ //Check overlap
+ for(Range r : ranges) {
+ if(r.overlaps(newRange)) {
+ throw new IllegalArgumentException("Range is already contain by a previous range");
+ }
+ }
+ if(!ranges.contains(newRange)) {
+ ranges.add(newRange);
+ }
+ }
+
+ /**
+ * Delete a range that is being modified
+ * @param rangeToDel limits of the range
+ */
+ public synchronized void deleteRange(Range rangeToDel) {
+ if(!ranges.contains(rangeToDel)) {
+ logger.error(EELFLoggerDelegate.errorLogger,"Range doesn't exist");
+ throw new IllegalArgumentException("Invalid table");
+ }
+ ranges.remove(rangeToDel);
+ }
+
+ /**
+ * Get all the ranges that are currently owned
+ * @return ranges
+ */
+ public synchronized Range[] getSnapshot() {
+ return (Range[]) ranges.toArray();
+ }
+
+ /**
+ * Serialize the ranges
+ * @return serialized ranges
+ */
+ public String toJson() {
+ GsonBuilder builder = new GsonBuilder();
+ builder.setPrettyPrinting().serializeNulls();;
+ Gson gson = builder.create();
+ return gson.toJson(this);
+ }
+
+ /**
+ * Function to obtain the configuration
+ * @param filepath path to the database range
+ * @return a new object of type DatabaseRange
+ * @throws FileNotFoundException
+ */
+
+ public static DatabasePartition readJsonFromFile( String filepath) throws FileNotFoundException {
+ BufferedReader br;
+ try {
+ br = new BufferedReader(
+ new FileReader(filepath));
+ } catch (FileNotFoundException e) {
+ logger.error(EELFLoggerDelegate.errorLogger,"File was not found when reading json"+e);
+ throw e;
+ }
+ Gson gson = new Gson();
+ DatabasePartition range = gson.fromJson(br, DatabasePartition.class);
+ return range;
+ }
+
+ public String getPartitionId() {
+ return partitionId;
+ }
+
+ public void setPartitionId(String partitionId) {
+ this.partitionId = partitionId;
+ }
+
+ public String getLockId() {
+ return lockId;
+ }
+
+ public void setLockId(String lockId) {
+ this.lockId = lockId;
+ }
+
+ public String getRedoRecordsTable() {
+ return redoRecordsTable;
+ }
+
+ public void setRedoRecordsTable(String redoRecordsTable) {
+ this.redoRecordsTable = redoRecordsTable;
+ }
+}
diff --git a/src/main/java/com/att/research/mdbc/LockId.java b/src/main/java/com/att/research/mdbc/LockId.java
new file mode 100644
index 0000000..a1de21a
--- /dev/null
+++ b/src/main/java/com/att/research/mdbc/LockId.java
@@ -0,0 +1,46 @@
+package com.att.research.mdbc;
+
/**
 * Value holder identifying a MUSIC lock: the locked row's primary key, the domain
 * (namespace.table) it lives in, and the lock reference handed out by MUSIC.
 * A null lock reference is normalized to the empty string.
 */
public class LockId {
	private String primaryKey;
	private String domain;
	private String lockReference;

	/**
	 * Builds a lock identifier from its three components.
	 * @param primaryKey key of the locked row
	 * @param domain namespace/table qualifier of the key
	 * @param lockReference lock handle from MUSIC; null is stored as ""
	 */
	public LockId(String primaryKey, String domain, String lockReference){
		this.primaryKey = primaryKey;
		this.domain = domain;
		this.lockReference = (lockReference == null) ? "" : lockReference;
	}

	/** @return the fully qualified lock key, in the form domain.primaryKey */
	public String getFullyQualifiedLockKey(){
		return String.format("%s.%s", this.domain, this.primaryKey);
	}

	public String getPrimaryKey() {
		return primaryKey;
	}

	public String getDomain() {
		return domain;
	}

	public String getLockReference() {
		return lockReference;
	}

	public void setPrimaryKey(String primaryKey) {
		this.primaryKey = primaryKey;
	}

	public void setDomain(String domain) {
		this.domain = domain;
	}

	public void setLockReference(String lockReference) {
		this.lockReference = lockReference;
	}
}
diff --git a/src/main/java/com/att/research/mdbc/MDBCUtils.java b/src/main/java/com/att/research/mdbc/MDBCUtils.java
new file mode 100644
index 0000000..411be8d
--- /dev/null
+++ b/src/main/java/com/att/research/mdbc/MDBCUtils.java
@@ -0,0 +1,62 @@
+package com.att.research.mdbc;
+
+import java.io.*;
+import java.util.Base64;
+
+import com.att.research.logging.EELFLoggerDelegate;
+import com.att.research.logging.format.AppMessages;
+import com.att.research.logging.format.ErrorSeverity;
+import com.att.research.logging.format.ErrorTypes;
+import org.json.JSONObject;
+
+public class MDBCUtils {
+ /** Read the object from Base64 string. */
+ public static Object fromString( String s ) throws IOException ,
+ ClassNotFoundException {
+ byte [] data = Base64.getDecoder().decode( s );
+ ObjectInputStream ois = new ObjectInputStream(
+ new ByteArrayInputStream( data ) );
+ Object o = ois.readObject();
+ ois.close();
+ return o;
+ }
+
+ /** Write the object to a Base64 string. */
+ public static String toString( Serializable o ) throws IOException {
+ //TODO We may want to also compress beside serialize
+ ByteArrayOutputStream baos = new ByteArrayOutputStream();
+ try {
+ ObjectOutputStream oos = new ObjectOutputStream(baos);
+ oos.writeObject(o);
+ oos.close();
+ return Base64.getEncoder().encodeToString(baos.toByteArray());
+ }
+ finally{
+ baos.close();
+ }
+ }
+
+ public static String toString( JSONObject o) throws IOException {
+ //TODO We may want to also compress beside serialize
+ ByteArrayOutputStream baos = new ByteArrayOutputStream();
+ ObjectOutputStream oos = new ObjectOutputStream( baos );
+ oos.writeObject( o );
+ oos.close();
+ return Base64.getEncoder().encodeToString(baos.toByteArray());
+ }
+
+ public static void saveToFile(String serializedContent, String filename, EELFLoggerDelegate logger) throws IOException {
+ try (PrintWriter fout = new PrintWriter(filename)) {
+ fout.println(serializedContent);
+ } catch (FileNotFoundException e) {
+ if(logger!=null){
+ logger.error(EELFLoggerDelegate.errorLogger, e.getMessage(), AppMessages.IOERROR, ErrorTypes.UNKNOWN, ErrorSeverity.CRITICAL);
+ }
+ else {
+ e.printStackTrace();
+ }
+ throw e;
+ }
+ }
+
+}
diff --git a/src/main/java/com/att/research/mdbc/MdbcCallableStatement.java b/src/main/java/com/att/research/mdbc/MdbcCallableStatement.java
new file mode 100644
index 0000000..fefce21
--- /dev/null
+++ b/src/main/java/com/att/research/mdbc/MdbcCallableStatement.java
@@ -0,0 +1,738 @@
+package com.att.research.mdbc;
+
+import java.io.InputStream;
+import java.io.Reader;
+import java.math.BigDecimal;
+import java.net.URL;
+import java.sql.Array;
+import java.sql.Blob;
+import java.sql.CallableStatement;
+import java.sql.Clob;
+import java.sql.Date;
+import java.sql.NClob;
+import java.sql.ParameterMetaData;
+import java.sql.Ref;
+import java.sql.RowId;
+import java.sql.SQLException;
+import java.sql.SQLXML;
+import java.sql.Statement;
+import java.sql.Time;
+import java.sql.Timestamp;
+import java.util.Calendar;
+import java.util.Map;
+
+import com.att.research.logging.EELFLoggerDelegate;
+
+/**
+ * MdbcCallableStatement is a proxy CallableStatement that front-ends statements from the
+ * underlying JDBC driver. It passes all operations through, and invokes the MusicSqlManager
+ * when there is the possibility that database tables have been created or dropped.
+ *
+ * @author Robert Eby
+ */
public class MdbcCallableStatement extends MdbcPreparedStatement implements CallableStatement {
    // Dedicated logger for this proxy class.
    private EELFLoggerDelegate logger = EELFLoggerDelegate.getLogger(MdbcCallableStatement.class);
    @SuppressWarnings("unused")
    private static final String DATASTAX_PREFIX = "com.datastax.driver";

    // NOTE(review): every delegating method below blindly casts `stmt` (inherited from
    // MdbcStatement via MdbcPreparedStatement) to CallableStatement. The first constructor
    // accepts a plain java.sql.Statement, so if a caller wraps a non-callable statement
    // (as MdbcConnection.createStatement appears to do) any of these methods will throw
    // ClassCastException at runtime — confirm intended usage with the callers.

    /** Wrap an existing (callable) statement; used when no SQL text is tracked. */
    public MdbcCallableStatement(Statement stmt, MusicSqlManager m) {
        super(stmt, m);
    }

    /** Wrap an existing (callable) statement together with its SQL text. */
    public MdbcCallableStatement(Statement stmt, String sql, MusicSqlManager mgr) {
        super(stmt, sql, mgr);
    }

    @Override
    public <T> T unwrap(Class<T> iface) throws SQLException {
        // NOTE(review): this is a trace message, yet it is logged at ERROR severity.
        logger.error(EELFLoggerDelegate.errorLogger, "proxystatement unwrap: " + iface.getName());
        return stmt.unwrap(iface);
    }

    @Override
    public boolean isWrapperFor(Class<?> iface) throws SQLException {
        // NOTE(review): trace message logged at ERROR severity (see unwrap above).
        logger.error(EELFLoggerDelegate.errorLogger, "proxystatement isWrapperFor: " + iface.getName());
        return stmt.isWrapperFor(iface);
    }

    /* ------------------------------------------------------------------
     * Parameter setters addressed by index — pure pass-through delegation.
     * ------------------------------------------------------------------ */

    @Override
    public void setTimestamp(int parameterIndex, Timestamp x, Calendar cal) throws SQLException {
        ((CallableStatement)stmt).setTimestamp(parameterIndex, x, cal);
    }

    @Override
    public void setNull(int parameterIndex, int sqlType, String typeName) throws SQLException {
        ((CallableStatement)stmt).setNull(parameterIndex, sqlType, typeName);
    }

    @Override
    public void setURL(int parameterIndex, URL x) throws SQLException {
        ((CallableStatement)stmt).setURL(parameterIndex, x);
    }

    @Override
    public ParameterMetaData getParameterMetaData() throws SQLException {
        return ((CallableStatement)stmt).getParameterMetaData();
    }

    @Override
    public void setRowId(int parameterIndex, RowId x) throws SQLException {
        ((CallableStatement)stmt).setRowId(parameterIndex, x);
    }

    @Override
    public void setNString(int parameterIndex, String value) throws SQLException {
        ((CallableStatement)stmt).setNString(parameterIndex, value);
    }

    @Override
    public void setNCharacterStream(int parameterIndex, Reader value, long length) throws SQLException {
        ((CallableStatement)stmt).setNCharacterStream(parameterIndex, value, length);
    }

    @Override
    public void setNClob(int parameterIndex, NClob value) throws SQLException {
        ((CallableStatement)stmt).setNClob(parameterIndex, value);
    }

    @Override
    public void setClob(int parameterIndex, Reader reader, long length) throws SQLException {
        ((CallableStatement)stmt).setClob(parameterIndex, reader, length);
    }

    @Override
    public void setBlob(int parameterIndex, InputStream inputStream, long length) throws SQLException {
        ((CallableStatement)stmt).setBlob(parameterIndex, inputStream, length);
    }

    @Override
    public void setNClob(int parameterIndex, Reader reader, long length) throws SQLException {
        ((CallableStatement)stmt).setNClob(parameterIndex, reader, length);
    }

    @Override
    public void setSQLXML(int parameterIndex, SQLXML xmlObject) throws SQLException {
        ((CallableStatement)stmt).setSQLXML(parameterIndex, xmlObject);
    }

    @Override
    public void setObject(int parameterIndex, Object x, int targetSqlType, int scaleOrLength) throws SQLException {
        ((CallableStatement)stmt).setObject(parameterIndex, x, targetSqlType, scaleOrLength);
    }

    @Override
    public void setAsciiStream(int parameterIndex, InputStream x, long length) throws SQLException {
        ((CallableStatement)stmt).setAsciiStream(parameterIndex, x, length);
    }

    @Override
    public void setBinaryStream(int parameterIndex, InputStream x, long length) throws SQLException {
        ((CallableStatement)stmt).setBinaryStream(parameterIndex, x, length);
    }

    @Override
    public void setCharacterStream(int parameterIndex, Reader reader, long length) throws SQLException {
        ((CallableStatement)stmt).setCharacterStream(parameterIndex, reader, length);
    }

    @Override
    public void setAsciiStream(int parameterIndex, InputStream x) throws SQLException {
        ((CallableStatement)stmt).setAsciiStream(parameterIndex, x);
    }

    @Override
    public void setBinaryStream(int parameterIndex, InputStream x) throws SQLException {
        ((CallableStatement)stmt).setBinaryStream(parameterIndex, x);
    }

    @Override
    public void setCharacterStream(int parameterIndex, Reader reader) throws SQLException {
        ((CallableStatement)stmt).setCharacterStream(parameterIndex, reader);
    }

    @Override
    public void setNCharacterStream(int parameterIndex, Reader value) throws SQLException {
        ((CallableStatement)stmt).setNCharacterStream(parameterIndex, value);
    }

    @Override
    public void setClob(int parameterIndex, Reader reader) throws SQLException {
        ((CallableStatement)stmt).setClob(parameterIndex, reader);
    }

    @Override
    public void setBlob(int parameterIndex, InputStream inputStream) throws SQLException {
        ((CallableStatement)stmt).setBlob(parameterIndex, inputStream);
    }

    @Override
    public void setNClob(int parameterIndex, Reader reader) throws SQLException {
        ((CallableStatement)stmt).setNClob(parameterIndex, reader);
    }

    /* ------------------------------------------------------------------
     * OUT-parameter registration and getters addressed by index.
     * ------------------------------------------------------------------ */

    @Override
    public void registerOutParameter(int parameterIndex, int sqlType) throws SQLException {
        ((CallableStatement)stmt).registerOutParameter(parameterIndex, sqlType);
    }

    @Override
    public void registerOutParameter(int parameterIndex, int sqlType, int scale) throws SQLException {
        ((CallableStatement)stmt).registerOutParameter(parameterIndex, sqlType, scale);
    }

    @Override
    public boolean wasNull() throws SQLException {
        return ((CallableStatement)stmt).wasNull();
    }

    @Override
    public String getString(int parameterIndex) throws SQLException {
        return ((CallableStatement)stmt).getString(parameterIndex);
    }

    @Override
    public boolean getBoolean(int parameterIndex) throws SQLException {
        return ((CallableStatement)stmt).getBoolean(parameterIndex);
    }

    @Override
    public byte getByte(int parameterIndex) throws SQLException {
        return ((CallableStatement)stmt).getByte(parameterIndex);
    }

    @Override
    public short getShort(int parameterIndex) throws SQLException {
        return ((CallableStatement)stmt).getShort(parameterIndex);
    }

    @Override
    public int getInt(int parameterIndex) throws SQLException {
        return ((CallableStatement)stmt).getInt(parameterIndex);
    }

    @Override
    public long getLong(int parameterIndex) throws SQLException {
        return ((CallableStatement)stmt).getLong(parameterIndex);
    }

    @Override
    public float getFloat(int parameterIndex) throws SQLException {
        return ((CallableStatement)stmt).getFloat(parameterIndex);
    }

    @Override
    public double getDouble(int parameterIndex) throws SQLException {
        return ((CallableStatement)stmt).getDouble(parameterIndex);
    }

    // Deprecated in JDBC, kept because the interface requires it.
    @SuppressWarnings("deprecation")
    @Override
    public BigDecimal getBigDecimal(int parameterIndex, int scale) throws SQLException {
        return ((CallableStatement)stmt).getBigDecimal(parameterIndex, scale);
    }

    @Override
    public byte[] getBytes(int parameterIndex) throws SQLException {
        return ((CallableStatement)stmt).getBytes(parameterIndex);
    }

    @Override
    public Date getDate(int parameterIndex) throws SQLException {
        return ((CallableStatement)stmt).getDate(parameterIndex);
    }

    @Override
    public Time getTime(int parameterIndex) throws SQLException {
        return ((CallableStatement)stmt).getTime(parameterIndex);
    }

    @Override
    public Timestamp getTimestamp(int parameterIndex) throws SQLException {
        return ((CallableStatement)stmt).getTimestamp(parameterIndex);
    }

    @Override
    public Object getObject(int parameterIndex) throws SQLException {
        return ((CallableStatement)stmt).getObject(parameterIndex);
    }

    @Override
    public BigDecimal getBigDecimal(int parameterIndex) throws SQLException {
        return ((CallableStatement)stmt).getBigDecimal(parameterIndex);
    }

    @Override
    public Object getObject(int parameterIndex, Map<String, Class<?>> map) throws SQLException {
        return ((CallableStatement)stmt).getObject(parameterIndex, map);
    }

    @Override
    public Ref getRef(int parameterIndex) throws SQLException {
        return ((CallableStatement)stmt).getRef(parameterIndex);
    }

    @Override
    public Blob getBlob(int parameterIndex) throws SQLException {
        return ((CallableStatement)stmt).getBlob(parameterIndex);
    }

    @Override
    public Clob getClob(int parameterIndex) throws SQLException {
        return ((CallableStatement)stmt).getClob(parameterIndex);
    }

    @Override
    public Array getArray(int parameterIndex) throws SQLException {
        return ((CallableStatement)stmt).getArray(parameterIndex);
    }

    @Override
    public Date getDate(int parameterIndex, Calendar cal) throws SQLException {
        return ((CallableStatement)stmt).getDate(parameterIndex, cal);
    }

    @Override
    public Time getTime(int parameterIndex, Calendar cal) throws SQLException {
        return ((CallableStatement)stmt).getTime(parameterIndex, cal);
    }

    @Override
    public Timestamp getTimestamp(int parameterIndex, Calendar cal) throws SQLException {
        return ((CallableStatement)stmt).getTimestamp(parameterIndex, cal);
    }

    @Override
    public void registerOutParameter(int parameterIndex, int sqlType, String typeName) throws SQLException {
        ((CallableStatement)stmt).registerOutParameter(parameterIndex, sqlType, typeName);
    }

    /* ------------------------------------------------------------------
     * OUT-parameter registration and setters addressed by name.
     * ------------------------------------------------------------------ */

    @Override
    public void registerOutParameter(String parameterName, int sqlType) throws SQLException {
        ((CallableStatement)stmt).registerOutParameter(parameterName, sqlType);
    }

    @Override
    public void registerOutParameter(String parameterName, int sqlType, int scale) throws SQLException {
        ((CallableStatement)stmt).registerOutParameter(parameterName, sqlType, scale);
    }

    @Override
    public void registerOutParameter(String parameterName, int sqlType, String typeName) throws SQLException {
        ((CallableStatement)stmt).registerOutParameter(parameterName, sqlType, typeName);
    }

    @Override
    public URL getURL(int parameterIndex) throws SQLException {
        return ((CallableStatement)stmt).getURL(parameterIndex);
    }

    @Override
    public void setURL(String parameterName, URL val) throws SQLException {
        ((CallableStatement)stmt).setURL(parameterName, val);
    }

    @Override
    public void setNull(String parameterName, int sqlType) throws SQLException {
        ((CallableStatement)stmt).setNull(parameterName, sqlType);
    }

    @Override
    public void setBoolean(String parameterName, boolean x) throws SQLException {
        ((CallableStatement)stmt).setBoolean(parameterName, x);
    }

    @Override
    public void setByte(String parameterName, byte x) throws SQLException {
        ((CallableStatement)stmt).setByte(parameterName, x);
    }

    @Override
    public void setShort(String parameterName, short x) throws SQLException {
        ((CallableStatement)stmt).setShort(parameterName, x);
    }

    @Override
    public void setInt(String parameterName, int x) throws SQLException {
        ((CallableStatement)stmt).setInt(parameterName, x);
    }

    @Override
    public void setLong(String parameterName, long x) throws SQLException {
        ((CallableStatement)stmt).setLong(parameterName, x);
    }

    @Override
    public void setFloat(String parameterName, float x) throws SQLException {
        ((CallableStatement)stmt).setFloat(parameterName, x);
    }

    @Override
    public void setDouble(String parameterName, double x) throws SQLException {
        ((CallableStatement)stmt).setDouble(parameterName, x);
    }

    @Override
    public void setBigDecimal(String parameterName, BigDecimal x) throws SQLException {
        ((CallableStatement)stmt).setBigDecimal(parameterName, x);
    }

    @Override
    public void setString(String parameterName, String x) throws SQLException {
        ((CallableStatement)stmt).setString(parameterName, x);
    }

    @Override
    public void setBytes(String parameterName, byte[] x) throws SQLException {
        ((CallableStatement)stmt).setBytes(parameterName, x);
    }

    @Override
    public void setDate(String parameterName, Date x) throws SQLException {
        ((CallableStatement)stmt).setDate(parameterName, x);
    }

    @Override
    public void setTime(String parameterName, Time x) throws SQLException {
        ((CallableStatement)stmt).setTime(parameterName, x);
    }

    @Override
    public void setTimestamp(String parameterName, Timestamp x) throws SQLException {
        ((CallableStatement)stmt).setTimestamp(parameterName, x);
    }

    @Override
    public void setAsciiStream(String parameterName, InputStream x, int length) throws SQLException {
        ((CallableStatement)stmt).setAsciiStream(parameterName, x, length);
    }

    @Override
    public void setBinaryStream(String parameterName, InputStream x, int length) throws SQLException {
        ((CallableStatement)stmt).setBinaryStream(parameterName, x, length);
    }

    @Override
    public void setObject(String parameterName, Object x, int targetSqlType, int scale) throws SQLException {
        ((CallableStatement)stmt).setObject(parameterName, x, targetSqlType, scale);
    }

    @Override
    public void setObject(String parameterName, Object x, int targetSqlType) throws SQLException {
        ((CallableStatement)stmt).setObject(parameterName, x, targetSqlType);
    }

    @Override
    public void setObject(String parameterName, Object x) throws SQLException {
        ((CallableStatement)stmt).setObject(parameterName, x);
    }

    @Override
    public void setCharacterStream(String parameterName, Reader reader, int length) throws SQLException {
        ((CallableStatement)stmt).setCharacterStream(parameterName, reader, length);
    }

    @Override
    public void setDate(String parameterName, Date x, Calendar cal) throws SQLException {
        ((CallableStatement)stmt).setDate(parameterName, x, cal);
    }

    @Override
    public void setTime(String parameterName, Time x, Calendar cal) throws SQLException {
        ((CallableStatement)stmt).setTime(parameterName, x, cal);
    }

    @Override
    public void setTimestamp(String parameterName, Timestamp x, Calendar cal) throws SQLException {
        ((CallableStatement)stmt).setTimestamp(parameterName, x, cal);
    }

    @Override
    public void setNull(String parameterName, int sqlType, String typeName) throws SQLException {
        ((CallableStatement)stmt).setNull(parameterName, sqlType, typeName);
    }

    /* ------------------------------------------------------------------
     * Getters addressed by name.
     * ------------------------------------------------------------------ */

    @Override
    public String getString(String parameterName) throws SQLException {
        return ((CallableStatement)stmt).getString(parameterName);
    }

    @Override
    public boolean getBoolean(String parameterName) throws SQLException {
        return ((CallableStatement)stmt).getBoolean(parameterName);
    }

    @Override
    public byte getByte(String parameterName) throws SQLException {
        return ((CallableStatement)stmt).getByte(parameterName);
    }

    @Override
    public short getShort(String parameterName) throws SQLException {
        return ((CallableStatement)stmt).getShort(parameterName);
    }

    @Override
    public int getInt(String parameterName) throws SQLException {
        return ((CallableStatement)stmt).getInt(parameterName);
    }

    @Override
    public long getLong(String parameterName) throws SQLException {
        return ((CallableStatement)stmt).getLong(parameterName);
    }

    @Override
    public float getFloat(String parameterName) throws SQLException {
        return ((CallableStatement)stmt).getFloat(parameterName);
    }

    @Override
    public double getDouble(String parameterName) throws SQLException {
        return ((CallableStatement)stmt).getDouble(parameterName);
    }

    @Override
    public byte[] getBytes(String parameterName) throws SQLException {
        return ((CallableStatement)stmt).getBytes(parameterName);
    }

    @Override
    public Date getDate(String parameterName) throws SQLException {
        return ((CallableStatement)stmt).getDate(parameterName);
    }

    @Override
    public Time getTime(String parameterName) throws SQLException {
        return ((CallableStatement)stmt).getTime(parameterName);
    }

    @Override
    public Timestamp getTimestamp(String parameterName) throws SQLException {
        return ((CallableStatement)stmt).getTimestamp(parameterName);
    }

    @Override
    public Object getObject(String parameterName) throws SQLException {
        return ((CallableStatement)stmt).getObject(parameterName);
    }

    @Override
    public BigDecimal getBigDecimal(String parameterName) throws SQLException {
        return ((CallableStatement)stmt).getBigDecimal(parameterName);
    }

    @Override
    public Object getObject(String parameterName, Map<String, Class<?>> map) throws SQLException {
        return ((CallableStatement)stmt).getObject(parameterName, map);
    }

    @Override
    public Ref getRef(String parameterName) throws SQLException {
        return ((CallableStatement)stmt).getRef(parameterName);
    }

    @Override
    public Blob getBlob(String parameterName) throws SQLException {
        return ((CallableStatement)stmt).getBlob(parameterName);
    }

    @Override
    public Clob getClob(String parameterName) throws SQLException {
        return ((CallableStatement)stmt).getClob(parameterName);
    }

    @Override
    public Array getArray(String parameterName) throws SQLException {
        return ((CallableStatement)stmt).getArray(parameterName);
    }

    @Override
    public Date getDate(String parameterName, Calendar cal) throws SQLException {
        return ((CallableStatement)stmt).getDate(parameterName, cal);
    }

    @Override
    public Time getTime(String parameterName, Calendar cal) throws SQLException {
        return ((CallableStatement)stmt).getTime(parameterName, cal);
    }

    @Override
    public Timestamp getTimestamp(String parameterName, Calendar cal) throws SQLException {
        return ((CallableStatement)stmt).getTimestamp(parameterName, cal);
    }

    @Override
    public URL getURL(String parameterName) throws SQLException {
        return ((CallableStatement)stmt).getURL(parameterName);
    }

    /* ------------------------------------------------------------------
     * JDBC 4.0+ additions (RowId, NClob, SQLXML, N-streams).
     * ------------------------------------------------------------------ */

    @Override
    public RowId getRowId(int parameterIndex) throws SQLException {
        return ((CallableStatement)stmt).getRowId(parameterIndex);
    }

    @Override
    public RowId getRowId(String parameterName) throws SQLException {
        return ((CallableStatement)stmt).getRowId(parameterName);
    }

    @Override
    public void setRowId(String parameterName, RowId x) throws SQLException {
        ((CallableStatement)stmt).setRowId(parameterName, x);
    }

    @Override
    public void setNString(String parameterName, String value) throws SQLException {
        ((CallableStatement)stmt).setNString(parameterName, value);
    }

    @Override
    public void setNCharacterStream(String parameterName, Reader value, long length) throws SQLException {
        ((CallableStatement)stmt).setNCharacterStream(parameterName, value, length);
    }

    @Override
    public void setNClob(String parameterName, NClob value) throws SQLException {
        ((CallableStatement)stmt).setNClob(parameterName, value);
    }

    @Override
    public void setClob(String parameterName, Reader reader, long length) throws SQLException {
        ((CallableStatement)stmt).setClob(parameterName, reader, length);
    }

    @Override
    public void setBlob(String parameterName, InputStream inputStream, long length) throws SQLException {
        ((CallableStatement)stmt).setBlob(parameterName, inputStream, length);
    }

    @Override
    public void setNClob(String parameterName, Reader reader, long length) throws SQLException {
        ((CallableStatement)stmt).setNClob(parameterName, reader, length);
    }

    @Override
    public NClob getNClob(int parameterIndex) throws SQLException {
        return ((CallableStatement)stmt).getNClob(parameterIndex);
    }

    @Override
    public NClob getNClob(String parameterName) throws SQLException {
        return ((CallableStatement)stmt).getNClob(parameterName);
    }

    @Override
    public void setSQLXML(String parameterName, SQLXML xmlObject) throws SQLException {
        ((CallableStatement)stmt).setSQLXML(parameterName, xmlObject);
    }

    @Override
    public SQLXML getSQLXML(int parameterIndex) throws SQLException {
        return ((CallableStatement)stmt).getSQLXML(parameterIndex);
    }

    @Override
    public SQLXML getSQLXML(String parameterName) throws SQLException {
        return ((CallableStatement)stmt).getSQLXML(parameterName);
    }

    @Override
    public String getNString(int parameterIndex) throws SQLException {
        return ((CallableStatement)stmt).getNString(parameterIndex);
    }

    @Override
    public String getNString(String parameterName) throws SQLException {
        return ((CallableStatement)stmt).getNString(parameterName);
    }

    @Override
    public Reader getNCharacterStream(int parameterIndex) throws SQLException {
        return ((CallableStatement)stmt).getNCharacterStream(parameterIndex);
    }

    @Override
    public Reader getNCharacterStream(String parameterName) throws SQLException {
        return ((CallableStatement)stmt).getNCharacterStream(parameterName);
    }

    @Override
    public Reader getCharacterStream(int parameterIndex) throws SQLException {
        return ((CallableStatement)stmt).getCharacterStream(parameterIndex);
    }

    @Override
    public Reader getCharacterStream(String parameterName) throws SQLException {
        return ((CallableStatement)stmt).getCharacterStream(parameterName);
    }

    @Override
    public void setBlob(String parameterName, Blob x) throws SQLException {
        ((CallableStatement)stmt).setBlob(parameterName, x);
    }

    @Override
    public void setClob(String parameterName, Clob x) throws SQLException {
        ((CallableStatement)stmt).setClob(parameterName, x);
    }

    @Override
    public void setAsciiStream(String parameterName, InputStream x, long length) throws SQLException {
        ((CallableStatement)stmt).setAsciiStream(parameterName, x, length);
    }

    @Override
    public void setBinaryStream(String parameterName, InputStream x, long length) throws SQLException {
        ((CallableStatement)stmt).setBinaryStream(parameterName, x, length);
    }

    @Override
    public void setCharacterStream(String parameterName, Reader reader, long length) throws SQLException {
        ((CallableStatement)stmt).setCharacterStream(parameterName, reader, length);
    }

    @Override
    public void setAsciiStream(String parameterName, InputStream x) throws SQLException {
        ((CallableStatement)stmt).setAsciiStream(parameterName, x);
    }

    @Override
    public void setBinaryStream(String parameterName, InputStream x) throws SQLException {
        ((CallableStatement)stmt).setBinaryStream(parameterName, x);
    }

    @Override
    public void setCharacterStream(String parameterName, Reader reader) throws SQLException {
        ((CallableStatement)stmt).setCharacterStream(parameterName, reader);
    }

    @Override
    public void setNCharacterStream(String parameterName, Reader value) throws SQLException {
        ((CallableStatement)stmt).setNCharacterStream(parameterName, value);
    }

    @Override
    public void setClob(String parameterName, Reader reader) throws SQLException {
        ((CallableStatement)stmt).setClob(parameterName, reader);
    }

    @Override
    public void setBlob(String parameterName, InputStream inputStream) throws SQLException {
        ((CallableStatement)stmt).setBlob(parameterName, inputStream);
    }

    @Override
    public void setNClob(String parameterName, Reader reader) throws SQLException {
        ((CallableStatement)stmt).setNClob(parameterName, reader);
    }

    @Override
    public <T> T getObject(int parameterIndex, Class<T> type) throws SQLException {
        return ((CallableStatement)stmt).getObject(parameterIndex, type);
    }

    @Override
    public <T> T getObject(String parameterName, Class<T> type) throws SQLException {
        return ((CallableStatement)stmt).getObject(parameterName, type);
    }

}
diff --git a/src/main/java/com/att/research/mdbc/MdbcConnection.java b/src/main/java/com/att/research/mdbc/MdbcConnection.java
new file mode 100644
index 0000000..d471522
--- /dev/null
+++ b/src/main/java/com/att/research/mdbc/MdbcConnection.java
@@ -0,0 +1,419 @@
+package com.att.research.mdbc;
+
+import java.sql.Array;
+import java.sql.Blob;
+import java.sql.CallableStatement;
+import java.sql.Clob;
+import java.sql.Connection;
+import java.sql.DatabaseMetaData;
+import java.sql.NClob;
+import java.sql.PreparedStatement;
+import java.sql.SQLClientInfoException;
+import java.sql.SQLException;
+import java.sql.SQLWarning;
+import java.sql.SQLXML;
+import java.sql.Savepoint;
+import java.sql.Statement;
+import java.sql.Struct;
+import java.util.Map;
+import java.util.Properties;
+import java.util.concurrent.Executor;
+
+import com.att.research.exceptions.MDBCServiceException;
+import com.att.research.exceptions.QueryException;
+import com.att.research.logging.EELFLoggerDelegate;
+import com.att.research.logging.format.AppMessages;
+import com.att.research.logging.format.ErrorSeverity;
+import com.att.research.logging.format.ErrorTypes;
+import com.att.research.mdbc.mixins.MusicInterface;
+import com.att.research.mdbc.mixins.TxCommitProgress;
+
+
+/**
+ * MdbcConnection is a proxy to a JDBC driver Connection. It uses the MusicSqlManager to copy
+ * data to and from Cassandra and the underlying JDBC database as needed. It will notify the underlying
+ * MusicSqlManager of any calls to <code>commit(), rollback()</code> or <code>setAutoCommit()</code>.
+ * Otherwise it just forwards all requests to the underlying Connection of the 'real' database.
+ *
+ * @author Robert Eby
+ */
+public class MdbcConnection implements Connection {
    // Shared class logger.
    private static EELFLoggerDelegate logger = EELFLoggerDelegate.getLogger(MdbcConnection.class);

    private final String id; // This is the transaction id, assigned to this connection. There is no need to change the id, if connection is reused
    private final Connection conn; // the JDBC Connection to the actual underlying database
    private final MusicSqlManager mgr; // there should be one MusicSqlManager in use per Connection
    // Tracks per-transaction commit progress; several methods treat this as nullable.
    private final TxCommitProgress progressKeeper;
    // The database partition this connection operates on; passed through to MUSIC commits.
    private final DatabasePartition partition;
+
+ public MdbcConnection(String id, String url, Connection c, Properties info, MusicInterface mi, TxCommitProgress progressKeeper, DatabasePartition partition) throws MDBCServiceException {
+ this.id = id;
+ if (c == null) {
+ throw new MDBCServiceException("Connection is null");
+ }
+ this.conn = c;
+ try {
+ this.mgr = new MusicSqlManager(url, c, info, mi);
+ } catch (MDBCServiceException e) {
+ logger.error("Failure in creating Music SQL Manager");
+ logger.error(EELFLoggerDelegate.errorLogger, e.getMessage(), AppMessages.QUERYERROR, ErrorTypes.QUERYERROR, ErrorSeverity.CRITICAL);
+ throw e;
+ }
+ try {
+ this.mgr.setAutoCommit(c.getAutoCommit(),null,null,null);
+ } catch (SQLException e) {
+ logger.error("Failure in autocommit");
+ logger.error(EELFLoggerDelegate.errorLogger, e.getMessage(), AppMessages.QUERYERROR, ErrorTypes.QUERYERROR, ErrorSeverity.CRITICAL);
+ }
+
+ // Verify the tables in MUSIC match the tables in the database
+ // and create triggers on any tables that need them
+ //mgr.synchronizeTableData();
+ if ( mgr != null ) try {
+ mgr.synchronizeTables();
+ } catch (QueryException e) {
+ logger.error("Error syncrhonizing tables");
+ logger.error(EELFLoggerDelegate.errorLogger, e.getMessage(), AppMessages.QUERYERROR, ErrorTypes.QUERYERROR, ErrorSeverity.CRITICAL);
+ }
+ else {
+ logger.error(EELFLoggerDelegate.errorLogger, "MusicSqlManager was not correctly created", AppMessages.UNKNOWNERROR, ErrorTypes.UNKNOWN, ErrorSeverity.FATAL);
+ throw new MDBCServiceException("Music SQL Manager object is null or invalid");
+ }
+ this.progressKeeper = progressKeeper;
+ this.partition = partition;
+ logger.debug("Mdbc connection created with id: "+id);
+ }
+
    @Override
    public <T> T unwrap(Class<T> iface) throws SQLException {
        // NOTE(review): this is an informational trace, yet it is written at ERROR severity
        // — confirm whether a debug-level call was intended.
        logger.error(EELFLoggerDelegate.errorLogger, "proxyconn unwrap: " + iface.getName());
        return conn.unwrap(iface);
    }

    @Override
    public boolean isWrapperFor(Class<?> iface) throws SQLException {
        // NOTE(review): trace logged at ERROR severity; message also says "proxystatement"
        // although this is the connection proxy.
        logger.error(EELFLoggerDelegate.errorLogger, "proxystatement iswrapperfor: " + iface.getName());
        return conn.isWrapperFor(iface);
    }
+
    @Override
    public Statement createStatement() throws SQLException {
        // NOTE(review): wraps a plain Statement in MdbcCallableStatement; any
        // CallableStatement-specific method on the returned object will then throw
        // ClassCastException (the wrapper casts stmt to CallableStatement) — confirm intent.
        return new MdbcCallableStatement(conn.createStatement(), mgr);
    }

    @Override
    public PreparedStatement prepareStatement(String sql) throws SQLException {
        //TODO: grab the sql call from here and all the other preparestatement calls
        return new MdbcPreparedStatement(conn.prepareStatement(sql), sql, mgr);
    }

    @Override
    public CallableStatement prepareCall(String sql) throws SQLException {
        // The SQL text is not forwarded to the wrapper here (unlike prepareStatement).
        return new MdbcCallableStatement(conn.prepareCall(sql), mgr);
    }

    @Override
    public String nativeSQL(String sql) throws SQLException {
        // Pure delegation: let the underlying driver translate the SQL.
        return conn.nativeSQL(sql);
    }
+
+ @Override
+ public void setAutoCommit(boolean autoCommit) throws SQLException {
+ boolean b = conn.getAutoCommit();
+ if (b != autoCommit) {
+ if(progressKeeper!=null) progressKeeper.commitRequested(id);
+ try {
+ mgr.setAutoCommit(autoCommit,id,progressKeeper,partition);
+ if(progressKeeper!=null)
+ progressKeeper.setMusicDone(id);
+ } catch (MDBCServiceException e) {
+ logger.error(EELFLoggerDelegate.errorLogger, "Commit to music failed", AppMessages.UNKNOWNERROR, ErrorTypes.UNKNOWN, ErrorSeverity.FATAL);
+ throw new SQLException("Failure commiting to MUSIC");
+ }
+ conn.setAutoCommit(autoCommit);
+ if(progressKeeper!=null) {
+ progressKeeper.setSQLDone(id);
+ }
+ if(progressKeeper!=null&&progressKeeper.isComplete(id)){
+ progressKeeper.reinitializeTxProgress(id);
+ }
+ }
+ }
+
    /** Pure delegation: report the underlying JDBC connection's autocommit mode. */
    @Override
    public boolean getAutoCommit() throws SQLException {
        return conn.getAutoCommit();
    }
+
+ @Override
+ public void commit() throws SQLException {
+ if(progressKeeper.isComplete(id)) {
+ return;
+ }
+ if(progressKeeper != null) {
+ progressKeeper.commitRequested(id);
+ }
+
+ try {
+ mgr.commit(id,progressKeeper,partition);
+ } catch (MDBCServiceException e) {
+ //If the commit fail, then a new commitId should be used
+ logger.error(EELFLoggerDelegate.errorLogger, "Commit to music failed", AppMessages.UNKNOWNERROR, ErrorTypes.UNKNOWN, ErrorSeverity.FATAL);
+ throw new SQLException("Failure commiting to MUSIC");
+ }
+
+ if(progressKeeper != null) {
+ progressKeeper.setMusicDone(id);
+ }
+
+ conn.commit();
+
+ if(progressKeeper != null) {
+ progressKeeper.setSQLDone(id);
+ }
+ //MusicMixin.releaseZKLocks(MusicMixin.currentLockMap.get(getConnID()));
+ if(progressKeeper.isComplete(id)){
+ progressKeeper.reinitializeTxProgress(id);
+ }
+ }
+
+ @Override
+ public void rollback() throws SQLException {
+ mgr.rollback();
+ conn.rollback();
+ progressKeeper.reinitializeTxProgress(id);
+ }
+
+ @Override
+ public void close() throws SQLException {
+ logger.debug("Closing mdbc connection with id:"+id);
+ if (mgr != null) {
+ logger.debug("Closing mdbc manager with id:"+id);
+ mgr.close();
+ }
+ if (conn != null && !conn.isClosed()) {
+ logger.debug("Closing jdbc from mdbc with id:"+id);
+ conn.close();
+ logger.debug("Connection was closed for id:" + id);
+ }
+ }
+
+ @Override
+ public boolean isClosed() throws SQLException {
+ return conn.isClosed();
+ }
+
+ @Override
+ public DatabaseMetaData getMetaData() throws SQLException {
+ return conn.getMetaData();
+ }
+
+ @Override
+ public void setReadOnly(boolean readOnly) throws SQLException {
+ conn.setReadOnly(readOnly);
+ }
+
+ @Override
+ public boolean isReadOnly() throws SQLException {
+ return conn.isReadOnly();
+ }
+
+ @Override
+ public void setCatalog(String catalog) throws SQLException {
+ conn.setCatalog(catalog);
+ }
+
+ @Override
+ public String getCatalog() throws SQLException {
+ return conn.getCatalog();
+ }
+
+ @Override
+ public void setTransactionIsolation(int level) throws SQLException {
+ conn.setTransactionIsolation(level);
+ }
+
+ @Override
+ public int getTransactionIsolation() throws SQLException {
+ return conn.getTransactionIsolation();
+ }
+
+ @Override
+ public SQLWarning getWarnings() throws SQLException {
+ return conn.getWarnings();
+ }
+
+ @Override
+ public void clearWarnings() throws SQLException {
+ conn.clearWarnings();
+ }
+
+ @Override
+ public Statement createStatement(int resultSetType, int resultSetConcurrency) throws SQLException {
+ return new MdbcCallableStatement(conn.createStatement(resultSetType, resultSetConcurrency), mgr);
+ }
+
+ @Override
+ public PreparedStatement prepareStatement(String sql, int resultSetType, int resultSetConcurrency)
+ throws SQLException {
+ return new MdbcCallableStatement(conn.prepareStatement(sql, resultSetType, resultSetConcurrency), sql, mgr);
+ }
+
	/**
	 * Prepares a callable statement with the given result-set
	 * type/concurrency, wrapped for MDBC interception.
	 */
	@Override
	public CallableStatement prepareCall(String sql, int resultSetType, int resultSetConcurrency) throws SQLException {
		return new MdbcCallableStatement(conn.prepareCall(sql, resultSetType, resultSetConcurrency), mgr);
	}
+
+ @Override
+ public Map<String, Class<?>> getTypeMap() throws SQLException {
+ return conn.getTypeMap();
+ }
+
+ @Override
+ public void setTypeMap(Map<String, Class<?>> map) throws SQLException {
+ conn.setTypeMap(map);
+ }
+
+ @Override
+ public void setHoldability(int holdability) throws SQLException {
+ conn.setHoldability(holdability);
+ }
+
+ @Override
+ public int getHoldability() throws SQLException {
+ return conn.getHoldability();
+ }
+
+ @Override
+ public Savepoint setSavepoint() throws SQLException {
+ return conn.setSavepoint();
+ }
+
+ @Override
+ public Savepoint setSavepoint(String name) throws SQLException {
+ return conn.setSavepoint(name);
+ }
+
+ @Override
+ public void rollback(Savepoint savepoint) throws SQLException {
+ conn.rollback(savepoint);
+ }
+
+ @Override
+ public void releaseSavepoint(Savepoint savepoint) throws SQLException {
+ conn.releaseSavepoint(savepoint);
+ }
+
+ @Override
+ public Statement createStatement(int resultSetType, int resultSetConcurrency, int resultSetHoldability)
+ throws SQLException {
+ return new MdbcCallableStatement(conn.createStatement(resultSetType, resultSetConcurrency, resultSetHoldability), mgr);
+ }
+
+ @Override
+ public PreparedStatement prepareStatement(String sql, int resultSetType, int resultSetConcurrency,
+ int resultSetHoldability) throws SQLException {
+ return new MdbcCallableStatement(conn.prepareStatement(sql, resultSetType, resultSetConcurrency, resultSetHoldability), sql, mgr);
+ }
+
	/**
	 * Prepares a callable statement with type/concurrency/holdability,
	 * wrapped for MDBC interception.
	 */
	@Override
	public CallableStatement prepareCall(String sql, int resultSetType, int resultSetConcurrency,
			int resultSetHoldability) throws SQLException {
		return new MdbcCallableStatement(conn.prepareCall(sql, resultSetType, resultSetConcurrency, resultSetHoldability), mgr);
	}
+
+ @Override
+ public PreparedStatement prepareStatement(String sql, int autoGeneratedKeys) throws SQLException {
+ return new MdbcPreparedStatement(conn.prepareStatement(sql, autoGeneratedKeys), sql, mgr);
+ }
+
+ @Override
+ public PreparedStatement prepareStatement(String sql, int[] columnIndexes) throws SQLException {
+ return new MdbcPreparedStatement(conn.prepareStatement(sql, columnIndexes), sql, mgr);
+ }
+
+ @Override
+ public PreparedStatement prepareStatement(String sql, String[] columnNames) throws SQLException {
+ return new MdbcPreparedStatement(conn.prepareStatement(sql, columnNames), sql, mgr);
+ }
+
+ @Override
+ public Clob createClob() throws SQLException {
+ return conn.createClob();
+ }
+
+ @Override
+ public Blob createBlob() throws SQLException {
+ return conn.createBlob();
+ }
+
+ @Override
+ public NClob createNClob() throws SQLException {
+ return conn.createNClob();
+ }
+
+ @Override
+ public SQLXML createSQLXML() throws SQLException {
+ return conn.createSQLXML();
+ }
+
+ @Override
+ public boolean isValid(int timeout) throws SQLException {
+ return conn.isValid(timeout);
+ }
+
+ @Override
+ public void setClientInfo(String name, String value) throws SQLClientInfoException {
+ conn.setClientInfo(name, value);
+ }
+
+ @Override
+ public void setClientInfo(Properties properties) throws SQLClientInfoException {
+ conn.setClientInfo(properties);
+ }
+
+ @Override
+ public String getClientInfo(String name) throws SQLException {
+ return conn.getClientInfo(name);
+ }
+
+ @Override
+ public Properties getClientInfo() throws SQLException {
+ return conn.getClientInfo();
+ }
+
+ @Override
+ public Array createArrayOf(String typeName, Object[] elements) throws SQLException {
+ return conn.createArrayOf(typeName, elements);
+ }
+
+ @Override
+ public Struct createStruct(String typeName, Object[] attributes) throws SQLException {
+ return conn.createStruct(typeName, attributes);
+ }
+
+ @Override
+ public void setSchema(String schema) throws SQLException {
+ conn.setSchema(schema);
+ }
+
+ @Override
+ public String getSchema() throws SQLException {
+ return conn.getSchema();
+ }
+
+ @Override
+ public void abort(Executor executor) throws SQLException {
+ conn.abort(executor);
+ }
+
+ @Override
+ public void setNetworkTimeout(Executor executor, int milliseconds) throws SQLException {
+ conn.setNetworkTimeout(executor, milliseconds);
+ }
+
+ @Override
+ public int getNetworkTimeout() throws SQLException {
+ return conn.getNetworkTimeout();
+ }
+}
diff --git a/src/main/java/com/att/research/mdbc/MdbcPreparedStatement.java b/src/main/java/com/att/research/mdbc/MdbcPreparedStatement.java
new file mode 100644
index 0000000..d35a20a
--- /dev/null
+++ b/src/main/java/com/att/research/mdbc/MdbcPreparedStatement.java
@@ -0,0 +1,743 @@
+package com.att.research.mdbc;
+
+import java.io.InputStream;
+import java.io.Reader;
+import java.math.BigDecimal;
+import java.net.URL;
+import java.sql.Array;
+import java.sql.Blob;
+import java.sql.CallableStatement;
+import java.sql.Clob;
+import java.sql.Connection;
+import java.sql.Date;
+import java.sql.NClob;
+import java.sql.ParameterMetaData;
+import java.sql.PreparedStatement;
+import java.sql.Ref;
+import java.sql.ResultSet;
+import java.sql.ResultSetMetaData;
+import java.sql.RowId;
+import java.sql.SQLException;
+import java.sql.SQLWarning;
+import java.sql.SQLXML;
+import java.sql.Statement;
+import java.sql.Time;
+import java.sql.Timestamp;
+import java.util.Calendar;
+
+import org.apache.commons.lang3.StringUtils;
+
+import com.att.research.logging.EELFLoggerDelegate;
+
+/**
+ * ProxyStatement is a proxy Statement that front ends Statements from the underlying JDBC driver. It passes all operations through,
+ * and invokes the MusicSqlManager when there is the possibility that database tables have been created or dropped.
+ *
+ * @author Robert Eby
+ */
+public class MdbcPreparedStatement extends MdbcStatement implements PreparedStatement {
+ private EELFLoggerDelegate logger = EELFLoggerDelegate.getLogger(MdbcPreparedStatement.class);
+ private static final String DATASTAX_PREFIX = "com.datastax.driver";
+
+ final String sql; // holds the sql statement if prepared statement
+ String[] params; // holds the parameters if prepared statement, indexing starts at 1
+
+
	/**
	 * Wraps a plain (non-prepared) Statement. No SQL text is available on this
	 * path, so {@code sql} stays null and {@code params} is never allocated.
	 * NOTE(review): setString() writes into params — it would NPE if invoked on
	 * an instance built through this constructor; confirm callers never do so.
	 */
	public MdbcPreparedStatement(Statement stmt, MusicSqlManager m) {
		super(stmt, m);
		this.sql = null;
	}
+
	/**
	 * Wraps a PreparedStatement together with its SQL text and allocates the
	 * parameter-tracking array sized by the number of '?' placeholders
	 * (+1 because JDBC parameter indexing starts at 1).
	 */
	public MdbcPreparedStatement(Statement stmt, String sql, MusicSqlManager mgr) {
		super(stmt, sql, mgr);
		this.sql = sql;
		//indexing starts at 1
		params = new String[StringUtils.countMatches(sql, "?")+1];
	}
+
+ @Override
+ public <T> T unwrap(Class<T> iface) throws SQLException {
+ return stmt.unwrap(iface);
+ }
+
+ @Override
+ public boolean isWrapperFor(Class<?> iface) throws SQLException {
+ return stmt.isWrapperFor(iface);
+ }
+
+ @Override
+ public ResultSet executeQuery(String sql) throws SQLException {
+ logger.debug(EELFLoggerDelegate.applicationLogger,"executeQuery: "+sql);
+ ResultSet r = null;
+ try {
+ mgr.preStatementHook(sql);
+ r = stmt.executeQuery(sql);
+ mgr.postStatementHook(sql);
+ synchronizeTables(sql);
+ } catch (Exception e) {
+ String nm = e.getClass().getName();
+ logger.error(EELFLoggerDelegate.errorLogger, "executeQuery: exception "+nm);
+ if (!nm.startsWith(DATASTAX_PREFIX))
+ throw e;
+ }
+ return r;
+ }
+
+ @Override
+ public int executeUpdate(String sql) throws SQLException {
+ logger.debug(EELFLoggerDelegate.applicationLogger,"executeUpdate: "+sql);
+
+ int n = 0;
+ try {
+ mgr.preStatementHook(sql);
+ n = stmt.executeUpdate(sql);
+ mgr.postStatementHook(sql);
+ synchronizeTables(sql);
+ } catch (Exception e) {
+ String nm = e.getClass().getName();
+ logger.error(EELFLoggerDelegate.errorLogger, "executeUpdate: exception "+nm+" "+e);
+ if (!nm.startsWith(DATASTAX_PREFIX))
+ throw e;
+ }
+ return n;
+ }
+
+ @Override
+ public void close() throws SQLException {
+ logger.debug(EELFLoggerDelegate.applicationLogger,"Statement close: ");
+ stmt.close();
+ }
+
+ @Override
+ public int getMaxFieldSize() throws SQLException {
+ logger.debug(EELFLoggerDelegate.applicationLogger,"getMaxFieldSize");
+ return stmt.getMaxFieldSize();
+ }
+
+ @Override
+ public void setMaxFieldSize(int max) throws SQLException {
+ stmt.setMaxFieldSize(max);
+ }
+
+ @Override
+ public int getMaxRows() throws SQLException {
+ return stmt.getMaxRows();
+ }
+
+ @Override
+ public void setMaxRows(int max) throws SQLException {
+ stmt.setMaxRows(max);
+ }
+
+ @Override
+ public void setEscapeProcessing(boolean enable) throws SQLException {
+ stmt.setEscapeProcessing(enable);
+ }
+
+ @Override
+ public int getQueryTimeout() throws SQLException {
+ return stmt.getQueryTimeout();
+ }
+
+ @Override
+ public void setQueryTimeout(int seconds) throws SQLException {
+ logger.debug(EELFLoggerDelegate.applicationLogger,"setQueryTimeout seconds "+ seconds);
+ stmt.setQueryTimeout(seconds);
+ }
+
+ @Override
+ public void cancel() throws SQLException {
+ stmt.cancel();
+ }
+
+ @Override
+ public SQLWarning getWarnings() throws SQLException {
+ return stmt.getWarnings();
+ }
+
+ @Override
+ public void clearWarnings() throws SQLException {
+ stmt.clearWarnings();
+ }
+
+ @Override
+ public void setCursorName(String name) throws SQLException {
+ stmt.setCursorName(name);
+ }
+
+ @Override
+ public boolean execute(String sql) throws SQLException {
+ logger.debug(EELFLoggerDelegate.applicationLogger,"execute: "+sql);
+ boolean b = false;
+ try {
+ mgr.preStatementHook(sql);
+ b = stmt.execute(sql);
+ mgr.postStatementHook(sql);
+ synchronizeTables(sql);
+ } catch (Exception e) {
+ String nm = e.getClass().getName();
+ logger.error(EELFLoggerDelegate.errorLogger, "execute: exception "+nm+" "+e);
+ // Note: this seems to be the only call Camunda uses, so it is the only one I am fixing for now.
+ boolean ignore = nm.startsWith(DATASTAX_PREFIX);
+// ignore |= (nm.startsWith("org.h2.jdbc.JdbcSQLException") && e.getMessage().contains("already exists"));
+ if (ignore) {
+ logger.warn("execute: exception (IGNORED) "+nm);
+ } else {
+ logger.error(EELFLoggerDelegate.errorLogger, " Exception "+nm+" "+e);
+ throw e;
+ }
+ }
+ return b;
+ }
+
+ @Override
+ public ResultSet getResultSet() throws SQLException {
+ return stmt.getResultSet();
+ }
+
+ @Override
+ public int getUpdateCount() throws SQLException {
+ return stmt.getUpdateCount();
+ }
+
+ @Override
+ public boolean getMoreResults() throws SQLException {
+ return stmt.getMoreResults();
+ }
+
+ @Override
+ public void setFetchDirection(int direction) throws SQLException {
+ stmt.setFetchDirection(direction);
+ }
+
+ @Override
+ public int getFetchDirection() throws SQLException {
+ return stmt.getFetchDirection();
+ }
+
+ @Override
+ public void setFetchSize(int rows) throws SQLException {
+ stmt.setFetchSize(rows);
+ }
+
+ @Override
+ public int getFetchSize() throws SQLException {
+ return stmt.getFetchSize();
+ }
+
+ @Override
+ public int getResultSetConcurrency() throws SQLException {
+ return stmt.getResultSetConcurrency();
+ }
+
+ @Override
+ public int getResultSetType() throws SQLException {
+ return stmt.getResultSetType();
+ }
+
+ @Override
+ public void addBatch(String sql) throws SQLException {
+ stmt.addBatch(sql);
+ }
+
+ @Override
+ public void clearBatch() throws SQLException {
+ stmt.clearBatch();
+ }
+
+ @Override
+ public int[] executeBatch() throws SQLException {
+ logger.debug(EELFLoggerDelegate.applicationLogger,"executeBatch: ");
+ int[] n = null;
+ try {
+ logger.debug(EELFLoggerDelegate.applicationLogger,"executeBatch() is not supported by MDBC; your results may be incorrect as a result.");
+ n = stmt.executeBatch();
+ synchronizeTables(null);
+ } catch (Exception e) {
+ String nm = e.getClass().getName();
+ logger.error(EELFLoggerDelegate.errorLogger,"executeBatch: exception "+nm);
+ if (!nm.startsWith(DATASTAX_PREFIX))
+ throw e;
+ }
+ return n;
+ }
+
+ @Override
+ public Connection getConnection() throws SQLException {
+ return stmt.getConnection();
+ }
+
+ @Override
+ public boolean getMoreResults(int current) throws SQLException {
+ return stmt.getMoreResults(current);
+ }
+
+ @Override
+ public ResultSet getGeneratedKeys() throws SQLException {
+ return stmt.getGeneratedKeys();
+ }
+
+ @Override
+ public int executeUpdate(String sql, int autoGeneratedKeys) throws SQLException {
+ logger.debug(EELFLoggerDelegate.applicationLogger,"executeUpdate: "+sql);
+ int n = 0;
+ try {
+ mgr.preStatementHook(sql);
+ n = stmt.executeUpdate(sql, autoGeneratedKeys);
+ mgr.postStatementHook(sql);
+ synchronizeTables(sql);
+ } catch (Exception e) {
+ String nm = e.getClass().getName();
+ logger.error(EELFLoggerDelegate.errorLogger,"executeUpdate: exception "+nm);
+ if (!nm.startsWith(DATASTAX_PREFIX))
+ throw e;
+ }
+ return n;
+ }
+
+ @Override
+ public int executeUpdate(String sql, int[] columnIndexes) throws SQLException {
+ logger.debug(EELFLoggerDelegate.applicationLogger,"executeUpdate: "+sql);
+ int n = 0;
+ try {
+ mgr.preStatementHook(sql);
+ n = stmt.executeUpdate(sql, columnIndexes);
+ mgr.postStatementHook(sql);
+ synchronizeTables(sql);
+ } catch (Exception e) {
+ String nm = e.getClass().getName();
+ logger.error(EELFLoggerDelegate.errorLogger,"executeUpdate: exception "+nm);
+ if (!nm.startsWith(DATASTAX_PREFIX))
+ throw e;
+ }
+ return n;
+ }
+
+ @Override
+ public int executeUpdate(String sql, String[] columnNames) throws SQLException {
+ logger.debug(EELFLoggerDelegate.applicationLogger,"executeUpdate: "+sql);
+ int n = 0;
+ try {
+ mgr.preStatementHook(sql);
+ n = stmt.executeUpdate(sql, columnNames);
+ mgr.postStatementHook(sql);
+ synchronizeTables(sql);
+ } catch (Exception e) {
+ String nm = e.getClass().getName();
+ logger.error(EELFLoggerDelegate.errorLogger,"executeUpdate: exception "+nm);
+ if (!nm.startsWith(DATASTAX_PREFIX))
+ throw e;
+ }
+ return n;
+ }
+
+ @Override
+ public boolean execute(String sql, int autoGeneratedKeys) throws SQLException {
+ logger.debug(EELFLoggerDelegate.applicationLogger,"execute: "+sql);
+ boolean b = false;
+ try {
+ mgr.preStatementHook(sql);
+ b = stmt.execute(sql, autoGeneratedKeys);
+ mgr.postStatementHook(sql);
+ synchronizeTables(sql);
+ } catch (Exception e) {
+ String nm = e.getClass().getName();
+ logger.error(EELFLoggerDelegate.errorLogger,"execute: exception "+nm);
+ if (!nm.startsWith(DATASTAX_PREFIX))
+ throw e;
+ }
+ return b;
+ }
+
+ @Override
+ public boolean execute(String sql, int[] columnIndexes) throws SQLException {
+ logger.debug(EELFLoggerDelegate.applicationLogger,"execute: "+sql);
+ boolean b = false;
+ try {
+ mgr.preStatementHook(sql);
+ b = stmt.execute(sql, columnIndexes);
+ mgr.postStatementHook(sql);
+ synchronizeTables(sql);
+ } catch (Exception e) {
+ String nm = e.getClass().getName();
+ logger.error(EELFLoggerDelegate.errorLogger,"execute: exception "+nm);
+ if (!nm.startsWith(DATASTAX_PREFIX))
+ throw e;
+ }
+ return b;
+ }
+
+ @Override
+ public boolean execute(String sql, String[] columnNames) throws SQLException {
+ logger.debug(EELFLoggerDelegate.applicationLogger,"execute: "+sql);
+ boolean b = false;
+ try {
+ mgr.preStatementHook(sql);
+ b = stmt.execute(sql, columnNames);
+ mgr.postStatementHook(sql);
+ synchronizeTables(sql);
+ } catch (Exception e) {
+ String nm = e.getClass().getName();
+ logger.error(EELFLoggerDelegate.errorLogger,"execute: exception "+nm);
+ if (!nm.startsWith(DATASTAX_PREFIX))
+ throw e;
+ }
+ return b;
+ }
+
+ @Override
+ public int getResultSetHoldability() throws SQLException {
+ return stmt.getResultSetHoldability();
+ }
+
+ @Override
+ public boolean isClosed() throws SQLException {
+ return stmt.isClosed();
+ }
+
+ @Override
+ public void setPoolable(boolean poolable) throws SQLException {
+ stmt.setPoolable(poolable);
+ }
+
+ @Override
+ public boolean isPoolable() throws SQLException {
+ return stmt.isPoolable();
+ }
+
+ @Override
+ public void closeOnCompletion() throws SQLException {
+ stmt.closeOnCompletion();
+ }
+
+ @Override
+ public boolean isCloseOnCompletion() throws SQLException {
+ return stmt.isCloseOnCompletion();
+ }
+
+ @Override
+ public ResultSet executeQuery() throws SQLException {
+ logger.debug(EELFLoggerDelegate.applicationLogger,"executeQuery: "+sql);
+ ResultSet r = null;
+ try {
+ mgr.preStatementHook(sql);
+ r = ((PreparedStatement)stmt).executeQuery();;
+ mgr.postStatementHook(sql);
+ synchronizeTables(sql);
+ } catch (Exception e) {
+ e.printStackTrace();
+ String nm = e.getClass().getName();
+ logger.error(EELFLoggerDelegate.errorLogger,"executeQuery: exception "+nm);
+ if (!nm.startsWith(DATASTAX_PREFIX))
+ throw e;
+ }
+ return r;
+ }
+
+ @Override
+ public int executeUpdate() throws SQLException {
+ logger.debug(EELFLoggerDelegate.applicationLogger,"executeUpdate: "+sql);
+ int n = 0;
+ try {
+ mgr.preStatementHook(sql);
+ n = ((PreparedStatement)stmt).executeUpdate();
+ mgr.postStatementHook(sql);
+ synchronizeTables(sql);
+ } catch (Exception e) {
+ e.printStackTrace();
+ String nm = e.getClass().getName();
+ logger.error(EELFLoggerDelegate.errorLogger,"executeUpdate: exception "+nm);
+ if (!nm.startsWith(DATASTAX_PREFIX))
+ throw e;
+ }
+ return n;
+ }
+
+ @Override
+ public void setNull(int parameterIndex, int sqlType) throws SQLException {
+ ((PreparedStatement)stmt).setNull(parameterIndex, sqlType);
+ }
+
+ @Override
+ public void setBoolean(int parameterIndex, boolean x) throws SQLException {
+ ((PreparedStatement)stmt).setBoolean(parameterIndex, x);
+ }
+
+ @Override
+ public void setByte(int parameterIndex, byte x) throws SQLException {
+ ((PreparedStatement)stmt).setByte(parameterIndex, x);
+ }
+
+ @Override
+ public void setShort(int parameterIndex, short x) throws SQLException {
+ ((PreparedStatement)stmt).setShort(parameterIndex, x);
+ }
+
+ @Override
+ public void setInt(int parameterIndex, int x) throws SQLException {
+ ((PreparedStatement)stmt).setInt(parameterIndex, x);
+ }
+
+ @Override
+ public void setLong(int parameterIndex, long x) throws SQLException {
+ ((PreparedStatement)stmt).setLong(parameterIndex, x);
+ }
+
+ @Override
+ public void setFloat(int parameterIndex, float x) throws SQLException {
+ ((PreparedStatement)stmt).setFloat(parameterIndex, x);
+ }
+
+ @Override
+ public void setDouble(int parameterIndex, double x) throws SQLException {
+ ((PreparedStatement)stmt).setDouble(parameterIndex, x);
+ }
+
+ @Override
+ public void setBigDecimal(int parameterIndex, BigDecimal x) throws SQLException {
+ ((PreparedStatement)stmt).setBigDecimal(parameterIndex, x);
+ }
+
+ @Override
+ public void setString(int parameterIndex, String x) throws SQLException {
+ ((PreparedStatement)stmt).setString(parameterIndex, x);
+ params[parameterIndex] = x;
+ }
+
+ @Override
+ public void setBytes(int parameterIndex, byte[] x) throws SQLException {
+ ((PreparedStatement)stmt).setBytes(parameterIndex, x);
+ }
+
+ @Override
+ public void setDate(int parameterIndex, Date x) throws SQLException {
+ ((PreparedStatement)stmt).setDate(parameterIndex, x);
+ }
+
+ @Override
+ public void setTime(int parameterIndex, Time x) throws SQLException {
+ ((PreparedStatement)stmt).setTime(parameterIndex, x);
+ }
+
+ @Override
+ public void setTimestamp(int parameterIndex, Timestamp x) throws SQLException {
+ ((PreparedStatement)stmt).setTimestamp(parameterIndex, x);
+ }
+
+ @Override
+ public void setAsciiStream(int parameterIndex, InputStream x, int length) throws SQLException {
+ ((PreparedStatement)stmt).setAsciiStream(parameterIndex, x, length);
+ }
+
+ @SuppressWarnings("deprecation")
+ @Override
+ public void setUnicodeStream(int parameterIndex, InputStream x, int length) throws SQLException {
+ ((PreparedStatement)stmt).setUnicodeStream(parameterIndex, x, length);
+ }
+
+ @Override
+ public void setBinaryStream(int parameterIndex, InputStream x, int length) throws SQLException {
+ ((PreparedStatement)stmt).setBinaryStream(parameterIndex, x, length);
+ }
+
+ @Override
+ public void clearParameters() throws SQLException {
+ ((PreparedStatement)stmt).clearParameters();
+ }
+
+ @Override
+ public void setObject(int parameterIndex, Object x, int targetSqlType) throws SQLException {
+ ((PreparedStatement)stmt).setObject(parameterIndex, x, targetSqlType);
+ }
+
+ @Override
+ public void setObject(int parameterIndex, Object x) throws SQLException {
+ ((PreparedStatement)stmt).setObject(parameterIndex, x);
+ }
+
+ @Override
+ public boolean execute() throws SQLException {
+ logger.debug(EELFLoggerDelegate.applicationLogger,"execute: "+sql);
+ boolean b = false;
+ try {
+ mgr.preStatementHook(sql);
+ b = ((PreparedStatement)stmt).execute();
+ mgr.postStatementHook(sql);
+ synchronizeTables(sql);
+ } catch (Exception e) {
+ e.printStackTrace();
+ String nm = e.getClass().getName();
+ // Note: this seems to be the only call Camunda uses, so it is the only one I am fixing for now.
+ boolean ignore = nm.startsWith(DATASTAX_PREFIX);
+// ignore |= (nm.startsWith("org.h2.jdbc.JdbcSQLException") && e.getMessage().contains("already exists"));
+ if (ignore) {
+ logger.warn("execute: exception (IGNORED) "+nm);
+ } else {
+ logger.error(EELFLoggerDelegate.errorLogger,"execute: exception "+nm);
+ throw e;
+ }
+ }
+ return b;
+ }
+
+ @Override
+ public void addBatch() throws SQLException {
+ ((PreparedStatement)stmt).addBatch();
+ }
+
+ @Override
+ public void setCharacterStream(int parameterIndex, Reader reader, int length) throws SQLException {
+ ((PreparedStatement)stmt).setCharacterStream(parameterIndex, reader, length);
+ }
+
+ @Override
+ public void setRef(int parameterIndex, Ref x) throws SQLException {
+ ((PreparedStatement)stmt).setRef(parameterIndex, x);
+ }
+
+ @Override
+ public void setBlob(int parameterIndex, Blob x) throws SQLException {
+ ((PreparedStatement)stmt).setBlob(parameterIndex, x);
+ }
+
+ @Override
+ public void setClob(int parameterIndex, Clob x) throws SQLException {
+ ((PreparedStatement)stmt).setClob(parameterIndex, x);
+ }
+
+ @Override
+ public void setArray(int parameterIndex, Array x) throws SQLException {
+ ((PreparedStatement)stmt).setArray(parameterIndex, x);
+ }
+
+ @Override
+ public ResultSetMetaData getMetaData() throws SQLException {
+ return ((PreparedStatement)stmt).getMetaData();
+ }
+
+ @Override
+ public void setDate(int parameterIndex, Date x, Calendar cal) throws SQLException {
+ ((PreparedStatement)stmt).setDate(parameterIndex, x, cal);
+ }
+
+ @Override
+ public void setTime(int parameterIndex, Time x, Calendar cal) throws SQLException {
+ ((PreparedStatement)stmt).setTime(parameterIndex, x, cal);
+ }
+
+ @Override
+ public void setTimestamp(int parameterIndex, Timestamp x, Calendar cal) throws SQLException {
+ ((CallableStatement)stmt).setTimestamp(parameterIndex, x, cal);
+ }
+
+ @Override
+ public void setNull(int parameterIndex, int sqlType, String typeName) throws SQLException {
+ ((CallableStatement)stmt).setNull(parameterIndex, sqlType, typeName);
+ }
+
+ @Override
+ public void setURL(int parameterIndex, URL x) throws SQLException {
+ ((CallableStatement)stmt).setURL(parameterIndex, x);
+ }
+
+ @Override
+ public ParameterMetaData getParameterMetaData() throws SQLException {
+ return ((CallableStatement)stmt).getParameterMetaData();
+ }
+
+ @Override
+ public void setRowId(int parameterIndex, RowId x) throws SQLException {
+ ((CallableStatement)stmt).setRowId(parameterIndex, x);
+ }
+
+ @Override
+ public void setNString(int parameterIndex, String value) throws SQLException {
+ ((CallableStatement)stmt).setNString(parameterIndex, value);
+ }
+
+ @Override
+ public void setNCharacterStream(int parameterIndex, Reader value, long length) throws SQLException {
+ ((CallableStatement)stmt).setNCharacterStream(parameterIndex, value, length);
+ }
+
+ @Override
+ public void setNClob(int parameterIndex, NClob value) throws SQLException {
+ ((CallableStatement)stmt).setNClob(parameterIndex, value);
+ }
+
+ @Override
+ public void setClob(int parameterIndex, Reader reader, long length) throws SQLException {
+ ((CallableStatement)stmt).setClob(parameterIndex, reader, length);
+ }
+
+ @Override
+ public void setBlob(int parameterIndex, InputStream inputStream, long length) throws SQLException {
+ ((CallableStatement)stmt).setBlob(parameterIndex, inputStream, length);
+ }
+
+ @Override
+ public void setNClob(int parameterIndex, Reader reader, long length) throws SQLException {
+ ((CallableStatement)stmt).setNClob(parameterIndex, reader, length);
+ }
+
+ @Override
+ public void setSQLXML(int parameterIndex, SQLXML xmlObject) throws SQLException {
+ ((CallableStatement)stmt).setSQLXML(parameterIndex, xmlObject);
+ }
+
+ @Override
+ public void setObject(int parameterIndex, Object x, int targetSqlType, int scaleOrLength) throws SQLException {
+ ((CallableStatement)stmt).setObject(parameterIndex, x, targetSqlType, scaleOrLength);
+ }
+
+ @Override
+ public void setAsciiStream(int parameterIndex, InputStream x, long length) throws SQLException {
+ ((CallableStatement)stmt).setAsciiStream(parameterIndex, x, length);
+ }
+
+ @Override
+ public void setBinaryStream(int parameterIndex, InputStream x, long length) throws SQLException {
+ ((CallableStatement)stmt).setBinaryStream(parameterIndex, x, length);
+ }
+
+ @Override
+ public void setCharacterStream(int parameterIndex, Reader reader, long length) throws SQLException {
+ ((CallableStatement)stmt).setCharacterStream(parameterIndex, reader, length);
+ }
+
+ @Override
+ public void setAsciiStream(int parameterIndex, InputStream x) throws SQLException {
+ ((CallableStatement)stmt).setAsciiStream(parameterIndex, x);
+ }
+
+ @Override
+ public void setBinaryStream(int parameterIndex, InputStream x) throws SQLException {
+ ((CallableStatement)stmt).setBinaryStream(parameterIndex, x);
+ }
+
+ @Override
+ public void setCharacterStream(int parameterIndex, Reader reader) throws SQLException {
+ ((CallableStatement)stmt).setCharacterStream(parameterIndex, reader);
+ }
+
+ @Override
+ public void setNCharacterStream(int parameterIndex, Reader value) throws SQLException {
+ ((CallableStatement)stmt).setNCharacterStream(parameterIndex, value);
+ }
+
+ @Override
+ public void setClob(int parameterIndex, Reader reader) throws SQLException {
+ ((CallableStatement)stmt).setClob(parameterIndex, reader);
+ }
+
+ @Override
+ public void setBlob(int parameterIndex, InputStream inputStream) throws SQLException {
+ ((CallableStatement)stmt).setBlob(parameterIndex, inputStream);
+ }
+
+ @Override
+ public void setNClob(int parameterIndex, Reader reader) throws SQLException {
+ ((CallableStatement)stmt).setNClob(parameterIndex, reader);
+ }
+
+}
diff --git a/src/main/java/com/att/research/mdbc/MdbcServer.java b/src/main/java/com/att/research/mdbc/MdbcServer.java
new file mode 100644
index 0000000..54accaa
--- /dev/null
+++ b/src/main/java/com/att/research/mdbc/MdbcServer.java
@@ -0,0 +1,162 @@
+package com.att.research.mdbc;
+
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to you under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import com.att.research.mdbc.configurations.NodeConfiguration;
+import org.apache.calcite.avatica.remote.Driver.Serialization;
+import org.apache.calcite.avatica.remote.LocalService;
+import org.apache.calcite.avatica.server.HttpServer;
+import org.apache.calcite.avatica.util.Unsafe;
+
+import com.att.research.logging.EELFLoggerDelegate;
+import com.beust.jcommander.IStringConverter;
+import com.beust.jcommander.JCommander;
+import com.beust.jcommander.Parameter;
+
+import java.util.Locale;
+import java.util.Properties;
+
+public class MdbcServer {
+ public static final EELFLoggerDelegate LOG = EELFLoggerDelegate.getLogger(MdbcStatement.class);
+
+ @Parameter(names = { "-c", "--configuration" }, required = true,
+ description = "This is the file that contains the ranges that are assigned to this MDBC server")
+ private String configurationFile;
+
+ @Parameter(names = { "-u", "--url" }, required = true,
+ description = "JDBC driver url for the server")
+ private String url;
+
+ @Parameter(names = { "-p", "--port" }, required = true,
+ description = "Port the server should bind")
+ private int port;
+
+ @Parameter(names = { "-s", "--user" }, required = true,
+ description = "Mysql usr")
+ private String user;
+
+ @Parameter(names = { "-a", "--pass" }, required = true,
+ description = "Mysql password")
+ private String password;
+
+ final private Serialization serialization = Serialization.PROTOBUF;
+
+ @Parameter(names = { "-h", "-help", "--help" }, help = true,
+ description = "Print the help message")
+ private boolean help = false;
+
+ private NodeConfiguration config;
+ private HttpServer server;
+
+ public void start() {
+ if (null != server) {
+ LOG.error("The server was already started");
+ Unsafe.systemExit(ExitCodes.ALREADY_STARTED.ordinal());
+ return;
+ }
+
+ try {
+ config = NodeConfiguration.readJsonFromFile(configurationFile);
+ //\TODO Add configuration file with Server Info
+ Properties connectionProps = new Properties();
+ connectionProps.put("user", user);
+ connectionProps.put("password", password);
+ MdbcServerLogic meta = new MdbcServerLogic(url,connectionProps,config);
+ LocalService service = new LocalService(meta);
+
+ // Construct the server
+ this.server = new HttpServer.Builder<>()
+ .withHandler(service, serialization)
+ .withPort(port)
+ .build();
+
+ // Then start it
+ server.start();
+
+ LOG.info("Started Avatica server on port {} with serialization {}", server.getPort(),
+ serialization);
+ } catch (Exception e) {
+ LOG.error("Failed to start Avatica server", e);
+ Unsafe.systemExit(ExitCodes.START_FAILED.ordinal());
+ }
+ }
+
+ public void stop() {
+ if (null != server) {
+ server.stop();
+ server = null;
+ }
+ }
+
+ public void join() throws InterruptedException {
+ server.join();
+ }
+
+ public static void main(String[] args) {
+ final MdbcServer server = new MdbcServer();
+ @SuppressWarnings("deprecation")
+ JCommander jc = new JCommander(server, args);
+ if (server.help) {
+ jc.usage();
+ Unsafe.systemExit(ExitCodes.USAGE.ordinal());
+ return;
+ }
+
+ server.start();
+
+ // Try to clean up when the server is stopped.
+ Runtime.getRuntime().addShutdownHook(
+ new Thread(new Runnable() {
+ @Override public void run() {
+ LOG.info("Stopping server");
+ server.stop();
+ LOG.info("Server stopped");
+ }
+ }));
+
+ try {
+ server.join();
+ } catch (InterruptedException e) {
+ // Reset interruption
+ Thread.currentThread().interrupt();
+ // And exit now.
+ return;
+ }
+ }
+
+ /**
+ * Converter from String to Serialization. Must be public for JCommander.
+ */
+ public static class SerializationConverter implements IStringConverter<Serialization> {
+ @Override public Serialization convert(String value) {
+ return Serialization.valueOf(value.toUpperCase(Locale.ROOT));
+ }
+ }
+
+ /**
+ * Codes for exit conditions
+ */
+ private enum ExitCodes {
+ NORMAL,
+ ALREADY_STARTED, // 1
+ START_FAILED, // 2
+ USAGE; // 3
+ }
+}
+
+// End MdbcServer.java
diff --git a/src/main/java/com/att/research/mdbc/MdbcServerLogic.java b/src/main/java/com/att/research/mdbc/MdbcServerLogic.java
new file mode 100644
index 0000000..72cc73c
--- /dev/null
+++ b/src/main/java/com/att/research/mdbc/MdbcServerLogic.java
@@ -0,0 +1,312 @@
+package com.att.research.mdbc;
+
+import java.sql.Connection;
+import java.sql.SQLException;
+import java.util.List;
+import java.util.Map;
+import java.util.Properties;
+import java.util.concurrent.ConcurrentMap;
+import java.util.concurrent.TimeUnit;
+
+import com.att.research.exceptions.MDBCServiceException;
+import com.att.research.mdbc.configurations.NodeConfiguration;
+import com.google.common.cache.Cache;
+import com.google.common.cache.CacheBuilder;
+import com.google.common.cache.RemovalListener;
+import com.google.common.cache.RemovalNotification;
+import org.apache.calcite.avatica.MissingResultsException;
+import org.apache.calcite.avatica.NoSuchStatementException;
+import org.apache.calcite.avatica.jdbc.JdbcMeta;
+import org.apache.calcite.avatica.remote.TypedValue;
+
+import com.att.research.logging.EELFLoggerDelegate;
+import com.att.research.logging.format.AppMessages;
+import com.att.research.logging.format.ErrorSeverity;
+import com.att.research.logging.format.ErrorTypes;
+
+public class MdbcServerLogic extends JdbcMeta{
+
+ private static EELFLoggerDelegate logger = EELFLoggerDelegate.getLogger(MdbcServerLogic.class);
+
+ StateManager manager;
+ DatabasePartition ranges;
+ String name;
+ String sqlDatabase;
+
+ //TODO: Delete this properties after debugging
+ private final Properties info;
+ private final Cache<String, Connection> connectionCache;
+
+ public MdbcServerLogic(String Url, Properties info,NodeConfiguration config) throws SQLException, MDBCServiceException {
+ super(Url,info);
+ this.ranges = config.partition;
+ this.name = config.nodeName;
+ this.sqlDatabase = config.sqlDatabaseName;
+ this.manager = new StateManager(Url,info,this.ranges,this.sqlDatabase);
+ this.info = info;
+ int concurrencyLevel = Integer.parseInt(
+ info.getProperty(ConnectionCacheSettings.CONCURRENCY_LEVEL.key(),
+ ConnectionCacheSettings.CONCURRENCY_LEVEL.defaultValue()));
+ int initialCapacity = Integer.parseInt(
+ info.getProperty(ConnectionCacheSettings.INITIAL_CAPACITY.key(),
+ ConnectionCacheSettings.INITIAL_CAPACITY.defaultValue()));
+ long maxCapacity = Long.parseLong(
+ info.getProperty(ConnectionCacheSettings.MAX_CAPACITY.key(),
+ ConnectionCacheSettings.MAX_CAPACITY.defaultValue()));
+ long connectionExpiryDuration = Long.parseLong(
+ info.getProperty(ConnectionCacheSettings.EXPIRY_DURATION.key(),
+ ConnectionCacheSettings.EXPIRY_DURATION.defaultValue()));
+ TimeUnit connectionExpiryUnit = TimeUnit.valueOf(
+ info.getProperty(ConnectionCacheSettings.EXPIRY_UNIT.key(),
+ ConnectionCacheSettings.EXPIRY_UNIT.defaultValue()));
+ this.connectionCache = CacheBuilder.newBuilder()
+ .concurrencyLevel(concurrencyLevel)
+ .initialCapacity(initialCapacity)
+ .maximumSize(maxCapacity)
+ .expireAfterAccess(connectionExpiryDuration, connectionExpiryUnit)
+ .removalListener(new ConnectionExpiryHandler())
+ .build();
+ }
+
+ @Override
+ protected Connection getConnection(String id) throws SQLException {
+ if (id == null) {
+ throw new NullPointerException("Connection id is null");
+ }
+ //\TODO: don't use connectionCache, use this.manager internal state
+ Connection conn = connectionCache.getIfPresent(id);
+ if (conn == null) {
+ this.manager.CloseConnection(id);
+ logger.error(EELFLoggerDelegate.errorLogger,"Connection not found: invalid id, closed, or expired: "
+ + id);
+ throw new RuntimeException(" Connection not found: invalid id, closed, or expired: " + id);
+ }
+ return conn;
+ }
+
+ @Override
+ public void openConnection(ConnectionHandle ch, Map<String, String> information) {
+ Properties fullInfo = new Properties();
+ fullInfo.putAll(this.info);
+ if (information != null) {
+ fullInfo.putAll(information);
+ }
+
+ final ConcurrentMap<String, Connection> cacheAsMap = this.connectionCache.asMap();
+ if (cacheAsMap.containsKey(ch.id)) {
+ throw new RuntimeException("Connection already exists: " + ch.id);
+ }
+ // Avoid global synchronization of connection opening
+ try {
+ this.manager.OpenConnection(ch.id, info);
+ Connection conn = this.manager.GetConnection(ch.id);
+ if(conn == null) {
+ logger.error(EELFLoggerDelegate.errorLogger, "Connection created was null");
+ throw new RuntimeException("Connection created was null for connection: " + ch.id);
+ }
+ Connection loadedConn = cacheAsMap.putIfAbsent(ch.id, conn);
+ logger.info("connection created with id {}", ch.id);
+ // Race condition: someone beat us to storing the connection in the cache.
+ if (loadedConn != null) {
+ //\TODO check if we added an additional race condition for this
+ this.manager.CloseConnection(ch.id);
+ conn.close();
+ throw new RuntimeException("Connection already exists: " + ch.id);
+ }
+ } catch (SQLException e) {
+ logger.error(EELFLoggerDelegate.errorLogger, e.getMessage(), AppMessages.QUERYERROR, ErrorTypes.QUERYERROR, ErrorSeverity.CRITICAL);
+ throw new RuntimeException(e);
+ }
+ }
+
+ @Override
+ public void closeConnection(ConnectionHandle ch) {
+ //\TODO use state connection instead
+ Connection conn = connectionCache.getIfPresent(ch.id);
+ if (conn == null) {
+ logger.debug("client requested close unknown connection {}", ch);
+ return;
+ }
+ logger.trace("closing connection {}", ch);
+ try {
+ conn.close();
+ } catch (SQLException e) {
+ logger.error(EELFLoggerDelegate.errorLogger, e.getMessage(), AppMessages.QUERYERROR, ErrorTypes.QUERYERROR, ErrorSeverity.CRITICAL);
+ throw new RuntimeException(e.getMessage());
+ } finally {
+ connectionCache.invalidate(ch.id);
+ this.manager.CloseConnection(ch.id);
+ logger.info("connection closed with id {}", ch.id);
+ }
+ }
+
+ @Override
+ public void commit(ConnectionHandle ch) {
+ try {
+ super.commit(ch);
+ logger.debug("connection commited with id {}", ch.id);
+ } catch (Exception err ) {
+ logger.error(EELFLoggerDelegate.errorLogger, err.getMessage(), AppMessages.QUERYERROR, ErrorTypes.QUERYERROR, ErrorSeverity.CRITICAL);
+ throw(err);
+ }
+ }
+
+ //\TODO All the following functions can be deleted
+ // Added for two reasons: debugging and logging
+ @Override
+ public StatementHandle prepare(ConnectionHandle ch, String sql, long maxRowCount) {
+ StatementHandle h;
+ try {
+ h = super.prepare(ch, sql, maxRowCount);
+ logger.debug("prepared statement {}", h);
+ } catch (Exception e ) {
+ logger.error(EELFLoggerDelegate.errorLogger, e.getMessage(), AppMessages.QUERYERROR, ErrorTypes.QUERYERROR, ErrorSeverity.CRITICAL);
+ throw(e);
+ }
+ return h;
+ }
+
+ @Override
+ public ExecuteResult prepareAndExecute(StatementHandle h, String sql, long maxRowCount, int maxRowsInFirstFrame,
+ PrepareCallback callback) throws NoSuchStatementException {
+ ExecuteResult e;
+ try {
+ e = super.prepareAndExecute(h, sql, maxRowCount,maxRowsInFirstFrame,callback);
+ logger.debug("prepare and execute statement {}", h);
+ } catch (Exception err ) {
+ logger.error(EELFLoggerDelegate.errorLogger, err.getMessage(), AppMessages.QUERYERROR, ErrorTypes.QUERYERROR, ErrorSeverity.CRITICAL);
+ throw(err);
+ }
+ return e;
+ }
+
+ @Override
+ public ExecuteBatchResult prepareAndExecuteBatch(StatementHandle h, List<String> sqlCommands)
+ throws NoSuchStatementException {
+ ExecuteBatchResult e;
+ try {
+ e = super.prepareAndExecuteBatch(h, sqlCommands);
+ logger.debug("prepare and execute batch statement {}", h);
+ } catch (Exception err ) {
+ logger.error(EELFLoggerDelegate.errorLogger, err.getMessage(), AppMessages.QUERYERROR, ErrorTypes.QUERYERROR, ErrorSeverity.CRITICAL);
+ throw(err);
+ }
+ return e;
+ }
+
+ @Override
+ public ExecuteBatchResult executeBatch(StatementHandle h, List<List<TypedValue>> parameterValues)
+ throws NoSuchStatementException {
+ ExecuteBatchResult e;
+ try {
+ e = super.executeBatch(h, parameterValues);
+ logger.debug("execute batch statement {}", h);
+ } catch (Exception err ) {
+ logger.error(EELFLoggerDelegate.errorLogger, err.getMessage(), AppMessages.QUERYERROR, ErrorTypes.QUERYERROR, ErrorSeverity.CRITICAL);
+ throw(err);
+ }
+ return e;
+ }
+
+ @Override
+ public Frame fetch(StatementHandle h, long offset, int fetchMaxRowCount)
+ throws NoSuchStatementException, MissingResultsException {
+ Frame f;
+ try {
+ f = super.fetch(h, offset, fetchMaxRowCount);
+ logger.debug("fetch statement {}", h);
+ } catch (Exception err ) {
+ logger.error(EELFLoggerDelegate.errorLogger, err.getMessage(), AppMessages.QUERYERROR, ErrorTypes.QUERYERROR, ErrorSeverity.CRITICAL);
+ throw(err);
+ }
+ return f;
+ }
+
+ @Override
+ public ExecuteResult execute(StatementHandle h, List<TypedValue> parameterValues, long maxRowCount)
+ throws NoSuchStatementException {
+ ExecuteResult e;
+ try {
+ e = super.execute(h, parameterValues, maxRowCount);
+ logger.debug("fetch statement {}", h);
+ } catch (Exception err ) {
+ logger.error(EELFLoggerDelegate.errorLogger, err.getMessage(), AppMessages.QUERYERROR, ErrorTypes.QUERYERROR, ErrorSeverity.CRITICAL);
+ throw(err);
+ }
+ return e;
+ }
+
+ @Override
+ public ExecuteResult execute(StatementHandle h, List<TypedValue> parameterValues, int maxRowsInFirstFrame)
+ throws NoSuchStatementException {
+ ExecuteResult e;
+ try {
+ e = super.execute(h, parameterValues, maxRowsInFirstFrame);
+ logger.debug("fetch statement {}", h);
+ } catch (Exception err ) {
+ logger.error(EELFLoggerDelegate.errorLogger, err.getMessage(), AppMessages.QUERYERROR, ErrorTypes.QUERYERROR, ErrorSeverity.CRITICAL);
+ throw(err);
+ }
+ return e;
+ }
+
+ @Override
+ public StatementHandle createStatement(ConnectionHandle ch) {
+ StatementHandle h;
+ try {
+ h = super.createStatement(ch);
+ logger.debug("create statement {}", h);
+ } catch (Exception err ) {
+ logger.error(EELFLoggerDelegate.errorLogger, err.getMessage(), AppMessages.QUERYERROR, ErrorTypes.QUERYERROR, ErrorSeverity.CRITICAL);
+ throw(err);
+ }
+ return h;
+ }
+
+ @Override
+ public void closeStatement(StatementHandle h) {
+ try {
+ super.closeStatement(h);
+ logger.debug("statement closed {}", h);
+ } catch (Exception err ) {
+ logger.error(EELFLoggerDelegate.errorLogger, err.getMessage(), AppMessages.QUERYERROR, ErrorTypes.QUERYERROR, ErrorSeverity.CRITICAL);
+ throw(err);
+ }
+ }
+
+
+
+
+
+
+
+ @Override
+ public void rollback(ConnectionHandle ch) {
+ try {
+ super.rollback(ch);
+ logger.debug("connection rollback with id {}", ch.id);
+ } catch (Exception err ) {
+ logger.error(EELFLoggerDelegate.errorLogger, err.getMessage(), AppMessages.QUERYERROR, ErrorTypes.QUERYERROR, ErrorSeverity.CRITICAL);
+ throw(err);
+ }
+ }
+
+ private class ConnectionExpiryHandler
+ implements RemovalListener<String, Connection> {
+
+ public void onRemoval(RemovalNotification<String, Connection> notification) {
+ String connectionId = notification.getKey();
+ Connection doomed = notification.getValue();
+ logger.debug("Expiring connection {} because {}", connectionId, notification.getCause());
+ try {
+ if (doomed != null) {
+ doomed.close();
+ }
+ } catch (Throwable t) {
+ logger.warn("Exception thrown while expiring connection {}", connectionId, t);
+ }
+ }
+ }
+}
+
+
diff --git a/src/main/java/com/att/research/mdbc/MdbcStatement.java b/src/main/java/com/att/research/mdbc/MdbcStatement.java
new file mode 100644
index 0000000..e03fbda
--- /dev/null
+++ b/src/main/java/com/att/research/mdbc/MdbcStatement.java
@@ -0,0 +1,416 @@
+package com.att.research.mdbc;
+
+import java.sql.Connection;
+import java.sql.ResultSet;
+import java.sql.SQLException;
+import java.sql.SQLWarning;
+import java.sql.Statement;
+
+import com.att.research.exceptions.QueryException;
+import com.att.research.logging.EELFLoggerDelegate;
+import com.att.research.logging.format.AppMessages;
+import com.att.research.logging.format.ErrorSeverity;
+import com.att.research.logging.format.ErrorTypes;
+
+/**
+ * ProxyStatement is a proxy Statement that front ends Statements from the underlying JDBC driver. It passes all operations through,
+ * and invokes the MusicSqlManager when there is the possibility that database tables have been created or dropped.
+ *
+ * NOTE(review): in the execute*/executeQuery/executeBatch methods below, exceptions whose class
+ * name starts with the datastax driver prefix are deliberately swallowed; in that case the
+ * method returns its default value (null ResultSet, 0 count, null batch array, false).
+ * Callers must tolerate these "best effort" results — confirm this contract is intended.
+ *
+ * @author Robert Eby
+ */
+public class MdbcStatement implements Statement {
+	private EELFLoggerDelegate logger = EELFLoggerDelegate.getLogger(MdbcStatement.class);
+	// Exceptions from classes under this package prefix (Cassandra driver) are logged but not rethrown.
+	private static final String DATASTAX_PREFIX = "com.datastax.driver";
+
+	final Statement stmt; // the Statement that we are proxying
+	final MusicSqlManager mgr;
+	//\TODO We may need to all pass the connection object to support autocommit
+
+	/**
+	 * Wraps an underlying JDBC Statement so MUSIC hooks run around every execution.
+	 * @param s the real Statement being proxied
+	 * @param m the MusicSqlManager invoked via pre/post statement hooks
+	 */
+	public MdbcStatement(Statement s, MusicSqlManager m) {
+		this.stmt = s;
+		this.mgr = m;
+	}
+
+	// NOTE(review): the sql argument is accepted but never stored or used here.
+	public MdbcStatement(Statement stmt, String sql, MusicSqlManager mgr) {
+		//\TODO why there is a constructor with a sql parameter in a not PreparedStatement
+		this.stmt = stmt;
+		this.mgr = mgr;
+	}
+
+	@Override
+	public <T> T unwrap(Class<T> iface) throws SQLException {
+		logger.error(EELFLoggerDelegate.errorLogger, "proxystatement unwrap: " + iface.getName());
+		return stmt.unwrap(iface);
+	}
+
+	@Override
+	public boolean isWrapperFor(Class<?> iface) throws SQLException {
+		logger.error(EELFLoggerDelegate.errorLogger, "proxystatement isWrapperFor: " + iface.getName());
+		return stmt.isWrapperFor(iface);
+	}
+
+	/**
+	 * Runs the MUSIC pre-hook, executes the query, runs the post-hook, then synchronizes tables.
+	 * NOTE(review): if a datastax exception is swallowed, r remains null and a null ResultSet
+	 * is returned to the caller.
+	 */
+	@Override
+	public ResultSet executeQuery(String sql) throws SQLException {
+		logger.debug(EELFLoggerDelegate.applicationLogger,"executeQuery: "+sql);
+		ResultSet r = null;
+		try {
+			mgr.preStatementHook(sql);
+			r = stmt.executeQuery(sql);
+			mgr.postStatementHook(sql);
+			synchronizeTables(sql);
+		} catch (Exception e) {
+			String nm = e.getClass().getName();
+			logger.error(EELFLoggerDelegate.errorLogger, "executeQuery: exception "+nm);
+			if (!nm.startsWith(DATASTAX_PREFIX))
+				throw e;
+		}
+		return r;
+	}
+
+	/**
+	 * Same hook-execute-hook pattern as executeQuery; returns 0 if a datastax exception is swallowed.
+	 */
+	@Override
+	public int executeUpdate(String sql) throws SQLException {
+		logger.debug(EELFLoggerDelegate.applicationLogger,"executeUpdate: "+sql);
+
+		int n = 0;
+		try {
+			mgr.preStatementHook(sql);
+			n = stmt.executeUpdate(sql);
+			mgr.postStatementHook(sql);
+			synchronizeTables(sql);
+		} catch (Exception e) {
+			String nm = e.getClass().getName();
+			logger.error(EELFLoggerDelegate.errorLogger, "executeUpdate: exception "+nm+" "+e);
+			if (!nm.startsWith(DATASTAX_PREFIX))
+				throw e;
+		}
+		return n;
+	}
+
+	@Override
+	public void close() throws SQLException {
+		logger.debug(EELFLoggerDelegate.applicationLogger,"Statement close: ");
+		stmt.close();
+	}
+
+	@Override
+	public int getMaxFieldSize() throws SQLException {
+		logger.debug(EELFLoggerDelegate.applicationLogger,"getMaxFieldSize");
+		return stmt.getMaxFieldSize();
+	}
+
+	@Override
+	public void setMaxFieldSize(int max) throws SQLException {
+		stmt.setMaxFieldSize(max);
+	}
+
+	@Override
+	public int getMaxRows() throws SQLException {
+		return stmt.getMaxRows();
+	}
+
+	@Override
+	public void setMaxRows(int max) throws SQLException {
+		stmt.setMaxRows(max);
+	}
+
+	@Override
+	public void setEscapeProcessing(boolean enable) throws SQLException {
+		stmt.setEscapeProcessing(enable);
+	}
+
+	@Override
+	public int getQueryTimeout() throws SQLException {
+		return stmt.getQueryTimeout();
+	}
+
+	@Override
+	public void setQueryTimeout(int seconds) throws SQLException {
+		//\TODO: we also need to implement a higher level timeout in MDBC
+		logger.debug(EELFLoggerDelegate.applicationLogger,"setQueryTimeout seconds "+ seconds);
+		stmt.setQueryTimeout(seconds);
+	}
+
+	@Override
+	public void cancel() throws SQLException {
+		stmt.cancel();
+	}
+
+	@Override
+	public SQLWarning getWarnings() throws SQLException {
+		return stmt.getWarnings();
+	}
+
+	@Override
+	public void clearWarnings() throws SQLException {
+		stmt.clearWarnings();
+	}
+
+	@Override
+	public void setCursorName(String name) throws SQLException {
+		stmt.setCursorName(name);
+	}
+
+	/**
+	 * Hook-execute-hook wrapper for generic execute; returns false when a datastax
+	 * exception is swallowed.
+	 */
+	@Override
+	public boolean execute(String sql) throws SQLException {
+		logger.debug(EELFLoggerDelegate.applicationLogger,"execute: "+sql);
+		boolean b = false;
+		//\TODO Add the result of the postStatementHook to b
+		try {
+			mgr.preStatementHook(sql);
+			b = stmt.execute(sql);
+			mgr.postStatementHook(sql);
+			synchronizeTables(sql);
+		} catch (Exception e) {
+			String nm = e.getClass().getName();
+			logger.error(EELFLoggerDelegate.errorLogger, "execute: exception "+nm+" "+e);
+			// Note: this seems to be the only call Camunda uses, so it is the only one I am fixing for now.
+			boolean ignore = nm.startsWith(DATASTAX_PREFIX);
+//			ignore |= (nm.startsWith("org.h2.jdbc.JdbcSQLException") && e.getMessage().contains("already exists"));
+			if (ignore) {
+				logger.warn("execute: exception (IGNORED) "+nm);
+			} else {
+				logger.error(EELFLoggerDelegate.errorLogger, " Exception "+nm+" "+e);
+				throw e;
+			}
+		}
+		return b;
+	}
+
+	@Override
+	public ResultSet getResultSet() throws SQLException {
+		return stmt.getResultSet();
+	}
+
+	@Override
+	public int getUpdateCount() throws SQLException {
+		return stmt.getUpdateCount();
+	}
+
+	@Override
+	public boolean getMoreResults() throws SQLException {
+		return stmt.getMoreResults();
+	}
+
+	@Override
+	public void setFetchDirection(int direction) throws SQLException {
+		stmt.setFetchDirection(direction);
+	}
+
+	@Override
+	public int getFetchDirection() throws SQLException {
+		return stmt.getFetchDirection();
+	}
+
+	@Override
+	public void setFetchSize(int rows) throws SQLException {
+		stmt.setFetchSize(rows);
+	}
+
+	@Override
+	public int getFetchSize() throws SQLException {
+		return stmt.getFetchSize();
+	}
+
+	@Override
+	public int getResultSetConcurrency() throws SQLException {
+		return stmt.getResultSetConcurrency();
+	}
+
+	@Override
+	public int getResultSetType() throws SQLException {
+		return stmt.getResultSetType();
+	}
+
+	@Override
+	public void addBatch(String sql) throws SQLException {
+		stmt.addBatch(sql);
+	}
+
+	@Override
+	public void clearBatch() throws SQLException {
+		stmt.clearBatch();
+	}
+
+	/**
+	 * Executes the batch without the MUSIC pre/post hooks (not supported for batches);
+	 * NOTE(review): returns null (not an empty array) if a datastax exception is swallowed.
+	 */
+	@Override
+	public int[] executeBatch() throws SQLException {
+		logger.debug(EELFLoggerDelegate.applicationLogger,"executeBatch: ");
+		int[] n = null;
+		try {
+			logger.debug(EELFLoggerDelegate.applicationLogger,"executeBatch() is not supported by MDBC; your results may be incorrect as a result.");
+			n = stmt.executeBatch();
+			synchronizeTables(null);
+		} catch (Exception e) {
+			String nm = e.getClass().getName();
+			logger.error(EELFLoggerDelegate.errorLogger,"executeBatch: exception "+nm);
+			if (!nm.startsWith(DATASTAX_PREFIX))
+				throw e;
+		}
+		return n;
+	}
+
+	@Override
+	public Connection getConnection() throws SQLException {
+		return stmt.getConnection();
+	}
+
+	@Override
+	public boolean getMoreResults(int current) throws SQLException {
+		return stmt.getMoreResults(current);
+	}
+
+	@Override
+	public ResultSet getGeneratedKeys() throws SQLException {
+		return stmt.getGeneratedKeys();
+	}
+
+	@Override
+	public int executeUpdate(String sql, int autoGeneratedKeys) throws SQLException {
+		logger.debug(EELFLoggerDelegate.applicationLogger,"executeUpdate: "+sql);
+		int n = 0;
+		try {
+			mgr.preStatementHook(sql);
+			n = stmt.executeUpdate(sql, autoGeneratedKeys);
+			mgr.postStatementHook(sql);
+			synchronizeTables(sql);
+		} catch (Exception e) {
+			String nm = e.getClass().getName();
+			logger.error(EELFLoggerDelegate.errorLogger,"executeUpdate: exception "+nm);
+			if (!nm.startsWith(DATASTAX_PREFIX))
+				throw e;
+		}
+		return n;
+	}
+
+	@Override
+	public int executeUpdate(String sql, int[] columnIndexes) throws SQLException {
+		logger.debug(EELFLoggerDelegate.applicationLogger,"executeUpdate: "+sql);
+		int n = 0;
+		try {
+			mgr.preStatementHook(sql);
+			n = stmt.executeUpdate(sql, columnIndexes);
+			mgr.postStatementHook(sql);
+			synchronizeTables(sql);
+		} catch (Exception e) {
+			String nm = e.getClass().getName();
+			logger.error(EELFLoggerDelegate.errorLogger,"executeUpdate: exception "+nm);
+			if (!nm.startsWith(DATASTAX_PREFIX))
+				throw e;
+		}
+		return n;
+	}
+
+	@Override
+	public int executeUpdate(String sql, String[] columnNames) throws SQLException {
+		logger.debug(EELFLoggerDelegate.applicationLogger,"executeUpdate: "+sql);
+		int n = 0;
+		try {
+			mgr.preStatementHook(sql);
+			n = stmt.executeUpdate(sql, columnNames);
+			mgr.postStatementHook(sql);
+			synchronizeTables(sql);
+		} catch (Exception e) {
+			String nm = e.getClass().getName();
+			logger.error(EELFLoggerDelegate.errorLogger,"executeUpdate: exception "+nm);
+			if (!nm.startsWith(DATASTAX_PREFIX))
+				throw e;
+		}
+		return n;
+	}
+
+	@Override
+	public boolean execute(String sql, int autoGeneratedKeys) throws SQLException {
+		logger.debug(EELFLoggerDelegate.applicationLogger,"execute: "+sql);
+		boolean b = false;
+		try {
+			mgr.preStatementHook(sql);
+			b = stmt.execute(sql, autoGeneratedKeys);
+			mgr.postStatementHook(sql);
+			synchronizeTables(sql);
+		} catch (Exception e) {
+			String nm = e.getClass().getName();
+			logger.error(EELFLoggerDelegate.errorLogger,"execute: exception "+nm);
+			if (!nm.startsWith(DATASTAX_PREFIX))
+				throw e;
+		}
+		return b;
+	}
+
+	@Override
+	public boolean execute(String sql, int[] columnIndexes) throws SQLException {
+		logger.debug(EELFLoggerDelegate.applicationLogger,"execute: "+sql);
+		boolean b = false;
+		try {
+			mgr.preStatementHook(sql);
+			b = stmt.execute(sql, columnIndexes);
+			mgr.postStatementHook(sql);
+			synchronizeTables(sql);
+		} catch (Exception e) {
+			String nm = e.getClass().getName();
+			logger.error(EELFLoggerDelegate.errorLogger,"execute: exception "+nm);
+			if (!nm.startsWith(DATASTAX_PREFIX))
+				throw e;
+		}
+		return b;
+	}
+
+	@Override
+	public boolean execute(String sql, String[] columnNames) throws SQLException {
+		logger.debug(EELFLoggerDelegate.applicationLogger,"execute: "+sql);
+		//\TODO Idem to the other execute without columnNames
+		boolean b = false;
+		try {
+			mgr.preStatementHook(sql);
+			b = stmt.execute(sql, columnNames);
+			mgr.postStatementHook(sql);
+			synchronizeTables(sql);
+		} catch (Exception e) {
+			String nm = e.getClass().getName();
+			logger.error(EELFLoggerDelegate.errorLogger,"execute: exception "+nm);
+			if (!nm.startsWith(DATASTAX_PREFIX))
+				throw e;
+		}
+		return b;
+	}
+
+	@Override
+	public int getResultSetHoldability() throws SQLException {
+		return stmt.getResultSetHoldability();
+	}
+
+	@Override
+	public boolean isClosed() throws SQLException {
+		return stmt.isClosed();
+	}
+
+	@Override
+	public void setPoolable(boolean poolable) throws SQLException {
+		stmt.setPoolable(poolable);
+	}
+
+	@Override
+	public boolean isPoolable() throws SQLException {
+		return stmt.isPoolable();
+	}
+
+	@Override
+	public void closeOnCompletion() throws SQLException {
+		stmt.closeOnCompletion();
+	}
+
+	@Override
+	public boolean isCloseOnCompletion() throws SQLException {
+		return stmt.isCloseOnCompletion();
+	}
+
+	/**
+	 * Triggers mgr.synchronizeTables() after DDL: called with null (batch path) or when the
+	 * statement starts with "create" (case-insensitive). QueryException is logged, not rethrown.
+	 */
+	protected void synchronizeTables(String sql)  {
+		if (sql == null || sql.trim().toLowerCase().startsWith("create")) {
+			if (mgr != null) {
+				try {
+					mgr.synchronizeTables();
+				} catch (QueryException e) {
+					logger.error(EELFLoggerDelegate.errorLogger, e.getMessage(),AppMessages.UNKNOWNERROR,  ErrorSeverity.CRITICAL, ErrorTypes.QUERYERROR);
+				}
+			}
+		}
+	}
+}
diff --git a/src/main/java/com/att/research/mdbc/MusicSqlManager.java b/src/main/java/com/att/research/mdbc/MusicSqlManager.java
new file mode 100755
index 0000000..4330cfe
--- /dev/null
+++ b/src/main/java/com/att/research/mdbc/MusicSqlManager.java
@@ -0,0 +1,300 @@
+package com.att.research.mdbc;
+
+import java.sql.Connection;
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.Properties;
+import java.util.Set;
+
+import org.json.JSONObject;
+
+import com.att.research.mdbc.mixins.DBInterface;
+import com.att.research.mdbc.mixins.MixinFactory;
+import com.att.research.mdbc.mixins.MusicInterface;
+import com.att.research.mdbc.mixins.StagingTable;
+import com.att.research.mdbc.mixins.TxCommitProgress;
+import com.att.research.mdbc.mixins.Utils;
+
+import com.att.research.exceptions.MDBCServiceException;
+import com.att.research.exceptions.QueryException;
+import com.att.research.logging.*;
+import com.att.research.logging.format.AppMessages;
+import com.att.research.logging.format.ErrorSeverity;
+import com.att.research.logging.format.ErrorTypes;
+
+/**
+* <p>
+* MUSIC SQL Manager - code that helps take data written to a SQL database and seamlessly integrates it
+* with <a href="https://github.com/att/music">MUSIC</a> that maintains data in a No-SQL data-store
+* (<a href="http://cassandra.apache.org/">Cassandra</a>) and protects access to it with a distributed
+* locking service (based on <a href="https://zookeeper.apache.org/">Zookeeper</a>).
+* </p>
+* <p>
+* This code will support transactions by taking note of the value of the autoCommit flag, and of calls
+* to <code>commit()</code> and <code>rollback()</code>. These calls should be made by the user's JDBC
+* client.
+* </p>
+*
+* @author Bharath Balasubramanian, Robert Eby
+*/
+public class MusicSqlManager {
+
+	private static EELFLoggerDelegate logger = EELFLoggerDelegate.getLogger(MusicSqlManager.class);
+
+	private final DBInterface dbi;
+	private final MusicInterface mi;
+	private final Set<String> table_set;
+	private final HashMap<Range,StagingTable> transactionDigest;
+	private boolean autocommit; // a copy of the autocommit flag from the JDBC Connection
+
+	/**
+	 * Build a MusicSqlManager for a DB connection.  This construct may only be called by getMusicSqlManager(),
+	 * which will ensure that only one MusicSqlManager is created per URL.
+	 * This is the location where the appropriate mixins to use for the MusicSqlManager should be determined.
+	 * They should be picked based upon the URL and the properties passed to this constructor.
+	 * <p>
+	 * At the present time, we only support the use of the H2Mixin (for access to a local H2 database),
+	 * with the CassandraMixin (for direct access to a Cassandra noSQL DB as the persistence layer).
+	 * </p>
+	 *
+	 * @param url the JDBC URL which was used to connection to the database
+	 * @param conn the actual connection to the database
+	 * @param info properties passed from the initial JDBC connect() call
+	 * @throws MDBCServiceException
+	 */
+	 public MusicSqlManager(String url, Connection conn, Properties info, MusicInterface mi) throws MDBCServiceException {
+		try {
+			info.putAll(Utils.getMdbcProperties());
+			String mixinDb  = info.getProperty(Configuration.KEY_DB_MIXIN_NAME, Configuration.DB_MIXIN_DEFAULT);
+			this.dbi       = MixinFactory.createDBInterface(mixinDb, this, url, conn, info);
+			this.mi = mi;
+			this.table_set = Collections.synchronizedSet(new HashSet<String>());
+			this.autocommit = true;
+			this.transactionDigest = new HashMap<Range,StagingTable>();
+
+		}catch(Exception e) {
+			// NOTE(review): only the message is propagated; the original exception is not chained
+			// as a cause, so its stack trace is lost — confirm whether MDBCServiceException
+			// offers a (String, Throwable) constructor and chain 'e' if so.
+			throw new MDBCServiceException(e.getMessage());
+		}
+	}
+
+	/**
+	 * Mirrors the JDBC autoCommit flag. Turning autocommit ON while a transaction is
+	 * outstanding commits it immediately, which requires a valid transaction id.
+	 *
+	 * @param b new autocommit value
+	 * @param txId id of the in-flight transaction; must be non-empty when b is true
+	 * @param progressKeeper tracker for commit progress, forwarded to commit()
+	 * @param partition the partition being committed, forwarded to commit()
+	 * @throws MDBCServiceException if txId is missing when a commit is required, or the commit fails
+	 */
+	public void setAutoCommit(boolean b,String txId, TxCommitProgress progressKeeper, DatabasePartition partition) throws MDBCServiceException {
+		if (b != autocommit) {
+			autocommit = b;
+			logger.debug(EELFLoggerDelegate.applicationLogger,"autocommit changed to "+b);
+			if (b) {
+				// My reading is that turning autoCommit ON should automatically commit any outstanding transaction
+				if(txId == null || txId.isEmpty()) {
+					// BUGFIX: the log line previously said "Connection ID is null" although the
+					// transaction id is what is being validated here.
+					logger.error(EELFLoggerDelegate.errorLogger, "Transaction ID is null",AppMessages.UNKNOWNERROR, ErrorSeverity.CRITICAL, ErrorTypes.QUERYERROR);
+					throw new MDBCServiceException("tx id is null");
+				}
+				commit(txId,progressKeeper,partition);
+			}
+		}
+	}
+
+	/**
+	 * Close this MusicSqlManager.
+	 */
+	public void close() {
+		if (dbi != null) {
+			dbi.close();
+		}
+	}
+
+	/**
+	 * Code to be run within the DB driver before a SQL statement is executed.  This is where tables
+	 * can be synchronized before a SELECT, for those databases that do not support SELECT triggers.
+	 * @param sql the SQL statement that is about to be executed
+	 */
+	public void preStatementHook(final String sql) {
+		dbi.preStatementHook(sql);
+	}
+	/**
+	 * Code to be run within the DB driver after a SQL statement has been executed.  This is where remote
+	 * statement actions can be copied back to Cassandra/MUSIC.
+	 * @param sql the SQL statement that was executed
+	 */
+	public void postStatementHook(final String sql) {
+		dbi.postStatementHook(sql,transactionDigest);
+	}
+	/**
+	 * Synchronize the list of tables in SQL with the list in MUSIC. This function should be called when the
+	 * proxy first starts, and whenever there is the possibility that tables were created or dropped.  It is synchronized
+	 * in order to prevent multiple threads from running this code in parallel.
+	 */
+	public synchronized void synchronizeTables() throws QueryException {
+		Set<String> set1 = dbi.getSQLTableSet();	// set of tables in the database
+		logger.debug(EELFLoggerDelegate.applicationLogger, "synchronizing tables:" + set1);
+		for (String tableName : set1) {
+			// This map will be filled in if this table was previously discovered
+			if (!table_set.contains(tableName) && !dbi.getReservedTblNames().contains(tableName)) {
+				logger.info(EELFLoggerDelegate.applicationLogger, "New table discovered: "+tableName);
+				try {
+					TableInfo ti = dbi.getTableInfo(tableName);
+					mi.initializeMusicForTable(ti,tableName);
+					//\TODO Verify if table info can be modify in the previous step, if not this step can be deleted
+					ti = dbi.getTableInfo(tableName);
+					mi.createDirtyRowTable(ti,tableName);
+					dbi.createSQLTriggers(tableName);
+					table_set.add(tableName);
+					synchronizeTableData(tableName);
+					logger.debug(EELFLoggerDelegate.applicationLogger, "synchronized tables:" +
+								table_set.size() + "/" + set1.size() + "tables uploaded");
+				} catch (Exception e) {
+					logger.error(EELFLoggerDelegate.errorLogger, e.getMessage(),AppMessages.UNKNOWNERROR, ErrorSeverity.CRITICAL, ErrorTypes.QUERYERROR);
+					// NOTE(review): the QueryException is thrown without message or cause — the
+					// caller only learns that *some* table failed; consider chaining 'e' if
+					// QueryException supports it.
+					throw new QueryException();
+				}
+			}
+		}
+
+//		Set<String> set2 = getMusicTableSet(music_ns);
+		// not working - fix later
+//		for (String tbl : set2) {
+//			if (!set1.contains(tbl)) {
+//				logger.debug("Old table dropped: "+tbl);
+//				dropSQLTriggers(tbl, conn);
+//				// ZZTODO drop camunda table ?
+//			}
+//		}
+	}
+
+	/**
+	 * On startup, copy dirty data from Cassandra to H2. May not be needed.
+	 * @param tableName
+	 */
+	public void synchronizeTableData(String tableName) {
+		// TODO - copy MUSIC -> H2
+		dbi.synchronizeData(tableName);
+	}
+	/**
+	 * This method is called whenever there is a SELECT on a local SQL table, and should be called by the underlying databases
+	 * triggering mechanism. It first checks the local dirty bits table to see if there are any keys in Cassandra whose value
+	 * has not yet been sent to SQL. If there are, the appropriate values are copied from Cassandra to the local database.
+	 * Under normal execution, this function behaves as a NOP operation.
+	 * @param tableName This is the table on which the SELECT is being performed
+	 */
+	public void readDirtyRowsAndUpdateDb(String tableName) {
+		mi.readDirtyRowsAndUpdateDb(dbi,tableName);
+	}
+
+	/**
+	 * This method gets the primary key that the music interfaces uses by default.
+	 * If the front end uses a primary key, this will not match what is used in the MUSIC interface
+	 * @return the name of MUSIC's default primary key column
+	 */
+	public String getMusicDefaultPrimaryKeyName() {
+		return mi.getMusicDefaultPrimaryKeyName();
+	}
+
+	/**
+	 * Asks music interface to provide the function to create a primary key
+	 * e.g. uuid(), 1, "unique_aksd419fjc"
+	 * @return a freshly generated unique key
+	 */
+	public String generateUniqueKey() {
+		return mi.generateUniqueKey();
+	}
+
+	/**
+	 * Perform a commit, as requested by the JDBC driver.  If any row updates have been delayed,
+	 * they are performed now and copied into MUSIC.
+	 * @throws MDBCServiceException
+	 */
+	public synchronized void commit(String txId, TxCommitProgress progressKeeper, DatabasePartition partition) throws MDBCServiceException {
+		logger.debug(EELFLoggerDelegate.applicationLogger, " commit ");
+		// transaction was committed -- add all the updates into the REDO-Log in MUSIC
+		try {
+			mi.commitLog(dbi, partition, transactionDigest, txId, progressKeeper);
+		}catch(MDBCServiceException e) {
+			logger.error(EELFLoggerDelegate.errorLogger, e.getMessage(), AppMessages.QUERYERROR, ErrorTypes.QUERYERROR, ErrorSeverity.CRITICAL);
+			throw e;
+		}
+	}
+
+	/**
+	 * Perform a rollback, as requested by the JDBC driver.  If any row updates have been delayed,
+	 * they are discarded.
+	 */
+	public synchronized void rollback() {
+		// transaction was rolled back - discard the updates
+		// (stray empty statement ";;" removed)
+		logger.debug(EELFLoggerDelegate.applicationLogger, "Rollback");
+		transactionDigest.clear();
+	}
+
+	/**
+	 * Derives the MUSIC key for a row using all non-primary-index columns of the table.
+	 * @param table table name
+	 * @param dbRow the row as a JSON object
+	 * @return the MUSIC key
+	 */
+	public String getMusicKeyFromRowWithoutPrimaryIndexes(String table, JSONObject dbRow) {
+		TableInfo ti = dbi.getTableInfo(table);
+		return mi.getMusicKeyFromRowWithoutPrimaryIndexes(ti,table, dbRow);
+	}
+
+	/** Derives the MUSIC key for a row of the given table. */
+	public String getMusicKeyFromRow(String table, JSONObject dbRow) {
+		TableInfo ti = dbi.getTableInfo(table);
+		return mi.getMusicKeyFromRow(ti,table, dbRow);
+	}
+
+	/**
+	 * Returns all keys that matches the current sql statement, and not in already updated keys.
+	 * Currently a stub: the parsing logic is commented out and an empty list is always returned.
+	 *
+	 * @param sql the query that we are getting keys for
+	 * @deprecated
+	 */
+	public ArrayList<String> getMusicKeys(String sql) {
+		ArrayList<String> musicKeys = new ArrayList<String>();
+		//\TODO See if this is required
+		/*
+		try {
+			net.sf.jsqlparser.statement.Statement stmt = CCJSqlParserUtil.parse(sql);
+			if (stmt instanceof Insert) {
+				Insert s = (Insert) stmt;
+				String tbl = s.getTable().getName();
+				musicKeys.add(generatePrimaryKey());
+			} else {
+				String tbl;
+				String where = "";
+				if (stmt instanceof Update){
+					Update u = (Update) stmt;
+					tbl = u.getTables().get(0).getName();
+					where = u.getWhere().toString();
+				} else if (stmt instanceof Delete) {
+					Delete d = (Delete) stmt;
+					tbl = d.getTable().getName();
+					if (d.getWhere()!=null) {
+						where = d.getWhere().toString();
+					}
+				} else {
+					System.err.println("Not recognized sql type");
+					tbl = "";
+				}
+				String dbiSelect = "SELECT * FROM " + tbl;
+				if (!where.equals("")) {
+					dbiSelect += "WHERE" + where;
+				}
+				ResultSet rs = dbi.executeSQLRead(dbiSelect);
+				musicKeys.addAll(getMusicKeysWhere(tbl, Utils.parseResults(dbi.getTableInfo(tbl), rs)));
+				rs.getStatement().close();
+			}
+		} catch (JSQLParserException | SQLException e) {
+
+			e.printStackTrace();
+		}
+		System.err.print("MusicKeys:");
+		for(String musicKey:musicKeys) {
+			System.out.print(musicKey + ",");
+		}
+		*/
+		return musicKeys;
+	}
+}
diff --git a/src/main/java/com/att/research/mdbc/ProxyStatement.java b/src/main/java/com/att/research/mdbc/ProxyStatement.java
new file mode 100755
index 0000000..0b5edd8
--- /dev/null
+++ b/src/main/java/com/att/research/mdbc/ProxyStatement.java
@@ -0,0 +1,1262 @@
+package com.att.research.mdbc;
+
+import java.io.InputStream;
+import java.io.Reader;
+import java.math.BigDecimal;
+import java.net.URL;
+import java.sql.Array;
+import java.sql.Blob;
+import java.sql.CallableStatement;
+import java.sql.Clob;
+import java.sql.Connection;
+import java.sql.Date;
+import java.sql.NClob;
+import java.sql.ParameterMetaData;
+import java.sql.PreparedStatement;
+import java.sql.Ref;
+import java.sql.ResultSet;
+import java.sql.ResultSetMetaData;
+import java.sql.RowId;
+import java.sql.SQLException;
+import java.sql.SQLWarning;
+import java.sql.SQLXML;
+import java.sql.Statement;
+import java.sql.Time;
+import java.sql.Timestamp;
+import java.util.Calendar;
+import java.util.Map;
+
+import org.apache.log4j.Logger;
+
+import com.att.research.exceptions.QueryException;
+
+/**
+ * ProxyStatement is a proxy Statement that front ends Statements from the underlying JDBC driver. It passes all operations through,
+ * and invokes the MusicSqlManager when there is the possibility that database tables have been created or dropped.
+ *
+ * @author Robert Eby
+ */
+public class ProxyStatement implements CallableStatement {
+ private static final Logger logger = Logger.getLogger(ProxyStatement.class);
+ private static final String DATASTAX_PREFIX = "com.datastax.driver";
+
+ private final Statement stmt; // the Statement that we are proxying
+ private final MusicSqlManager mgr;
+
+	/**
+	 * Builds a proxy around an underlying JDBC Statement.
+	 *
+	 * @param s the Statement (possibly a PreparedStatement/CallableStatement) to delegate to
+	 * @param m the MusicSqlManager whose pre/post hooks are invoked around executions
+	 */
+	public ProxyStatement(Statement s, MusicSqlManager m) {
+		this.stmt = s;
+		this.mgr = m;
+	}
+
+	// Wrapper-interface plumbing: pass straight through to the underlying Statement.
+	@Override
+	public <T> T unwrap(Class<T> iface) throws SQLException {
+		return stmt.unwrap(iface);
+	}
+
+	@Override
+	public boolean isWrapperFor(Class<?> iface) throws SQLException {
+		return stmt.isWrapperFor(iface);
+	}
+
+	/**
+	 * Executes a query, invoking the MusicSqlManager pre/post hooks around the
+	 * underlying execution and synchronizing tables afterwards.
+	 *
+	 * NOTE(review): exceptions whose class name starts with the DataStax driver
+	 * prefix are swallowed; in that case this method returns null, so callers
+	 * must be prepared for a null ResultSet.
+	 */
+	@Override
+	public ResultSet executeQuery(String sql) throws SQLException {
+		logger.debug("executeQuery: "+sql);
+		ResultSet r = null;
+		try {
+			mgr.preStatementHook(sql);
+			r = stmt.executeQuery(sql);
+			mgr.postStatementHook(sql);
+			synchronizeTables(sql);
+		} catch (Exception e) {
+			String nm = e.getClass().getName();
+			logger.warn("executeQuery: exception "+nm);
+			if (!nm.startsWith(DATASTAX_PREFIX))
+				throw e;
+		}
+		return r;
+	}
+
+	/**
+	 * Executes an update, invoking the MusicSqlManager pre/post hooks around the
+	 * underlying execution and synchronizing tables afterwards.
+	 *
+	 * NOTE(review): DataStax driver exceptions are swallowed, in which case 0 is
+	 * returned even though the update may not have happened.
+	 */
+	@Override
+	public int executeUpdate(String sql) throws SQLException {
+		logger.debug("executeUpdate: "+sql);
+		int n = 0;
+		try {
+			mgr.preStatementHook(sql);
+			n = stmt.executeUpdate(sql);
+			mgr.postStatementHook(sql);
+			synchronizeTables(sql);
+		} catch (Exception e) {
+			String nm = e.getClass().getName();
+			logger.warn("executeUpdate: exception "+nm);
+			if (!nm.startsWith(DATASTAX_PREFIX))
+				throw e;
+		}
+		return n;
+	}
+
+	// ---- Plain pass-through delegates to the underlying Statement (no MUSIC involvement). ----
+	@Override
+	public void close() throws SQLException {
+		stmt.close();
+	}
+
+	@Override
+	public int getMaxFieldSize() throws SQLException {
+		return stmt.getMaxFieldSize();
+	}
+
+	@Override
+	public void setMaxFieldSize(int max) throws SQLException {
+		stmt.setMaxFieldSize(max);
+	}
+
+	@Override
+	public int getMaxRows() throws SQLException {
+		return stmt.getMaxRows();
+	}
+
+	@Override
+	public void setMaxRows(int max) throws SQLException {
+		stmt.setMaxRows(max);
+	}
+
+	@Override
+	public void setEscapeProcessing(boolean enable) throws SQLException {
+		stmt.setEscapeProcessing(enable);
+	}
+
+	@Override
+	public int getQueryTimeout() throws SQLException {
+		return stmt.getQueryTimeout();
+	}
+
+	@Override
+	public void setQueryTimeout(int seconds) throws SQLException {
+		stmt.setQueryTimeout(seconds);
+	}
+
+	@Override
+	public void cancel() throws SQLException {
+		stmt.cancel();
+	}
+
+	@Override
+	public SQLWarning getWarnings() throws SQLException {
+		return stmt.getWarnings();
+	}
+
+	@Override
+	public void clearWarnings() throws SQLException {
+		stmt.clearWarnings();
+	}
+
+	@Override
+	public void setCursorName(String name) throws SQLException {
+		stmt.setCursorName(name);
+	}
+
+	/**
+	 * Executes an arbitrary SQL statement with MUSIC pre/post hooks and table
+	 * synchronization. DataStax driver exceptions are logged and ignored
+	 * (returning false); everything else is rethrown.
+	 */
+	@Override
+	public boolean execute(String sql) throws SQLException {
+		logger.debug("execute: "+sql);
+		boolean b = false;
+		try {
+			mgr.preStatementHook(sql);
+			b = stmt.execute(sql);
+			mgr.postStatementHook(sql);
+			synchronizeTables(sql);
+		} catch (Exception e) {
+			String nm = e.getClass().getName();
+			// Note: this seems to be the only call Camunda uses, so it is the only one I am fixing for now.
+			boolean ignore = nm.startsWith(DATASTAX_PREFIX);
+//			ignore |= (nm.startsWith("org.h2.jdbc.JdbcSQLException") && e.getMessage().contains("already exists"));
+			if (ignore) {
+				logger.warn("execute: exception (IGNORED) "+nm);
+			} else {
+				logger.warn("execute: exception "+nm);
+				throw e;
+			}
+		}
+		return b;
+	}
+
+	// ---- Result/fetch/batch accessors: plain pass-through delegates. ----
+	@Override
+	public ResultSet getResultSet() throws SQLException {
+		return stmt.getResultSet();
+	}
+
+	@Override
+	public int getUpdateCount() throws SQLException {
+		return stmt.getUpdateCount();
+	}
+
+	@Override
+	public boolean getMoreResults() throws SQLException {
+		return stmt.getMoreResults();
+	}
+
+	@Override
+	public void setFetchDirection(int direction) throws SQLException {
+		stmt.setFetchDirection(direction);
+	}
+
+	@Override
+	public int getFetchDirection() throws SQLException {
+		return stmt.getFetchDirection();
+	}
+
+	@Override
+	public void setFetchSize(int rows) throws SQLException {
+		stmt.setFetchSize(rows);
+	}
+
+	@Override
+	public int getFetchSize() throws SQLException {
+		return stmt.getFetchSize();
+	}
+
+	@Override
+	public int getResultSetConcurrency() throws SQLException {
+		return stmt.getResultSetConcurrency();
+	}
+
+	@Override
+	public int getResultSetType() throws SQLException {
+		return stmt.getResultSetType();
+	}
+
+	@Override
+	public void addBatch(String sql) throws SQLException {
+		stmt.addBatch(sql);
+	}
+
+	@Override
+	public void clearBatch() throws SQLException {
+		stmt.clearBatch();
+	}
+
+	/**
+	 * Executes the batch on the underlying Statement. Note that the MUSIC
+	 * pre/post statement hooks are NOT run here (batches are unsupported by
+	 * MDBC), so replicated state may diverge; a warning is logged.
+	 */
+	@Override
+	public int[] executeBatch() throws SQLException {
+		logger.debug("executeBatch");
+		int[] n = null;
+		try {
+			logger.warn("executeBatch() is not supported by MDBC; your results may be incorrect as a result.");
+			n = stmt.executeBatch();
+			synchronizeTables(null);
+		} catch (Exception e) {
+			String nm = e.getClass().getName();
+			logger.warn("executeBatch: exception "+nm);
+			if (!nm.startsWith(DATASTAX_PREFIX))
+				throw e;
+		}
+		return n;
+	}
+
+	// ---- More plain pass-through delegates. ----
+	@Override
+	public Connection getConnection() throws SQLException {
+		return stmt.getConnection();
+	}
+
+	@Override
+	public boolean getMoreResults(int current) throws SQLException {
+		return stmt.getMoreResults(current);
+	}
+
+	@Override
+	public ResultSet getGeneratedKeys() throws SQLException {
+		return stmt.getGeneratedKeys();
+	}
+
+	// ---- executeUpdate overloads: same hook/synchronize/ignore-DataStax pattern as executeUpdate(String). ----
+	@Override
+	public int executeUpdate(String sql, int autoGeneratedKeys) throws SQLException {
+		logger.debug("executeUpdate: "+sql);
+		int n = 0;
+		try {
+			mgr.preStatementHook(sql);
+			n = stmt.executeUpdate(sql, autoGeneratedKeys);
+			mgr.postStatementHook(sql);
+			synchronizeTables(sql);
+		} catch (Exception e) {
+			String nm = e.getClass().getName();
+			logger.warn("executeUpdate: exception "+nm);
+			if (!nm.startsWith(DATASTAX_PREFIX))
+				throw e;
+		}
+		return n;
+	}
+
+	@Override
+	public int executeUpdate(String sql, int[] columnIndexes) throws SQLException {
+		logger.debug("executeUpdate: "+sql);
+		int n = 0;
+		try {
+			mgr.preStatementHook(sql);
+			n = stmt.executeUpdate(sql, columnIndexes);
+			mgr.postStatementHook(sql);
+			synchronizeTables(sql);
+		} catch (Exception e) {
+			String nm = e.getClass().getName();
+			logger.warn("executeUpdate: exception "+nm);
+			if (!nm.startsWith(DATASTAX_PREFIX))
+				throw e;
+		}
+		return n;
+	}
+
+	@Override
+	public int executeUpdate(String sql, String[] columnNames) throws SQLException {
+		logger.debug("executeUpdate: "+sql);
+		int n = 0;
+		try {
+			mgr.preStatementHook(sql);
+			n = stmt.executeUpdate(sql, columnNames);
+			mgr.postStatementHook(sql);
+			synchronizeTables(sql);
+		} catch (Exception e) {
+			String nm = e.getClass().getName();
+			logger.warn("executeUpdate: exception "+nm);
+			if (!nm.startsWith(DATASTAX_PREFIX))
+				throw e;
+		}
+		return n;
+	}
+
+	// ---- execute overloads: same hook/synchronize/ignore-DataStax pattern as execute(String),
+	// but without the extra Camunda "already exists" handling. ----
+	@Override
+	public boolean execute(String sql, int autoGeneratedKeys) throws SQLException {
+		logger.debug("execute: "+sql);
+		boolean b = false;
+		try {
+			mgr.preStatementHook(sql);
+			b = stmt.execute(sql, autoGeneratedKeys);
+			mgr.postStatementHook(sql);
+			synchronizeTables(sql);
+		} catch (Exception e) {
+			String nm = e.getClass().getName();
+			logger.warn("execute: exception "+nm);
+			if (!nm.startsWith(DATASTAX_PREFIX))
+				throw e;
+		}
+		return b;
+	}
+
+	@Override
+	public boolean execute(String sql, int[] columnIndexes) throws SQLException {
+		logger.debug("execute: "+sql);
+		boolean b = false;
+		try {
+			mgr.preStatementHook(sql);
+			b = stmt.execute(sql, columnIndexes);
+			mgr.postStatementHook(sql);
+			synchronizeTables(sql);
+		} catch (Exception e) {
+			String nm = e.getClass().getName();
+			logger.warn("execute: exception "+nm);
+			if (!nm.startsWith(DATASTAX_PREFIX))
+				throw e;
+		}
+		return b;
+	}
+
+	@Override
+	public boolean execute(String sql, String[] columnNames) throws SQLException {
+		logger.debug("execute: "+sql);
+		boolean b = false;
+		try {
+			mgr.preStatementHook(sql);
+			b = stmt.execute(sql, columnNames);
+			mgr.postStatementHook(sql);
+			synchronizeTables(sql);
+		} catch (Exception e) {
+			String nm = e.getClass().getName();
+			logger.warn("execute: exception "+nm);
+			if (!nm.startsWith(DATASTAX_PREFIX))
+				throw e;
+		}
+		return b;
+	}
+
+	// ---- Lifecycle/pooling accessors: plain pass-through delegates. ----
+	@Override
+	public int getResultSetHoldability() throws SQLException {
+		return stmt.getResultSetHoldability();
+	}
+
+	@Override
+	public boolean isClosed() throws SQLException {
+		return stmt.isClosed();
+	}
+
+	@Override
+	public void setPoolable(boolean poolable) throws SQLException {
+		stmt.setPoolable(poolable);
+	}
+
+	@Override
+	public boolean isPoolable() throws SQLException {
+		return stmt.isPoolable();
+	}
+
+	@Override
+	public void closeOnCompletion() throws SQLException {
+		stmt.closeOnCompletion();
+	}
+
+	@Override
+	public boolean isCloseOnCompletion() throws SQLException {
+		return stmt.isCloseOnCompletion();
+	}
+
+	// ---- PreparedStatement execution: assumes the wrapped stmt is a PreparedStatement.
+	// NOTE(review): unlike the String-based variants above, these do NOT run the MUSIC
+	// pre/post hooks - TODO confirm that is intentional.
+	@Override
+	public ResultSet executeQuery() throws SQLException {
+		logger.debug("executeQuery");
+		return ((PreparedStatement)stmt).executeQuery();
+	}
+
+	@Override
+	public int executeUpdate() throws SQLException {
+		logger.debug("executeUpdate");
+		return ((PreparedStatement)stmt).executeUpdate();
+	}
+
+	// ---- PreparedStatement parameter setters: cast and delegate.
+	// These all throw ClassCastException if the wrapped stmt is a plain Statement. ----
+	@Override
+	public void setNull(int parameterIndex, int sqlType) throws SQLException {
+		((PreparedStatement)stmt).setNull(parameterIndex, sqlType);
+	}
+
+	@Override
+	public void setBoolean(int parameterIndex, boolean x) throws SQLException {
+		((PreparedStatement)stmt).setBoolean(parameterIndex, x);
+	}
+
+	@Override
+	public void setByte(int parameterIndex, byte x) throws SQLException {
+		((PreparedStatement)stmt).setByte(parameterIndex, x);
+	}
+
+	@Override
+	public void setShort(int parameterIndex, short x) throws SQLException {
+		((PreparedStatement)stmt).setShort(parameterIndex, x);
+	}
+
+	@Override
+	public void setInt(int parameterIndex, int x) throws SQLException {
+		((PreparedStatement)stmt).setInt(parameterIndex, x);
+	}
+
+	@Override
+	public void setLong(int parameterIndex, long x) throws SQLException {
+		((PreparedStatement)stmt).setLong(parameterIndex, x);
+	}
+
+	@Override
+	public void setFloat(int parameterIndex, float x) throws SQLException {
+		((PreparedStatement)stmt).setFloat(parameterIndex, x);
+	}
+
+	@Override
+	public void setDouble(int parameterIndex, double x) throws SQLException {
+		((PreparedStatement)stmt).setDouble(parameterIndex, x);
+	}
+
+	@Override
+	public void setBigDecimal(int parameterIndex, BigDecimal x) throws SQLException {
+		((PreparedStatement)stmt).setBigDecimal(parameterIndex, x);
+	}
+
+	@Override
+	public void setString(int parameterIndex, String x) throws SQLException {
+		((PreparedStatement)stmt).setString(parameterIndex, x);
+	}
+
+	@Override
+	public void setBytes(int parameterIndex, byte[] x) throws SQLException {
+		((PreparedStatement)stmt).setBytes(parameterIndex, x);
+	}
+
+	@Override
+	public void setDate(int parameterIndex, Date x) throws SQLException {
+		((PreparedStatement)stmt).setDate(parameterIndex, x);
+	}
+
+	@Override
+	public void setTime(int parameterIndex, Time x) throws SQLException {
+		((PreparedStatement)stmt).setTime(parameterIndex, x);
+	}
+
+	@Override
+	public void setTimestamp(int parameterIndex, Timestamp x) throws SQLException {
+		((PreparedStatement)stmt).setTimestamp(parameterIndex, x);
+	}
+
+	@Override
+	public void setAsciiStream(int parameterIndex, InputStream x, int length) throws SQLException {
+		((PreparedStatement)stmt).setAsciiStream(parameterIndex, x, length);
+	}
+
+	// Deprecated in JDBC, but must still be implemented to satisfy the interface.
+	@SuppressWarnings("deprecation")
+	@Override
+	public void setUnicodeStream(int parameterIndex, InputStream x, int length) throws SQLException {
+		((PreparedStatement)stmt).setUnicodeStream(parameterIndex, x, length);
+	}
+
+	@Override
+	public void setBinaryStream(int parameterIndex, InputStream x, int length) throws SQLException {
+		((PreparedStatement)stmt).setBinaryStream(parameterIndex, x, length);
+	}
+
+	@Override
+	public void clearParameters() throws SQLException {
+		((PreparedStatement)stmt).clearParameters();
+	}
+
+	@Override
+	public void setObject(int parameterIndex, Object x, int targetSqlType) throws SQLException {
+		((PreparedStatement)stmt).setObject(parameterIndex, x, targetSqlType);
+	}
+
+	@Override
+	public void setObject(int parameterIndex, Object x) throws SQLException {
+		((PreparedStatement)stmt).setObject(parameterIndex, x);
+	}
+
+	@Override
+	public boolean execute() throws SQLException {
+		return ((PreparedStatement)stmt).execute();
+	}
+
+	@Override
+	public void addBatch() throws SQLException {
+		((PreparedStatement)stmt).addBatch();
+	}
+
+	@Override
+	public void setCharacterStream(int parameterIndex, Reader reader, int length) throws SQLException {
+		((PreparedStatement)stmt).setCharacterStream(parameterIndex, reader, length);
+	}
+
+	@Override
+	public void setRef(int parameterIndex, Ref x) throws SQLException {
+		((PreparedStatement)stmt).setRef(parameterIndex, x);
+	}
+
+	@Override
+	public void setBlob(int parameterIndex, Blob x) throws SQLException {
+		((PreparedStatement)stmt).setBlob(parameterIndex, x);
+	}
+
+	@Override
+	public void setClob(int parameterIndex, Clob x) throws SQLException {
+		((PreparedStatement)stmt).setClob(parameterIndex, x);
+	}
+
+	@Override
+	public void setArray(int parameterIndex, Array x) throws SQLException {
+		((PreparedStatement)stmt).setArray(parameterIndex, x);
+	}
+
+	@Override
+	public ResultSetMetaData getMetaData() throws SQLException {
+		return ((PreparedStatement)stmt).getMetaData();
+	}
+
+	@Override
+	public void setDate(int parameterIndex, Date x, Calendar cal) throws SQLException {
+		((PreparedStatement)stmt).setDate(parameterIndex, x, cal);
+	}
+
+	@Override
+	public void setTime(int parameterIndex, Time x, Calendar cal) throws SQLException {
+		((PreparedStatement)stmt).setTime(parameterIndex, x, cal);
+	}
+
+ @Override
+ public void setTimestamp(int parameterIndex, Timestamp x, Calendar cal) throws SQLException {
+ ((CallableStatement)stmt).setTimestamp(parameterIndex, x, cal);
+ }
+
+ @Override
+ public void setNull(int parameterIndex, int sqlType, String typeName) throws SQLException {
+ ((CallableStatement)stmt).setNull(parameterIndex, sqlType, typeName);
+ }
+
+ @Override
+ public void setURL(int parameterIndex, URL x) throws SQLException {
+ ((CallableStatement)stmt).setURL(parameterIndex, x);
+ }
+
+ @Override
+ public ParameterMetaData getParameterMetaData() throws SQLException {
+ return ((CallableStatement)stmt).getParameterMetaData();
+ }
+
+ @Override
+ public void setRowId(int parameterIndex, RowId x) throws SQLException {
+ ((CallableStatement)stmt).setRowId(parameterIndex, x);
+ }
+
+ @Override
+ public void setNString(int parameterIndex, String value) throws SQLException {
+ ((CallableStatement)stmt).setNString(parameterIndex, value);
+ }
+
+ @Override
+ public void setNCharacterStream(int parameterIndex, Reader value, long length) throws SQLException {
+ ((CallableStatement)stmt).setNCharacterStream(parameterIndex, value, length);
+ }
+
+ @Override
+ public void setNClob(int parameterIndex, NClob value) throws SQLException {
+ ((CallableStatement)stmt).setNClob(parameterIndex, value);
+ }
+
+ @Override
+ public void setClob(int parameterIndex, Reader reader, long length) throws SQLException {
+ ((CallableStatement)stmt).setClob(parameterIndex, reader, length);
+ }
+
+ @Override
+ public void setBlob(int parameterIndex, InputStream inputStream, long length) throws SQLException {
+ ((CallableStatement)stmt).setBlob(parameterIndex, inputStream, length);
+ }
+
+ @Override
+ public void setNClob(int parameterIndex, Reader reader, long length) throws SQLException {
+ ((CallableStatement)stmt).setNClob(parameterIndex, reader, length);
+ }
+
+ @Override
+ public void setSQLXML(int parameterIndex, SQLXML xmlObject) throws SQLException {
+ ((CallableStatement)stmt).setSQLXML(parameterIndex, xmlObject);
+ }
+
+ @Override
+ public void setObject(int parameterIndex, Object x, int targetSqlType, int scaleOrLength) throws SQLException {
+ ((CallableStatement)stmt).setObject(parameterIndex, x, targetSqlType, scaleOrLength);
+ }
+
+ @Override
+ public void setAsciiStream(int parameterIndex, InputStream x, long length) throws SQLException {
+ ((CallableStatement)stmt).setAsciiStream(parameterIndex, x, length);
+ }
+
+ @Override
+ public void setBinaryStream(int parameterIndex, InputStream x, long length) throws SQLException {
+ ((CallableStatement)stmt).setBinaryStream(parameterIndex, x, length);
+ }
+
+ @Override
+ public void setCharacterStream(int parameterIndex, Reader reader, long length) throws SQLException {
+ ((CallableStatement)stmt).setCharacterStream(parameterIndex, reader, length);
+ }
+
+ @Override
+ public void setAsciiStream(int parameterIndex, InputStream x) throws SQLException {
+ ((CallableStatement)stmt).setAsciiStream(parameterIndex, x);
+ }
+
+ @Override
+ public void setBinaryStream(int parameterIndex, InputStream x) throws SQLException {
+ ((CallableStatement)stmt).setBinaryStream(parameterIndex, x);
+ }
+
+ @Override
+ public void setCharacterStream(int parameterIndex, Reader reader) throws SQLException {
+ ((CallableStatement)stmt).setCharacterStream(parameterIndex, reader);
+ }
+
+ @Override
+ public void setNCharacterStream(int parameterIndex, Reader value) throws SQLException {
+ ((CallableStatement)stmt).setNCharacterStream(parameterIndex, value);
+ }
+
+ @Override
+ public void setClob(int parameterIndex, Reader reader) throws SQLException {
+ ((CallableStatement)stmt).setClob(parameterIndex, reader);
+ }
+
+ @Override
+ public void setBlob(int parameterIndex, InputStream inputStream) throws SQLException {
+ ((CallableStatement)stmt).setBlob(parameterIndex, inputStream);
+ }
+
+ @Override
+ public void setNClob(int parameterIndex, Reader reader) throws SQLException {
+ ((CallableStatement)stmt).setNClob(parameterIndex, reader);
+ }
+
+	// ---- CallableStatement-only operations: these legitimately require the wrapped
+	// stmt to be a CallableStatement, so the cast here is correct. ----
+	@Override
+	public void registerOutParameter(int parameterIndex, int sqlType) throws SQLException {
+		((CallableStatement)stmt).registerOutParameter(parameterIndex, sqlType);
+	}
+
+	@Override
+	public void registerOutParameter(int parameterIndex, int sqlType, int scale) throws SQLException {
+		((CallableStatement)stmt).registerOutParameter(parameterIndex, sqlType, scale);
+	}
+
+	@Override
+	public boolean wasNull() throws SQLException {
+		return ((CallableStatement)stmt).wasNull();
+	}
+
+	// ---- CallableStatement OUT-parameter getters (by index): cast and delegate.
+	// Declared only on CallableStatement, so the casts are correct; they throw
+	// ClassCastException if the wrapped stmt is not a CallableStatement. ----
+	@Override
+	public String getString(int parameterIndex) throws SQLException {
+		return ((CallableStatement)stmt).getString(parameterIndex);
+	}
+
+	@Override
+	public boolean getBoolean(int parameterIndex) throws SQLException {
+		return ((CallableStatement)stmt).getBoolean(parameterIndex);
+	}
+
+	@Override
+	public byte getByte(int parameterIndex) throws SQLException {
+		return ((CallableStatement)stmt).getByte(parameterIndex);
+	}
+
+	@Override
+	public short getShort(int parameterIndex) throws SQLException {
+		return ((CallableStatement)stmt).getShort(parameterIndex);
+	}
+
+	@Override
+	public int getInt(int parameterIndex) throws SQLException {
+		return ((CallableStatement)stmt).getInt(parameterIndex);
+	}
+
+	@Override
+	public long getLong(int parameterIndex) throws SQLException {
+		return ((CallableStatement)stmt).getLong(parameterIndex);
+	}
+
+	@Override
+	public float getFloat(int parameterIndex) throws SQLException {
+		return ((CallableStatement)stmt).getFloat(parameterIndex);
+	}
+
+	@Override
+	public double getDouble(int parameterIndex) throws SQLException {
+		return ((CallableStatement)stmt).getDouble(parameterIndex);
+	}
+
+	// Deprecated in JDBC, but must still be implemented to satisfy the interface.
+	@SuppressWarnings("deprecation")
+	@Override
+	public BigDecimal getBigDecimal(int parameterIndex, int scale) throws SQLException {
+		return ((CallableStatement)stmt).getBigDecimal(parameterIndex, scale);
+	}
+
+	@Override
+	public byte[] getBytes(int parameterIndex) throws SQLException {
+		return ((CallableStatement)stmt).getBytes(parameterIndex);
+	}
+
+	@Override
+	public Date getDate(int parameterIndex) throws SQLException {
+		return ((CallableStatement)stmt).getDate(parameterIndex);
+	}
+
+	@Override
+	public Time getTime(int parameterIndex) throws SQLException {
+		return ((CallableStatement)stmt).getTime(parameterIndex);
+	}
+
+	@Override
+	public Timestamp getTimestamp(int parameterIndex) throws SQLException {
+		return ((CallableStatement)stmt).getTimestamp(parameterIndex);
+	}
+
+	@Override
+	public Object getObject(int parameterIndex) throws SQLException {
+		return ((CallableStatement)stmt).getObject(parameterIndex);
+	}
+
+	@Override
+	public BigDecimal getBigDecimal(int parameterIndex) throws SQLException {
+		return ((CallableStatement)stmt).getBigDecimal(parameterIndex);
+	}
+
+	@Override
+	public Object getObject(int parameterIndex, Map<String, Class<?>> map) throws SQLException {
+		return ((CallableStatement)stmt).getObject(parameterIndex, map);
+	}
+
+	@Override
+	public Ref getRef(int parameterIndex) throws SQLException {
+		return ((CallableStatement)stmt).getRef(parameterIndex);
+	}
+
+	@Override
+	public Blob getBlob(int parameterIndex) throws SQLException {
+		return ((CallableStatement)stmt).getBlob(parameterIndex);
+	}
+
+	@Override
+	public Clob getClob(int parameterIndex) throws SQLException {
+		return ((CallableStatement)stmt).getClob(parameterIndex);
+	}
+
+	@Override
+	public Array getArray(int parameterIndex) throws SQLException {
+		return ((CallableStatement)stmt).getArray(parameterIndex);
+	}
+
+	@Override
+	public Date getDate(int parameterIndex, Calendar cal) throws SQLException {
+		return ((CallableStatement)stmt).getDate(parameterIndex, cal);
+	}
+
+	@Override
+	public Time getTime(int parameterIndex, Calendar cal) throws SQLException {
+		return ((CallableStatement)stmt).getTime(parameterIndex, cal);
+	}
+
+	@Override
+	public Timestamp getTimestamp(int parameterIndex, Calendar cal) throws SQLException {
+		return ((CallableStatement)stmt).getTimestamp(parameterIndex, cal);
+	}
+
+	@Override
+	public void registerOutParameter(int parameterIndex, int sqlType, String typeName) throws SQLException {
+		((CallableStatement)stmt).registerOutParameter(parameterIndex, sqlType, typeName);
+	}
+
+	// ---- CallableStatement named-parameter operations: cast and delegate. ----
+	@Override
+	public void registerOutParameter(String parameterName, int sqlType) throws SQLException {
+		((CallableStatement)stmt).registerOutParameter(parameterName, sqlType);
+	}
+
+	@Override
+	public void registerOutParameter(String parameterName, int sqlType, int scale) throws SQLException {
+		((CallableStatement)stmt).registerOutParameter(parameterName, sqlType, scale);
+	}
+
+	@Override
+	public void registerOutParameter(String parameterName, int sqlType, String typeName) throws SQLException {
+		((CallableStatement)stmt).registerOutParameter(parameterName, sqlType, typeName);
+	}
+
+	@Override
+	public URL getURL(int parameterIndex) throws SQLException {
+		return ((CallableStatement)stmt).getURL(parameterIndex);
+	}
+
+	@Override
+	public void setURL(String parameterName, URL val) throws SQLException {
+		((CallableStatement)stmt).setURL(parameterName, val);
+	}
+
+	@Override
+	public void setNull(String parameterName, int sqlType) throws SQLException {
+		((CallableStatement)stmt).setNull(parameterName, sqlType);
+	}
+
+	@Override
+	public void setBoolean(String parameterName, boolean x) throws SQLException {
+		((CallableStatement)stmt).setBoolean(parameterName, x);
+	}
+
+	@Override
+	public void setByte(String parameterName, byte x) throws SQLException {
+		((CallableStatement)stmt).setByte(parameterName, x);
+	}
+
+	@Override
+	public void setShort(String parameterName, short x) throws SQLException {
+		((CallableStatement)stmt).setShort(parameterName, x);
+	}
+
+	@Override
+	public void setInt(String parameterName, int x) throws SQLException {
+		((CallableStatement)stmt).setInt(parameterName, x);
+	}
+
+	@Override
+	public void setLong(String parameterName, long x) throws SQLException {
+		((CallableStatement)stmt).setLong(parameterName, x);
+	}
+
+	@Override
+	public void setFloat(String parameterName, float x) throws SQLException {
+		((CallableStatement)stmt).setFloat(parameterName, x);
+	}
+
+	@Override
+	public void setDouble(String parameterName, double x) throws SQLException {
+		((CallableStatement)stmt).setDouble(parameterName, x);
+	}
+
+	@Override
+	public void setBigDecimal(String parameterName, BigDecimal x) throws SQLException {
+		((CallableStatement)stmt).setBigDecimal(parameterName, x);
+	}
+
+	@Override
+	public void setString(String parameterName, String x) throws SQLException {
+		((CallableStatement)stmt).setString(parameterName, x);
+	}
+
+	@Override
+	public void setBytes(String parameterName, byte[] x) throws SQLException {
+		((CallableStatement)stmt).setBytes(parameterName, x);
+	}
+
+	@Override
+	public void setDate(String parameterName, Date x) throws SQLException {
+		((CallableStatement)stmt).setDate(parameterName, x);
+	}
+
+	@Override
+	public void setTime(String parameterName, Time x) throws SQLException {
+		((CallableStatement)stmt).setTime(parameterName, x);
+	}
+
+	@Override
+	public void setTimestamp(String parameterName, Timestamp x) throws SQLException {
+		((CallableStatement)stmt).setTimestamp(parameterName, x);
+	}
+
+	@Override
+	public void setAsciiStream(String parameterName, InputStream x, int length) throws SQLException {
+		((CallableStatement)stmt).setAsciiStream(parameterName, x, length);
+	}
+
+	@Override
+	public void setBinaryStream(String parameterName, InputStream x, int length) throws SQLException {
+		((CallableStatement)stmt).setBinaryStream(parameterName, x, length);
+	}
+
+	@Override
+	public void setObject(String parameterName, Object x, int targetSqlType, int scale) throws SQLException {
+		((CallableStatement)stmt).setObject(parameterName, x, targetSqlType, scale);
+	}
+
+	@Override
+	public void setObject(String parameterName, Object x, int targetSqlType) throws SQLException {
+		((CallableStatement)stmt).setObject(parameterName, x, targetSqlType);
+	}
+
+	@Override
+	public void setObject(String parameterName, Object x) throws SQLException {
+		((CallableStatement)stmt).setObject(parameterName, x);
+	}
+
+	@Override
+	public void setCharacterStream(String parameterName, Reader reader, int length) throws SQLException {
+		((CallableStatement)stmt).setCharacterStream(parameterName, reader, length);
+	}
+
+	@Override
+	public void setDate(String parameterName, Date x, Calendar cal) throws SQLException {
+		((CallableStatement)stmt).setDate(parameterName, x, cal);
+	}
+
+	@Override
+	public void setTime(String parameterName, Time x, Calendar cal) throws SQLException {
+		((CallableStatement)stmt).setTime(parameterName, x, cal);
+	}
+
+	@Override
+	public void setTimestamp(String parameterName, Timestamp x, Calendar cal) throws SQLException {
+		((CallableStatement)stmt).setTimestamp(parameterName, x, cal);
+	}
+
+	@Override
+	public void setNull(String parameterName, int sqlType, String typeName) throws SQLException {
+		((CallableStatement)stmt).setNull(parameterName, sqlType, typeName);
+	}
+
+	@Override
+	public String getString(String parameterName) throws SQLException {
+		return ((CallableStatement)stmt).getString(parameterName);
+	}
+
+	@Override
+	public boolean getBoolean(String parameterName) throws SQLException {
+		return ((CallableStatement)stmt).getBoolean(parameterName);
+	}
+
+	@Override
+	public byte getByte(String parameterName) throws SQLException {
+		return ((CallableStatement)stmt).getByte(parameterName);
+	}
+
+	@Override
+	public short getShort(String parameterName) throws SQLException {
+		return ((CallableStatement)stmt).getShort(parameterName);
+	}
+
+	@Override
+	public int getInt(String parameterName) throws SQLException {
+		return ((CallableStatement)stmt).getInt(parameterName);
+	}
+
+	@Override
+	public long getLong(String parameterName) throws SQLException {
+		return ((CallableStatement)stmt).getLong(parameterName);
+	}
+
+	@Override
+	public float getFloat(String parameterName) throws SQLException {
+		return ((CallableStatement)stmt).getFloat(parameterName);
+	}
+
+	@Override
+	public double getDouble(String parameterName) throws SQLException {
+		return ((CallableStatement)stmt).getDouble(parameterName);
+	}
+
+	@Override
+	public byte[] getBytes(String parameterName) throws SQLException {
+		return ((CallableStatement)stmt).getBytes(parameterName);
+	}
+
+	@Override
+	public Date getDate(String parameterName) throws SQLException {
+		return ((CallableStatement)stmt).getDate(parameterName);
+	}
+
+	@Override
+	public Time getTime(String parameterName) throws SQLException {
+		return ((CallableStatement)stmt).getTime(parameterName);
+	}
+
+	@Override
+	public Timestamp getTimestamp(String parameterName) throws SQLException {
+		return ((CallableStatement)stmt).getTimestamp(parameterName);
+	}
+
+	@Override
+	public Object getObject(String parameterName) throws SQLException {
+		return ((CallableStatement)stmt).getObject(parameterName);
+	}
+
+	@Override
+	public BigDecimal getBigDecimal(String parameterName) throws SQLException {
+		return ((CallableStatement)stmt).getBigDecimal(parameterName);
+	}
+
+	@Override
+	public Object getObject(String parameterName, Map<String, Class<?>> map) throws SQLException {
+		return ((CallableStatement)stmt).getObject(parameterName, map);
+	}
+
+	@Override
+	public Ref getRef(String parameterName) throws SQLException {
+		return ((CallableStatement)stmt).getRef(parameterName);
+	}
+
+	@Override
+	public Blob getBlob(String parameterName) throws SQLException {
+		return ((CallableStatement)stmt).getBlob(parameterName);
+	}
+
+	@Override
+	public Clob getClob(String parameterName) throws SQLException {
+		return ((CallableStatement)stmt).getClob(parameterName);
+	}
+
+	@Override
+	public Array getArray(String parameterName) throws SQLException {
+		return ((CallableStatement)stmt).getArray(parameterName);
+	}
+
+	@Override
+	public Date getDate(String parameterName, Calendar cal) throws SQLException {
+		return ((CallableStatement)stmt).getDate(parameterName, cal);
+	}
+
+	@Override
+	public Time getTime(String parameterName, Calendar cal) throws SQLException {
+		return ((CallableStatement)stmt).getTime(parameterName, cal);
+	}
+
+	@Override
+	public Timestamp getTimestamp(String parameterName, Calendar cal) throws SQLException {
+		return ((CallableStatement)stmt).getTimestamp(parameterName, cal);
+	}
+
+	@Override
+	public URL getURL(String parameterName) throws SQLException {
+		return ((CallableStatement)stmt).getURL(parameterName);
+	}
+
+	@Override
+	public RowId getRowId(int parameterIndex) throws SQLException {
+		return ((CallableStatement)stmt).getRowId(parameterIndex);
+	}
+
+	@Override
+	public RowId getRowId(String parameterName) throws SQLException {
+		return ((CallableStatement)stmt).getRowId(parameterName);
+	}
+
+	@Override
+	public void setRowId(String parameterName, RowId x) throws SQLException {
+		((CallableStatement)stmt).setRowId(parameterName, x);
+	}
+
+	@Override
+	public void setNString(String parameterName, String value) throws SQLException {
+		((CallableStatement)stmt).setNString(parameterName, value);
+	}
+
+	@Override
+	public void setNCharacterStream(String parameterName, Reader value, long length) throws SQLException {
+		((CallableStatement)stmt).setNCharacterStream(parameterName, value, length);
+	}
+
+	@Override
+	public void setNClob(String parameterName, NClob value) throws SQLException {
+		((CallableStatement)stmt).setNClob(parameterName, value);
+	}
+
+	@Override
+	public void setClob(String parameterName, Reader reader, long length) throws SQLException {
+		((CallableStatement)stmt).setClob(parameterName, reader, length);
+	}
+
+	@Override
+	public void setBlob(String parameterName, InputStream inputStream, long length) throws SQLException {
+		((CallableStatement)stmt).setBlob(parameterName, inputStream, length);
+	}
+
+	@Override
+	public void setNClob(String parameterName, Reader reader, long length) throws SQLException {
+		((CallableStatement)stmt).setNClob(parameterName, reader, length);
+	}
+
+	@Override
+	public NClob getNClob(int parameterIndex) throws SQLException {
+		return ((CallableStatement)stmt).getNClob(parameterIndex);
+	}
+
+	@Override
+	public NClob getNClob(String parameterName) throws SQLException {
+		return ((CallableStatement)stmt).getNClob(parameterName);
+	}
+
+	@Override
+	public void setSQLXML(String parameterName, SQLXML xmlObject) throws SQLException {
+		((CallableStatement)stmt).setSQLXML(parameterName, xmlObject);
+	}
+
+	@Override
+	public SQLXML getSQLXML(int parameterIndex) throws SQLException {
+		return ((CallableStatement)stmt).getSQLXML(parameterIndex);
+	}
+
+	@Override
+	public SQLXML getSQLXML(String parameterName) throws SQLException {
+		return ((CallableStatement)stmt).getSQLXML(parameterName);
+	}
+
+	@Override
+	public String getNString(int parameterIndex) throws SQLException {
+		return ((CallableStatement)stmt).getNString(parameterIndex);
+	}
+
+	@Override
+	public String getNString(String parameterName) throws SQLException {
+		return ((CallableStatement)stmt).getNString(parameterName);
+	}
+
+ @Override
+ public Reader getNCharacterStream(int parameterIndex) throws SQLException {
+ return ((CallableStatement)stmt).getNCharacterStream(parameterIndex);
+ }
+
+ @Override
+ public Reader getNCharacterStream(String parameterName) throws SQLException {
+ return ((CallableStatement)stmt).getNCharacterStream(parameterName);
+ }
+
+ @Override
+ public Reader getCharacterStream(int parameterIndex) throws SQLException {
+ return ((CallableStatement)stmt).getCharacterStream(parameterIndex);
+ }
+
+ @Override
+ public Reader getCharacterStream(String parameterName) throws SQLException {
+ return ((CallableStatement)stmt).getCharacterStream(parameterName);
+ }
+
+ @Override
+ public void setBlob(String parameterName, Blob x) throws SQLException {
+ ((CallableStatement)stmt).setBlob(parameterName, x);
+ }
+
+ @Override
+ public void setClob(String parameterName, Clob x) throws SQLException {
+ ((CallableStatement)stmt).setClob(parameterName, x);
+ }
+
+ @Override
+ public void setAsciiStream(String parameterName, InputStream x, long length) throws SQLException {
+ ((CallableStatement)stmt).setAsciiStream(parameterName, x, length);
+ }
+
+ @Override
+ public void setBinaryStream(String parameterName, InputStream x, long length) throws SQLException {
+ ((CallableStatement)stmt).setBinaryStream(parameterName, x, length);
+ }
+
+ @Override
+ public void setCharacterStream(String parameterName, Reader reader, long length) throws SQLException {
+ ((CallableStatement)stmt).setCharacterStream(parameterName, reader, length);
+ }
+
+ @Override
+ public void setAsciiStream(String parameterName, InputStream x) throws SQLException {
+ ((CallableStatement)stmt).setAsciiStream(parameterName, x);
+ }
+
+ @Override
+ public void setBinaryStream(String parameterName, InputStream x) throws SQLException {
+ ((CallableStatement)stmt).setBinaryStream(parameterName, x);
+ }
+
+ @Override
+ public void setCharacterStream(String parameterName, Reader reader) throws SQLException {
+ ((CallableStatement)stmt).setCharacterStream(parameterName, reader);
+ }
+
+ @Override
+ public void setNCharacterStream(String parameterName, Reader value) throws SQLException {
+ ((CallableStatement)stmt).setNCharacterStream(parameterName, value);
+ }
+
+ @Override
+ public void setClob(String parameterName, Reader reader) throws SQLException {
+ ((CallableStatement)stmt).setClob(parameterName, reader);
+ }
+
+ @Override
+ public void setBlob(String parameterName, InputStream inputStream) throws SQLException {
+ ((CallableStatement)stmt).setBlob(parameterName, inputStream);
+ }
+
+ @Override
+ public void setNClob(String parameterName, Reader reader) throws SQLException {
+ ((CallableStatement)stmt).setNClob(parameterName, reader);
+ }
+
+ @Override
+ public <T> T getObject(int parameterIndex, Class<T> type) throws SQLException {
+ return ((CallableStatement)stmt).getObject(parameterIndex, type);
+ }
+
+ @Override
+ public <T> T getObject(String parameterName, Class<T> type) throws SQLException {
+ return ((CallableStatement)stmt).getObject(parameterName, type);
+ }
+
	/**
	 * Ask the MusicSqlManager to re-synchronize its table metadata after a DDL
	 * statement. Triggered when the SQL text starts with "create"
	 * (case-insensitive) -- NOTE(review): DROP/ALTER are not matched; confirm
	 * whether that is intentional.
	 *
	 * @param sql the SQL just executed; {@code null} also triggers a sync
	 *            (presumably a "force refresh" convention -- TODO confirm)
	 */
	private void synchronizeTables(String sql) {
		if (sql == null || sql.trim().toLowerCase().startsWith("create")) {
			if (mgr != null) {
				try {
					mgr.synchronizeTables();
				} catch (QueryException e) {
					// NOTE(review): failure is only printed, never propagated or
					// logged through the project logger; callers cannot observe it.
					e.printStackTrace();
				}
			}
		}
	}
}
+}
diff --git a/src/main/java/com/att/research/mdbc/Range.java b/src/main/java/com/att/research/mdbc/Range.java
new file mode 100644
index 0000000..4d80a51
--- /dev/null
+++ b/src/main/java/com/att/research/mdbc/Range.java
@@ -0,0 +1,34 @@
+package com.att.research.mdbc;
+
+import java.io.Serializable;
+
+
+/**
+ * This class represent a range of the whole database
+ * For now a range represents directly a table in Cassandra
+ * In the future we may decide to partition ranges differently
+ * @author Enrique Saurez
+ */
public class Range implements Serializable {

	private static final long serialVersionUID = 1610744496930800088L;

	/** Name of the single table this range covers. */
	final public String table;

	/**
	 * Build a range covering exactly one table.
	 * @param table the table name
	 */
	public Range(String table) {
		this.table = table;
	}

	/**
	 * Compares two Range objects by table name.
	 * <p>Fix: the original compared the {@code String} fields with {@code ==}
	 * (reference identity), which is only correct for interned literals and
	 * silently fails for strings built at runtime (e.g. parsed from JSON or
	 * split from a comma-separated list).</p>
	 * @param other the other range against which this is compared; may be null
	 * @return the equality result (false for null)
	 */
	public boolean equal(Range other) {
		return other != null
				&& (table == other.table || (table != null && table.equals(other.table)));
	}

	/**
	 * Check whether two ranges overlap. Since a range is currently exactly one
	 * table, overlap is the same as table-name equality.
	 * @param other the other range; may be null
	 * @return true if both ranges cover the same table
	 */
	public boolean overlaps(Range other) {
		return equal(other);
	}

	/**
	 * Value-based equality so that Range behaves correctly in hash-based
	 * collections (ranges are stored in a {@code HashSet} by the
	 * configuration code).
	 */
	@Override
	public boolean equals(Object o) {
		if (this == o) {
			return true;
		}
		if (!(o instanceof Range)) {
			return false;
		}
		Range other = (Range) o;
		return table == other.table || (table != null && table.equals(other.table));
	}

	/** Hash code consistent with {@link #equals(Object)}. */
	@Override
	public int hashCode() {
		return table == null ? 0 : table.hashCode();
	}
}
diff --git a/src/main/java/com/att/research/mdbc/RedoRow.java b/src/main/java/com/att/research/mdbc/RedoRow.java
new file mode 100644
index 0000000..c024fe7
--- /dev/null
+++ b/src/main/java/com/att/research/mdbc/RedoRow.java
@@ -0,0 +1,29 @@
+package com.att.research.mdbc;
+
/**
 * A (table name, row index) pair identifying one row in a redo table.
 * Plain mutable holder with getters/setters for both fields.
 */
public class RedoRow {
	/** Name of the redo table the row lives in. */
	private String redoTableName;
	/** Row key within that redo table. */
	private String redoRowIndex;

	/** No-arg constructor; both fields start out {@code null}. */
	public RedoRow() {
	}

	/**
	 * Fully-initializing constructor.
	 * @param redoTableName name of the redo table
	 * @param redoRowIndex row key within the table
	 */
	public RedoRow(String redoTableName, String redoRowIndex) {
		this.redoTableName = redoTableName;
		this.redoRowIndex = redoRowIndex;
	}

	public String getRedoTableName() {
		return this.redoTableName;
	}

	public String getRedoRowIndex() {
		return this.redoRowIndex;
	}

	public void setRedoTableName(String redoTableName) {
		this.redoTableName = redoTableName;
	}

	public void setRedoRowIndex(String redoRowIndex) {
		this.redoRowIndex = redoRowIndex;
	}
}
diff --git a/src/main/java/com/att/research/mdbc/StateManager.java b/src/main/java/com/att/research/mdbc/StateManager.java
new file mode 100644
index 0000000..accd13a
--- /dev/null
+++ b/src/main/java/com/att/research/mdbc/StateManager.java
@@ -0,0 +1,205 @@
+package com.att.research.mdbc;
+
+import com.att.research.exceptions.MDBCServiceException;
+import com.att.research.logging.EELFLoggerDelegate;
+import com.att.research.logging.format.AppMessages;
+import com.att.research.logging.format.ErrorSeverity;
+import com.att.research.logging.format.ErrorTypes;
+import com.att.research.mdbc.mixins.MixinFactory;
+import com.att.research.mdbc.mixins.MusicInterface;
+import com.att.research.mdbc.mixins.MusicMixin;
+import com.att.research.mdbc.mixins.TxCommitProgress;
+
+import java.sql.Connection;
+import java.sql.DriverManager;
+import java.sql.SQLException;
+import java.sql.Statement;
+import java.util.HashMap;
+import java.util.Map;
+import java.util.Properties;
+
+/**
+ * \TODO Implement an interface for the server logic and a factory
+ * @author Enrique Saurez
+ */
+public class StateManager {
+
+ //\TODO We need to fix the auto-commit mode and multiple transactions with the same connection
+
+ private static EELFLoggerDelegate logger = EELFLoggerDelegate.getLogger(StateManager.class);
+
+ /**
+ * This is the interface used by all the MusicSqlManagers,
+ * that are created by the MDBC Server
+ * @see MusicInterface
+ */
+ private MusicInterface musicManager;
+ /**
+ * This is the Running Queries information table.
+ * It mainly contains information about the entities
+ * that have being committed so far.
+ */
+ private TxCommitProgress transactionInfo;
+
+ private Map<String,MdbcConnection> mdbcConnections;
+
+ private String sqlDatabase;
+
+ private String url;
+
+ private Properties info;
+
+ @SuppressWarnings("unused")
+ private DatabasePartition ranges;
+
+ public StateManager(String url, Properties info, DatabasePartition ranges, String sqlDatabase) throws MDBCServiceException {
+ this.sqlDatabase=sqlDatabase;
+ this.ranges=ranges;
+ this.url = url;
+ this.info = info;
+ this.transactionInfo = new TxCommitProgress();
+ //\fixme this is not really used, delete!
+ String cassandraUrl = info.getProperty(Configuration.KEY_CASSANDRA_URL, Configuration.CASSANDRA_URL_DEFAULT);
+ String mixin = info.getProperty(Configuration.KEY_MUSIC_MIXIN_NAME, Configuration.MUSIC_MIXIN_DEFAULT);
+ this.musicManager = MixinFactory.createMusicInterface(mixin, cassandraUrl, info,ranges);
+ this.musicManager.createKeyspace();
+ try {
+ this.musicManager.initializeMdbcDataStructures();
+ } catch (MDBCServiceException e) {
+ logger.error(EELFLoggerDelegate.errorLogger, e.getMessage(),AppMessages.UNKNOWNERROR, ErrorSeverity.CRITICAL, ErrorTypes.GENERALSERVICEERROR);
+ throw(e);
+ }
+ MusicMixin.loadProperties();
+ this.mdbcConnections = new HashMap<>();
+ initSqlDatabase();
+ }
+
+ protected void initSqlDatabase() throws MDBCServiceException {
+ try {
+ //\TODO: pass the driver as a variable
+ Class.forName("org.mariadb.jdbc.Driver");
+ }
+ catch (ClassNotFoundException e) {
+ logger.error(EELFLoggerDelegate.errorLogger, e.getMessage(),AppMessages.UNKNOWNERROR, ErrorSeverity.CRITICAL, ErrorTypes.GENERALSERVICEERROR);
+ return;
+ }
+ try {
+ Connection sqlConnection = DriverManager.getConnection(this.url, this.info);
+ StringBuilder sql = new StringBuilder("CREATE DATABASE IF NOT EXISTS ")
+ .append(sqlDatabase)
+ .append(";");
+ Statement stmt = sqlConnection.createStatement();
+ stmt.execute(sql.toString());
+ } catch (SQLException e) {
+ logger.error(EELFLoggerDelegate.errorLogger, e.getMessage(),AppMessages.UNKNOWNERROR, ErrorSeverity.CRITICAL, ErrorTypes.GENERALSERVICEERROR);
+ throw new MDBCServiceException(e.getMessage());
+ }
+ }
+
+ public void CloseConnection(String connectionId){
+ //\TODO check if there is a race condition
+ if(mdbcConnections.containsKey(connectionId)) {
+ transactionInfo.deleteTxProgress(connectionId);
+ try {
+ Connection conn = mdbcConnections.get(connectionId);
+ if(conn!=null)
+ conn.close();
+ } catch (SQLException e) {
+ logger.error(EELFLoggerDelegate.errorLogger, e.getMessage(),AppMessages.UNKNOWNERROR, ErrorSeverity.CRITICAL, ErrorTypes.GENERALSERVICEERROR);
+ }
+ mdbcConnections.remove(connectionId);
+ }
+ }
+
+ public void OpenConnection(String id, Properties information){
+ if(!mdbcConnections.containsKey(id)){
+ Connection sqlConnection;
+ MdbcConnection newConnection;
+ //Create connection to local SQL DB
+ //\TODO: create function to generate connection outside of open connection and get connection
+ try {
+ //\TODO: pass the driver as a variable
+ Class.forName("org.mariadb.jdbc.Driver");
+ }
+ catch (ClassNotFoundException e) {
+ // TODO Auto-generated catch block
+ logger.error(EELFLoggerDelegate.errorLogger, e.getMessage(),AppMessages.UNKNOWNERROR, ErrorSeverity.CRITICAL, ErrorTypes.GENERALSERVICEERROR);
+ return;
+ }
+ try {
+ sqlConnection = DriverManager.getConnection(this.url+"/"+this.sqlDatabase, this.info);
+ } catch (SQLException e) {
+ logger.error(EELFLoggerDelegate.errorLogger, e.getMessage(),AppMessages.QUERYERROR, ErrorSeverity.CRITICAL, ErrorTypes.QUERYERROR);
+ sqlConnection = null;
+ }
+ //Create MDBC connection
+ try {
+ newConnection = new MdbcConnection(id, this.url+"/"+this.sqlDatabase, sqlConnection, info, this.musicManager, transactionInfo,ranges);
+ } catch (MDBCServiceException e) {
+ logger.error(EELFLoggerDelegate.errorLogger, e.getMessage(),AppMessages.UNKNOWNERROR, ErrorSeverity.CRITICAL, ErrorTypes.QUERYERROR);
+ newConnection = null;
+ return;
+ }
+ logger.info(EELFLoggerDelegate.applicationLogger,"Connection created for connection: "+id);
+ transactionInfo.createNewTransactionTracker(id, sqlConnection);
+ if(newConnection != null) {
+ mdbcConnections.put(id,newConnection);
+ }
+ }
+ }
+
+ /**
+ * This function returns the connection to the corresponding transaction
+ * @param id of the transaction, created using
+ * @return
+ */
+ public Connection GetConnection(String id) {
+ if(mdbcConnections.containsKey(id)) {
+ //\TODO: Verify if this make sense
+ // Intent: reinitialize transaction progress, when it already completed the previous tx for the same connection
+ if(transactionInfo.isComplete(id)) {
+ transactionInfo.reinitializeTxProgress(id);
+ }
+ return mdbcConnections.get(id);
+ }
+
+ Connection sqlConnection;
+ MdbcConnection newConnection;
+ try {
+ //TODO: pass the driver as a variable
+ Class.forName("org.mariadb.jdbc.Driver");
+ }
+ catch (ClassNotFoundException e) {
+ // TODO Auto-generated catch block
+ logger.error(EELFLoggerDelegate.errorLogger, e.getMessage(),AppMessages.QUERYERROR, ErrorSeverity.CRITICAL, ErrorTypes.QUERYERROR);
+ }
+
+ //Create connection to local SQL DB
+ try {
+ sqlConnection = DriverManager.getConnection(this.url+"/"+this.sqlDatabase, this.info);
+ } catch (SQLException e) {
+ logger.error("sql connection was not created correctly");
+ logger.error(EELFLoggerDelegate.errorLogger, e.getMessage(),AppMessages.QUERYERROR, ErrorSeverity.CRITICAL, ErrorTypes.QUERYERROR);
+ sqlConnection = null;
+ }
+ //Create MDBC connection
+ try {
+ newConnection = new MdbcConnection(id,this.url+"/"+this.sqlDatabase, sqlConnection, info, this.musicManager, transactionInfo,ranges);
+ } catch (MDBCServiceException e) {
+ logger.error(EELFLoggerDelegate.errorLogger, e.getMessage(),AppMessages.UNKNOWNERROR, ErrorSeverity.CRITICAL, ErrorTypes.QUERYERROR);
+ newConnection = null;
+ }
+ logger.info(EELFLoggerDelegate.applicationLogger,"Connection created for connection: "+id);
+
+ transactionInfo.createNewTransactionTracker(id, sqlConnection);
+ if(newConnection != null) {
+ mdbcConnections.put(id,newConnection);
+ }
+ return newConnection;
+ }
+
+ public void InitializeSystem() {
+ //\TODO Prefetch data to system using the data ranges as guide
+ throw new UnsupportedOperationException("Function initialize system needs to be implemented id MdbcStateManager");
+ }
+}
diff --git a/src/main/java/com/att/research/mdbc/TableInfo.java b/src/main/java/com/att/research/mdbc/TableInfo.java
new file mode 100755
index 0000000..583ba73
--- /dev/null
+++ b/src/main/java/com/att/research/mdbc/TableInfo.java
@@ -0,0 +1,75 @@
+package com.att.research.mdbc;
+
+import java.sql.Types;
+import java.util.ArrayList;
+import java.util.List;
+
+/**
+ * Information about a table in the local database. It consists of three ordered list, which should all have the
+ * same length. A list of column names, a list of DB column types, and a list of booleans specifying which columns are keys.
+ * @author Robert P. Eby
+ */
public class TableInfo {
	/** An ordered list of the column names in this table */
	public List<String> columns;
	/** An ordered list of the column types in this table; the types are integers taken from {@link java.sql.Types}. */
	public List<Integer> coltype;
	/** An ordered list of booleans indicating if a column is a primary key column or not. */
	public List<Boolean> iskey;

	/** Construct an (initially) empty TableInfo. */
	public TableInfo() {
		this.columns = new ArrayList<String>();
		this.coltype = new ArrayList<Integer>();
		this.iskey = new ArrayList<Boolean>();
	}

	/**
	 * Check whether the column whose name is <i>name</i> is a primary key column.
	 * Name matching is case-insensitive; unknown columns are not keys.
	 * @param name the column name
	 * @return true if it is, false otherwise
	 */
	public boolean iskey(String name) {
		int n = columns.size();
		for (int idx = 0; idx < n; idx++) {
			if (columns.get(idx).equalsIgnoreCase(name)) {
				return iskey.get(idx);
			}
		}
		return false;
	}

	/**
	 * Get the {@link java.sql.Types} constant of the column whose name is
	 * <i>name</i> (case-insensitive), or {@code Types.NULL} if unknown.
	 * @param name the column name
	 * @return the column type or Types.NULL
	 */
	public int getColType(String name) {
		int idx = 0;
		for (String col : columns) {
			if (col.equalsIgnoreCase(name)) {
				return coltype.get(idx);
			}
			idx++;
		}
		return Types.NULL;
	}

	/**
	 * Checks if this table has at least one primary key column.
	 * @return true if any entry of {@link #iskey} is set
	 */
	public boolean hasKey() {
		for (int i = 0; i < iskey.size(); i++) {
			if (iskey.get(i)) {
				return true;
			}
		}
		return false;
	}

	/**
	 * Collect the names of all primary key columns, in column order.
	 * @return a freshly allocated list of key column names
	 */
	public List<String> getKeyColumns() {
		List<String> keyColumns = new ArrayList<String>();
		for (int i = 0; i < iskey.size(); i++) {
			if (iskey.get(i)) {
				keyColumns.add(columns.get(i));
			}
		}
		return keyColumns;
	}
}
diff --git a/src/main/java/com/att/research/mdbc/configurations/NodeConfiguration.java b/src/main/java/com/att/research/mdbc/configurations/NodeConfiguration.java
new file mode 100644
index 0000000..78850e3
--- /dev/null
+++ b/src/main/java/com/att/research/mdbc/configurations/NodeConfiguration.java
@@ -0,0 +1,71 @@
+package com.att.research.mdbc.configurations;
+
+import com.att.research.logging.EELFLoggerDelegate;
+import com.att.research.mdbc.DatabasePartition;
+import com.att.research.mdbc.MDBCUtils;
+import com.att.research.mdbc.Range;
+import com.google.gson.Gson;
+import com.google.gson.GsonBuilder;
+
+import java.io.BufferedReader;
+import java.io.FileNotFoundException;
+import java.io.FileReader;
+import java.io.IOException;
+import java.util.HashSet;
+import java.util.Set;
+
+public class NodeConfiguration {
+
+ private static transient final EELFLoggerDelegate LOG = EELFLoggerDelegate.getLogger(NodeConfiguration.class);
+
+ public String sqlDatabaseName;
+ public DatabasePartition partition;
+ public String nodeName;
+
+ public NodeConfiguration(String tables, String titIndex, String titTableName, String partitionId, String sqlDatabaseName, String node, String redoRecordsTable){
+ partition = new DatabasePartition(toRanges(tables), titIndex, titTableName, partitionId, null, redoRecordsTable) ;
+ this.sqlDatabaseName = sqlDatabaseName;
+ this.nodeName = node;
+ }
+
+ protected Set<Range> toRanges(String tables){
+ Set<Range> newRange = new HashSet<>();
+ String[] tablesArray=tables.split(",");
+ for(String table: tablesArray) {
+ newRange.add(new Range(table));
+ }
+ return newRange;
+ }
+
+ public String toJson() {
+ GsonBuilder builder = new GsonBuilder();
+ builder.setPrettyPrinting().serializeNulls();;
+ Gson gson = builder.create();
+ return gson.toJson(this);
+ }
+
+ public void saveToFile(String file){
+ try {
+ String serialized = this.toJson();
+ MDBCUtils.saveToFile(serialized,file,LOG);
+ } catch (IOException e) {
+ e.printStackTrace();
+ // Exit with error
+ System.exit(1);
+ }
+ }
+
+ public static NodeConfiguration readJsonFromFile( String filepath) throws FileNotFoundException {
+ BufferedReader br;
+ try {
+ br = new BufferedReader(
+ new FileReader(filepath));
+ } catch (FileNotFoundException e) {
+ LOG.error(EELFLoggerDelegate.errorLogger,"File was not found when reading json"+e);
+ throw e;
+ }
+ Gson gson = new Gson();
+ NodeConfiguration config = gson.fromJson(br, NodeConfiguration.class);
+ return config;
+ }
+}
diff --git a/src/main/java/com/att/research/mdbc/configurations/TablesConfiguration.java b/src/main/java/com/att/research/mdbc/configurations/TablesConfiguration.java
new file mode 100644
index 0000000..0d28b51
--- /dev/null
+++ b/src/main/java/com/att/research/mdbc/configurations/TablesConfiguration.java
@@ -0,0 +1,180 @@
+package com.att.research.mdbc.configurations;
+
+import com.att.research.exceptions.MDBCServiceException;
+import com.att.research.logging.EELFLoggerDelegate;
+import com.att.research.mdbc.DatabaseOperations;
+import com.att.research.mdbc.RedoRow;
+import com.att.research.mdbc.mixins.CassandraMixin;
+import com.google.gson.Gson;
+import org.onap.music.datastore.PreparedQueryObject;
+import org.onap.music.exceptions.MusicServiceException;
+import org.onap.music.main.MusicPureCassaCore;
+
+import java.io.BufferedReader;
+import java.io.FileNotFoundException;
+import java.io.FileReader;
+import java.util.ArrayList;
+import java.util.List;
+
/**
 * Bootstrap configuration for the MUSIC-side tables used by MDBC.
 * Loaded from JSON (see tableConfiguration.json); drives creation of the
 * internal namespace, the shared bookkeeping tables, and one partition row
 * per entry in {@link #partitions}.
 */
public class TablesConfiguration {

	// Default table names used when a partition entry leaves them blank.
	private final String TIT_TABLE_NAME = "transactioninformation";
	private final String REDO_RECORDS_NAME = "redorecords";

	private transient static EELFLoggerDelegate logger = EELFLoggerDelegate.getLogger(TablesConfiguration.class);
	// All fields below are populated by Gson from the JSON configuration file.
	private List<PartitionInformation> partitions;
	private String internalNamespace;
	private int internalReplicationFactor;
	private String musicNamespace;
	private String tableToPartitionName;
	private String partitionInformationTableName;
	private String redoHistoryTableName;
	private String sqlDatabaseName;

	/** No-arg constructor for Gson deserialization. */
	public TablesConfiguration(){}

	/**
	 * This function initializes all the corresponding tables and rows.
	 * The steps below are order-dependent: the bookkeeping tables must exist
	 * before rows referencing them are created.
	 * @return a list of node configurations to be used when starting each of the servers
	 * @throws MDBCServiceException
	 * @apiNote This function assumes that when used, there is not associated redo history in the tables to the tables that are going to be managed by this configuration file
	 */
	public List<NodeConfiguration> initializeAndCreateNodeConfigurations() throws MDBCServiceException {
		initInternalNamespace();
		DatabaseOperations.createNamespace(musicNamespace, internalReplicationFactor);
		List<NodeConfiguration> nodeConfigs = new ArrayList<>();
		// Fall back to the CassandraMixin defaults when a table name is absent.
		String ttpName = (tableToPartitionName==null || tableToPartitionName.isEmpty())?CassandraMixin.TABLE_TO_PARTITION_TABLE_NAME:tableToPartitionName;
		DatabaseOperations.CreateTableToPartitionTable(musicNamespace,ttpName);
		String pitName = (partitionInformationTableName==null || partitionInformationTableName.isEmpty())?CassandraMixin.PARTITION_INFORMATION_TABLE_NAME:partitionInformationTableName;
		DatabaseOperations.CreatePartitionInfoTable(musicNamespace,pitName);
		String rhName = (redoHistoryTableName==null || redoHistoryTableName.isEmpty())?CassandraMixin.REDO_HISTORY_TABLE_NAME:redoHistoryTableName;
		DatabaseOperations.CreateRedoHistoryTable(musicNamespace,rhName);
		if(partitions == null){
			logger.error("Partitions was not correctly initialized");
			throw new MDBCServiceException("Partition was not correctly initialized");
		}
		for(PartitionInformation partitionInfo : partitions){
			String titTableName = partitionInfo.titTableName;
			titTableName = (titTableName==null || titTableName.isEmpty())?TIT_TABLE_NAME:titTableName;
			//0) Create the corresponding TIT table
			DatabaseOperations.CreateTransactionInformationTable(musicNamespace,titTableName);
			String redoRecordsName = partitionInfo.rrtTableName;
			redoRecordsName = (redoRecordsName==null || redoRecordsName.isEmpty())?REDO_RECORDS_NAME:redoRecordsName;
			// NOTE(review): the first argument (-1) presumably means "no specific
			// connection/index" -- confirm against DatabaseOperations.
			DatabaseOperations.CreateRedoRecordsTable(-1,musicNamespace,redoRecordsName);
			// Resolve the partition id: reuse the configured one, or create a
			// new partition-info row when none was supplied.
			String partitionId;
			if(partitionInfo.partitionId==null || partitionInfo.partitionId.isEmpty()){
				if(partitionInfo.replicationFactor==0){
					logger.error("Replication factor and partition id are both empty, and this is an invalid configuration" );
					throw new MDBCServiceException("Replication factor and partition id are both empty, and this is an invalid configuration");
				}
				//1) Create a row in the partition info table
				partitionId = DatabaseOperations.createPartitionInfoRow(musicNamespace,pitName,partitionInfo.replicationFactor,partitionInfo.tables,null);

			}
			else{
				partitionId = partitionInfo.partitionId;
			}
			//2) Create a row in the transaction information table
			String titIndex = DatabaseOperations.CreateEmptyTitRow(musicNamespace,titTableName,partitionId,null);
			//3) Add owner and tit information to partition info table
			RedoRow newRedoRow = new RedoRow(titTableName,titIndex);
			DatabaseOperations.updateRedoRow(musicNamespace,pitName,partitionId,newRedoRow,partitionInfo.owner,null);
			//4) Update ttp with the new partition
			for(String table: partitionInfo.tables) {
				DatabaseOperations.updateTableToPartition(musicNamespace, ttpName, table, partitionId, null);
			}
			//5) Add it to the redo history table
			DatabaseOperations.createRedoHistoryBeginRow(musicNamespace,rhName,newRedoRow,partitionId,null);
			//6) Create config for this node
			nodeConfigs.add(new NodeConfiguration(String.join(",",partitionInfo.tables),titIndex,titTableName,partitionId,sqlDatabaseName,partitionInfo.owner,redoRecordsName));
		}
		return nodeConfigs;
	}

	/**
	 * Create the internal namespace and its "unsynced_keys" table in MUSIC.
	 * @throws MDBCServiceException if the table cannot be created
	 */
	private void initInternalNamespace() throws MDBCServiceException {
		DatabaseOperations.createNamespace(internalNamespace,internalReplicationFactor);
		StringBuilder createKeysTableCql = new StringBuilder("CREATE TABLE IF NOT EXISTS ")
				.append(internalNamespace)
				.append(".unsynced_keys (key text PRIMARY KEY);");
		PreparedQueryObject queryObject = new PreparedQueryObject();
		queryObject.appendQueryString(createKeysTableCql.toString());
		try {
			// "critical" is presumably the MUSIC consistency level -- TODO confirm.
			MusicPureCassaCore.createTable(internalNamespace,"unsynced_keys", queryObject,"critical");
		} catch (MusicServiceException e) {
			logger.error("Error creating unsynced keys table" );
			throw new MDBCServiceException("Error creating unsynced keys table");
		}
	}

	/**
	 * Read a TablesConfiguration from a JSON file.
	 * @param filepath path of the JSON file
	 * @return the parsed configuration
	 * @throws FileNotFoundException if the file does not exist (also logged)
	 */
	public static TablesConfiguration readJsonFromFile(String filepath) throws FileNotFoundException {
		BufferedReader br;
		try {
			br = new BufferedReader(
					new FileReader(filepath));
		} catch (FileNotFoundException e) {
			logger.error(EELFLoggerDelegate.errorLogger,"File was not found when reading json"+e);
			throw e;
		}
		Gson gson = new Gson();
		TablesConfiguration config = gson.fromJson(br, TablesConfiguration.class);
		return config;
	}

	/**
	 * One partition entry of the JSON configuration: the tables it covers,
	 * its owner, its bookkeeping table names, and either an existing
	 * partition id or a replication factor used to create a new one.
	 */
	public class PartitionInformation{
		private List<String> tables;
		private String owner;
		private String titTableName;
		private String rrtTableName;
		private String partitionId;
		private int replicationFactor;

		public List<String> getTables() {
			return tables;
		}

		public void setTables(List<String> tables) {
			this.tables = tables;
		}

		public String getOwner() {
			return owner;
		}

		public void setOwner(String owner) {
			this.owner = owner;
		}

		public String getTitTableName() {
			return titTableName;
		}

		public void setTitTableName(String titTableName) {
			this.titTableName = titTableName;
		}

		public String getPartitionId() {
			return partitionId;
		}

		public void setPartitionId(String partitionId) {
			this.partitionId = partitionId;
		}

		public int getReplicationFactor() {
			return replicationFactor;
		}

		public void setReplicationFactor(int replicationFactor) {
			this.replicationFactor = replicationFactor;
		}

		public String getRrtTableName(){
			return rrtTableName;
		}

		public void setRrtTableName(String rrtTableName) {
			this.rrtTableName = rrtTableName;
		}
	}
}
diff --git a/src/main/java/com/att/research/mdbc/configurations/config-0.json b/src/main/java/com/att/research/mdbc/configurations/config-0.json
new file mode 100644
index 0000000..96d947c
--- /dev/null
+++ b/src/main/java/com/att/research/mdbc/configurations/config-0.json
@@ -0,0 +1,16 @@
+{
+ "sqlDatabaseName": "test",
+ "partition": {
+ "transactionInformationTable": "transactioninformation",
+ "transactionInformationIndex": "259a7a7c-f741-44ae-8d6e-227a02ddc96e",
+ "redoRecordsTable": "redorecords",
+ "partitionId": "ad766447-1adf-4800-aade-9f31a356ab4b",
+ "lockId": "",
+ "ranges": [
+ {
+ "table": "table11"
+ }
+ ]
+ },
+ "nodeName": ""
+}
diff --git a/src/main/java/com/att/research/mdbc/configurations/ranges.json b/src/main/java/com/att/research/mdbc/configurations/ranges.json
new file mode 100644
index 0000000..afa343b
--- /dev/null
+++ b/src/main/java/com/att/research/mdbc/configurations/ranges.json
@@ -0,0 +1,14 @@
+{
+ "transactionInformationTable": "transactioninformation",
+ "transactionInformationIndex": "d0e8ef2e-aeca-4261-8d9d-1679f560b85b",
+ "partitionId": "798110cf-9c61-4db2-9446-cb2dbab5a143",
+ "lockId": "",
+ "ranges": [
+ {
+ "table": "table1"
+ },
+ {
+ "table": "table2"
+ }
+ ]
+}
diff --git a/src/main/java/com/att/research/mdbc/configurations/tableConfiguration.json b/src/main/java/com/att/research/mdbc/configurations/tableConfiguration.json
new file mode 100644
index 0000000..b3c6224
--- /dev/null
+++ b/src/main/java/com/att/research/mdbc/configurations/tableConfiguration.json
@@ -0,0 +1,19 @@
+{
+ "partitions" : [
+ {
+ "tables":["table11"],
+ "owner":"",
+ "titTableName":"transactioninformation",
+ "rrtTableName":"redorecords",
+ "partitionId":"",
+ "replicationFactor":1
+ }
+ ],
+ "musicNamespace":"namespace",
+ "tableToPartitionName":"tabletopartition",
+ "partitionInformationTableName":"partitioninfo",
+ "redoHistoryTableName":"redohistory",
+ "sqlDatabaseName":"test",
+ "internalNamespace":"music_internal",
+ "internalReplicationFactor":1
+}
diff --git a/src/main/java/com/att/research/mdbc/examples/EtdbTestClient.java b/src/main/java/com/att/research/mdbc/examples/EtdbTestClient.java
new file mode 100644
index 0000000..cb43efe
--- /dev/null
+++ b/src/main/java/com/att/research/mdbc/examples/EtdbTestClient.java
@@ -0,0 +1,125 @@
+package com.att.research.mdbc.examples;
+
+import java.sql.*;
+import org.apache.calcite.avatica.remote.Driver;
+
/**
 * Small manual test client that talks to an MDBC server over the Avatica
 * remote JDBC protocol: creates a Persons table, inserts one row, commits.
 * Fix: the original leaked the Connection and Statements on every error
 * path and closed {@code stmt} twice; resources are now managed with
 * try-with-resources.
 */
public class EtdbTestClient {

	/** Sample data holder (appears unused by main -- presumably kept for Avatica examples). */
	public static class Hr {
		public final Employee[] emps = {
				new Employee(100, "Bill"),
				new Employee(200, "Eric"),
				new Employee(150, "Sebastian"),
		};
	}

	/** Sample record type for {@link Hr}. */
	public static class Employee {
		public final int empid;
		public final String name;

		public Employee(int empid, String name) {
			this.empid = empid;
			this.name = name;
		}
	}

	public static void main(String[] args){
		// Load the Avatica remote driver; nothing can work without it.
		try {
			Class.forName("org.apache.calcite.avatica.remote.Driver");
		} catch (ClassNotFoundException e) {
			e.printStackTrace();
			System.exit(1);
		}

		final String sql = "CREATE TABLE IF NOT EXISTS Persons (\n" +
				"    PersonID int,\n" +
				"    LastName varchar(255),\n" +
				"    FirstName varchar(255),\n" +
				"    Address varchar(255),\n" +
				"    City varchar(255)\n" +
				");";
		final String insertSQL = "INSERT INTO Persons VALUES (1, 'Martinez', 'Juan', 'KACB', 'ATLANTA');";

		try (Connection connection = DriverManager.getConnection(
				"jdbc:avatica:remote:url=http://localhost:30000;serialization=protobuf")) {
			connection.setAutoCommit(false);

			// Create the table. The original only committed when execute()
			// returned true (i.e. a result set was produced); preserved here.
			try (Statement stmt = connection.createStatement()) {
				if (stmt.execute(sql)) {
					connection.commit();
				}
			}

			// Insert one row and commit it.
			try (Statement insertStmt = connection.createStatement()) {
				insertStmt.execute(insertSQL);
				connection.commit();
			}

			// Final commit before closing, matching the original flow.
			connection.commit();
		} catch (SQLException e) {
			// Single error path: print and fall through; try-with-resources
			// guarantees the statements and the connection are closed.
			e.printStackTrace();
		}
	}
}
diff --git a/src/main/java/com/att/research/mdbc/mixins/Cassandra2Mixin.java b/src/main/java/com/att/research/mdbc/mixins/Cassandra2Mixin.java
new file mode 100755
index 0000000..cc67edf
--- /dev/null
+++ b/src/main/java/com/att/research/mdbc/mixins/Cassandra2Mixin.java
@@ -0,0 +1,287 @@
+package com.att.research.mdbc.mixins;
+
+import java.sql.Types;
+import java.util.ArrayList;
+import java.util.Date;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.Properties;
+
+import org.json.JSONObject;
+import org.json.JSONTokener;
+import org.onap.music.datastore.PreparedQueryObject;
+import org.onap.music.exceptions.MusicServiceException;
+import org.onap.music.main.MusicPureCassaCore;
+import org.onap.music.main.ReturnType;
+
+import com.att.research.logging.EELFLoggerDelegate;
+import com.att.research.mdbc.DatabasePartition;
+import com.att.research.mdbc.TableInfo;
+import com.datastax.driver.core.ResultSet;
+import com.datastax.driver.core.Row;
+
+/**
+ * This class provides the methods that MDBC needs to access Cassandra directly in order to provide persistence
+ * to calls to the user's DB. It stores dirty row references in one table (called DIRTY____) rather than one dirty
+ * table per real table (as {@link com.att.research.mdbc.mixins.CassandraMixin} does).
+ *
+ * @author Robert P. Eby
+ */
+public class Cassandra2Mixin extends CassandraMixin {
+ private static final String DIRTY_TABLE = "DIRTY____"; // it seems Cassandra won't allow __DIRTY__
+ private boolean dirty_table_created = false;
+
+ private EELFLoggerDelegate logger = EELFLoggerDelegate.getLogger(Cassandra2Mixin.class);
+
+ /** No-arg constructor (used for reflective/default instantiation); delegates to {@link CassandraMixin#CassandraMixin()}. */
+ public Cassandra2Mixin() {
+ super();
+ }
+
+ /**
+ * Construct a mixin bound to the given connection url/properties.
+ * @param url JDBC url of the underlying DB connection
+ * @param info driver properties (replica id, MUSIC address, namespace, etc.)
+ * @param ranges the database partition owned by this replica
+ * @throws MusicServiceException if keyspace creation in the superclass fails
+ */
+ public Cassandra2Mixin(String url, Properties info, DatabasePartition ranges) throws MusicServiceException {
+ super(url, info,ranges);
+ }
+
+ /**
+ * Get the name of this MusicInterface mixin object.
+ * @return the name "cassandra2", used to select this mixin from configuration
+ */
+ @Override
+ public String getMixinName() {
+ return "cassandra2";
+ }
+ /**
+ * Do what is needed to close down the MUSIC connection.
+ * Delegates to the superclass, which closes the Cassandra session if open.
+ */
+ @Override
+ public void close() {
+ super.close();
+ }
+
+ /**
+ * This method creates a keyspace in Music/Cassandra to store the data corresponding to the SQL tables.
+ * The keyspace name comes from the initialization properties passed to the JDBC driver.
+ * Delegates to the superclass implementation unchanged.
+ */
+ @Override
+ public void createKeyspace() {
+ super.createKeyspace();
+ }
+
+ /**
+ * This method performs all necessary initialization in Music/Cassandra to store the table <i>tableName</i>.
+ * Delegates to the superclass implementation unchanged.
+ * @param ti column/key metadata for the table
+ * @param tableName the table to initialize MUSIC for
+ */
+ @Override
+ public void initializeMusicForTable(TableInfo ti, String tableName) {
+ super.initializeMusicForTable(ti, tableName);
+ }
+
+ /**
+ * Create the single shared <i>dirty row</i> table (named DIRTY____) used for ALL SQL tables, keyed by
+ * (tablename, replica, keyset). Unlike {@link CassandraMixin#createDirtyRowTable(TableInfo, String)},
+ * no per-table dirty table is created, so both parameters are unused here.
+ * NOTE(review): the dirty_table_created guard is a plain boolean, not thread-safe -- confirm
+ * single-threaded initialization before relying on it.
+ * @param ti unused (kept for the MusicInterface contract)
+ * @param tableName unused (kept for the MusicInterface contract)
+ */
+ @Override
+ public void createDirtyRowTable(TableInfo ti, String tableName) {
+ if (!dirty_table_created) {
+ String cql = String.format("CREATE TABLE IF NOT EXISTS %s.%s (tablename TEXT, replica TEXT, keyset TEXT, PRIMARY KEY(tablename, replica, keyset));", music_ns, DIRTY_TABLE);
+ executeMusicWriteQuery(cql);
+ dirty_table_created = true;
+ }
+ }
+ /**
+ * Drop the dirty row table for <i>tableName</i> from MUSIC.
+ * Intentionally a no-op in this mixin: the shared DIRTY____ table serves every SQL table,
+ * so dropping one SQL table must not remove it.
+ * @param tableName the table being dropped (unused)
+ */
+ @Override
+ public void dropDirtyRowTable(String tableName) {
+ // no-op
+ }
+
+ /**
+ * Serialize the primary-key values of a row into the JSON "keyset" string stored in DIRTY____.
+ * Key names follow ti.columns order; <code>keys</code> must hold the key values in that same order.
+ * @param ti column/key metadata for the table
+ * @param tableName unused -- kept for call-site symmetry; TODO confirm before removing
+ * @param keys the primary-key values only, in key-column order
+ * @return JSON text mapping key column name to value
+ */
+ private String buildJSON(TableInfo ti, String tableName, Object[] keys) {
+ // Build JSON string representing this keyset
+ JSONObject jo = new JSONObject();
+ int j = 0;
+ for (int i = 0; i < ti.columns.size(); i++) {
+ if (ti.iskey.get(i)) {
+ jo.put(ti.columns.get(i), keys[j++]);
+ }
+ }
+ return jo.toString();
+ }
+ /**
+ * Remove the entries from the shared dirty table (for this replica) that correspond to a set of primary keys.
+ * @param ti column/key metadata for the table (unused here; the keyset is matched as opaque text)
+ * @param tableName the table we are removing dirty entries from
+ * @param keys the primary key values to use in the DELETE. Note: this is *only* the primary keys, not a full table row.
+ */
+ @Override
+ public void cleanDirtyRow(TableInfo ti, String tableName, JSONObject keys) {
+ String cql = String.format("DELETE FROM %s.%s WHERE tablename = ? AND replica = ? AND keyset = ?;", music_ns, DIRTY_TABLE);
+ logger.debug(EELFLoggerDelegate.applicationLogger,"Executing MUSIC write:"+ cql + " with values " + tableName + " " + myId + " " + keys);
+
+ PreparedQueryObject pQueryObject = new PreparedQueryObject();
+ pQueryObject.appendQueryString(cql);
+ pQueryObject.addValue(tableName);
+ pQueryObject.addValue(myId);
+ // NOTE(review): 'keyset' is a TEXT column and rows are inserted with the buildJSON() string;
+ // binding the JSONObject assumes it serializes identically -- confirm, else bind keys.toString()
+ // built with the same column ordering as buildJSON().
+ pQueryObject.addValue(keys);
+ ReturnType rt = MusicPureCassaCore.eventualPut(pQueryObject);
+ // equalsIgnoreCase replaces toLowerCase().equals(): no throwaway string, and null-safe on the result.
+ if ("failure".equalsIgnoreCase(rt.getResult().getResult())) {
+ logger.error(EELFLoggerDelegate.errorLogger, "Failure while eventualPut...: "+rt.getMessage());
+ }
+ }
+ /**
+ * Get a list of "dirty rows" for a table. The dirty rows returned apply only to this replica,
+ * and consist of a Map of primary key column names and values, decoded from the stored JSON keyset
+ * using the column types in <code>ti</code>.
+ * @param ti column/key metadata used to decode the stored keysets
+ * @param tableName the table we are querying for
+ * @return a list of maps; each list item is a map of the primary key names and values for that "dirty row".
+ *         Empty when there are no dirty rows or when the MUSIC read fails.
+ */
+ @SuppressWarnings("deprecation")
+ @Override
+ public List<Map<String,Object>> getDirtyRows(TableInfo ti, String tableName) {
+ String cql = String.format("SELECT keyset FROM %s.%s WHERE tablename = ? AND replica = ?;", music_ns, DIRTY_TABLE);
+ logger.debug(EELFLoggerDelegate.applicationLogger,"Executing MUSIC read:"+ cql + " with values " + tableName + " " + myId);
+
+ PreparedQueryObject pQueryObject = new PreparedQueryObject();
+ pQueryObject.appendQueryString(cql);
+ pQueryObject.addValue(tableName);
+ pQueryObject.addValue(myId);
+ ResultSet results = null;
+ try {
+ results = MusicPureCassaCore.get(pQueryObject);
+ } catch (MusicServiceException e) {
+ // Previously the exception was swallowed and the null ResultSet NPE'd in the loop below.
+ logger.error(EELFLoggerDelegate.errorLogger, "getDirtyRows: MUSIC read failed: "+e.getMessage());
+ return new ArrayList<Map<String,Object>>();
+ }
+ List<Map<String,Object>> list = new ArrayList<Map<String,Object>>();
+ for (Row row : results) {
+ String json = row.getString("keyset");
+ JSONObject jo = new JSONObject(new JSONTokener(json));
+ Map<String,Object> objs = new HashMap<String,Object>();
+ for (String colname : jo.keySet()) {
+ int coltype = ti.getColType(colname);
+ switch (coltype) {
+ case Types.BIGINT:
+ objs.put(colname, jo.getLong(colname));
+ break;
+ case Types.BOOLEAN:
+ objs.put(colname, jo.getBoolean(colname));
+ break;
+ case Types.BLOB:
+ logger.error(EELFLoggerDelegate.errorLogger,"WE DO NOT SUPPORT BLOBS AS PRIMARY KEYS!! COLUMN NAME="+colname);
+ // throw an exception here???
+ break;
+ case Types.DOUBLE:
+ objs.put(colname, jo.getDouble(colname));
+ break;
+ case Types.INTEGER:
+ objs.put(colname, jo.getInt(colname));
+ break;
+ case Types.TIMESTAMP:
+ // Deprecated Date(String) ctor: matches how the value was stringified on insert; kept for compatibility.
+ objs.put(colname, new Date(jo.getString(colname)));
+ break;
+ case Types.VARCHAR:
+ default:
+ objs.put(colname, jo.getString(colname));
+ break;
+ }
+ }
+ list.add(objs);
+ }
+ return list;
+ }
+
+ /**
+ * Drops the named table and its dirty row table (for all replicas) from MUSIC. The dirty row table is dropped first.
+ * Delegates to the superclass; note that in this mixin dropDirtyRowTable() is a no-op (shared dirty table).
+ * @param tableName This is the table that has been dropped
+ */
+ @Override
+ public void clearMusicForTable(String tableName) {
+ super.clearMusicForTable(tableName);
+ }
+ /**
+ * This function is called whenever there is a DELETE to a row on a local SQL table, wherein it updates the
+ * MUSIC/Cassandra tables (both dirty bits and actual data) corresponding to the SQL write. MUSIC propagates
+ * it to the other replicas.
+ *
+ * @param ti column/key metadata for the table
+ * @param tableName This is the table that has changed.
+ * @param oldRow This is a copy of the old row being deleted
+ */
+ @Override // was missing: this overrides CassandraMixin#deleteFromEntityTableInMusic
+ public void deleteFromEntityTableInMusic(TableInfo ti, String tableName, JSONObject oldRow) {
+ super.deleteFromEntityTableInMusic(ti, tableName, oldRow);
+ }
+ /**
+ * This method is called whenever there is a SELECT on a local SQL table, wherein it first checks the local
+ * dirty bits table to see if there are any keys in Cassandra whose value has not yet been sent to SQL.
+ * Delegates to the superclass implementation unchanged.
+ * @param dbi the SQL-side interface used to apply the pending updates
+ * @param tableName This is the table on which the select is being performed
+ */
+ @Override
+ public void readDirtyRowsAndUpdateDb(DBInterface dbi, String tableName) {
+ super.readDirtyRowsAndUpdateDb(dbi, tableName);
+ }
+
+ /**
+ * This method is called whenever there is an INSERT or UPDATE to a local SQL table, wherein it updates the
+ * MUSIC/Cassandra tables (both dirty bits and actual data) corresponding to the SQL write. Music propagates
+ * it to the other replicas. Delegates to the superclass implementation unchanged.
+ *
+ * @param ti column/key metadata for the table
+ * @param tableName This is the table that has changed.
+ * @param changedRow This is information about the row that has changed
+ */
+ @Override
+ public void updateDirtyRowAndEntityTableInMusic(TableInfo ti, String tableName, JSONObject changedRow) {
+ super.updateDirtyRowAndEntityTableInMusic(ti, tableName, changedRow);
+ }
+
+ /**
+ * Mark rows as "dirty" in the shared dirty table for <i>tableName</i>. Rows are marked for every
+ * replica id in {@code allReplicaIds} (including this one -- callers filter on replica when reading).
+ * @param ti column/key metadata for the table
+ * @param tableName the table we are marking dirty
+ * @param keys an ordered list of the values being put into the table. The values that correspond to the tables'
+ * primary key are copied into the dirty row table (as a JSON keyset).
+ */
+ @Deprecated
+ public void markDirtyRow(TableInfo ti, String tableName, Object[] keys) {
+ String cql = String.format("INSERT INTO %s.%s (tablename, replica, keyset) VALUES (?, ?, ?);", music_ns, DIRTY_TABLE);
+ // Serialize the keyset once; it is identical for every replica (hoisted out of the loop).
+ String keyset = buildJSON(ti, tableName, keys);
+ for (String repl : allReplicaIds) {
+ PreparedQueryObject pQueryObject = new PreparedQueryObject();
+ pQueryObject.appendQueryString(cql);
+ pQueryObject.addValue(tableName);
+ pQueryObject.addValue(repl);
+ pQueryObject.addValue(keyset);
+ ReturnType rt = MusicPureCassaCore.eventualPut(pQueryObject);
+ if ("failure".equalsIgnoreCase(rt.getResult().getResult())) {
+ // was System.out.println with a misleading "critical put" message; this is an eventual put
+ logger.error(EELFLoggerDelegate.errorLogger, "Failure while eventual put..."+rt.getMessage());
+ }
+ }
+ }
+}
diff --git a/src/main/java/com/att/research/mdbc/mixins/CassandraMixin.java b/src/main/java/com/att/research/mdbc/mixins/CassandraMixin.java
new file mode 100755
index 0000000..6684fe6
--- /dev/null
+++ b/src/main/java/com/att/research/mdbc/mixins/CassandraMixin.java
@@ -0,0 +1,1288 @@
+package com.att.research.mdbc.mixins;
+
+import java.io.IOException;
+import java.io.Reader;
+import java.nio.ByteBuffer;
+import java.sql.Types;
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Map;
+import java.util.Properties;
+import java.util.Set;
+import java.util.TreeSet;
+import java.util.UUID;
+
+import com.att.research.mdbc.*;
+import org.json.JSONObject;
+import org.onap.music.datastore.CassaLockStore;
+import org.onap.music.datastore.PreparedQueryObject;
+import org.onap.music.exceptions.MusicLockingException;
+import org.onap.music.exceptions.MusicQueryException;
+import org.onap.music.exceptions.MusicServiceException;
+import org.onap.music.main.MusicPureCassaCore;
+import org.onap.music.main.ResultType;
+import org.onap.music.main.ReturnType;
+
+import com.att.research.exceptions.MDBCServiceException;
+import com.att.research.logging.EELFLoggerDelegate;
+import com.datastax.driver.core.BoundStatement;
+import com.datastax.driver.core.ColumnDefinitions;
+import com.datastax.driver.core.DataType;
+import com.datastax.driver.core.PreparedStatement;
+import com.datastax.driver.core.ResultSet;
+import com.datastax.driver.core.Row;
+import com.datastax.driver.core.Session;
+
+/**
+ * This class provides the methods that MDBC needs to access Cassandra directly in order to provide persistence
+ * to calls to the user's DB. It does not do any table or row locking.
+ *
+ * <p>This code only supports the following limited list of H2 and Cassandra data types:</p>
+ * <table summary="">
+ * <tr><th>H2 Data Type</th><th>Mapped to Cassandra Data Type</th></tr>
+ * <tr><td>BIGINT</td><td>BIGINT</td></tr>
+ * <tr><td>BOOLEAN</td><td>BOOLEAN</td></tr>
+ * <tr><td>CLOB</td><td>BLOB</td></tr>
+ * <tr><td>DOUBLE</td><td>DOUBLE</td></tr>
+ * <tr><td>INTEGER</td><td>INT</td></tr>
+ * <tr><td>TIMESTAMP</td><td>TIMESTAMP</td></tr>
+ * <tr><td>VARBINARY</td><td>BLOB</td></tr>
+ * <tr><td>VARCHAR</td><td>VARCHAR</td></tr>
+ * </table>
+ *
+ * @author Robert P. Eby
+ */
+public class CassandraMixin implements MusicInterface {
+ /** The property name to use to identify this replica to MusicSqlManager */
+ public static final String KEY_MY_ID = "myid";
+ /** The property name to use for the comma-separated list of replica IDs. */
+ public static final String KEY_REPLICAS = "replica_ids";
+ /** The property name to use to identify the IP address for Cassandra. */
+ public static final String KEY_MUSIC_ADDRESS = "music_address";
+ /** The property name to use to provide the replication factor for Cassandra. */
+ public static final String KEY_MUSIC_RFACTOR = "music_rfactor";
+ /** The property name to use to provide the replication factor for Cassandra. */
+ public static final String KEY_MUSIC_NAMESPACE = "music_namespace";
+ /** The default property value to use for the Cassandra keyspace. */
+ public static final String DEFAULT_MUSIC_KEYSPACE = "mdbc";
+ /** The default property value to use for the Cassandra IP address. */
+ public static final String DEFAULT_MUSIC_ADDRESS = "localhost";
+ /** The default property value to use for the Cassandra replication factor. */
+ public static final int DEFAULT_MUSIC_RFACTOR = 1;
+ /** The default primary string column, if none is provided. */
+ public static final String MDBC_PRIMARYKEY_NAME = "mdbc_cuid";
+ /** Type of the primary key, if none is defined by the user */
+ public static final String MDBC_PRIMARYKEY_TYPE = "uuid";
+ /** Namespace for the tables in MUSIC (Cassandra) */
+ public static final String DEFAULT_MUSIC_NAMESPACE = "namespace";
+
+ /** Name of the tables required for MDBC */
+ public static final String TABLE_TO_PARTITION_TABLE_NAME = "tabletopartition";
+ public static final String PARTITION_INFORMATION_TABLE_NAME = "partitioninfo";
+ public static final String REDO_HISTORY_TABLE_NAME= "redohistory";
+ //\TODO Add logic to change the names when required and create the tables when necessary
+ private String redoRecordTableName = "redorecords";
+ private String transactionInformationTableName = "transactioninformation";
+
+ private EELFLoggerDelegate logger = EELFLoggerDelegate.getLogger(CassandraMixin.class);
+
+ // Maps java.sql.Types codes to the Cassandra CQL type used when mirroring a SQL column into MUSIC.
+ private static final Map<Integer, String> typemap = new HashMap<>();
+ static {
+ // We only support the following type mappings currently (from DB -> Cassandra).
+ // Anything else will return null from typemap.get() and likely cause a NullPointerException downstream.
+ typemap.put(Types.BIGINT, "BIGINT"); // aka. IDENTITY
+ typemap.put(Types.BLOB, "VARCHAR"); // NOTE(review): BLOB->VARCHAR while CLOB->BLOB below looks swapped -- confirm intended
+ typemap.put(Types.BOOLEAN, "BOOLEAN");
+ typemap.put(Types.CLOB, "BLOB");
+ typemap.put(Types.DATE, "VARCHAR");
+ typemap.put(Types.DOUBLE, "DOUBLE");
+ typemap.put(Types.DECIMAL, "DECIMAL");
+ typemap.put(Types.INTEGER, "INT");
+ //typemap.put(Types.TIMESTAMP, "TIMESTAMP");
+ typemap.put(Types.SMALLINT, "SMALLINT");
+ typemap.put(Types.TIMESTAMP, "VARCHAR"); // timestamps are stored as strings (see the Date handling in getDirtyRows)
+ typemap.put(Types.VARBINARY, "BLOB");
+ typemap.put(Types.VARCHAR, "VARCHAR");
+ typemap.put(Types.CHAR, "VARCHAR");
+ //The "Hacks", these don't have a direct mapping
+ //typemap.put(Types.DATE, "VARCHAR");
+ //typemap.put(Types.DATE, "TIMESTAMP");
+ }
+
+ protected DatabasePartition ranges;
+ protected final String music_ns;
+ protected final String myId;
+ protected final String[] allReplicaIds;
+ private final String musicAddress;
+ private final int music_rfactor;
+ private MusicConnector mCon = null;
+ private Session musicSession = null;
+ private boolean keyspace_created = false;
+ private Map<String, PreparedStatement> ps_cache = new HashMap<>();
+ private Set<String> in_progress = Collections.synchronizedSet(new HashSet<String>());
+
+ /**
+ * No-arg constructor: leaves all final configuration fields null/zero.
+ * NOTE(review): an instance built this way cannot talk to MUSIC -- presumably only used
+ * for reflective instantiation/mixin discovery; confirm before using such an instance.
+ */
+ public CassandraMixin() {
+ //this.logger = null;
+ this.musicAddress = null;
+ this.music_ns = null;
+ this.music_rfactor = 0;
+ this.myId = null;
+ this.allReplicaIds = null;
+ }
+
+ /**
+ * Construct a mixin from driver properties: MUSIC address, replication factor, this replica's id,
+ * the full replica list, and the MUSIC namespace; then creates the MUSIC keyspace.
+ * @param url JDBC url of the underlying DB connection (NOTE(review): not referenced in this body -- confirm if needed)
+ * @param info driver properties; see the KEY_* constants for recognized names and defaults
+ * @param ranges the database partition owned by this replica
+ * @throws MusicServiceException if keyspace creation fails
+ */
+ public CassandraMixin(String url, Properties info, DatabasePartition ranges) throws MusicServiceException {
+ this.ranges = ranges;
+ // Default values -- should be overridden in the Properties
+ // Default to using the host_ids of the various peers as the replica IDs (this is probably preferred)
+ this.musicAddress = info.getProperty(KEY_MUSIC_ADDRESS, DEFAULT_MUSIC_ADDRESS);
+ logger.info(EELFLoggerDelegate.applicationLogger,"MusicSqlManager: musicAddress="+musicAddress);
+
+ String s = info.getProperty(KEY_MUSIC_RFACTOR);
+ this.music_rfactor = (s == null) ? DEFAULT_MUSIC_RFACTOR : Integer.parseInt(s);
+
+ this.myId = info.getProperty(KEY_MY_ID, getMyHostId());
+ logger.info(EELFLoggerDelegate.applicationLogger,"MusicSqlManager: myId="+myId);
+
+
+ this.allReplicaIds = info.getProperty(KEY_REPLICAS, getAllHostIds()).split(",");
+ logger.info(EELFLoggerDelegate.applicationLogger,"MusicSqlManager: allReplicaIds="+info.getProperty(KEY_REPLICAS, this.myId));
+
+ this.music_ns = info.getProperty(KEY_MUSIC_NAMESPACE,DEFAULT_MUSIC_NAMESPACE);
+ logger.info(EELFLoggerDelegate.applicationLogger,"MusicSqlManager: music_ns="+music_ns);
+ // redundant: field already initialized to this value at declaration
+ transactionInformationTableName = "transactioninformation";
+ createMusicKeyspace();
+ }
+
+ /**
+ * Create the MUSIC (Cassandra) keyspace this mixin writes to, using SimpleStrategy with the configured
+ * replication factor. Uses IF NOT EXISTS so re-running against an existing keyspace is a no-op instead
+ * of an error that must be recognized by exception-message text.
+ * @throws MusicServiceException on any failure other than the keyspace already existing
+ */
+ private void createMusicKeyspace() throws MusicServiceException {
+
+ Map<String,Object> replicationInfo = new HashMap<>();
+ replicationInfo.put("'class'", "'SimpleStrategy'");
+ replicationInfo.put("'replication_factor'", music_rfactor);
+
+ PreparedQueryObject queryObject = new PreparedQueryObject();
+ queryObject.appendQueryString(
+ "CREATE KEYSPACE IF NOT EXISTS " + this.music_ns + " WITH REPLICATION = " + replicationInfo.toString().replaceAll("=", ":"));
+
+ try {
+ MusicPureCassaCore.nonKeyRelatedPut(queryObject, "eventual");
+ } catch (MusicServiceException e) {
+ // Defensive fallback for MUSIC versions that still surface "already exists"; getMessage() null-guarded.
+ if (e.getMessage() != null && e.getMessage().equals("Keyspace "+this.music_ns+" already exists")) {
+ // ignore -- the keyspace being present is the desired end state
+ } else {
+ throw(e);
+ }
+ }
+ }
+
+ /**
+ * Read this node's Cassandra host id from SYSTEM.LOCAL.
+ * @return the host id as a string, or "UNKNOWN" if the query returns no row
+ */
+ private String getMyHostId() {
+ ResultSet rs = executeMusicRead("SELECT HOST_ID FROM SYSTEM.LOCAL");
+ Row row = rs.one();
+ return (row == null) ? "UNKNOWN" : row.getUUID("HOST_ID").toString();
+ }
+ /**
+ * Build the default replica id list: this node's id (myId must already be set) followed by the
+ * host ids of all peers from SYSTEM.PEERS, comma-separated.
+ * @return comma-separated host id list
+ */
+ private String getAllHostIds() {
+ ResultSet results = executeMusicRead("SELECT HOST_ID FROM SYSTEM.PEERS");
+ StringBuilder sb = new StringBuilder(myId);
+ for (Row row : results) {
+ sb.append(",");
+ sb.append(row.getUUID("HOST_ID").toString());
+ }
+ return sb.toString();
+ }
+
+ /**
+ * Get the name of this MusicInterface mixin object.
+ * @return the name "cassandra", used to select this mixin from configuration
+ */
+ @Override
+ public String getMixinName() {
+ return "cassandra";
+ }
+ /**
+ * Do what is needed to close down the MUSIC connection.
+ * Closes and clears the cached Cassandra session, if one was opened.
+ */
+ @Override
+ public void close() {
+ if (musicSession != null) {
+ musicSession.close();
+ musicSession = null;
+ }
+ }
+ /**
+ * Create the MDBC bookkeeping tables in MUSIC (redo records, transaction information,
+ * table-to-partition, partition info, redo history).
+ * @throws MDBCServiceException if any table creation fails; previously this was swallowed after
+ *         logging, leaving MDBC running without its bookkeeping tables.
+ */
+ @Override
+ public void initializeMdbcDataStructures() throws MDBCServiceException {
+ try {
+ DatabaseOperations.CreateRedoRecordsTable(-1, music_ns, redoRecordTableName);//\TODO If we start partitioning the data base, we would need to use the redotable number
+ DatabaseOperations.CreateTransactionInformationTable(music_ns, transactionInformationTableName);
+ DatabaseOperations.CreateTableToPartitionTable(music_ns, TABLE_TO_PARTITION_TABLE_NAME);
+ DatabaseOperations.CreatePartitionInfoTable(music_ns, PARTITION_INFORMATION_TABLE_NAME);
+ DatabaseOperations.CreateRedoHistoryTable(music_ns, REDO_HISTORY_TABLE_NAME);
+ }
+ catch(MDBCServiceException e){
+ // Log the cause and propagate: the method declares this exception and callers must know setup failed.
+ logger.error(EELFLoggerDelegate.errorLogger,"Error creating tables in MUSIC: "+e.getMessage());
+ throw e;
+ }
+ }
+
+ /**
+ * This method creates a keyspace in Music/Cassandra to store the data corresponding to the SQL tables.
+ * The keyspace name comes from the initialization properties passed to the JDBC driver.
+ * NOTE(review): the keyspace_created guard is a plain boolean, not thread-safe -- confirm single-threaded use.
+ */
+ @Override
+ public void createKeyspace() {
+ if (!keyspace_created) { // idiomatic boolean test (was 'keyspace_created == false')
+ String cql = String.format("CREATE KEYSPACE IF NOT EXISTS %s WITH REPLICATION = { 'class' : 'SimpleStrategy', 'replication_factor' : %d };", music_ns, music_rfactor);
+ executeMusicWriteQuery(cql);
+ keyspace_created = true;
+ }
+ }
+
+ /**
+ * This method performs all necessary initialization in Music/Cassandra to store the table <i>tableName</i>:
+ * it creates a Cassandra table mirroring the SQL table's columns, mapping SQL types via {@code typemap}.
+ * If the SQL table has no primary key, a synthetic uuid column ({@code MDBC_PRIMARYKEY_NAME}) is added
+ * and used as the Cassandra primary key.
+ * @param ti column/key metadata for the table
+ * @param tableName the table to initialize MUSIC for
+ */
+ @Override
+ public void initializeMusicForTable(TableInfo ti, String tableName) {
+ /**
+ * This code creates two tables for every table in SQL:
+ * (i) a table with the exact same name as the SQL table storing the SQL data.
+ * (ii) a "dirty bits" table that stores the keys in the Cassandra table that are yet to be
+ * updated in the SQL table (they were written by some other node).
+ */
+ StringBuilder fields = new StringBuilder();
+ StringBuilder prikey = new StringBuilder();
+ String pfx = "", pfx2 = "";
+ for (int i = 0; i < ti.columns.size(); i++) {
+ fields.append(pfx)
+ .append(ti.columns.get(i))
+ .append(" ")
+ .append(typemap.get(ti.coltype.get(i)));
+ if (ti.iskey.get(i)) {
+ // Primary key column
+ prikey.append(pfx2).append(ti.columns.get(i));
+ pfx2 = ", ";
+ }
+ pfx = ", ";
+ }
+ if (prikey.length()==0) {
+ fields.append(pfx).append(MDBC_PRIMARYKEY_NAME)
+ .append(" ")
+ .append(MDBC_PRIMARYKEY_TYPE);
+ prikey.append(MDBC_PRIMARYKEY_NAME); // use the constant, not a duplicated "mdbc_cuid" literal
+ }
+ String cql = String.format("CREATE TABLE IF NOT EXISTS %s.%s (%s, PRIMARY KEY (%s));", music_ns, tableName, fields.toString(), prikey.toString());
+ executeMusicWriteQuery(cql);
+ }
+
+ // **************************************************
+ // Dirty Tables (in MUSIC) methods
+ // **************************************************
+
+ /**
+ * Create a <i>dirty row</i> table for the real table <i>tableName</i>. The primary keys columns from the real table are recreated in
+ * the dirty table, along with a "REPLICA__" column that names the replica that should update its internal state from MUSIC.
+ * @param ti column/key metadata for the table
+ * @param tableName the table to create a "dirty" table for
+ */
+ @Override
+ public void createDirtyRowTable(TableInfo ti, String tableName) {
+ // create dirtybitsTable at all replicas
+ StringBuilder ddl = new StringBuilder("REPLICA__ TEXT");
+ StringBuilder cols = new StringBuilder("REPLICA__");
+ for (int i = 0; i < ti.columns.size(); i++) {
+ if (ti.iskey.get(i)) {
+ // Only use the primary keys columns in the "Dirty" table
+ ddl.append(", ")
+ .append(ti.columns.get(i))
+ .append(" ")
+ .append(typemap.get(ti.coltype.get(i)));
+ cols.append(", ").append(ti.columns.get(i));
+ }
+ }
+ // 'cols' is seeded with "REPLICA__", so the old cols.length()==0 check could never fire;
+ // compare against the seed to actually detect a table with no primary-key columns.
+ if (cols.toString().equals("REPLICA__")) {
+ //fixme
+ logger.error(EELFLoggerDelegate.errorLogger, "Create dirty row table found no primary key for table "+tableName);
+ }
+ ddl.append(", PRIMARY KEY(").append(cols).append(")");
+ String cql = String.format("CREATE TABLE IF NOT EXISTS %s.DIRTY_%s (%s);", music_ns, tableName, ddl.toString());
+ executeMusicWriteQuery(cql);
+ }
+ /**
+ * Drop the dirty row table for <i>tableName</i> from MUSIC.
+ * Issues a plain DROP TABLE (no IF EXISTS), so dropping a table whose dirty table was never
+ * created will surface a query error from executeMusicWriteQuery.
+ * @param tableName the table being dropped
+ */
+ @Override
+ public void dropDirtyRowTable(String tableName) {
+ String cql = String.format("DROP TABLE %s.DIRTY_%s;", music_ns, tableName);
+ executeMusicWriteQuery(cql);
+ }
+ /**
+ * Mark rows as "dirty" in the dirty rows table for <i>tableName</i>. Rows are marked for every replica
+ * id in {@code allReplicaIds} (including this one; the commented-out legacy code excluded self --
+ * current behavior is preserved).
+ * @param ti column/key metadata for the table
+ * @param tableName the table we are marking dirty
+ * @param keys the row values; the values that correspond to the table's primary key are copied into the dirty row table.
+ */
+ @Override
+ public void markDirtyRow(TableInfo ti, String tableName, JSONObject keys) {
+ Object[] keyObj = getObjects(ti,tableName, keys);
+ StringBuilder cols = new StringBuilder("REPLICA__");
+ StringBuilder vals = new StringBuilder("?");
+ List<Object> keyVals = new ArrayList<Object>();
+ for (int i = 0; i < ti.columns.size(); i++) {
+ if (ti.iskey.get(i)) {
+ cols.append(", ").append(ti.columns.get(i));
+ vals.append(", ").append("?");
+ keyVals.add(keyObj[i]);
+ }
+ }
+ // 'cols' is seeded with "REPLICA__", so the old cols.length()==0 check could never fire.
+ if (keyVals.isEmpty()) {
+ //FIXME
+ logger.error(EELFLoggerDelegate.errorLogger, "markDirtyRow: no primary key columns for table "+tableName);
+ }
+ String cql = String.format("INSERT INTO %s.DIRTY_%s (%s) VALUES (%s);", music_ns, tableName, cols.toString(), vals.toString());
+ String primaryKey;
+ if(ti.hasKey()) {
+ primaryKey = getMusicKeyFromRow(ti,tableName, keys);
+ }
+ else {
+ primaryKey = getMusicKeyFromRowWithoutPrimaryIndexes(ti,tableName, keys);
+ }
+ logger.debug(EELFLoggerDelegate.applicationLogger, "markDirtyRow: PK value: "+primaryKey);
+
+ for (String repl : allReplicaIds) {
+ PreparedQueryObject pQueryObject = new PreparedQueryObject();
+ pQueryObject.appendQueryString(cql);
+ // Bind values in placeholder order: REPLICA__ first, then one value per primary-key column.
+ // (The old code bound tableName -- which has no column in this INSERT -- plus only the LAST key value.)
+ pQueryObject.addValue(repl);
+ for (Object kv : keyVals) {
+ pQueryObject.addValue(kv);
+ }
+ updateMusicDB(tableName, primaryKey, pQueryObject);
+ }
+ }
+ /**
+ * Remove the entries from the dirty row (for this replica) that correspond to a set of primary keys
+ * @param tableName the table we are removing dirty entries from
+ * @param keys the primary key values to use in the DELETE. Note: this is *only* the primary keys, not a full table row.
+ */
+ @Override
+ public void cleanDirtyRow(TableInfo ti, String tableName, JSONObject keys) {
+ Object[] keysObjects = getObjects(ti,tableName,keys);
+ PreparedQueryObject pQueryObject = new PreparedQueryObject();
+ StringBuilder cols = new StringBuilder("REPLICA__=?");
+ List<Object> vallist = new ArrayList<Object>();
+ vallist.add(myId);
+ int n = 0;
+ for (int i = 0; i < ti.columns.size(); i++) {
+ if (ti.iskey.get(i)) {
+ cols.append(" AND ").append(ti.columns.get(i)).append("=?");
+ vallist.add(keysObjects[n++]);
+ pQueryObject.addValue(keysObjects[n++]);
+ }
+ }
+ String cql = String.format("DELETE FROM %s.DIRTY_%s WHERE %s;", music_ns, tableName, cols.toString());
+ logger.debug(EELFLoggerDelegate.applicationLogger,"Executing MUSIC write:"+ cql);
+ pQueryObject.appendQueryString(cql);
+ ReturnType rt = MusicPureCassaCore.eventualPut(pQueryObject);
+ if(rt.getResult().getResult().toLowerCase().equals("failure")) {
+ System.out.println("Failure while cleanDirtyRow..."+rt.getMessage());
+ }
+ /*Session sess = getMusicSession();
+ PreparedStatement ps = getPreparedStatementFromCache(cql);
+ BoundStatement bound = ps.bind(vallist.toArray());
+ bound.setReadTimeoutMillis(60000);
+ synchronized (sess) {
+ sess.execute(bound);
+ }*/
+ }
+ /**
+ * Get a list of "dirty rows" for a table. The dirty rows returned apply only to this replica,
+ * and consist of a Map of primary key column names and values, decoded per the Cassandra column types.
+ * @param ti column/key metadata for the table (unused here; types come from the result's column definitions)
+ * @param tableName the table we are querying for
+ * @return a list of maps; each list item is a map of the primary key names and values for that "dirty row".
+ *         Empty when there are no dirty rows or when the MUSIC read fails.
+ */
+ @Override
+ public List<Map<String,Object>> getDirtyRows(TableInfo ti, String tableName) {
+ String cql = String.format("SELECT * FROM %s.DIRTY_%s WHERE REPLICA__=?;", music_ns, tableName);
+ ResultSet results = null;
+ logger.debug(EELFLoggerDelegate.applicationLogger,"Executing MUSIC read:"+ cql);
+
+ PreparedQueryObject pQueryObject = new PreparedQueryObject();
+ pQueryObject.appendQueryString(cql);
+ // Bind REPLICA__=? -- the query had a placeholder but the old code never supplied a value.
+ pQueryObject.addValue(myId);
+ try {
+ results = MusicPureCassaCore.get(pQueryObject);
+ } catch (MusicServiceException e) {
+ // Previously swallowed, which left 'results' null and NPE'd on getColumnDefinitions() below.
+ logger.error(EELFLoggerDelegate.errorLogger, "getDirtyRows: MUSIC read failed: "+e.getMessage());
+ return new ArrayList<Map<String,Object>>();
+ }
+
+ ColumnDefinitions cdef = results.getColumnDefinitions();
+ List<Map<String,Object>> list = new ArrayList<Map<String,Object>>();
+ for (Row row : results) {
+ Map<String,Object> objs = new HashMap<String,Object>();
+ for (int i = 0; i < cdef.size(); i++) {
+ String colname = cdef.getName(i).toUpperCase();
+ String coltype = cdef.getType(i).getName().toString().toUpperCase();
+ if (!colname.equals("REPLICA__")) {
+ switch (coltype) {
+ case "BIGINT":
+ objs.put(colname, row.getLong(colname));
+ break;
+ case "BOOLEAN":
+ objs.put(colname, row.getBool(colname));
+ break;
+ case "BLOB":
+ objs.put(colname, row.getString(colname));
+ break;
+ case "DATE":
+ objs.put(colname, row.getString(colname));
+ break;
+ case "DOUBLE":
+ objs.put(colname, row.getDouble(colname));
+ break;
+ case "DECIMAL":
+ objs.put(colname, row.getDecimal(colname));
+ break;
+ case "INT":
+ objs.put(colname, row.getInt(colname));
+ break;
+ case "TIMESTAMP":
+ objs.put(colname, row.getTimestamp(colname));
+ break;
+ case "VARCHAR":
+ default:
+ objs.put(colname, row.getString(colname));
+ break;
+ }
+ }
+ }
+ list.add(objs);
+ }
+ return list;
+ }
+
+ /**
+ * Drops the named table and its dirty row table (for all replicas) from MUSIC. The dirty row table is dropped first.
+ * @param tableName This is the table that has been dropped
+ */
+ @Override
+ public void clearMusicForTable(String tableName) {
+ dropDirtyRowTable(tableName);
+ String cql = String.format("DROP TABLE %s.%s;", music_ns, tableName);
+ executeMusicWriteQuery(cql);
+ }
+ /**
+ * This function is called whenever there is a DELETE to a row on a local SQL table, wherein it updates the
+ * MUSIC/Cassandra tables (both dirty bits and actual data) corresponding to the SQL write. MUSIC propagates
+ * it to the other replicas.
+ *
+ * @param tableName This is the table that has changed.
+ * @param oldRow This is a copy of the old row being deleted
+ */
+ @Override
+ public void deleteFromEntityTableInMusic(TableInfo ti, String tableName, JSONObject oldRow) {
+ Object[] objects = getObjects(ti,tableName,oldRow);
+ PreparedQueryObject pQueryObject = new PreparedQueryObject();
+ if (ti.hasKey()) {
+ assert(ti.columns.size() == objects.length);
+ } else {
+ assert(ti.columns.size()+1 == objects.length);
+ }
+
+ StringBuilder where = new StringBuilder();
+ List<Object> vallist = new ArrayList<Object>();
+ String pfx = "";
+ for (int i = 0; i < ti.columns.size(); i++) {
+ if (ti.iskey.get(i)) {
+ where.append(pfx)
+ .append(ti.columns.get(i))
+ .append("=?");
+ vallist.add(objects[i]);
+ pQueryObject.addValue(objects[i]);
+ pfx = " AND ";
+ }
+ }
+ if (!ti.hasKey()) {
+ where.append(MDBC_PRIMARYKEY_NAME + "=?");
+ //\FIXME this is wrong, old row is not going to contain the UUID, this needs to be fixed
+ vallist.add(UUID.fromString((String) objects[0]));
+ pQueryObject.addValue(UUID.fromString((String) objects[0]));
+ }
+
+ String cql = String.format("DELETE FROM %s.%s WHERE %s;", music_ns, tableName, where.toString());
+ logger.error(EELFLoggerDelegate.errorLogger,"Executing MUSIC write:"+ cql);
+ pQueryObject.appendQueryString(cql);
+
+ /*PreparedStatement ps = getPreparedStatementFromCache(cql);
+ BoundStatement bound = ps.bind(vallist.toArray());
+ bound.setReadTimeoutMillis(60000);
+ Session sess = getMusicSession();
+ synchronized (sess) {
+ sess.execute(bound);
+ }*/
+ String primaryKey = getMusicKeyFromRow(ti,tableName, oldRow);
+ if(MusicMixin.criticalTables.contains(tableName)) {
+ ReturnType rt = null;
+ try {
+ rt = MusicPureCassaCore.atomicPut(music_ns, tableName, primaryKey, pQueryObject, null);
+ } catch (MusicLockingException e) {
+ e.printStackTrace();
+ } catch (MusicServiceException e) {
+ e.printStackTrace();
+ } catch (MusicQueryException e) {
+ e.printStackTrace();
+ }
+ if(rt.getResult().getResult().toLowerCase().equals("failure")) {
+ System.out.println("Failure while critical put..."+rt.getMessage());
+ }
+ } else {
+ ReturnType rt = MusicPureCassaCore.eventualPut(pQueryObject);
+ if(rt.getResult().getResult().toLowerCase().equals("failure")) {
+ System.out.println("Failure while critical put..."+rt.getMessage());
+ }
+ }
+ // Mark the dirty rows in music for all the replicas but us
+ markDirtyRow(ti,tableName, oldRow);
+ }
+
+ public Set<String> getMusicTableSet(String ns) {
+ Set<String> set = new TreeSet<String>();
+ String cql = String.format("SELECT TABLE_NAME FROM SYSTEM_SCHEMA.TABLES WHERE KEYSPACE_NAME = '%s'", ns);
+ ResultSet rs = executeMusicRead(cql);
+ for (Row row : rs) {
+ set.add(row.getString("TABLE_NAME").toUpperCase());
+ }
+ return set;
+ }
+ /**
+ * This method is called whenever there is a SELECT on a local SQL table, wherein it first checks the local
+ * dirty bits table to see if there are any keys in Cassandra whose value has not yet been sent to SQL
+ * @param tableName This is the table on which the select is being performed
+ */
+ @Override
+ public void readDirtyRowsAndUpdateDb(DBInterface dbi, String tableName) {
+ // Read dirty rows of this table from Music
+ TableInfo ti = dbi.getTableInfo(tableName);
+ List<Map<String,Object>> objlist = getDirtyRows(ti,tableName);
+ PreparedQueryObject pQueryObject = null;
+ String pre_cql = String.format("SELECT * FROM %s.%s WHERE ", music_ns, tableName);
+ List<Object> vallist = new ArrayList<Object>();
+ StringBuilder sb = new StringBuilder();
+ //\TODO Perform a batch operation instead of each row at a time
+ for (Map<String,Object> map : objlist) {
+ pQueryObject = new PreparedQueryObject();
+ sb.setLength(0);
+ vallist.clear();
+ String pfx = "";
+ for (String key : map.keySet()) {
+ sb.append(pfx).append(key).append("=?");
+ vallist.add(map.get(key));
+ pQueryObject.addValue(map.get(key));
+ pfx = " AND ";
+ }
+
+ String cql = pre_cql + sb.toString();
+ System.out.println("readDirtyRowsAndUpdateDb: cql: "+cql);
+ pQueryObject.appendQueryString(cql);
+ ResultSet dirtyRows = null;
+ try {
+ //\TODO Why is this an eventual put?, this should be an atomic
+ dirtyRows = MusicPureCassaCore.get(pQueryObject);
+ } catch (MusicServiceException e) {
+
+ e.printStackTrace();
+ }
+ /*
+ Session sess = getMusicSession();
+ PreparedStatement ps = getPreparedStatementFromCache(cql);
+ BoundStatement bound = ps.bind(vallist.toArray());
+ bound.setReadTimeoutMillis(60000);
+ ResultSet dirtyRows = null;
+ synchronized (sess) {
+ dirtyRows = sess.execute(bound);
+ }*/
+ List<Row> rows = dirtyRows.all();
+ if (rows.isEmpty()) {
+ // No rows, the row must have been deleted
+ deleteRowFromSqlDb(dbi,tableName, map);
+ } else {
+ for (Row row : rows) {
+ writeMusicRowToSQLDb(dbi,tableName, row);
+ }
+ }
+ }
+ }
+
+ private void deleteRowFromSqlDb(DBInterface dbi, String tableName, Map<String, Object> map) {
+ dbi.deleteRowFromSqlDb(tableName, map);
+ TableInfo ti = dbi.getTableInfo(tableName);
+ List<Object> vallist = new ArrayList<Object>();
+ for (int i = 0; i < ti.columns.size(); i++) {
+ if (ti.iskey.get(i)) {
+ String col = ti.columns.get(i);
+ Object val = map.get(col);
+ vallist.add(val);
+ }
+ }
+ cleanDirtyRow(ti, tableName, new JSONObject(vallist));
+ }
+ /**
+ * This functions copies the contents of a row in Music into the corresponding row in the SQL table
+ * @param tableName This is the name of the table in both Music and swl
+ * @param musicRow This is the row in Music that is being copied into SQL
+ */
+ private void writeMusicRowToSQLDb(DBInterface dbi, String tableName, Row musicRow) {
+ // First construct the map of columns and their values
+ TableInfo ti = dbi.getTableInfo(tableName);
+ Map<String, Object> map = new HashMap<String, Object>();
+ List<Object> vallist = new ArrayList<Object>();
+ String rowid = tableName;
+ for (String col : ti.columns) {
+ Object val = getValue(musicRow, col);
+ map.put(col, val);
+ if (ti.iskey(col)) {
+ vallist.add(val);
+ rowid += "_" + val.toString();
+ }
+ }
+
+ logger.debug("Blocking rowid: "+rowid);
+ in_progress.add(rowid); // Block propagation of the following INSERT/UPDATE
+
+ dbi.insertRowIntoSqlDb(tableName, map);
+
+ logger.debug("Unblocking rowid: "+rowid);
+ in_progress.remove(rowid); // Unblock propagation
+
+// try {
+// String sql = String.format("INSERT INTO %s (%s) VALUES (%s);", tableName, fields.toString(), values.toString());
+// executeSQLWrite(sql);
+// } catch (SQLException e) {
+// logger.debug("Insert failed because row exists, do an update");
+// // TODO - rewrite this UPDATE command should not update key fields
+// String sql = String.format("UPDATE %s SET (%s) = (%s) WHERE %s", tableName, fields.toString(), values.toString(), where.toString());
+// try {
+// executeSQLWrite(sql);
+// } catch (SQLException e1) {
+// e1.printStackTrace();
+// }
+// }
+
+ ti = dbi.getTableInfo(tableName);
+ cleanDirtyRow(ti, tableName, new JSONObject(vallist));
+
+// String selectQuery = "select "+ primaryKeyName+" FROM "+tableName+" WHERE "+primaryKeyName+"="+primaryKeyValue+";";
+// java.sql.ResultSet rs = executeSQLRead(selectQuery);
+// String dbWriteQuery=null;
+// try {
+// if(rs.next()){//this entry is there, do an update
+// dbWriteQuery = "UPDATE "+tableName+" SET "+columnNameString+" = "+ valueString +"WHERE "+primaryKeyName+"="+primaryKeyValue+";";
+// }else
+// dbWriteQuery = "INSERT INTO "+tableName+" VALUES"+valueString+";";
+// executeSQLWrite(dbWriteQuery);
+// } catch (SQLException e) {
+// // ZZTODO Auto-generated catch block
+// e.printStackTrace();
+// }
+
+ //clean the music dirty bits table
+// String dirtyRowIdsTableName = music_ns+".DIRTY_"+tableName+"_"+myId;
+// String deleteQuery = "DELETE FROM "+dirtyRowIdsTableName+" WHERE dirtyRowKeys=$$"+primaryKeyValue+"$$;";
+// executeMusicWriteQuery(deleteQuery);
+ }
+ private Object getValue(Row musicRow, String colname) {
+ ColumnDefinitions cdef = musicRow.getColumnDefinitions();
+ DataType colType;
+ try {
+ colType= cdef.getType(colname);
+ }
+ catch(IllegalArgumentException e) {
+ logger.warn("Colname is not part of table metadata: "+e);
+ throw e;
+ }
+ String typeStr = colType.getName().toString().toUpperCase();
+ switch (typeStr) {
+ case "BIGINT":
+ return musicRow.getLong(colname);
+ case "BOOLEAN":
+ return musicRow.getBool(colname);
+ case "BLOB":
+ return musicRow.getString(colname);
+ case "DATE":
+ return musicRow.getString(colname);
+ case "DECIMAL":
+ return musicRow.getDecimal(colname);
+ case "DOUBLE":
+ return musicRow.getDouble(colname);
+ case "SMALLINT":
+ case "INT":
+ return musicRow.getInt(colname);
+ case "TIMESTAMP":
+ return musicRow.getTimestamp(colname);
+ case "UUID":
+ return musicRow.getUUID(colname);
+ default:
+ logger.error(EELFLoggerDelegate.errorLogger, "UNEXPECTED COLUMN TYPE: columname="+colname+", columntype="+typeStr);
+ // fall thru
+ case "VARCHAR":
+ return musicRow.getString(colname);
+ }
+ }
+
+ /**
+ * This method is called whenever there is an INSERT or UPDATE to a local SQL table, wherein it updates the
+ * MUSIC/Cassandra tables (both dirty bits and actual data) corresponding to the SQL write. Music propagates
+ * it to the other replicas.
+ *
+ * @param tableName This is the table that has changed.
+ * @param changedRow This is information about the row that has changed
+ */
+ @Override
+ public void updateDirtyRowAndEntityTableInMusic(TableInfo ti, String tableName, JSONObject changedRow) {
+ // Build the CQL command
+ Object[] objects = getObjects(ti,tableName,changedRow);
+ StringBuilder fields = new StringBuilder();
+ StringBuilder values = new StringBuilder();
+ String rowid = tableName;
+ Object[] newrow = new Object[objects.length];
+ PreparedQueryObject pQueryObject = new PreparedQueryObject();
+ String pfx = "";
+ int keyoffset=0;
+ for (int i = 0; i < objects.length; i++) {
+ if (!ti.hasKey() && i==0) {
+ //We need to tack on cassandra's uid in place of a primary key
+ fields.append(MDBC_PRIMARYKEY_NAME);
+ values.append("?");
+ newrow[i] = UUID.fromString((String) objects[i]);
+ pQueryObject.addValue(newrow[i]);
+ keyoffset=-1;
+ pfx = ", ";
+ continue;
+ }
+ fields.append(pfx).append(ti.columns.get(i+keyoffset));
+ values.append(pfx).append("?");
+ pfx = ", ";
+ if (objects[i] instanceof byte[]) {
+ // Cassandra doesn't seem to have a Codec to translate a byte[] to a ByteBuffer
+ newrow[i] = ByteBuffer.wrap((byte[]) objects[i]);
+ pQueryObject.addValue(newrow[i]);
+ } else if (objects[i] instanceof Reader) {
+ // Cassandra doesn't seem to have a Codec to translate a Reader to a ByteBuffer either...
+ newrow[i] = ByteBuffer.wrap(readBytesFromReader((Reader) objects[i]));
+ pQueryObject.addValue(newrow[i]);
+ } else {
+ newrow[i] = objects[i];
+ pQueryObject.addValue(newrow[i]);
+ }
+ if (i+keyoffset>=0 && ti.iskey.get(i+keyoffset)) {
+ rowid += "_" + newrow[i].toString();
+ }
+ }
+
+ if (in_progress.contains(rowid)) {
+ // This call to updateDirtyRowAndEntityTableInMusic() was called as a result of a Cassandra -> H2 update; ignore
+ logger.debug(EELFLoggerDelegate.applicationLogger, "updateDirtyRowAndEntityTableInMusic: bypassing MUSIC update on "+rowid);
+
+ } else {
+ // Update local MUSIC node. Note: in Cassandra you can insert again on an existing key..it becomes an update
+ String cql = String.format("INSERT INTO %s.%s (%s) VALUES (%s);", music_ns, tableName, fields.toString(), values.toString());
+
+ pQueryObject.appendQueryString(cql);
+ String primaryKey = getMusicKeyFromRow(ti,tableName, changedRow);
+ updateMusicDB(tableName, primaryKey, pQueryObject);
+
+ /*PreparedStatement ps = getPreparedStatementFromCache(cql);
+ BoundStatement bound = ps.bind(newrow);
+ bound.setReadTimeoutMillis(60000);
+ Session sess = getMusicSession();
+ synchronized (sess) {
+ sess.execute(bound);
+ }*/
+ // Mark the dirty rows in music for all the replicas but us
+ markDirtyRow(ti,tableName, changedRow);
+ }
+ }
+
+
+
+ private byte[] readBytesFromReader(Reader rdr) {
+ StringBuilder sb = new StringBuilder();
+ try {
+ int ch;
+ while ((ch = rdr.read()) >= 0) {
+ sb.append((char)ch);
+ }
+ } catch (IOException e) {
+ logger.warn("readBytesFromReader: "+e);
+ }
+ return sb.toString().getBytes();
+ }
+
+ protected PreparedStatement getPreparedStatementFromCache(String cql) {
+ // Note: have to hope that the Session never changes!
+ if (!ps_cache.containsKey(cql)) {
+ Session sess = getMusicSession();
+ PreparedStatement ps = sess.prepare(cql);
+ ps_cache.put(cql, ps);
+ }
+ return ps_cache.get(cql);
+ }
+
+ /**
+ * This method gets a connection to Music
+ * @return the Cassandra Session to use
+ */
+ protected Session getMusicSession() {
+ // create cassandra session
+ if (musicSession == null) {
+ logger.info(EELFLoggerDelegate.applicationLogger, "Creating New Music Session");
+ mCon = new MusicConnector(musicAddress);
+ musicSession = mCon.getSession();
+ }
+ return musicSession;
+ }
+
+ /**
+ * This method executes a write query in Music
+ * @param cql the CQL to be sent to Cassandra
+ */
+ protected void executeMusicWriteQuery(String cql) {
+ logger.debug(EELFLoggerDelegate.applicationLogger, "Executing MUSIC write:"+ cql);
+ PreparedQueryObject pQueryObject = new PreparedQueryObject();
+ pQueryObject.appendQueryString(cql);
+ ReturnType rt = MusicPureCassaCore.eventualPut(pQueryObject);
+ if(rt.getResult().getResult().toLowerCase().equals("failure")) {
+ logger.error(EELFLoggerDelegate.errorLogger, "Failure while eventualPut...: "+rt.getMessage());
+ }
+ /*Session sess = getMusicSession();
+ SimpleStatement s = new SimpleStatement(cql);
+ s.setReadTimeoutMillis(60000);
+ synchronized (sess) {
+ sess.execute(s);
+ }*/
+ }
+
+ /**
+ * This method executes a read query in Music
+ * @param cql the CQL to be sent to Cassandra
+ * @return a ResultSet containing the rows returned from the query
+ */
+ protected ResultSet executeMusicRead(String cql) {
+ logger.debug(EELFLoggerDelegate.applicationLogger, "Executing MUSIC write:"+ cql);
+ PreparedQueryObject pQueryObject = new PreparedQueryObject();
+ pQueryObject.appendQueryString(cql);
+ ResultSet results = null;
+ try {
+ results = MusicPureCassaCore.get(pQueryObject);
+ } catch (MusicServiceException e) {
+
+ e.printStackTrace();
+ }
+ return results;
+ /*Session sess = getMusicSession();
+ synchronized (sess) {
+ return sess.execute(cql);
+ }*/
+ }
+
+ /**
+ * Returns the default primary key name that this mixin uses
+ */
+ public String getMusicDefaultPrimaryKeyName() {
+ return MDBC_PRIMARYKEY_NAME;
+ }
+
+ /**
+ * Return the function for cassandra's primary key generation
+ */
+ public String generateUniqueKey() {
+ return UUID.randomUUID().toString();
+ }
+
    /**
     * Finds the MUSIC-generated primary key of a row in a table that has no declared
     * primary key, by scanning the entire MUSIC table and comparing every row against
     * the given database row.
     * @param ti    table metadata for the table
     * @param table the table to scan
     * @param dbRow the database row whose MUSIC key is wanted
     * @return the generated key as a UUID string, or null if no matching row is found
     */
    @Override
    public String getMusicKeyFromRowWithoutPrimaryIndexes(TableInfo ti, String table, JSONObject dbRow) {
        //\TODO this operation is super expensive to perform, both latency and BW
        // it is better to add additional where clauses, and have the primary key
        // to be composed of known columns of the table
        // Adding this primary indexes would be an additional burden to the developers, which spanner
        // also does, but otherwise performance is really bad
        // At least it should have a set of columns that are guaranteed to be unique
        StringBuilder cqlOperation = new StringBuilder();
        cqlOperation.append("SELECT * FROM ")
            .append(music_ns)
            .append(".")
            .append(table);
        ResultSet musicResults = executeMusicRead(cqlOperation.toString());
        Object[] dbRowObjects = getObjects(ti,table,dbRow);
        // Full-table scan: compare each MUSIC row against the database row until a match.
        while (!musicResults.isExhausted()) {
            Row musicRow = musicResults.one();
            if (rowIs(ti, musicRow, dbRowObjects)) {
                return ((UUID)getValue(musicRow, MDBC_PRIMARYKEY_NAME)).toString();
            }
        }
        //should never reach here
        return null;
    }
+
+ /**
+ * Checks to see if this row is in list of database entries
+ * @param ti
+ * @param musicRow
+ * @param dbRow
+ * @return
+ */
+ private boolean rowIs(TableInfo ti, Row musicRow, Object[] dbRow) {
+ //System.out.println("Comparing " + musicRow.toString());
+ boolean sameRow=true;
+ for (int i=0; i<ti.columns.size(); i++) {
+ Object val = getValue(musicRow, ti.columns.get(i));
+ if (!dbRow[i].equals(val)) {
+ sameRow=false;
+ break;
+ }
+ }
+ return sameRow;
+ }
+
+ @Override
+ public String getMusicKeyFromRow(TableInfo ti, String tableName, JSONObject row) {
+ List<String> keyCols = ti.getKeyColumns();
+ if(keyCols.isEmpty()){
+ throw new IllegalArgumentException("Table doesn't have defined primary indexes ");
+ }
+ StringBuilder key = new StringBuilder();
+ String pfx = "";
+ for(String keyCol: keyCols) {
+ key.append(pfx);
+ key.append(row.getString(keyCol));
+ pfx = ",";
+ }
+ String keyStr = key.toString();
+ return keyStr;
+ }
+
+ public void updateMusicDB(String tableName, String primaryKey, PreparedQueryObject pQObject) {
+ if(MusicMixin.criticalTables.contains(tableName)) {
+ ReturnType rt = null;
+ try {
+ rt = MusicPureCassaCore.atomicPut(music_ns, tableName, primaryKey, pQObject, null);
+ } catch (MusicLockingException e) {
+ e.printStackTrace();
+ } catch (MusicServiceException e) {
+ e.printStackTrace();
+ } catch (MusicQueryException e) {
+ e.printStackTrace();
+ }
+ if(rt.getResult().getResult().toLowerCase().equals("failure")) {
+ System.out.println("Failure while critical put..."+rt.getMessage());
+ }
+ } else {
+ ReturnType rt = MusicPureCassaCore.eventualPut(pQObject);
+ if(rt.getResult().getResult().toLowerCase().equals("failure")) {
+ System.out.println("Failure while critical put..."+rt.getMessage());
+ }
+ }
+ }
+
+
+ private PreparedQueryObject createAppendRRTIndexToTitQuery(String titTable, String uuid, String table, String redoUuid){
+ PreparedQueryObject query = new PreparedQueryObject();
+ StringBuilder appendBuilder = new StringBuilder();
+ appendBuilder.append("UPDATE ")
+ .append(music_ns)
+ .append(".")
+ .append(titTable)
+ .append(" SET redo = redo +[('")
+ .append(table)
+ .append("',")
+ .append(redoUuid)
+ .append(")] WHERE id = ")
+ .append(uuid)
+ .append(";");
+ query.appendQueryString(appendBuilder.toString());
+ return query;
+ }
+
+ protected String createAndAssignLock(String fullyQualifiedKey, DatabasePartition partition, String keyspace, String table, String key) throws MDBCServiceException {
+ String lockId;
+ lockId = MusicPureCassaCore.createLockReference(fullyQualifiedKey);
+ ReturnType lockReturn;
+ try {
+ lockReturn = MusicPureCassaCore.acquireLock(fullyQualifiedKey,lockId);
+ } catch (MusicLockingException e) {
+ logger.error(EELFLoggerDelegate.errorLogger, "Lock was not acquire correctly for key "+fullyQualifiedKey);
+ throw new MDBCServiceException("Lock was not acquire correctly for key "+fullyQualifiedKey);
+ } catch (MusicServiceException e) {
+ logger.error(EELFLoggerDelegate.errorLogger, "Error in music, when locking key: "+fullyQualifiedKey);
+ throw new MDBCServiceException("Error in music, when locking: "+fullyQualifiedKey);
+ } catch (MusicQueryException e) {
+ logger.error(EELFLoggerDelegate.errorLogger, "Error in executing query music, when locking key: "+fullyQualifiedKey);
+ throw new MDBCServiceException("Error in executing query music, when locking: "+fullyQualifiedKey);
+ }
+ //\TODO this is wrong, we should have a better way to obtain a lock forcefully, clean the queue and obtain the lock
+ if(lockReturn.getResult().compareTo(ResultType.SUCCESS) != 0 ) {
+ try {
+ MusicPureCassaCore.releaseLock(fullyQualifiedKey,lockId,false);
+ CassaLockStore lockingServiceHandle = MusicPureCassaCore.getLockingServiceHandle();
+ UUID uuid = lockingServiceHandle.peekLockQueue(keyspace, table, key);
+ String uuidStr = uuid.toString();
+ while(uuidStr != lockId) {
+ MusicPureCassaCore.releaseLock(fullyQualifiedKey, uuid.toString(), false);
+ try {
+ uuid = lockingServiceHandle.peekLockQueue(keyspace, table, key);
+ uuidStr = uuid.toString();
+ } catch(NullPointerException e){
+ //Ignore null pointer exception
+ lockId = MusicPureCassaCore.createLockReference(fullyQualifiedKey);
+ uuidStr = lockId;
+ }
+ }
+ lockReturn = MusicPureCassaCore.acquireLock(fullyQualifiedKey,lockId);
+
+ } catch (MusicLockingException e) {
+ throw new MDBCServiceException("Could not lock the corresponding lock");
+ } catch (MusicServiceException e) {
+ logger.error(EELFLoggerDelegate.errorLogger, "Error in music, when locking key: "+fullyQualifiedKey);
+ throw new MDBCServiceException("Error in music, when locking: "+fullyQualifiedKey);
+ } catch (MusicQueryException e) {
+ logger.error(EELFLoggerDelegate.errorLogger, "Error in executing query music, when locking key: "+fullyQualifiedKey);
+ throw new MDBCServiceException("Error in executing query music, when locking: "+fullyQualifiedKey);
+ }
+ }
+ if(lockReturn.getResult().compareTo(ResultType.SUCCESS) != 0 ) {
+ throw new MDBCServiceException("Could not lock the corresponding lock");
+ }
+ //TODO: Java newbie here, verify that this lockId is actually assigned to the global DatabasePartition in the StateManager instance
+ partition.setLockId(lockId);
+ return lockId;
+ }
+
+ protected void pushRowToRRT(String lockId, String commitId, HashMap<Range,StagingTable> transactionDigest) throws MDBCServiceException{
+ PreparedQueryObject query = new PreparedQueryObject();
+ StringBuilder cqlQuery = new StringBuilder("INSERT INTO ")
+ .append(music_ns)
+ .append('.')
+ .append(redoRecordTableName)
+ .append(" (leaseid,leasecounter,transactiondigest) ")
+ .append("VALUES ('")
+ .append( lockId ).append("',")
+ .append( commitId ).append(",'");
+ try {
+ cqlQuery.append( MDBCUtils.toString(transactionDigest) );
+ } catch (IOException e) {
+ logger.error(EELFLoggerDelegate.errorLogger, "Transaction Digest serialization was invalid for commit "+commitId);
+ throw new MDBCServiceException("Transaction Digest serialization was invalid for commit "+commitId);
+ }
+ cqlQuery.append("');");
+ query.appendQueryString(cqlQuery.toString());
+ //\TODO check if I am not shooting on my own foot
+ try {
+ MusicPureCassaCore.nonKeyRelatedPut(query,"critical");
+ } catch (MusicServiceException e) {
+ logger.error(EELFLoggerDelegate.errorLogger, "Transaction Digest serialization was invalid for commit "+commitId);
+ throw new MDBCServiceException("Transaction Digest serialization for commit "+commitId);
+ }
+ }
+
+ protected void appendIndexToTit(String lockId, String commitId, String TITIndex) throws MDBCServiceException{
+ StringBuilder redoUuidBuilder = new StringBuilder();
+ redoUuidBuilder.append("('")
+ .append(lockId)
+ .append("',")
+ .append(commitId)
+ .append(")");
+ PreparedQueryObject appendQuery = createAppendRRTIndexToTitQuery(transactionInformationTableName, TITIndex, redoRecordTableName, redoUuidBuilder.toString());
+ ReturnType returnType = MusicPureCassaCore.criticalPut(music_ns, transactionInformationTableName, TITIndex, appendQuery, lockId, null);
+ if(returnType.getResult().compareTo(ResultType.SUCCESS) != 0 ){
+ logger.error(EELFLoggerDelegate.errorLogger, "Error when executing append operation with return type: "+returnType.getMessage());
+ throw new MDBCServiceException("Error when executing append operation with return type: "+returnType.getMessage());
+ }
+ }
+
    /**
     * Commits the transaction's staged changes to the MUSIC redo log:
     * (1) acquires (or reuses) the lock on the transaction information (TIT) row,
     * (2) pushes the serialized transaction digest to the redo record table (RRT),
     * (3) records the RRT index in the progress keeper, and
     * (4) appends the RRT index to the TIT row's redo list.
     * @param dbi       the database mixin (currently unused here)
     * @param partition partition owning the TIT row and lock
     * @param transactionDigest the staged changes to commit
     * @param txId      the local transaction id
     * @param progressKeeper tracks per-transaction commit progress
     * @throws MDBCServiceException on any lock, serialization, or MUSIC failure
     */
    @Override
    public void commitLog(DBInterface dbi, DatabasePartition partition, HashMap<Range,StagingTable> transactionDigest, String txId ,TxCommitProgress progressKeeper) throws MDBCServiceException{
        String TITIndex = partition.getTransactionInformationIndex();
        if(TITIndex.isEmpty()) {
            //\TODO Fetch TITIndex from the Range Information Table
            throw new MDBCServiceException("TIT Index retrieval not yet implemented");
        }
        String fullyQualifiedTitKey = music_ns+"."+ transactionInformationTableName +"."+TITIndex;
        //0. See if reference to lock was already created
        String lockId = partition.getLockId();
        if(lockId == null || lockId.isEmpty()) {
            lockId = createAndAssignLock(fullyQualifiedTitKey,partition,music_ns,transactionInformationTableName,TITIndex);
        }

        String commitId;
        //Generate a local commit id
        // NOTE(review): progressKeeper is dereferenced here but null-checked further down
        // (step 2); if null is a legal argument this line throws NPE first — confirm intent.
        if(progressKeeper.containsTx(txId)) {
            commitId = progressKeeper.getCommitId(txId).toString();
        }
        else{
            logger.error(EELFLoggerDelegate.errorLogger, "Tx with id "+txId+" was not created in the TxCommitProgress ");
            throw new MDBCServiceException("Tx with id "+txId+" was not created in the TxCommitProgress ");
        }
        //Add creation type of transaction digest

        //1. Push new row to RRT and obtain its index
        pushRowToRRT(lockId, commitId, transactionDigest);

        //2. Save RRT index to RQ
        if(progressKeeper!= null) {
            progressKeeper.setRecordId(txId,new RedoRecordId(lockId, commitId));
        }
        //3. Append RRT index into the corresponding TIT row array
        appendIndexToTit(lockId,commitId,TITIndex);
    }
+
+ /**
+ * @param tableName
+ * @param string
+ * @param rowValues
+ * @return
+ */
+ @SuppressWarnings("unused")
+ private String getUid(String tableName, String string, Object[] rowValues) {
+ //
+ // Update local MUSIC node. Note: in Cassandra you can insert again on an existing key..it becomes an update
+ String cql = String.format("SELECT * FROM %s.%s;", music_ns, tableName);
+ PreparedStatement ps = getPreparedStatementFromCache(cql);
+ BoundStatement bound = ps.bind();
+ bound.setReadTimeoutMillis(60000);
+ Session sess = getMusicSession();
+ ResultSet rs;
+ synchronized (sess) {
+ rs = sess.execute(bound);
+ }
+
+ //
+ //should never reach here
+ logger.error(EELFLoggerDelegate.errorLogger, "Could not find the row in the primary key");
+
+ return null;
+ }
+
+ @Override
+ public Object[] getObjects(TableInfo ti, String tableName, JSONObject row) {
+ // \FIXME: we may need to add the primary key of the row if it was autogenerated by MUSIC
+ List<String> cols = ti.columns;
+ int size = cols.size();
+ boolean hasDefault = false;
+ if(row.has(getMusicDefaultPrimaryKeyName())) {
+ size++;
+ hasDefault = true;
+ }
+
+ Object[] objects = new Object[size];
+ int idx = 0;
+ if(hasDefault) {
+ objects[idx++] = row.getString(getMusicDefaultPrimaryKeyName());
+ }
+ for(String col : ti.columns) {
+ objects[idx]=row.get(col);
+ }
+ return objects;
+ }
+
    // ---------------------------------------------------------------------------
    // The operations below belong to the newer partition / redo-history portion of
    // the MusicInterface API and are not yet implemented by this mixin; each one
    // fails fast with UnsupportedOperationException.
    // ---------------------------------------------------------------------------

    @Override
    public TransactionInformationElement getTransactionInformation(String id){
        throw new UnsupportedOperationException();
    }

    @Override
    public TitReference createTransactionInformationRow(TransactionInformationElement info){
        throw new UnsupportedOperationException();
    }

    @Override
    public void appendToRedoLog(TitReference titRow, DatabasePartition partition, RedoRecordId newRecord){
        throw new UnsupportedOperationException();
    }

    @Override
    public void appendRedoRecord(String redoRecordTable, RedoRecordId newRecord, String transactionDigest){
        throw new UnsupportedOperationException();
    }

    @Override
    public void updateTablePartition(String table, DatabasePartition partition){
        throw new UnsupportedOperationException();
    }

    @Override
    public TitReference createPartition(List<String> tables, int replicationFactor, String currentOwner){
        throw new UnsupportedOperationException();
    }

    @Override
    public void updatePartitionOwner(String partition, String owner){
        throw new UnsupportedOperationException();
    }

    @Override
    public void updateTitReference(String partition, TitReference tit){
        throw new UnsupportedOperationException();
    }

    @Override
    public void updatePartitionReplicationFactor(String partition, int replicationFactor){
        throw new UnsupportedOperationException();
    }

    @Override
    public void addRedoHistory(DatabasePartition partition, TitReference newTit, List<TitReference> old){
        throw new UnsupportedOperationException();
    }

    @Override
    public List<RedoHistoryElement> getHistory(DatabasePartition partition){
        throw new UnsupportedOperationException();
    }

    @Override
    public List<PartitionInformation> getPartitionInformation(DatabasePartition partition){
        throw new UnsupportedOperationException();
    }

    @Override
    public TablePartitionInformation getTablePartitionInformation(String table){
        throw new UnsupportedOperationException();
    }

    @Override
    public HashMap<Range,StagingTable> getTransactionDigest(RedoRecordId id){
        throw new UnsupportedOperationException();
    }
+
+ }
diff --git a/src/main/java/com/att/research/mdbc/mixins/DBInterface.java b/src/main/java/com/att/research/mdbc/mixins/DBInterface.java
new file mode 100755
index 0000000..9aa94f9
--- /dev/null
+++ b/src/main/java/com/att/research/mdbc/mixins/DBInterface.java
@@ -0,0 +1,91 @@
+package com.att.research.mdbc.mixins;
+
+import java.sql.ResultSet;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+
+import com.att.research.mdbc.Range;
+import com.att.research.mdbc.TableInfo;
+
+/**
+ * This Interface defines the methods that MDBC needs in order to mirror data to/from a Database instance.
+ *
+ * @author Robert P. Eby
+ */
+public interface DBInterface {
+ /**
+ * Get the name of this DBnterface mixin object.
+ * @return the name
+ */
+ String getMixinName();
+ /**
+ * Do what is needed to close down the database connection.
+ */
+ void close();
+ /**
+ * Get a set of the table names in the database. The table names should be returned in UPPER CASE.
+ * @return the set
+ */
+ Set<String> getSQLTableSet();
+ /**
+ * Return the name of the database that the driver is connected to
+ * @return
+ */
+ String getDatabaseName();
+ /**
+ * Return a TableInfo object for the specified table.
+ * @param tableName the table to look up
+ * @return a TableInfo object containing the info we need, or null if the table does not exist
+ */
+ TableInfo getTableInfo(String tableName);
+ /**
+ * This method should create triggers in the database to be called for each row after every INSERT,
+ * UPDATE and DELETE, and before every SELECT.
+ * @param tableName this is the table on which triggers are being created.
+ */
+ void createSQLTriggers(String tableName);
+ /**
+ * This method should drop all triggers previously created in the database for the table.
+ * @param tableName this is the table on which triggers are being dropped.
+ */
+ void dropSQLTriggers(String tableName);
+ /**
+ * This method inserts a row into the SQL database, defined via a map of column names and values.
+ * @param tableName the table to insert the row into
+ * @param map map of column names &rarr; values to use for the keys when inserting the row
+ */
+ void insertRowIntoSqlDb(String tableName, Map<String, Object> map);
+ /**
+ * This method deletes a row from the SQL database, defined via a map of column names and values.
+ * @param tableName the table to delete the row from
+ * @param map map of column names &rarr; values to use for the keys when deleting the row
+ */
+ void deleteRowFromSqlDb(String tableName, Map<String, Object> map);
+ /**
+ * Code to be run within the DB driver before a SQL statement is executed. This is where tables
+ * can be synchronized before a SELECT, for those databases that do not support SELECT triggers.
+ * @param sql the SQL statement that is about to be executed
+ */
+ void preStatementHook(final String sql);
+ /**
+ * Code to be run within the DB driver after a SQL statement has been executed. This is where remote
+ * statement actions can be copied back to Cassandra/MUSIC.
+ * @param sql the SQL statement that was executed
+ * @param transactionDigest
+ */
+ void postStatementHook(final String sql,Map<Range,StagingTable> transactionDigest);
+ /**
+ * This method executes a read query in the SQL database. Methods that call this method should be sure
+ * to call resultset.getStatement().close() when done in order to free up resources.
+ * @param sql the query to run
+ * @return a ResultSet containing the rows returned from the query
+ */
+ ResultSet executeSQLRead(String sql);
+
+ void synchronizeData(String tableName);
+
+ List<String> getReservedTblNames();
+
+ String getPrimaryKey(String sql, String tableName);
+}
diff --git a/src/main/java/com/att/research/mdbc/mixins/MixinFactory.java b/src/main/java/com/att/research/mdbc/mixins/MixinFactory.java
new file mode 100755
index 0000000..68d2986
--- /dev/null
+++ b/src/main/java/com/att/research/mdbc/mixins/MixinFactory.java
@@ -0,0 +1,125 @@
+package com.att.research.mdbc.mixins;
+
+import java.lang.reflect.Constructor;
+import java.lang.reflect.InvocationTargetException;
+import java.sql.Connection;
+import java.util.Properties;
+
+import com.att.research.logging.EELFLoggerDelegate;
+import com.att.research.mdbc.DatabasePartition;
+import com.att.research.mdbc.MusicSqlManager;
+
+/**
+ * This class is used to construct instances of Mixins that implement either the {@link com.att.research.mdbc.mixins.DBInterface}
+ * interface, or the {@link com.att.research.mdbc.mixins.MusicInterface} interface.  The Mixins are located by scanning the
+ * CLASSPATH for implementing classes (via {@link Utils#getClassesImplementing}).
+ *
+ * @author Robert P. Eby
+ */
+public class MixinFactory {
+	private static EELFLoggerDelegate logger = EELFLoggerDelegate.getLogger(MixinFactory.class);
+
+	// Only static methods; prevent instantiation.
+	private MixinFactory(){}
+
+	/**
+	 * Look for a class in CLASSPATH that implements the {@link DBInterface} interface, and has the mixin name <i>name</i>.
+	 * If one is found, construct and return it, using the other arguments for the constructor.
+	 * @param name the name of the Mixin
+	 * @param msm the MusicSqlManager to use as an argument to the constructor
+	 * @param url the URL to use as an argument to the constructor
+	 * @param conn the underlying JDBC Connection
+	 * @param info the Properties to use as an argument to the constructor
+	 * @return the newly constructed DBInterface, or null if one cannot be found.
+	 */
+	public static DBInterface createDBInterface(String name, MusicSqlManager msm, String url, Connection conn, Properties info) {
+		for (Class<?> cl : Utils.getClassesImplementing(DBInterface.class)) {
+			try {
+				// Each candidate must expose a no-arg constructor so we can ask for its mixin name.
+				// Note: Class.getConstructor() never returns null; it throws NoSuchMethodException instead,
+				// which is handled by the catch blocks below.
+				Constructor<?> con = cl.getConstructor();
+				DBInterface dbi = (DBInterface) con.newInstance();
+				String miname = dbi.getMixinName();
+				logger.info(EELFLoggerDelegate.applicationLogger,"Checking "+miname);
+				if (miname.equalsIgnoreCase(name)) {
+					con = cl.getConstructor(MusicSqlManager.class, String.class, Connection.class, Properties.class);
+					logger.info(EELFLoggerDelegate.applicationLogger,"Found match: "+miname);
+					return (DBInterface) con.newInstance(msm, url, conn, info);
+				}
+			} catch (InvocationTargetException e) {
+				// Unwrap: the ITE wrapper itself carries no useful detail, only its cause does.
+				logger.error(EELFLoggerDelegate.errorLogger,"createDBInterface: "+e.getCause());
+			} catch (Exception e) {
+				logger.error(EELFLoggerDelegate.errorLogger,"createDBInterface: "+e);
+			}
+		}
+		return null;
+	}
+	/**
+	 * Look for a class in CLASSPATH that implements the {@link MusicInterface} interface, and has the mixin name <i>name</i>.
+	 * If one is found, construct and return it, using the other arguments for the constructor.
+	 * @param name the name of the Mixin
+	 * @param url the URL to use as an argument to the constructor
+	 * @param info the Properties to use as an argument to the constructor
+	 * @param ranges the DatabasePartition to use as an argument to the constructor
+	 * @return the newly constructed MusicInterface, or null if one cannot be found.
+	 */
+	public static MusicInterface createMusicInterface(String name, String url, Properties info, DatabasePartition ranges) {
+		for (Class<?> cl : Utils.getClassesImplementing(MusicInterface.class)) {
+			try {
+				// getConstructor() throws (never returns null) if the no-arg constructor is missing.
+				Constructor<?> con = cl.getConstructor();
+				MusicInterface mi = (MusicInterface) con.newInstance();
+				String miname = mi.getMixinName();
+				logger.info(EELFLoggerDelegate.applicationLogger, "Checking "+miname);
+				if (miname.equalsIgnoreCase(name)) {
+					con = cl.getConstructor(String.class, Properties.class, DatabasePartition.class);
+					logger.info(EELFLoggerDelegate.applicationLogger,"Found match: "+miname);
+					return (MusicInterface) con.newInstance(url, info, ranges);
+				}
+			} catch (InvocationTargetException e) {
+				logger.error(EELFLoggerDelegate.errorLogger,"createMusicInterface: "+e.getCause().toString());
+			}
+			catch (Exception e) {
+				logger.error(EELFLoggerDelegate.errorLogger,"createMusicInterface: "+e);
+			}
+		}
+		return null;
+	}
+
+	// A previous reflective implementation (getClassesImplementingOld) scanned all loaded classes with
+	// Guava's ClassPath; it was removed because it did not work when MDBC was built as a JBoss module,
+	// where the classloader hierarchy behaves differently.
+
+	/**
+	 * Determine whether class <i>cl</i> implements interface <i>imp</i>, directly or through a superclass.
+	 * Note: only the directly declared interfaces of each class in the superclass chain are examined;
+	 * super-interfaces of those interfaces are not followed.
+	 * @param cl the class to test
+	 * @param imp the interface to look for
+	 * @return true if cl (or an ancestor class) directly implements imp
+	 */
+	static boolean impl(Class<?> cl, Class<?> imp) {
+		for (Class<?> c2 : cl.getInterfaces()) {
+			if (c2 == imp) {
+				return true;
+			}
+		}
+		Class<?> c2 = cl.getSuperclass();
+		return (c2 != null) ? impl(c2, imp) : false;
+	}
+}
diff --git a/src/main/java/com/att/research/mdbc/mixins/MusicConnector.java b/src/main/java/com/att/research/mdbc/mixins/MusicConnector.java
new file mode 100755
index 0000000..ea32a85
--- /dev/null
+++ b/src/main/java/com/att/research/mdbc/mixins/MusicConnector.java
@@ -0,0 +1,124 @@
+package com.att.research.mdbc.mixins;
+
+import java.net.InetAddress;
+import java.net.NetworkInterface;
+import java.net.SocketException;
+import java.util.ArrayList;
+import java.util.Enumeration;
+import java.util.Iterator;
+import java.util.List;
+
+import com.att.research.logging.EELFLoggerDelegate;
+import com.datastax.driver.core.Cluster;
+import com.datastax.driver.core.HostDistance;
+import com.datastax.driver.core.Metadata;
+import com.datastax.driver.core.PoolingOptions;
+import com.datastax.driver.core.Session;
+import com.datastax.driver.core.exceptions.NoHostAvailableException;
+import org.onap.music.main.MusicPureCassaCore;
+
+/**
+ * This class allows for management of the Cassandra Cluster and Session objects.
+ *
+ * @author Robert P. Eby
+ */
+public class MusicConnector {
+
+	private EELFLoggerDelegate logger = EELFLoggerDelegate.getLogger(MusicConnector.class);
+
+	private Session session;
+	private Cluster cluster;
+
+	protected MusicConnector() {
+		//to defeat instantiation since this is a singleton
+	}
+
+	/**
+	 * Connect to MUSIC/Cassandra at the given address (possibly a comma-separated list of hosts).
+	 * @param address the contact address(es) of the Cassandra cluster
+	 */
+	public MusicConnector(String address) {
+//		connectToCassaCluster(address);
+		connectToMultipleAddresses(address);
+	}
+
+	/** @return the current Cassandra Session, or null if not connected */
+	public Session getSession() {
+		return session;
+	}
+
+	/**
+	 * Close and discard the Session and Cluster, if any.
+	 */
+	public void close() {
+		if (session != null)
+			session.close();
+		session = null;
+		if (cluster != null)
+			cluster.close();
+		cluster = null;
+	}
+
+	/**
+	 * Enumerate every IP address bound to any local network interface.
+	 * Used by {@link #connectToCassaCluster(String)} as fallback contact points.
+	 * @return all local IP addresses (empty on enumeration failure)
+	 */
+	private List<String> getAllPossibleLocalIps(){
+		ArrayList<String> allPossibleIps = new ArrayList<String>();
+		try {
+			Enumeration<NetworkInterface> en = NetworkInterface.getNetworkInterfaces();
+			while(en.hasMoreElements()){
+				NetworkInterface ni=(NetworkInterface) en.nextElement();
+				Enumeration<InetAddress> ee = ni.getInetAddresses();
+				while(ee.hasMoreElements()) {
+					InetAddress ia= (InetAddress) ee.nextElement();
+					allPossibleIps.add(ia.getHostAddress());
+				}
+			}
+		} catch (SocketException e) {
+			// best-effort: log and return whatever was collected so far
+			logger.error(EELFLoggerDelegate.errorLogger,"getAllPossibleLocalIps: "+e);
+		}
+		return allPossibleIps;
+	}
+
+	/**
+	 * Connect through the MUSIC core library, which manages the Cluster/Session internally.
+	 * The commented-out block below documents the direct-driver configuration it replaces.
+	 * @param address comma-separated Cassandra contact points
+	 */
+	private void connectToMultipleAddresses(String address) {
+		MusicPureCassaCore.getDSHandle(address);
+		/*
+		PoolingOptions poolingOptions =
+				new PoolingOptions()
+				.setConnectionsPerHost(HostDistance.LOCAL,  4, 10)
+				.setConnectionsPerHost(HostDistance.REMOTE, 2, 4);
+		String[] music_hosts = address.split(",");
+		if (cluster == null) {
+			logger.info(EELFLoggerDelegate.applicationLogger,"Initializing MUSIC Client with endpoints "+address);
+			cluster = Cluster.builder()
+					.withPort(9042)
+					.withPoolingOptions(poolingOptions)
+					.withoutMetrics()
+					.addContactPoints(music_hosts)
+					.build();
+			Metadata metadata = cluster.getMetadata();
+			logger.info(EELFLoggerDelegate.applicationLogger,"Connected to cluster:"+metadata.getClusterName()+" at address:"+address);
+
+		}
+		session = cluster.connect();
+		*/
+	}
+
+	/**
+	 * Connect directly with the Cassandra driver, trying <i>address</i> first and then each
+	 * local IP address in turn until a connection succeeds.
+	 * @param address the first contact point to try
+	 */
+	@SuppressWarnings("unused")
+	private void connectToCassaCluster(String address) {
+		PoolingOptions poolingOptions =
+				new PoolingOptions()
+				.setConnectionsPerHost(HostDistance.LOCAL,  4, 10)
+				.setConnectionsPerHost(HostDistance.REMOTE, 2, 4);
+		Iterator<String> it = getAllPossibleLocalIps().iterator();
+		logger.info(EELFLoggerDelegate.applicationLogger,"Iterating through possible ips:"+getAllPossibleLocalIps());
+
+		while (it.hasNext()) {
+			try {
+				cluster = Cluster.builder()
+						.withPort(9042)
+						.withPoolingOptions(poolingOptions)
+						.withoutMetrics()
+						.addContactPoint(address)
+						.build();
+				//cluster.getConfiguration().getSocketOptions().setReadTimeoutMillis(Integer.MAX_VALUE);
+				Metadata metadata = cluster.getMetadata();
+				logger.info(EELFLoggerDelegate.applicationLogger,"Connected to cluster:"+metadata.getClusterName()+" at address:"+address);
+
+				session = cluster.connect();
+				break;
+			} catch (NoHostAvailableException e) {
+				// Release the failed Cluster before retrying with the next address;
+				// previously each failed attempt leaked the Cluster's internal resources.
+				if (cluster != null) {
+					cluster.close();
+					cluster = null;
+				}
+				address = it.next();
+			}
+		}
+	}
+}
diff --git a/src/main/java/com/att/research/mdbc/mixins/MusicInterface.java b/src/main/java/com/att/research/mdbc/mixins/MusicInterface.java
new file mode 100755
index 0000000..94b3ac6
--- /dev/null
+++ b/src/main/java/com/att/research/mdbc/mixins/MusicInterface.java
@@ -0,0 +1,178 @@
+package com.att.research.mdbc.mixins;
+
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+
+import org.json.JSONObject;
+
+import com.att.research.exceptions.MDBCServiceException;
+import com.att.research.mdbc.DatabasePartition;
+import com.att.research.mdbc.Range;
+import com.att.research.mdbc.TableInfo;
+import org.onap.music.exceptions.MusicLockingException;
+
+/**
+ * This Interface defines the methods that MDBC needs for a class to provide access to the persistence layer of MUSIC.
+ *
+ * @author Robert P. Eby
+ */
+public interface MusicInterface {
+	/**
+	 * This function is used to create all the required data structures, both local
+	 * \TODO Check if this function is required in the MUSIC interface or could be just created on the constructor
+	 */
+	void initializeMdbcDataStructures() throws MDBCServiceException;
+	/**
+	 * Get the name of this MusicInterface mixin object.
+	 * @return the name
+	 */
+	String getMixinName();
+	/**
+	 * Gets the name of this MusicInterface mixin's default primary key name
+	 * @return default primary key name
+	 */
+	String getMusicDefaultPrimaryKeyName();
+	/**
+	 * generates a key or placeholder for what is required for a primary key
+	 * @return a primary key
+	 */
+	String generateUniqueKey();
+
+	/**
+	 * Find the key used with Music for a table that was created without a primary index
+	 * Name is long to avoid developers using it. For cassandra performance in this operation
+	 * is going to be really bad
+	 * @param ti information of the table in the SQL layer
+	 * @param table name of the table
+	 * @param dbRow row obtained from the SQL layer
+	 * @return key associated with the row
+	 */
+	String getMusicKeyFromRowWithoutPrimaryIndexes(TableInfo ti, String table, JSONObject dbRow);
+	/**
+	 * Do what is needed to close down the MUSIC connection.
+	 */
+	void close();
+	/**
+	 * This method creates a keyspace in Music/Cassandra to store the data corresponding to the SQL tables.
+	 * The keyspace name comes from the initialization properties passed to the JDBC driver.
+	 */
+	void createKeyspace();
+	/**
+	 * This method performs all necessary initialization in Music/Cassandra to store the table <i>tableName</i>.
+	 * @param ti information about the table in the SQL layer
+	 * @param tableName the table to initialize MUSIC for
+	 */
+	void initializeMusicForTable(TableInfo ti, String tableName);
+	/**
+	 * Create a <i>dirty row</i> table for the real table <i>tableName</i>.  The primary keys columns from the real table are recreated in
+	 * the dirty table, along with a "REPLICA__" column that names the replica that should update it's internal state from MUSIC.
+	 * @param ti information about the table in the SQL layer
+	 * @param tableName the table to create a "dirty" table for
+	 */
+	void createDirtyRowTable(TableInfo ti, String tableName);
+	/**
+	 * Drop the dirty row table for <i>tableName</i> from MUSIC.
+	 * @param tableName the table being dropped
+	 */
+	void dropDirtyRowTable(String tableName);
+	/**
+	 * Drops the named table and its dirty row table (for all replicas) from MUSIC.  The dirty row table is dropped first.
+	 * @param tableName This is the table that has been dropped
+	 */
+	void clearMusicForTable(String tableName);
+	/**
+	 * Mark rows as "dirty" in the dirty rows table for <i>tableName</i>.  Rows are marked for all replicas but
+	 * this one (this replica already has the up to date data).
+	 * @param ti information about the table in the SQL layer
+	 * @param tableName the table we are marking dirty
+	 * @param keys an ordered list of the values being put into the table.  The values that correspond to the tables'
+	 * primary key are copied into the dirty row table.
+	 */
+	void markDirtyRow(TableInfo ti, String tableName, JSONObject keys);
+	/**
+	 * Remove the entries from the dirty row (for this replica) that correspond to a set of primary keys
+	 * @param ti information about the table in the SQL layer
+	 * @param tableName the table we are removing dirty entries from
+	 * @param keys the primary key values to use in the DELETE.  Note: this is *only* the primary keys, not a full table row.
+	 */
+	void cleanDirtyRow(TableInfo ti, String tableName, JSONObject keys);
+	/**
+	 * Get a list of "dirty rows" for a table.  The dirty rows returned apply only to this replica,
+	 * and consist of a Map of primary key column names and values.
+	 * @param ti information about the table in the SQL layer
+	 * @param tableName the table we are querying for
+	 * @return a list of maps; each list item is a map of the primary key names and values for that "dirty row".
+	 */
+	List<Map<String,Object>> getDirtyRows(TableInfo ti, String tableName);
+	/**
+	 * This method is called whenever there is a DELETE to a row on a local SQL table, wherein it updates the
+	 * MUSIC/Cassandra tables (both dirty bits and actual data) corresponding to the SQL write.  MUSIC propagates
+	 * it to the other replicas.
+	 * @param ti information about the table in the SQL layer
+	 * @param tableName This is the table that has changed.
+	 * @param oldRow This is a copy of the old row being deleted
+	 */
+	void deleteFromEntityTableInMusic(TableInfo ti,String tableName, JSONObject oldRow);
+	/**
+	 * This method is called whenever there is a SELECT on a local SQL table, wherein it first checks the local
+	 * dirty bits table to see if there are any rows in Cassandra whose value needs to be copied to the local SQL DB.
+	 * @param dbi the DB mixin to copy the dirty values into
+	 * @param tableName This is the table on which the select is being performed
+	 */
+	void readDirtyRowsAndUpdateDb(DBInterface dbi, String tableName);
+	/**
+	 * This method is called whenever there is an INSERT or UPDATE to a local SQL table, wherein it updates the
+	 * MUSIC/Cassandra tables (both dirty bits and actual data) corresponding to the SQL write.  Music propagates
+	 * it to the other replicas.
+	 * @param ti information about the table in the SQL layer
+	 * @param tableName This is the table that has changed.
+	 * @param changedRow This is information about the row that has changed
+	 */
+	void updateDirtyRowAndEntityTableInMusic(TableInfo ti, String tableName, JSONObject changedRow);
+
+	/**
+	 * NOTE(review): undocumented in the original; inferred from the signature — appears to convert the
+	 * row's JSON values into an Object array (presumably ordered per ti's columns).  Confirm with implementations.
+	 */
+	Object[] getObjects(TableInfo ti, String tableName, JSONObject row);
+	/**
+	 * Returns the primary key associated with the given row
+	 * @param ti info of the table that is associated with the row
+	 * @param tableName name of the table that contains the row
+	 * @param changedRow row that is going to contain the information associated with the primary key
+	 * @return primary key of the row
+	 */
+	String getMusicKeyFromRow(TableInfo ti, String tableName, JSONObject changedRow);
+
+	/**
+	 * Commits the corresponding REDO-log into MUSIC
+	 *
+	 * @param dbi the database interface use in the local SQL cache, where the music interface is being used
+	 * @param partition
+	 * @param transactionDigest digest of the transaction that is being committed into the Redo log in music. It has to be a HashMap, because it is required to be serializable
+	 * @param txId id associated with the log being send
+	 * @param progressKeeper data structure that is used to handle to detect failures, and know what to do
+	 * @throws MDBCServiceException
+	 */
+	void commitLog(DBInterface dbi, DatabasePartition partition, HashMap<Range,StagingTable> transactionDigest, String txId,TxCommitProgress progressKeeper) throws MDBCServiceException;
+
+	// NOTE(review): the methods below were undocumented; the one-line summaries are inferred from the
+	// method names and signatures — confirm against the Cassandra mixin implementation before relying on them.
+
+	/** Fetch the Transaction Information Table (TIT) row with the given id. */
+	TransactionInformationElement getTransactionInformation(String id);
+
+	/** Create a new Transaction Information Table row and return a reference to it. */
+	TitReference createTransactionInformationRow(TransactionInformationElement info);
+
+	/** Append a redo-log record reference to the given TIT row for this partition. */
+	void appendToRedoLog(TitReference titRow, DatabasePartition partition, RedoRecordId newRecord);
+
+	/** Store a serialized transaction digest in the named redo-record table under the given record id. */
+	void appendRedoRecord(String redoRecordTable, RedoRecordId newRecord, String transactionDigest);
+
+	/** Update which partition the given table belongs to. */
+	void updateTablePartition(String table, DatabasePartition partition);
+
+	/** Create a new partition covering the given tables and return a reference to its TIT row. */
+	TitReference createPartition(List<String> tables, int replicationFactor, String currentOwner);
+
+	/** Record a new owner for the given partition. */
+	void updatePartitionOwner(String partition, String owner);
+
+	/** Point the given partition at a (new) TIT row. */
+	void updateTitReference(String partition, TitReference tit);
+
+	/** Change the replication factor recorded for the given partition. */
+	void updatePartitionReplicationFactor(String partition, int replicationFactor);
+
+	/** Add a redo-history entry linking a new TIT row to the rows it supersedes. */
+	void addRedoHistory(DatabasePartition partition, TitReference newTit, List<TitReference> old);
+
+	/** @return the redo history recorded for the given partition */
+	List<RedoHistoryElement> getHistory(DatabasePartition partition);
+
+	/** @return stored metadata about the given partition */
+	List<PartitionInformation> getPartitionInformation(DatabasePartition partition);
+
+	/** @return partition metadata for a single table */
+	TablePartitionInformation getTablePartitionInformation(String table);
+
+	/** Retrieve and deserialize the transaction digest stored under the given redo-record id. */
+	HashMap<Range,StagingTable> getTransactionDigest(RedoRecordId id);
+
+
+}
+
diff --git a/src/main/java/com/att/research/mdbc/mixins/MusicMixin.java b/src/main/java/com/att/research/mdbc/mixins/MusicMixin.java
new file mode 100644
index 0000000..1fee59c
--- /dev/null
+++ b/src/main/java/com/att/research/mdbc/mixins/MusicMixin.java
@@ -0,0 +1,249 @@
+package com.att.research.mdbc.mixins;
+
+import java.io.IOException;
+import java.io.InputStream;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.Properties;
+import java.util.Set;
+
+import com.att.research.mdbc.LockId;
+import org.json.JSONObject;
+import org.onap.music.exceptions.MusicLockingException;
+
+import com.att.research.exceptions.MDBCServiceException;
+import com.att.research.mdbc.DatabasePartition;
+import com.att.research.mdbc.Range;
+import com.att.research.mdbc.TableInfo;
+import org.onap.music.main.MusicPureCassaCore;
+
+/**
+ * A largely no-op implementation of {@link MusicInterface}: almost every method does nothing or
+ * returns null.  The only working members manage the static "critical tables" list (loaded from
+ * mdbc.properties) and lock release via the MUSIC core.
+ * NOTE(review): presumably used when MDBC should run without an active MUSIC backend — confirm with callers.
+ */
+public class MusicMixin implements MusicInterface {
+
+	/** Locks currently held, keyed by connection/partition id.  NOTE(review): key semantics not visible here — confirm. */
+	public static Map<Integer, Set<String>> currentLockMap = new HashMap<>();
+	/** Tables listed under "critical.tables" in mdbc.properties; see {@link #loadProperties()}. */
+	public static List<String> criticalTables = new ArrayList<>();
+
+	@Override
+	public String getMixinName() {
+		// no-op stub
+		return null;
+	}
+
+	@Override
+	public String getMusicDefaultPrimaryKeyName() {
+		// no-op stub
+		return null;
+	}
+
+	@Override
+	public String generateUniqueKey() {
+		// no-op stub
+		return null;
+	}
+
+	@Override
+	public String getMusicKeyFromRow(TableInfo ti, String table, JSONObject dbRow) {
+		// no-op stub
+		return null;
+	}
+
+	@Override
+	public void close() {
+		// no-op stub
+	}
+
+	@Override
+	public void createKeyspace() {
+		// no-op stub
+	}
+
+	@Override
+	public void initializeMusicForTable(TableInfo ti, String tableName) {
+		// no-op stub
+	}
+
+	@Override
+	public void createDirtyRowTable(TableInfo ti, String tableName) {
+		// no-op stub
+	}
+
+	@Override
+	public void dropDirtyRowTable(String tableName) {
+		// no-op stub
+	}
+
+	@Override
+	public void clearMusicForTable(String tableName) {
+		// no-op stub
+	}
+
+	@Override
+	public void markDirtyRow(TableInfo ti, String tableName, JSONObject keys) {
+		// no-op stub
+	}
+
+	@Override
+	public void cleanDirtyRow(TableInfo ti, String tableName, JSONObject keys) {
+		// no-op stub
+	}
+
+	@Override
+	public List<Map<String, Object>> getDirtyRows(TableInfo ti, String tableName) {
+		// no-op stub
+		return null;
+	}
+
+	@Override
+	public void deleteFromEntityTableInMusic(TableInfo ti, String tableName, JSONObject oldRow) {
+		// no-op stub
+	}
+
+	@Override
+	public void readDirtyRowsAndUpdateDb(DBInterface dbi, String tableName) {
+		// no-op stub
+	}
+
+	@Override
+	public void updateDirtyRowAndEntityTableInMusic(TableInfo ti, String tableName, JSONObject changedRow) {
+		// delegate to the extended form with isCritical=false
+		updateDirtyRowAndEntityTableInMusic(tableName, changedRow, false);
+	}
+
+	/** Extended form taking a criticality flag; currently a no-op. */
+	public void updateDirtyRowAndEntityTableInMusic(String tableName, JSONObject changedRow, boolean isCritical) { }
+
+
+	/**
+	 * Load the "critical.tables" list from the mdbc.properties resource on the classpath.
+	 * If the resource or the property is missing, {@link #criticalTables} is left unchanged.
+	 */
+	public static void loadProperties() {
+		Properties prop = new Properties();
+		// try-with-resources guarantees the stream is closed.  Also guard against a missing
+		// resource: getResourceAsStream() returns null in that case, which previously caused
+		// a NullPointerException inside prop.load().
+		try (InputStream input = MusicMixin.class.getClassLoader().getResourceAsStream("mdbc.properties")) {
+			if (input == null) {
+				System.err.println("loadProperties: mdbc.properties not found on classpath");
+				return;
+			}
+			prop.load(input);
+			String crTable = prop.getProperty("critical.tables");
+			// property may be absent; previously this NPE'd on crTable.split()
+			if (crTable != null) {
+				criticalTables = Arrays.asList(crTable.split(","));
+			}
+		}
+		catch (Exception ex) {
+			ex.printStackTrace();
+		}
+	}
+
+	/**
+	 * Release (and destroy the references of) the given set of locks via the MUSIC core.
+	 * Failures are reported per-lock and do not stop the remaining releases.
+	 */
+	public static void releaseZKLocks(Set<LockId> lockIds) {
+		for(LockId lockId: lockIds) {
+			System.out.println("Releasing lock: "+lockId);
+			try {
+				MusicPureCassaCore.voluntaryReleaseLock(lockId.getFullyQualifiedLockKey(),lockId.getLockReference());
+				MusicPureCassaCore.destroyLockRef(lockId.getFullyQualifiedLockKey(),lockId.getLockReference());
+			} catch (MusicLockingException e) {
+				e.printStackTrace();
+			}
+		}
+	}
+
+	@Override
+	public String getMusicKeyFromRowWithoutPrimaryIndexes(TableInfo ti, String tableName, JSONObject changedRow) {
+		// no-op stub
+		return null;
+	}
+
+	@Override
+	public void initializeMdbcDataStructures() {
+		// no-op stub
+	}
+
+	@Override
+	public Object[] getObjects(TableInfo ti, String tableName, JSONObject row) {
+		// no-op stub
+		return null;
+	}
+
+	@Override
+	public void commitLog(DBInterface dbi, DatabasePartition partition, HashMap<Range,StagingTable> transactionDigest, String txId,TxCommitProgress progressKeeper)
+			throws MDBCServiceException{
+		// TODO Auto-generated method stub
+	}
+
+	@Override
+	public TablePartitionInformation getTablePartitionInformation(String table){
+		// no-op stub
+		return null;
+	}
+
+	@Override
+	public HashMap<Range,StagingTable> getTransactionDigest(RedoRecordId id){
+		// no-op stub
+		return null;
+	}
+
+	@Override
+	public TransactionInformationElement getTransactionInformation(String id){
+		// no-op stub
+		return null;
+	}
+
+	@Override
+	public void updateTitReference(String partition, TitReference tit){}
+
+	@Override
+	public List<RedoHistoryElement> getHistory(DatabasePartition partition){
+		// no-op stub
+		return null;
+	}
+
+	@Override
+	public void addRedoHistory(DatabasePartition partition, TitReference newTit, List<TitReference> old){
+	}
+
+	@Override
+	public TitReference createPartition(List<String> tables, int replicationFactor, String currentOwner){
+		// no-op stub
+		return null;
+	}
+
+	@Override
+	public List<PartitionInformation> getPartitionInformation(DatabasePartition partition){
+		// no-op stub
+		return null;
+	}
+
+	@Override
+	public TitReference createTransactionInformationRow(TransactionInformationElement info){
+		// no-op stub
+		return null;
+	}
+
+	@Override
+	public void appendToRedoLog(TitReference titRow, DatabasePartition partition, RedoRecordId newRecord){
+	}
+
+	@Override
+	public void appendRedoRecord(String redoRecordTable, RedoRecordId newRecord, String transactionDigest){
+	}
+
+	@Override
+	public void updateTablePartition(String table, DatabasePartition partition){}
+
+	@Override
+	public void updatePartitionOwner(String partition, String owner){}
+
+	@Override
+	public void updatePartitionReplicationFactor(String partition, int replicationFactor){}
+}
diff --git a/src/main/java/com/att/research/mdbc/mixins/MySQLMixin.java b/src/main/java/com/att/research/mdbc/mixins/MySQLMixin.java
new file mode 100755
index 0000000..a836a39
--- /dev/null
+++ b/src/main/java/com/att/research/mdbc/mixins/MySQLMixin.java
@@ -0,0 +1,784 @@
+package com.att.research.mdbc.mixins;
+
+import java.sql.Connection;
+import java.sql.PreparedStatement;
+import java.sql.ResultSet;
+import java.sql.SQLException;
+import java.sql.Statement;
+import java.sql.Types;
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.Properties;
+import java.util.Set;
+import java.util.TreeSet;
+
+import org.json.JSONObject;
+import org.json.JSONTokener;
+
+import com.att.research.logging.EELFLoggerDelegate;
+import com.att.research.mdbc.MusicSqlManager;
+import com.att.research.mdbc.Range;
+import com.att.research.mdbc.TableInfo;
+
+import net.sf.jsqlparser.JSQLParserException;
+import net.sf.jsqlparser.parser.CCJSqlParserUtil;
+import net.sf.jsqlparser.statement.delete.Delete;
+import net.sf.jsqlparser.statement.insert.Insert;
+import net.sf.jsqlparser.statement.update.Update;
+
+/**
+ * This class provides the methods that MDBC needs in order to mirror data to/from a
+ * <a href="https://dev.mysql.com/">MySQL</a> or <a href="http://mariadb.org/">MariaDB</a> database instance.
+ * This class uses the <code>JSON_OBJECT()</code> database function, which means it requires the following
+ * minimum versions of either database:
+ * <table summary="">
+ * <tr><th>DATABASE</th><th>VERSION</th></tr>
+ * <tr><td>MySQL</td><td>5.7.8</td></tr>
+ * <tr><td>MariaDB</td><td>10.2.3 (Note: 10.2.3 is currently (July 2017) a <i>beta</i> release)</td></tr>
+ * </table>
+ *
+ * @author Robert P. Eby
+ */
+public class MySQLMixin implements DBInterface {
+	private EELFLoggerDelegate logger = EELFLoggerDelegate.getLogger(MySQLMixin.class);
+
+	public static final String MIXIN_NAME = "mysql";
+	/** Name of the server-side transaction-log ("dirty") table that the generated triggers write into. */
+	public static final String TRANS_TBL = "MDBC_TRANSLOG";
+	private static final String CREATE_TBL_SQL =
+		"CREATE TABLE IF NOT EXISTS "+TRANS_TBL+
+		" (IX INT AUTO_INCREMENT, OP CHAR(1), TABLENAME VARCHAR(255), NEWROWDATA VARCHAR(1024), KEYDATA VARCHAR(1024), CONNECTION_ID INT,PRIMARY KEY (IX))";
+
+	private final MusicSqlManager msm;		// coordinating manager (null for the reflective no-arg instance)
+	private final int connId;			// per-connection id, from MySQL's CONNECTION_ID()
+	private final String dbName;			// name of the database this connection uses
+	private final Connection dbConnection;		// the underlying JDBC connection
+	private final Map<String, TableInfo> tables;	// cache of TableInfo objects, keyed by table name
+	private boolean server_tbl_created = false;	// whether the MDBC_TRANSLOG table has been created yet
+
+	/**
+	 * No-argument constructor, required so {@link MixinFactory} can instantiate the class reflectively
+	 * to query its mixin name.  The resulting instance is not usable as a database interface.
+	 */
+	public MySQLMixin() {
+		this.msm = null;
+		this.connId = 0;
+		this.dbName = null;
+		this.dbConnection = null;
+		this.tables = null;
+	}
+	/**
+	 * Construct a MySQLMixin bound to an open JDBC connection.
+	 * @param msm the MusicSqlManager that owns this mixin
+	 * @param url the JDBC URL (not used directly here)
+	 * @param conn the open JDBC connection to the MySQL/MariaDB database
+	 * @param info the connection Properties (not used directly here)
+	 */
+	public MySQLMixin(MusicSqlManager msm, String url, Connection conn, Properties info) {
+		this.msm = msm;
+		this.connId = generateConnID(conn);
+		this.dbName = getDBName(conn);
+		this.dbConnection = conn;
+		this.tables = new HashMap<String, TableInfo>();
+	}
+	/**
+	 * Generate a unique connId for this connection to the DB, using the server-side
+	 * CONNECTION_ID() function; falls back to a time-derived value if the query fails.
+	 */
+	private int generateConnID(Connection conn) {
+		int rv = (int) System.currentTimeMillis();	// random-ish fallback
+		// try-with-resources: previously the Statement/ResultSet leaked if the query threw
+		try (Statement stmt = conn.createStatement();
+		     ResultSet rs = stmt.executeQuery("SELECT CONNECTION_ID() AS IX")) {
+			if (rs.next()) {
+				rv = rs.getInt("IX");
+			}
+		} catch (SQLException e) {
+			logger.error(EELFLoggerDelegate.errorLogger,"generateConnID: problem generating a connection ID!");
+		}
+		return rv;
+	}
+
+	/**
+	 * Get the name of this DBInterface mixin object.
+	 * @return the name
+	 */
+	@Override
+	public String getMixinName() {
+		return MIXIN_NAME;
+	}
+
+	/** Release any resources held by this mixin; currently a no-op. */
+	@Override
+	public void close() {
+		// nothing yet
+	}
+
+	/**
+	 * Determines the db name associated with the connection
+	 * This is the private/internal method that actually determines the name
+	 * @param conn the JDBC connection to interrogate
+	 * @return the database name reported by DATABASE(), or "mdbc" if it cannot be determined
+	 */
+	private String getDBName(Connection conn) {
+		String dbname = "mdbc"; //default name
+		// try-with-resources: previously the Statement/ResultSet leaked if the query threw
+		try (Statement stmt = conn.createStatement();
+		     ResultSet rs = stmt.executeQuery("SELECT DATABASE() AS DB")) {
+			if (rs.next()) {
+				dbname = rs.getString("DB");
+			}
+		} catch (SQLException e) {
+			logger.error(EELFLoggerDelegate.errorLogger, "getDBName: problem getting database name from mysql");
+		}
+		return dbname;
+	}
+
+	/** @return the name of the connected mysql database, determined at construction time */
+	public String getDatabaseName() {
+		return this.dbName;
+	}
+ /**
+ * Get a set of the table names in the database.
+ * @return the set
+ */
+ @Override
+ public Set<String> getSQLTableSet() {
+ Set<String> set = new TreeSet<String>();
+ String sql = "SELECT TABLE_NAME FROM INFORMATION_SCHEMA.TABLES WHERE TABLE_SCHEMA=DATABASE() AND TABLE_TYPE='BASE TABLE'";
+ try {
+ Statement stmt = dbConnection.createStatement();
+ ResultSet rs = stmt.executeQuery(sql);
+ while (rs.next()) {
+ String s = rs.getString("TABLE_NAME");
+ set.add(s);
+ }
+ stmt.close();
+ } catch (SQLException e) {
+ logger.error(EELFLoggerDelegate.errorLogger,"getSQLTableSet: "+e);
+ }
+ logger.debug(EELFLoggerDelegate.applicationLogger,"getSQLTableSet returning: "+ set);
+ return set;
+ }
+/*
+mysql> describe tables;
++-----------------+---------------------+------+-----+---------+-------+
+| Field | Type | Null | Key | Default | Extra |
++-----------------+---------------------+------+-----+---------+-------+
+| TABLE_CATALOG | varchar(512) | NO | | | |
+| TABLE_SCHEMA | varchar(64) | NO | | | |
+| TABLE_NAME | varchar(64) | NO | | | |
+| TABLE_TYPE | varchar(64) | NO | | | |
+| ENGINE | varchar(64) | YES | | NULL | |
+| VERSION | bigint(21) unsigned | YES | | NULL | |
+| ROW_FORMAT | varchar(10) | YES | | NULL | |
+| TABLE_ROWS | bigint(21) unsigned | YES | | NULL | |
+| AVG_ROW_LENGTH | bigint(21) unsigned | YES | | NULL | |
+| DATA_LENGTH | bigint(21) unsigned | YES | | NULL | |
+| MAX_DATA_LENGTH | bigint(21) unsigned | YES | | NULL | |
+| INDEX_LENGTH | bigint(21) unsigned | YES | | NULL | |
+| DATA_FREE | bigint(21) unsigned | YES | | NULL | |
+| AUTO_INCREMENT | bigint(21) unsigned | YES | | NULL | |
+| CREATE_TIME | datetime | YES | | NULL | |
+| UPDATE_TIME | datetime | YES | | NULL | |
+| CHECK_TIME | datetime | YES | | NULL | |
+| TABLE_COLLATION | varchar(32) | YES | | NULL | |
+| CHECKSUM | bigint(21) unsigned | YES | | NULL | |
+| CREATE_OPTIONS | varchar(255) | YES | | NULL | |
+| TABLE_COMMENT | varchar(2048) | NO | | | |
++-----------------+---------------------+------+-----+---------+-------+
+ */
+ /**
+ * Return a TableInfo object for the specified table.
+ * This method first looks in a cache of previously constructed TableInfo objects for the table.
+ * If not found, it queries the INFORMATION_SCHEMA.COLUMNS table to obtain the column names, types, and indexes of the table.
+ * It creates a new TableInfo object with the results.
+ * @param tableName the table to look up
+ * @return a TableInfo object containing the info we need, or null if the table does not exist
+ */
+ @Override
+ public TableInfo getTableInfo(String tableName) {
+ TableInfo ti = tables.get(tableName);
+ if (ti == null) {
+ try {
+ String tbl = tableName;//.toUpperCase();
+ String sql = "SELECT COLUMN_NAME, DATA_TYPE, COLUMN_KEY FROM INFORMATION_SCHEMA.COLUMNS WHERE TABLE_SCHEMA=DATABASE() AND TABLE_NAME='"+tbl+"'";
+ ResultSet rs = executeSQLRead(sql);
+ if (rs != null) {
+ ti = new TableInfo();
+ while (rs.next()) {
+ String name = rs.getString("COLUMN_NAME");
+ String type = rs.getString("DATA_TYPE");
+ String ckey = rs.getString("COLUMN_KEY");
+ ti.columns.add(name);
+ ti.coltype.add(mapDatatypeNameToType(type));
+ ti.iskey.add(ckey != null && !ckey.equals(""));
+ }
+ rs.getStatement().close();
+ } else {
+ logger.error(EELFLoggerDelegate.errorLogger,"Cannot retrieve table info for table "+tableName+" from MySQL.");
+ }
+ } catch (SQLException e) {
+ logger.error(EELFLoggerDelegate.errorLogger,"Cannot retrieve table info for table "+tableName+" from MySQL: "+e);
+ return null;
+ }
+ tables.put(tableName, ti);
+ }
+ return ti;
+ }
+ // Map MySQL data type names to the java.sql.Types equivalent
+ private int mapDatatypeNameToType(String nm) {
+ switch (nm) {
+ case "tinyint": return Types.TINYINT;
+ case "smallint": return Types.SMALLINT;
+ case "mediumint":
+ case "int": return Types.INTEGER;
+ case "bigint": return Types.BIGINT;
+ case "decimal":
+ case "numeric": return Types.DECIMAL;
+ case "float": return Types.FLOAT;
+ case "double": return Types.DOUBLE;
+ case "date":
+ case "datetime": return Types.DATE;
+ case "time": return Types.TIME;
+ case "timestamp": return Types.TIMESTAMP;
+ case "char": return Types.CHAR;
+ case "text":
+ case "varchar": return Types.VARCHAR;
+ case "mediumblob":
+ case "blob": return Types.VARCHAR;
+ default:
+ logger.error(EELFLoggerDelegate.errorLogger,"unrecognized and/or unsupported data type "+nm);
+ return Types.VARCHAR;
+ }
+ }
+	/**
+	 * Install the INSERT/UPDATE/DELETE change-capture triggers for a table,
+	 * lazily creating the server-side transaction table first if needed.
+	 * @param tableName the table to instrument; the transaction table itself is skipped
+	 */
+	@Override
+	public void createSQLTriggers(String tableName) {
+		// Don't create triggers for the table the triggers write into!!!
+		if (tableName.equals(TRANS_TBL))
+			return;
+		try {
+			if (!server_tbl_created) {
+				// try-with-resources guarantees the Statement is closed even when
+				// execute() throws (the original leaked it on the exception path).
+				try (Statement stmt = dbConnection.createStatement()) {
+					stmt.execute(CREATE_TBL_SQL);
+					logger.info(EELFLoggerDelegate.applicationLogger,"createSQLTriggers: Server side dirty table created.");
+					server_tbl_created = true;
+				} catch (SQLException e) {
+					logger.error(EELFLoggerDelegate.errorLogger,"createSQLTriggers: problem creating the "+TRANS_TBL+" table!");
+				}
+			}
+
+			// Give the triggers a way to find this MSM
+			for (String name : getTriggerNames(tableName)) {
+				logger.info(EELFLoggerDelegate.applicationLogger,"ADD trigger "+name+" to msm_map");
+				//\TODO fix this is an error
+				//msm.register(name);
+			}
+			// No SELECT trigger
+			executeSQLWrite(generateTrigger(tableName, "INSERT"));
+			executeSQLWrite(generateTrigger(tableName, "UPDATE"));
+			executeSQLWrite(generateTrigger(tableName, "DELETE"));
+		} catch (SQLException e) {
+			// Constant-first equals avoids an NPE when getMessage() returns null.
+			if ("Trigger already exists".equals(e.getMessage())) {
+				//only warn if trigger already exists
+				logger.warn(EELFLoggerDelegate.applicationLogger, "createSQLTriggers" + e);
+			} else {
+				logger.error(EELFLoggerDelegate.errorLogger,"createSQLTriggers: "+e);
+			}
+		}
+	}
+/*
+CREATE TRIGGER `triggername` BEFORE UPDATE ON `table`
+FOR EACH ROW BEGIN
+INSERT INTO `log_table` ( `field1`, `field2`, ...) VALUES ( NEW.`field1`, NEW.`field2`, ...) ;
+END;
+
+OLD.field refers to the old value
+NEW.field refers to the new value
+*/
+	/**
+	 * Build the CREATE TRIGGER statement for one DML operation on a table.
+	 * The generated trigger inserts a row into the transaction table (TRANS_TBL)
+	 * capturing the row data and key data as JSON, tagged with CONNECTION_ID().
+	 * @param tableName the table the trigger fires on
+	 * @param op one of "INSERT", "UPDATE" or "DELETE"
+	 * @return the CREATE TRIGGER SQL text
+	 */
+	private String generateTrigger(String tableName, String op) {
+		boolean isdelete = op.equals("DELETE");
+		boolean isinsert = op.equals("INSERT");
+		TableInfo ti = getTableInfo(tableName);
+		StringBuilder newJson = new StringBuilder("JSON_OBJECT(");	// JSON_OBJECT(key, val, key, val) page 1766
+		StringBuilder keyJson = new StringBuilder("JSON_OBJECT(");
+		String pfx = "";
+		String keypfx = "";
+		for (String col : ti.columns) {
+			// Row snapshot: OLD values on DELETE (the row is gone), NEW values otherwise.
+			newJson.append(pfx)
+				.append("'").append(col).append("', ")
+				.append(isdelete ? "OLD." : "NEW.")
+				.append(col);
+			// Key snapshot: key columns only; if the table has no key, use every column.
+			if (ti.iskey(col) || !ti.hasKey()) {
+				keyJson.append(keypfx)
+					.append("'").append(col).append("', ")
+					.append(isinsert ? "NEW." : "OLD.")
+					.append(col);
+				keypfx = ", ";
+			}
+			pfx = ", ";
+		}
+		newJson.append(")");
+		keyJson.append(")");
+		//\TODO check if using mysql driver, so instead check the exception
+		StringBuilder sb = new StringBuilder()
+			.append("CREATE TRIGGER ")		// IF NOT EXISTS not supported by MySQL!
+			.append(String.format("%s_%s", op.substring(0, 1), tableName))
+			.append(" AFTER ")
+			.append(op)
+			.append(" ON ")
+			.append(tableName)
+			.append(" FOR EACH ROW INSERT INTO ")
+			.append(TRANS_TBL)
+			.append(" (TABLENAME, OP, NEWROWDATA, KEYDATA, CONNECTION_ID) VALUES('")
+			.append(tableName)
+			.append("', ")
+			.append(isdelete ? "'D'" : (op.equals("INSERT") ? "'I'" : "'U'"))
+			.append(", ")
+			.append(newJson.toString())
+			.append(", ")
+			.append(keyJson.toString())
+			.append(", ")
+			.append("CONNECTION_ID()")
+			.append(")");
+		return sb.toString();
+	}
+ private String[] getTriggerNames(String tableName) {
+ return new String[] {
+ "I_" + tableName, // INSERT trigger
+ "U_" + tableName, // UPDATE trigger
+ "D_" + tableName // DELETE trigger
+ };
+ }
+
+ @Override
+ public void dropSQLTriggers(String tableName) {
+ try {
+ for (String name : getTriggerNames(tableName)) {
+ logger.info(EELFLoggerDelegate.applicationLogger,"REMOVE trigger "+name+" from msmmap");
+ executeSQLWrite("DROP TRIGGER IF EXISTS " +name);
+ //\TODO Fix this is an error
+ //msm.unregister(name);
+ }
+ } catch (SQLException e) {
+ logger.error(EELFLoggerDelegate.errorLogger,"dropSQLTriggers: "+e);
+ }
+ }
+
+	/**
+	 * Upsert a row into the SQL database: UPDATE when a row with the same key
+	 * columns already exists, otherwise INSERT.
+	 * @param tableName the table to write
+	 * @param map column name -> value for the row
+	 */
+	@Override
+	public void insertRowIntoSqlDb(String tableName, Map<String, Object> map) {
+		TableInfo ti = getTableInfo(tableName);
+		String sql = "";
+		if (rowExists(tableName, ti, map)) {
+			// Update - Construct the what and where strings for the DB write
+			StringBuilder what  = new StringBuilder();
+			StringBuilder where = new StringBuilder();
+			String pfx = "";
+			String pfx2 = "";
+			for (int i = 0; i < ti.columns.size(); i++) {
+				String col = ti.columns.get(i);
+				String val = Utils.getStringValue(map.get(col));
+				// Key columns select the row; the rest are the values being set.
+				if (ti.iskey.get(i)) {
+					where.append(pfx).append(col).append("=").append(val);
+					pfx = " AND ";
+				} else {
+					what.append(pfx2).append(col).append("=").append(val);
+					pfx2 = ", ";
+				}
+			}
+			sql = String.format("UPDATE %s SET %s WHERE %s", tableName, what.toString(), where.toString());
+		} else {
+			// Construct the value string and column name string for the DB write
+			StringBuilder fields = new StringBuilder();
+			StringBuilder values = new StringBuilder();
+			String pfx = "";
+			for (String col : ti.columns) {
+				fields.append(pfx).append(col);
+				values.append(pfx).append(Utils.getStringValue(map.get(col)));
+				pfx = ", ";
+			}
+			sql = String.format("INSERT INTO %s (%s) VALUES (%s);", tableName, fields.toString(), values.toString());
+		}
+		try {
+			executeSQLWrite(sql);
+		} catch (SQLException e1) {
+			logger.error(EELFLoggerDelegate.errorLogger,"executeSQLWrite: "+e1);
+		}
+		// TODO - remove any entries from MDBC_TRANSLOG corresponding to this update
+		//  SELECT IX, OP, KEYDATA FROM MDBC_TRANS_TBL WHERE CONNID = "+connId AND TABLENAME = tblname
+	}
+
+	/**
+	 * Check whether a row with the same key-column values already exists.
+	 * @param tableName the table to probe
+	 * @param ti column metadata for the table (drives which columns are keys)
+	 * @param map column name -> value for the candidate row
+	 * @return true if a matching row exists; false on absence or on any SQL failure
+	 */
+	private boolean rowExists(String tableName, TableInfo ti, Map<String, Object> map) {
+		// Build "key1=val1 AND key2=val2 ..." over the key columns only.
+		StringBuilder where = new StringBuilder();
+		String pfx = "";
+		for (int i = 0; i < ti.columns.size(); i++) {
+			if (ti.iskey.get(i)) {
+				String col = ti.columns.get(i);
+				String val = Utils.getStringValue(map.get(col));
+				where.append(pfx).append(col).append("=").append(val);
+				pfx = " AND ";
+			}
+		}
+		String sql = String.format("SELECT * FROM %s WHERE %s", tableName, where.toString());
+		ResultSet rs = executeSQLRead(sql);
+		if (rs == null) {
+			// executeSQLRead already logged the failure; treat as "row absent".
+			// (The original dereferenced rs unconditionally and could NPE here.)
+			return false;
+		}
+		try {
+			return rs.next();
+		} catch (SQLException e) {
+			return false;
+		} finally {
+			// Close the underlying Statement (closes the ResultSet too), per the
+			// executeSQLRead contract; closing only the ResultSet leaked the Statement.
+			try {
+				rs.getStatement().close();
+			} catch (SQLException e) {
+				// best-effort cleanup
+			}
+		}
+	}
+
+
+ @Override
+ public void deleteRowFromSqlDb(String tableName, Map<String, Object> map) {
+ TableInfo ti = getTableInfo(tableName);
+ StringBuilder where = new StringBuilder();
+ String pfx = "";
+ for (int i = 0; i < ti.columns.size(); i++) {
+ if (ti.iskey.get(i)) {
+ String col = ti.columns.get(i);
+ Object val = map.get(col);
+ where.append(pfx).append(col).append("=").append(Utils.getStringValue(val));
+ pfx = " AND ";
+ }
+ }
+ try {
+ String sql = String.format("DELETE FROM %s WHERE %s", tableName, where.toString());
+ executeSQLWrite(sql);
+ } catch (SQLException e) {
+ e.printStackTrace();
+ }
+ }
+
+ /**
+ * This method executes a read query in the SQL database. Methods that call this method should be sure
+ * to call resultset.getStatement().close() when done in order to free up resources.
+ * @param sql the query to run
+ * @return a ResultSet containing the rows returned from the query
+ */
+ @Override
+ public ResultSet executeSQLRead(String sql) {
+ logger.debug(EELFLoggerDelegate.applicationLogger,"executeSQLRead");
+ logger.debug("Executing SQL read:"+ sql);
+ ResultSet rs = null;
+ try {
+ Statement stmt = dbConnection.createStatement();
+ rs = stmt.executeQuery(sql);
+ } catch (SQLException e) {
+ logger.error(EELFLoggerDelegate.errorLogger,"executeSQLRead"+e);
+ }
+ return rs;
+ }
+
+ /**
+ * This method executes a write query in the sql database.
+ * @param sql the SQL to be sent to MySQL
+ * @throws SQLException if an underlying JDBC method throws an exception
+ */
+ protected void executeSQLWrite(String sql) throws SQLException {
+ logger.debug(EELFLoggerDelegate.applicationLogger, "Executing SQL write:"+ sql);
+
+ Statement stmt = dbConnection.createStatement();
+ stmt.execute(sql);
+ stmt.close();
+ }
+
+ /**
+ * Code to be run within the DB driver before a SQL statement is executed. This is where tables
+ * can be synchronized before a SELECT, for those databases that do not support SELECT triggers.
+ * @param sql the SQL statement that is about to be executed
+ * @return list of keys that will be updated, if they can't be determined afterwards (i.e. sql table doesn't have primary key)
+ */
+ @Override
+ public void preStatementHook(final String sql) {
+ if (sql == null) {
+ return;
+ }
+ String cmd = sql.trim().toLowerCase();
+ if (cmd.startsWith("select")) {
+ String[] parts = sql.trim().split(" ");
+ Set<String> set = getSQLTableSet();
+ for (String part : parts) {
+ if (set.contains(part.toUpperCase())) {
+ // Found a candidate table name in the SELECT SQL -- update this table
+ //msm.readDirtyRowsAndUpdateDb(part);
+ }
+ }
+ }
+ }
+
+ /**
+ * Code to be run within the DB driver after a SQL statement has been executed. This is where remote
+ * statement actions can be copied back to Cassandra/MUSIC.
+ * @param sql the SQL statement that was executed
+ */
+ @Override
+ public void postStatementHook(final String sql,Map<Range,StagingTable> transactionDigest) {
+ if (sql != null) {
+ String[] parts = sql.trim().split(" ");
+ String cmd = parts[0].toLowerCase();
+ if ("delete".equals(cmd) || "insert".equals(cmd) || "update".equals(cmd)) {
+ try {
+ this.updateStagingTable(transactionDigest);
+ } catch (NoSuchFieldException e) {
+ // TODO Auto-generated catch block
+ e.printStackTrace();
+ }
+ }
+ }
+ }
+
+	/**
+	 * Translate the one-letter operation code stored in the transaction table
+	 * into the corresponding OperationType value.
+	 * @param operation one of "i", "d", "u", "s" (case-insensitive)
+	 * @throws NoSuchFieldException when the code is not recognized
+	 */
+	private OperationType toOpEnum(String operation) throws NoSuchFieldException {
+		String code = operation.toLowerCase();
+		if ("i".equals(code)) {
+			return OperationType.INSERT;
+		}
+		if ("d".equals(code)) {
+			return OperationType.DELETE;
+		}
+		if ("u".equals(code)) {
+			return OperationType.UPDATE;
+		}
+		if ("s".equals(code)) {
+			return OperationType.SELECT;
+		}
+		logger.error(EELFLoggerDelegate.errorLogger,"Invalid operation selected: ["+operation+"]");
+		throw new NoSuchFieldException("Invalid operation enum");
+	}
+ /**
+ * Copy data that is in transaction table into music interface
+ * @param transactionDigests
+ * @throws NoSuchFieldException
+ */
+ private void updateStagingTable(Map<Range,StagingTable> transactionDigests) throws NoSuchFieldException {
+ // copy from DB.MDBC_TRANSLOG where connid == myconnid
+ // then delete from MDBC_TRANSLOG
+ String sql2 = "SELECT IX, TABLENAME, OP, KEYDATA, NEWROWDATA FROM "+TRANS_TBL +" WHERE CONNECTION_ID = " + this.connId;
+ try {
+ ResultSet rs = executeSQLRead(sql2);
+ Set<Integer> rows = new TreeSet<Integer>();
+ while (rs.next()) {
+ int ix = rs.getInt("IX");
+ String op = rs.getString("OP");
+ OperationType opType = toOpEnum(op);
+ String tbl = rs.getString("TABLENAME");
+ String keydataStr = rs.getString("KEYDATA");
+ String newRowStr = rs.getString("NEWROWDATA");
+ JSONObject newRow = new JSONObject(new JSONTokener(newRowStr));
+ String musicKey;
+ TableInfo ti = getTableInfo(tbl);
+ if (!ti.hasKey()) {
+ //create music key
+ //\TODO fix, this is completely broken
+ //if (op.startsWith("I")) {
+ //\TODO Improve the generation of primary key, it should be generated using
+ // the actual columns, otherwise performance when doing range queries are going
+ // to be even worse (see the else bracket down)
+ //
+ musicKey = msm.generateUniqueKey();
+ /*} else {
+ //get key from data
+ musicKey = msm.getMusicKeyFromRowWithoutPrimaryIndexes(tbl,newRow);
+ }*/
+ newRow.put(msm.getMusicDefaultPrimaryKeyName(), musicKey);
+ }
+ else {
+ //Use the keys
+ musicKey = msm.getMusicKeyFromRow(tbl, newRow);
+ if(musicKey.isEmpty()) {
+ logger.error(EELFLoggerDelegate.errorLogger,"Primary key is invalid: ["+tbl+","+op+"]");
+ throw new NoSuchFieldException("Invalid operation enum");
+ }
+ }
+ Range range = new Range(tbl);
+ if(!transactionDigests.containsKey(range)) {
+ transactionDigests.put(range, new StagingTable());
+ }
+ transactionDigests.get(range).addOperation(musicKey, opType, keydataStr, newRow.toString());
+ rows.add(ix);
+ }
+ rs.getStatement().close();
+ if (rows.size() > 0) {
+ sql2 = "DELETE FROM "+TRANS_TBL+" WHERE IX = ?";
+ PreparedStatement ps = dbConnection.prepareStatement(sql2);
+ logger.debug("Executing: "+sql2);
+ logger.debug(" For ix = "+rows);
+ for (int ix : rows) {
+ ps.setInt(1, ix);
+ ps.execute();
+ }
+ ps.close();
+ }
+ } catch (SQLException e) {
+ logger.warn("Exception in postStatementHook: "+e);
+ e.printStackTrace();
+ }
+ }
+
+
+
+ /**
+ * Update music with data from MySQL table
+ *
+ * @param tableName - name of table to update in music
+ */
+ @Override
+ public void synchronizeData(String tableName) {
+ ResultSet rs = null;
+ TableInfo ti = getTableInfo(tableName);
+ String query = "SELECT * FROM "+tableName;
+
+ try {
+ rs = executeSQLRead(query);
+ if(rs==null) return;
+ while(rs.next()) {
+
+ JSONObject jo = new JSONObject();
+ if (!getTableInfo(tableName).hasKey()) {
+ String musicKey = msm.generateUniqueKey();
+ jo.put(msm.getMusicDefaultPrimaryKeyName(), musicKey);
+ }
+
+ for (String col : ti.columns) {
+ jo.put(col, rs.getString(col));
+ }
+
+ @SuppressWarnings("unused")
+ Object[] row = Utils.jsonToRow(ti,tableName, jo,msm.getMusicDefaultPrimaryKeyName());
+ //\FIXME this is wrong now, update of the dirty row and entity is now handled by the archival process
+ //msm.updateDirtyRowAndEntityTableInMusic(ti,tableName, jo);
+ }
+ } catch (Exception e) {
+ logger.error(EELFLoggerDelegate.errorLogger, "synchronizing data " + tableName +
+ " -> " + e.getMessage());
+ }
+ finally {
+ try {
+ rs.close();
+ } catch (SQLException e) {
+ //continue
+ }
+ }
+
+ }
+
+ /**
+ * Return a list of "reserved" names, that should not be used by MySQL client/MUSIC
+ * These are reserved for mdbc
+ */
+ @Override
+ public List<String> getReservedTblNames() {
+ ArrayList<String> rsvdTables = new ArrayList<String>();
+ rsvdTables.add(TRANS_TBL);
+ //Add others here as necessary
+ return rsvdTables;
+ }
+	@Override
+	public String getPrimaryKey(String sql, String tableName) {
+		// Not implemented for this mixin; callers must handle a null return.
+		return null;
+	}
+
+	/**
+	 * Parse a DML statement and dispatch to the per-operation key lookup.
+	 * @param sql the INSERT/UPDATE/DELETE statement to analyze
+	 * @return the matching MUSIC keys, or an empty list on parse failure
+	 * @deprecated superseded by the staging-table digest mechanism
+	 */
+	@SuppressWarnings("unused")
+	@Deprecated
+	private ArrayList<String> getMusicKey(String sql) {
+		try {
+			net.sf.jsqlparser.statement.Statement stmt = CCJSqlParserUtil.parse(sql);
+			if (stmt instanceof Insert) {
+				Insert s = (Insert) stmt;
+				String tbl = s.getTable().getName();
+				return getMusicKey(tbl, "INSERT", sql);
+			} else if (stmt instanceof Update){
+				Update u = (Update) stmt;
+				String tbl = u.getTables().get(0).getName();
+				return getMusicKey(tbl, "UPDATE", sql);
+			} else if (stmt instanceof Delete) {
+				Delete d = (Delete) stmt;
+				//TODO: IMPLEMENT
+				String tbl = d.getTable().getName();
+				return getMusicKey(tbl, "DELETE", sql);
+			} else {
+				System.err.println("Not recognized sql type");
+			}
+
+		} catch (JSQLParserException e) {
+
+			e.printStackTrace();
+		}
+		//Something went wrong here
+		return new ArrayList<String>();
+	}
+
+ /**
+ * Returns all keys that matches the current sql statement, and not in already updated keys.
+ *
+ * @param tbl
+ * @param cmd
+ * @param sql
+ */
+ @Deprecated
+ private ArrayList<String> getMusicKey(String tbl, String cmd, String sql) {
+ ArrayList<String> musicKeys = new ArrayList<String>();
+ /*
+ if (cmd.equalsIgnoreCase("insert")) {
+ //create key, return key
+ musicKeys.add(msm.generatePrimaryKey());
+ } else if (cmd.equalsIgnoreCase("update") || cmd.equalsIgnoreCase("delete")) {
+ try {
+ net.sf.jsqlparser.statement.Statement stmt = CCJSqlParserUtil.parse(sql);
+ String where;
+ if (stmt instanceof Update) {
+ where = ((Update) stmt).getWhere().toString();
+ } else if (stmt instanceof Delete) {
+ where = ((Delete) stmt).getWhere().toString();
+ } else {
+ System.err.println("Unknown type: " +stmt.getClass());
+ where = "";
+ }
+ ResultSet rs = executeSQLRead("SELECT * FROM " + tbl + " WHERE " + where);
+ musicKeys = msm.getMusicKeysWhere(tbl, Utils.parseResults(getTableInfo(tbl), rs));
+ } catch (JSQLParserException e) {
+
+ e.printStackTrace();
+ } catch (SQLException e) {
+ //Not a valid sql query
+ e.printStackTrace();
+ }
+ }
+ */
+ return musicKeys;
+ }
+
+
+	/**
+	 * Older upsert strategy: attempt the INSERT first and fall back to an
+	 * UPDATE when the INSERT fails (assumed to mean the row already exists).
+	 * @param tableName the table to write
+	 * @param map column name -> value for the row
+	 * @deprecated replaced by {@code insertRowIntoSqlDb}, which probes for the row first
+	 */
+	@Deprecated
+	public void insertRowIntoSqlDbOLD(String tableName, Map<String, Object> map) {
+		// First construct the value string and column name string for the db write
+		TableInfo ti = getTableInfo(tableName);
+		StringBuilder fields = new StringBuilder();
+		StringBuilder values = new StringBuilder();
+		String pfx = "";
+		for (String col : ti.columns) {
+			fields.append(pfx).append(col);
+			values.append(pfx).append(Utils.getStringValue(map.get(col)));
+			pfx = ", ";
+		}
+
+		try {
+			String sql = String.format("INSERT INTO %s (%s) VALUES (%s);", tableName, fields.toString(), values.toString());
+			executeSQLWrite(sql);
+		} catch (SQLException e) {
+			// NOTE(review): any SQLException (not only duplicate-key) triggers
+			// the update fallback -- confirm that is acceptable.
+			logger.error(EELFLoggerDelegate.errorLogger,"Insert failed because row exists, do an update");
+			StringBuilder where = new StringBuilder();
+			pfx = "";
+			String pfx2 = "";
+			fields.setLength(0);
+			for (int i = 0; i < ti.columns.size(); i++) {
+				String col = ti.columns.get(i);
+				String val = Utils.getStringValue(map.get(col));
+				if (ti.iskey.get(i)) {
+					where.append(pfx).append(col).append("=").append(val);
+					pfx = " AND ";
+				} else {
+					fields.append(pfx2).append(col).append("=").append(val);
+					pfx2 = ", ";
+				}
+			}
+			String sql = String.format("UPDATE %s SET %s WHERE %s", tableName, fields.toString(), where.toString());
+			try {
+				executeSQLWrite(sql);
+			} catch (SQLException e1) {
+				logger.error(EELFLoggerDelegate.errorLogger,"executeSQLWrite"+e1);
+			}
+		}
+	}
+}
diff --git a/src/main/java/com/att/research/mdbc/mixins/Operation.java b/src/main/java/com/att/research/mdbc/mixins/Operation.java
new file mode 100644
index 0000000..4ca8048
--- /dev/null
+++ b/src/main/java/com/att/research/mdbc/mixins/Operation.java
@@ -0,0 +1,31 @@
+package com.att.research.mdbc.mixins;
+
+import java.io.Serializable;
+
+import org.json.JSONObject;
+import org.json.JSONTokener;
+
+/**
+ * Immutable record of a single row-level change: the operation type plus
+ * JSON snapshots (serialized as strings) of the row before and after the change.
+ */
+public final class Operation implements Serializable{
+
+	private static final long serialVersionUID = -1215301985078183104L;
+
+	final OperationType TYPE;
+	final String OLD_VAL;
+	final String NEW_VAL;
+
+	public Operation(OperationType type, String newVal, String oldVal) {
+		TYPE = type;
+		NEW_VAL = newVal;
+		OLD_VAL = oldVal;
+	}
+
+	/** Parse and return the post-change row as a JSONObject. */
+	public JSONObject getNewVal(){
+		return new JSONObject(new JSONTokener(NEW_VAL));
+	}
+
+	/** Parse and return the pre-change (key) data as a JSONObject. */
+	public JSONObject getOldVal(){
+		return new JSONObject(new JSONTokener(OLD_VAL));
+	}
+}
diff --git a/src/main/java/com/att/research/mdbc/mixins/OperationType.java b/src/main/java/com/att/research/mdbc/mixins/OperationType.java
new file mode 100644
index 0000000..0160eb5
--- /dev/null
+++ b/src/main/java/com/att/research/mdbc/mixins/OperationType.java
@@ -0,0 +1,5 @@
+package com.att.research.mdbc.mixins;
+
+/** The kind of SQL operation captured in the staging/transaction tables. */
+public enum OperationType{
+	DELETE,
+	UPDATE,
+	INSERT,
+	SELECT
+}
diff --git a/src/main/java/com/att/research/mdbc/mixins/PartitionInformation.java b/src/main/java/com/att/research/mdbc/mixins/PartitionInformation.java
new file mode 100644
index 0000000..12b8e4f
--- /dev/null
+++ b/src/main/java/com/att/research/mdbc/mixins/PartitionInformation.java
@@ -0,0 +1,19 @@
+package com.att.research.mdbc.mixins;
+
+import java.util.List;
+
+/**
+ * Immutable snapshot of a partition's metadata: its transaction-information
+ * table reference, member tables, replication factor and current owner.
+ */
+public class PartitionInformation {
+	public final String partition;         // partition identifier
+	public final TitReference tit;         // reference into the transaction information table
+	public final List<String> tables;      // tables belonging to this partition
+	public final int replicationFactor;    // replication factor for the partition
+	public final String currentOwner;      // id of the node currently owning the partition
+
+	public PartitionInformation(String partition, TitReference tit, List<String> tables, int replicationFactor, String currentOwner) {
+		this.partition=partition;
+		this.tit=tit;
+		this.tables=tables;
+		this.replicationFactor=replicationFactor;
+		this.currentOwner=currentOwner;
+	}
+}
diff --git a/src/main/java/com/att/research/mdbc/mixins/RedoHistoryElement.java b/src/main/java/com/att/research/mdbc/mixins/RedoHistoryElement.java
new file mode 100644
index 0000000..9d685cc
--- /dev/null
+++ b/src/main/java/com/att/research/mdbc/mixins/RedoHistoryElement.java
@@ -0,0 +1,15 @@
+package com.att.research.mdbc.mixins;
+
+import java.util.List;
+
+/**
+ * One entry in the redo history of a partition: the current transaction
+ * information reference and the references it supersedes.
+ */
+public final class RedoHistoryElement {
+	public final String partition;             // partition this history entry belongs to
+	public final TitReference current;         // current transaction-information reference
+	public final List<TitReference> previous;  // references superseded by 'current'
+
+	public RedoHistoryElement(String partition, TitReference current, List<TitReference> previous) {
+		this.partition = partition;
+		this.current = current;
+		this.previous = previous;
+	}
+}
diff --git a/src/main/java/com/att/research/mdbc/mixins/RedoRecordId.java b/src/main/java/com/att/research/mdbc/mixins/RedoRecordId.java
new file mode 100644
index 0000000..8a4923f
--- /dev/null
+++ b/src/main/java/com/att/research/mdbc/mixins/RedoRecordId.java
@@ -0,0 +1,15 @@
+package com.att.research.mdbc.mixins;
+
+public final class RedoRecordId {
+ public final String leaseId;
+ public final String commitId;
+
+ public RedoRecordId(String leaseId, String commitId) {
+ this.leaseId = leaseId;
+ this.commitId = commitId;
+ }
+
+ public boolean isEmpty() {
+ return (this.leaseId==null || this.leaseId.isEmpty())&&(this.commitId==null||this.commitId.isEmpty());
+ }
+}
diff --git a/src/main/java/com/att/research/mdbc/mixins/StagingTable.java b/src/main/java/com/att/research/mdbc/mixins/StagingTable.java
new file mode 100644
index 0000000..7da348d
--- /dev/null
+++ b/src/main/java/com/att/research/mdbc/mixins/StagingTable.java
@@ -0,0 +1,50 @@
+package com.att.research.mdbc.mixins;
+
+import java.io.Serializable;
+import java.util.Deque;
+import java.util.HashMap;
+import java.util.LinkedList;
+import java.util.Set;
+import org.apache.commons.lang3.tuple.Pair;
+import org.json.JSONObject;
+
+import com.att.research.logging.EELFLoggerDelegate;
+
+/**
+ * Accumulates, per row key, the ordered list of operations made by a
+ * transaction so they can later be shipped to MUSIC as a digest.
+ * All mutators are synchronized on the instance.
+ */
+public class StagingTable implements Serializable{
+	private static final long serialVersionUID = 7583182634761771943L;
+	private transient static EELFLoggerDelegate logger = EELFLoggerDelegate.getLogger(StagingTable.class);
+	// key -> FIFO of operations applied to that key, oldest first
+	private HashMap<String,Deque<Operation>> operations;
+
+	public StagingTable() {
+		operations = new HashMap<>();
+	}
+
+	/** Record one operation against a key, creating the per-key queue lazily. */
+	synchronized public void addOperation(String key, OperationType type, String oldVal, String newVal) {
+		Deque<Operation> ops = operations.get(key);
+		if (ops == null) {
+			ops = new LinkedList<>();
+			operations.put(key, ops);
+		}
+		ops.add(new Operation(type,newVal,oldVal));
+	}
+
+	/**
+	 * Snapshot of the staged changes: for each key, only its most recent operation.
+	 * @throws NoSuchFieldException if a key maps to an empty queue (invalid state)
+	 */
+	synchronized public Deque<Pair<String,Operation>> getIterableSnapshot() throws NoSuchFieldException{
+		//\TODO: check if we can just return the last change to a given key
+		Deque<Pair<String,Operation>> snapshot = new LinkedList<Pair<String,Operation>>();
+		for (String key : operations.keySet()) {
+			Deque<Operation> ops = operations.get(key);
+			if (ops.isEmpty()) {
+				logger.error(EELFLoggerDelegate.errorLogger, "Invalid state of the Operation data structure when creating snapshot");
+				throw new NoSuchFieldException("Invalid state of the operation data structure");
+			}
+			snapshot.add(Pair.of(key, ops.getLast()));
+		}
+		return snapshot;
+	}
+
+	/** Discard all recorded operations (e.g. after a successful commit). */
+	synchronized public void clean() {
+		operations.clear();
+	}
+}
diff --git a/src/main/java/com/att/research/mdbc/mixins/TablePartitionInformation.java b/src/main/java/com/att/research/mdbc/mixins/TablePartitionInformation.java
new file mode 100644
index 0000000..a2cf5dd
--- /dev/null
+++ b/src/main/java/com/att/research/mdbc/mixins/TablePartitionInformation.java
@@ -0,0 +1,15 @@
+package com.att.research.mdbc.mixins;
+
+import java.util.List;
+
+/**
+ * Immutable record of which partition a table currently lives in, plus the
+ * partitions it belonged to previously.
+ */
+public final class TablePartitionInformation {
+	public final String table;                // table name
+	public final String partition;            // partition the table currently belongs to
+	public final List<String> oldPartitions;  // partitions the table belonged to before
+
+	public TablePartitionInformation(String table, String partition, List<String> oldPartitions) {
+		this.table = table;
+		this.partition = partition;
+		this.oldPartitions = oldPartitions;
+	}
+}
diff --git a/src/main/java/com/att/research/mdbc/mixins/TitReference.java b/src/main/java/com/att/research/mdbc/mixins/TitReference.java
new file mode 100644
index 0000000..f27b3a0
--- /dev/null
+++ b/src/main/java/com/att/research/mdbc/mixins/TitReference.java
@@ -0,0 +1,12 @@
+package com.att.research.mdbc.mixins;
+
+/**
+ * Immutable reference into the Transaction Information Table (TIT):
+ * the table name plus the row index within it.
+ */
+public final class TitReference {
+	public final String table;  // transaction-information table name
+	public final String index;  // row index within that table
+
+	public TitReference(String table, String index) {
+		this.table = table;
+		this.index= index;
+	}
+
+}
diff --git a/src/main/java/com/att/research/mdbc/mixins/TransactionInformationElement.java b/src/main/java/com/att/research/mdbc/mixins/TransactionInformationElement.java
new file mode 100644
index 0000000..1c8b799
--- /dev/null
+++ b/src/main/java/com/att/research/mdbc/mixins/TransactionInformationElement.java
@@ -0,0 +1,19 @@
+package com.att.research.mdbc.mixins;
+
+import java.util.List;
+
+/**
+ * Immutable row of the Transaction Information Table: the redo-log record ids
+ * for a partition plus replay progress bookkeeping.
+ */
+public final class TransactionInformationElement {
+	public final String index;                 // row index in the transaction information table
+	public final List<RedoRecordId> redoLog;   // ordered redo-log record ids
+	public final String partition;             // partition this element belongs to
+	public final int latestApplied;            // index of the latest redo record applied
+	public final boolean applied;              // whether the whole redo log has been applied
+
+	public TransactionInformationElement(String index, List<RedoRecordId> redoLog, String partition, int latestApplied, boolean applied) {
+		this.index = index;
+		this.redoLog = redoLog;
+		this.partition = partition;
+		this.latestApplied = latestApplied;
+		this.applied = applied;
+	}
+}
diff --git a/src/main/java/com/att/research/mdbc/mixins/TxCommitProgress.java b/src/main/java/com/att/research/mdbc/mixins/TxCommitProgress.java
new file mode 100644
index 0000000..c0f7089
--- /dev/null
+++ b/src/main/java/com/att/research/mdbc/mixins/TxCommitProgress.java
@@ -0,0 +1,206 @@
+package com.att.research.mdbc.mixins;
+
+import java.math.BigInteger;
+import java.util.Map;
+import java.util.concurrent.ConcurrentHashMap;
+import java.util.concurrent.atomic.AtomicLong;
+import com.att.research.logging.EELFLoggerDelegate;
+
+import java.sql.Connection;
+import java.util.concurrent.atomic.AtomicReference;
+
+
+/**
+ * Tracks the commit progress of in-flight transactions, keyed by the local
+ * transaction id. Commit ids are handed out from a monotonically increasing
+ * counter. Backed by a ConcurrentHashMap; per-transaction state lives in
+ * {@link CommitProgress}.
+ *
+ * NOTE(review): every lookup method below logs when the transaction is
+ * unknown but then dereferences the null CommitProgress anyway, which throws
+ * NullPointerException. Also "%l" in the log strings looks like printf-style
+ * formatting that the logger may not interpolate -- confirm both.
+ */
+public class TxCommitProgress{
+	private EELFLoggerDelegate logger = EELFLoggerDelegate.getLogger(TxCommitProgress.class);
+
+	private AtomicReference<BigInteger> nextCommitId;      // next commit id to hand out
+	private Map<String, CommitProgress> transactionInfo;   // txId -> progress record
+
+	public TxCommitProgress(){
+		nextCommitId=new AtomicReference<>(BigInteger.ZERO);
+		transactionInfo = new ConcurrentHashMap<>();
+	}
+	
+	/** @return true if the transaction is currently being tracked. */
+	public boolean containsTx(String txId) {
+		return transactionInfo.containsKey(txId);
+	}
+	
+	/**
+	 * Return the commit id for a transaction, assigning the next id from the
+	 * shared counter on first request.
+	 */
+	public BigInteger getCommitId(String txId) {
+		CommitProgress prog = transactionInfo.get(txId);
+		if(prog.isCommitIdAssigned()) {
+			return prog.getCommitId();
+		}
+		BigInteger commitId = nextCommitId.getAndUpdate((a)-> a.add(BigInteger.ONE));
+		prog.setCommitId(commitId);
+		return commitId;
+	}
+	
+	/** Begin tracking a new transaction, remembering its JDBC connection. */
+	public void createNewTransactionTracker(String id, Connection conn) {
+		transactionInfo.put(id, new CommitProgress(id,conn));
+	}
+	
+	/** Mark that the user has requested commit for this transaction. */
+	public void commitRequested(String txId) {
+		CommitProgress prog = transactionInfo.get(txId);
+		if(prog == null){
+			logger.error(EELFLoggerDelegate.errorLogger, "Transaction doesn't exist: [%l], failure when storing commit request",txId);
+		}
+		prog.setCommitRequested();
+	}
+	
+	/** Mark that the SQL-side commit has completed. */
+	public void setSQLDone(String txId) {
+		CommitProgress prog = transactionInfo.get(txId);
+		if(prog == null){
+			logger.error(EELFLoggerDelegate.errorLogger, "Transaction doesn't exist: [%l], failure when storing saving completion of SQL",txId);
+		}
+		prog.setSQLCompleted();
+	}
+
+	/** Mark that the MUSIC-side commit has completed. */
+	public void setMusicDone(String txId) {
+		CommitProgress prog = transactionInfo.get(txId);
+		if(prog == null){
+			logger.error(EELFLoggerDelegate.errorLogger, "Transaction doesn't exist: [%l], failure when storing saving completion of Music",txId);
+		}
+		prog.setMusicCompleted();
+	}
+	
+	/** @return the JDBC connection associated with the transaction. */
+	public Connection getConnection(String txId){
+		CommitProgress prog = transactionInfo.get(txId);
+		if(prog == null){
+			logger.error(EELFLoggerDelegate.errorLogger, "Transaction doesn't exist: [%l], failure when retrieving statement",txId);
+		}
+		return prog.getConnection();
+	}
+	
+	/** Associate a redo-log record id with the transaction. */
+	public void setRecordId(String txId, RedoRecordId recordId){
+		CommitProgress prog = transactionInfo.get(txId);
+		if(prog == null){
+			logger.error(EELFLoggerDelegate.errorLogger, "Transaction doesn't exist: [%l], failure when setting record Id",txId);
+		}
+		prog.setRecordId(recordId);
+	}
+	
+	/** @return the redo-log record id previously set for the transaction. */
+	public RedoRecordId getRecordId(String txId) {
+		CommitProgress prog = transactionInfo.get(txId);
+		if(prog == null){
+			logger.error(EELFLoggerDelegate.errorLogger, "Transaction doesn't exist: [%l], failure when getting record Id",txId);
+		}
+		return prog.getRecordId();
+	}
+	
+	/** @return true if a redo-log record id has been assigned to the transaction. */
+	public boolean isRecordIdAssigned(String txId) {
+		CommitProgress prog = transactionInfo.get(txId);
+		if(prog == null){
+			logger.error(EELFLoggerDelegate.errorLogger, "Transaction doesn't exist: [%l], failure when checking record",txId);
+		}
+		return prog.isRedoRecordAssigned();
+	}
+	
+	/** @return true if commit was requested and both SQL and MUSIC completed. */
+	public boolean isComplete(String txId) {
+		CommitProgress prog = transactionInfo.get(txId);
+		if(prog == null){
+			logger.error(EELFLoggerDelegate.errorLogger, "Transaction doesn't exist: [%l], failure when checking completion",txId);
+		}
+		return prog.isComplete();
+	}
+	
+	/** Reset the transaction's progress so the same tracker can be reused. */
+	public void reinitializeTxProgress(String txId) {
+		CommitProgress prog = transactionInfo.get(txId);
+		if(prog == null){
+			logger.error(EELFLoggerDelegate.errorLogger, "Transaction doesn't exist: [%l], failure when reinitializing tx progress",txId);
+		}
+		prog.reinitialize();
+	}
+
+	/** Stop tracking the transaction entirely. */
+	public void deleteTxProgress(String txId){
+		transactionInfo.remove(txId);
+	}
+}
+
+/**
+ * Per-transaction commit state machine: tracks whether commit was requested
+ * and whether the SQL and MUSIC sides have completed, plus the assigned commit
+ * id and redo-log record id. Mutators are synchronized; every update refreshes
+ * the last-touched timestamp.
+ */
+final class CommitProgress{
+	private String lTxId; // local transaction id
+	private BigInteger commitId; // commit id
+	private boolean commitRequested; //indicates if the user tried to commit the request already.
+	private boolean SQLDone; // indicates if SQL was already committed
+	private boolean MusicDone; // indicates if music commit was already performed, atomic bool
+	private Connection connection;// reference to a connection object. This is used to complete a commit if it failed in the original thread.
+	private Long timestamp; // last time this data structure was updated
+	private RedoRecordId redoRecordId;// record id for each partition
+	
+	public CommitProgress(String id,Connection conn){
+		redoRecordId=null;
+		lTxId = id;
+		commitRequested = false;
+		SQLDone = false;
+		MusicDone = false;
+		connection = conn;
+		commitId = null;
+		timestamp = System.currentTimeMillis();
+	}
+	
+	/** @return true once commit was requested and both SQL and MUSIC completed. */
+	public synchronized boolean isComplete() {
+		return commitRequested && SQLDone && MusicDone;
+	}
+	
+	public synchronized void setCommitId(BigInteger commitId) {
+		this.commitId = commitId;
+		timestamp = System.currentTimeMillis();
+	}
+	
+	/** Reset all progress flags/ids so the tracker can be reused for a new attempt. */
+	public synchronized void reinitialize() {
+		commitId = null;
+		redoRecordId=null;
+		commitRequested = false;
+		SQLDone = false;
+		MusicDone = false;
+		timestamp = System.currentTimeMillis();
+	}
+
+	public synchronized void setCommitRequested() {
+		commitRequested = true;
+		timestamp = System.currentTimeMillis();
+	}
+	
+	public synchronized void setSQLCompleted() {
+		SQLDone = true;
+		timestamp = System.currentTimeMillis();
+	}
+	
+	public synchronized void setMusicCompleted() {
+		MusicDone = true;
+		timestamp = System.currentTimeMillis();
+	}
+	
+	// NOTE(review): unlike the other accessors this is not synchronized yet
+	// still mutates timestamp -- confirm intended.
+	public Connection getConnection() {
+		timestamp = System.currentTimeMillis();
+		return connection;
+	}
+	
+	public long getTimestamInMillis() {
+		return timestamp;
+	}
+
+	public synchronized void setRecordId(RedoRecordId id) {
+		redoRecordId = id;
+		timestamp = System.currentTimeMillis();
+	}
+	
+	public synchronized boolean isRedoRecordAssigned() {
+		return this.redoRecordId!=null;
+	}
+
+	public synchronized RedoRecordId getRecordId() {
+		return redoRecordId;
+	}
+	
+	public synchronized BigInteger getCommitId() {
+		return commitId;
+	}
+	
+	public synchronized String getId() {
+		return this.lTxId;
+	}
+	
+	public synchronized boolean isCommitIdAssigned() {
+		return this.commitId!= null;
+	}
+} \ No newline at end of file
diff --git a/src/main/java/com/att/research/mdbc/mixins/Utils.java b/src/main/java/com/att/research/mdbc/mixins/Utils.java
new file mode 100755
index 0000000..22df08f
--- /dev/null
+++ b/src/main/java/com/att/research/mdbc/mixins/Utils.java
@@ -0,0 +1,220 @@
+package com.att.research.mdbc.mixins;
+
+import java.io.IOException;
+import java.io.InputStream;
+import java.math.BigDecimal;
+import java.nio.ByteBuffer;
+import java.sql.ResultSet;
+import java.sql.SQLException;
+import java.sql.Timestamp;
+import java.sql.Types;
+import java.util.ArrayList;
+import java.util.Date;
+import java.util.List;
+import java.util.Properties;
+
+import org.json.JSONObject;
+
+import com.att.research.logging.EELFLoggerDelegate;
+import com.att.research.mdbc.TableInfo;
+import com.datastax.driver.core.utils.Bytes;
+
+/**
+ * Utility functions used by several of the mixins should go here.
+ *
+ * @author Robert P. Eby
+ */
+public class Utils {
+	private static EELFLoggerDelegate logger = EELFLoggerDelegate.getLogger(Utils.class);
+
+	/**
+	 * Transforms a JSONObject into an array of objects representing one row of the table.
+	 * @param ti information related to the table
+	 * @param tbl table that jo belongs to (not currently used; kept for interface stability)
+	 * @param jo object that represents a row in the table
+	 * @param musicDefaultPrimaryKeyName name of the key associated with the default primary key
+	 *        used by MUSIC; may be null if not required
+	 * @return array with the objects in the row
+	 */
+	public static Object[] jsonToRow(TableInfo ti, String tbl, JSONObject jo, String musicDefaultPrimaryKeyName) {
+		int columnSize = ti.columns.size();
+		ArrayList<Object> rv = new ArrayList<Object>();
+		// When MUSIC supplies its own synthetic primary key, it is prepended to the row.
+		if (musicDefaultPrimaryKeyName!=null && jo.has(musicDefaultPrimaryKeyName)) {
+			rv.add(jo.getString(musicDefaultPrimaryKeyName));
+		}
+		for (int i = 0; i < columnSize; i++) {
+			String colname = ti.columns.get(i);
+			switch (ti.coltype.get(i)) {
+				case Types.BIGINT:
+					rv.add(jo.optLong(colname, 0));
+					break;
+				case Types.BOOLEAN:
+					rv.add(jo.optBoolean(colname, false));
+					break;
+				case Types.BLOB:
+					// NOTE(review): BLOBs are carried as strings here — confirm encoding with callers.
+					rv.add(jo.optString(colname, ""));
+					break;
+				case Types.DECIMAL:
+					rv.add(jo.optBigDecimal(colname, BigDecimal.ZERO));
+					break;
+				case Types.DOUBLE:
+					rv.add(jo.optDouble(colname, 0));
+					break;
+				case Types.INTEGER:
+					rv.add(jo.optInt(colname, 0));
+					break;
+				case Types.TIMESTAMP:
+					// Timestamps are passed through as strings, not parsed into Date objects.
+					rv.add(jo.optString(colname, ""));
+					break;
+				case Types.DATE:
+				case Types.VARCHAR:
+					//Fall through
+				default:
+					rv.add(jo.optString(colname, ""));
+					break;
+			}
+		}
+		return rv.toArray();
+	}
+
+	/**
+	 * Return a String equivalent of an Object, suitable for splicing into SQL text.
+	 * Single quotes in strings are doubled; ByteBuffers become hex literals; Dates
+	 * become quoted SQL timestamps.
+	 * @param val the object to String-ify
+	 * @return the String value
+	 */
+	public static String getStringValue(Object val) {
+		if (val == null)
+			return "NULL";
+		if (val instanceof String)
+			return "'" + val.toString().replaceAll("'", "''") + "'"; // double any quotes
+		if (val instanceof Number)
+			return ""+val;
+		if (val instanceof ByteBuffer)
+			return "'" + Bytes.toHexString((ByteBuffer)val).substring(2) + "'"; // substring(2) is to remove the "0x" at front
+		if (val instanceof Date)
+			return "'" + (new Timestamp(((Date)val).getTime())).toString() + "'";
+		// Boolean, and anything else
+		return val.toString();
+	}
+
+	/**
+	 * Parse a JDBC result set into a list of object arrays, one per row, using the
+	 * column names and types described by ti.
+	 * @param ti information related to the table
+	 * @param rs the result set to drain
+	 * @return one Object[] per row, in result-set order
+	 * @throws SQLException if reading the result set fails
+	 */
+	public static ArrayList<Object[]> parseResults(TableInfo ti, ResultSet rs) throws SQLException {
+		ArrayList<Object[]> results = new ArrayList<Object[]>();
+		while (rs.next()) {
+			Object[] row = new Object[ti.columns.size()];
+			for (int i = 0; i < ti.columns.size(); i++) {
+				String colname = ti.columns.get(i);
+				switch (ti.coltype.get(i)) {
+					case Types.BIGINT:
+						row[i] = rs.getLong(colname);
+						break;
+					case Types.BOOLEAN:
+						row[i] = rs.getBoolean(colname);
+						break;
+					case Types.BLOB:
+						// Fix: route the unsupported-BLOB message through the error logger
+						// instead of System.err.  The cell is left null, as before.
+						logger.error(EELFLoggerDelegate.errorLogger,
+								"WE DO NOT SUPPORT BLOBS IN H2!! COLUMN NAME="+colname);
+						break;
+					case Types.DOUBLE:
+						row[i] = rs.getDouble(colname);
+						break;
+					case Types.INTEGER:
+						row[i] = rs.getInt(colname);
+						break;
+					case Types.TIMESTAMP:
+						// Timestamps are read back as strings, mirroring jsonToRow.
+						row[i] = rs.getString(colname);
+						break;
+					case Types.VARCHAR:
+						//Fall through
+					default:
+						// NOTE(review): DECIMAL also lands here and is read as a String,
+						// unlike jsonToRow which produces a BigDecimal — confirm intended.
+						row[i] = rs.getString(colname);
+						break;
+				}
+			}
+			results.add(row);
+		}
+		return results;
+	}
+
+	/**
+	 * Returns the classes named in the MIXINS property of /mdbc_driver.properties
+	 * that implement (or extend) the given class.
+	 * @param implx the interface/class the mixins must implement
+	 * @return the matching classes; empty if the property file or property is missing
+	 */
+	@SuppressWarnings("unused")
+	static List<Class<?>> getClassesImplementing(Class<?> implx) {
+		Properties pr = null;
+		try {
+			pr = new Properties();
+			pr.load(Utils.class.getResourceAsStream("/mdbc_driver.properties"));
+		}
+		catch (IOException e) {
+			logger.error(EELFLoggerDelegate.errorLogger, "Could not load property file > " + e.getMessage());
+		}
+
+		List<Class<?>> list = new ArrayList<Class<?>>();
+		if (pr==null) {
+			return list;
+		}
+		String mixins = pr.getProperty("MIXINS");
+		// Bug fix: a missing MIXINS property used to cause a NullPointerException here.
+		if (mixins == null) {
+			logger.error(EELFLoggerDelegate.errorLogger, "MIXINS property missing from mdbc_driver.properties");
+			return list;
+		}
+		for (String className: mixins.split("[ ,]")) {
+			try {
+				Class<?> cl = Class.forName(className.trim());
+				if (MixinFactory.impl(cl, implx)) {
+					list.add(cl);
+				}
+			} catch (ClassNotFoundException e) {
+				logger.error(EELFLoggerDelegate.errorLogger,"Mixin class "+className+" not found.");
+			}
+		}
+		return list;
+	}
+
+	/**
+	 * Registers (loads) the JDBC driver classes named in the DEFAULT_DRIVERS
+	 * property of /mdbc_driver.properties.
+	 */
+	public static void registerDefaultDrivers() {
+		Properties pr = null;
+		try {
+			pr = new Properties();
+			pr.load(Utils.class.getResourceAsStream("/mdbc_driver.properties"));
+		}
+		catch (IOException e) {
+			logger.error("Could not load property file > " + e.getMessage());
+		}
+		// Bug fix: the original dereferenced pr and the DEFAULT_DRIVERS value
+		// unconditionally and threw a NullPointerException when the property
+		// file failed to load or the property was absent.
+		if (pr == null) {
+			return;
+		}
+		String drivers = pr.getProperty("DEFAULT_DRIVERS");
+		if (drivers == null) {
+			logger.error(EELFLoggerDelegate.errorLogger, "DEFAULT_DRIVERS property missing from mdbc_driver.properties");
+			return;
+		}
+		for (String driver: drivers.split("[ ,]")) {
+			logger.info(EELFLoggerDelegate.applicationLogger, "Registering jdbc driver '" + driver + "'");
+			try {
+				// Loading the class suffices: JDBC drivers self-register in a static initializer.
+				Class.forName(driver.trim());
+			} catch (ClassNotFoundException e) {
+				logger.error(EELFLoggerDelegate.errorLogger,"Driver class "+driver+" not found.");
+			}
+		}
+	}
+
+	/**
+	 * Loads /mdbc.properties from the classpath.
+	 * @return the loaded properties; empty (defaults) if the resource is missing or unreadable
+	 */
+	public static Properties getMdbcProperties() {
+		Properties prop = new Properties();
+		InputStream input = null;
+		try {
+			// Bug fix: ClassLoader.getResourceAsStream does not understand a leading "/",
+			// so the original lookup always returned null (and then NPE'd in load);
+			// Class.getResourceAsStream resolves "/mdbc.properties" against the
+			// classpath root as intended.
+			input = Utils.class.getResourceAsStream("/mdbc.properties");
+			if (input == null) {
+				logger.warn(EELFLoggerDelegate.applicationLogger,
+						"mdbc.properties not found on classpath. Proceeding with defaults");
+			} else {
+				prop.load(input);
+			}
+		} catch (Exception e) {
+			logger.warn(EELFLoggerDelegate.applicationLogger, "Could not load mdbc.properties. "
+					+ "Proceeding with defaults " + e.getMessage());
+		} finally {
+			if (input != null) {
+				try {
+					input.close();
+				} catch (IOException e) {
+					logger.error(EELFLoggerDelegate.errorLogger, e.getMessage());
+				}
+			}
+		}
+		return prop;
+	}
+}
diff --git a/src/main/java/com/att/research/mdbc/mixins/package-info.java b/src/main/java/com/att/research/mdbc/mixins/package-info.java
new file mode 100755
index 0000000..edad7e8
--- /dev/null
+++ b/src/main/java/com/att/research/mdbc/mixins/package-info.java
@@ -0,0 +1,47 @@
+/**
+ * <p>
+ * This package provides the "mixins" to use when constructing a MusicSqlManager. The mixins define how MusicSqlManager
+ * will interface both to the database being mirrored (via the {@link com.att.research.mdbc.mixins.DBInterface} interface),
+ * and how it will interface to the persistence layer provided by MUSIC (via the {@link com.att.research.mdbc.mixins.MusicInterface}
+ * interface).
+ * </p>
+ * <p>
+ * The choice of which mixins to use is determined by the MusicSqlManager constructor.
+ * It will decide based upon the URL and connection properties with which it is presented (from the
+ * {@link java.sql.DriverManager#getConnection(String, java.util.Properties)} call).
+ * </p>
+ * <p>
+ * The list of mixins that may be selected from is stored in the properties files <code>mdbc.properties</code>
+ * under the name MIXINS. This implementation provides the following mixins:
+ * </p>
+ * <table summary="">
+ * <tr><th>Name</th><th>Class</th><th>Description</th></tr>
+ * <tr><td>cassandra</td><td>c.a.r.m.m.CassandraMixin</td><td>A <a href="http://cassandra.apache.org/">Cassandra</a> based
+ * persistence layer (without any of the table locking that MUSIC normally provides).</td></tr>
+ * <tr><td>cassandra2</td><td>c.a.r.m.m.Cassandra2Mixin</td><td>Similar to the <i>cassandra</i> mixin, but stores all
+ * dirty row information in one table, rather than one table per real table.</td></tr>
+ * <tr><td>h2</td><td>c.a.r.m.m.H2Mixin</td><td>This mixin provides access to either an in-memory, or a local
+ * (file-based) version of the H2 database.</td></tr>
+ * <tr><td>h2server</td><td>c.a.r.m.m.H2ServerMixin</td><td>This mixin provides access to a copy of the H2 database
+ * running as a server. Because the server needs special Java classes in order to handle certain TRIGGER actions, the
+ * server must be set up in a special way (see below).</td></tr>
+ * <tr><td>mysql</td><td>c.a.r.m.m.MySQLMixin</td><td>This mixin provides access to MySQL running on a remote server.</td></tr>
+ * </table>
+ * <h2>Starting the H2 Server</h2>
+ * <p>
+ * The H2 Server, when used with MDBC, must contain the MDBC Trigger class, and supporting libraries.
+ * This can be done as follows:
+ * </p>
+ * <pre>
+ * CLASSPATH=$PWD/target/mdbc-h2server-0.0.1-SNAPSHOT.jar
+ * CLASSPATH=$CLASSPATH:$HOME/.m2/repository/com/h2database/h2/1.3.168/h2-1.3.168.jar
+ * CLASSPATH=$CLASSPATH:$HOME/.m2/repository/log4j/log4j/1.2.17/log4j-1.2.17.jar
+ * CLASSPATH=$CLASSPATH:$HOME/.m2/repository/org/json/json/20160810/json-20160810.jar
+ * export CLASSPATH
+ * java org.h2.tools.Server
+ * </pre>
+ * <p>
+ * The <code>mdbc-h2server-0.0.1-SNAPSHOT.jar</code> file is built with Maven using the <code>pom-h2server.xml</code> pom file.
+ * </p>
+ */
+package com.att.research.mdbc.mixins;
diff --git a/src/main/java/com/att/research/mdbc/package-info.java b/src/main/java/com/att/research/mdbc/package-info.java
new file mode 100755
index 0000000..5ad59c8
--- /dev/null
+++ b/src/main/java/com/att/research/mdbc/package-info.java
@@ -0,0 +1,87 @@
+/**
+ * <p>
+ * This package provides a JDBC driver that can be used to mirror the contents of a database to and from
+ * <a href="http://cassandra.apache.org/">Cassandra</a>. The mirroring occurs as a side effect of
+ * execute() statements against a JDBC connection, and triggers placed in the database to catch database modifications.
+ * The initial implementation is written to mirror an <a href="http://h2database.com/">H2</a> database.
+ * </p>
+ * <p>
+ * This JDBC driver will intercept all table creations, SELECTs, INSERTs, DELETEs, and UPDATEs made to the underlying
+ * database, and make sure they are copied to Cassandra. In addition, for every table XX that is created, another table
+ * DIRTY_XX will be created to communicate the existence of <i>dirty rows</i> to other Cassandra replicas (with the
+ * Cassandra2 Mixin, the table is called DIRTY____ and there is only one table). Dirty rows
+ * will be copied, as needed, back into the database from Cassandra before any SELECT.
+ * </p>
+ * <h3>To use with JDBC</h3>
+ * <ol>
+ * <li>Add this jar, and all dependent jars to your CLASSPATH.</li>
+ * <li>Rewrite your JDBC URLs from <code>jdbc:h2:...</code> to <code>jdbc:mdbc:...</code>.
+ * <li>If you supply properties to the {@link java.sql.DriverManager#getConnection(String, java.util.Properties)} call,
+ * use the following optional properties to control behavior of the proxy:
+ * <table summary="">
+ * <tr><th>Property Name</th><th>Property Value</th><th>Default Value</th></tr>
+ * <tr><td>MDBC_DB_MIXIN</td><td>The mixin name to use to select the database mixin to use for this connection.</td></tr>
+ * <tr><td>MDBC_MUSIC_MIXIN</td><td>The mixin name to use to select the MUSIC mixin to use for this connection.</td></tr>
+ * <tr><td>myid</td><td>The ID of this replica in the collection of replicas sharing the same tables.</td><td>0</td></tr>
+ * <tr><td>replicas</td><td>A comma-separated list of replica names for the collection of replicas sharing the same tables.</td><td>the value of <i>myid</i></td></tr>
+ * <tr><td>music_keyspace</td><td>The keyspace name to use in Cassandra for all tables created by this instance of MDBC.</td><td>mdbc</td></tr>
+ * <tr><td>music_address</td><td>The IP address to use to connect to Cassandra.</td><td>localhost</td></tr>
+ * <tr><td>music_rfactor</td><td>The replication factor to use for the new keyspace that is created.</td><td>2</td></tr>
+ * <tr><td>disabled</td><td>If set to <i>true</i> the mirroring is completely disabled; this is the equivalent of using the database driver directly.</td><td>false</td></tr>
+ * </table>
+ * </li>
+ * <li>Load the driver using the following call:
+ * <pre>
+ * Class.forName("com.att.research.mdbc.ProxyDriver");
+ * </pre></li>
+ * </ol>
+ * <p>Because, under the current design, the MDBC driver must be running within the same JVM as the database, MDBC
+ * will only explicitly support in-memory databases (URL of <code>jdbc:mdbc:mem:...</code>), or local file
+ * databases (URL of <code>jdbc:mdbc:/path/to/file</code>). Attempts to access a remote H2 server (URL
+ * <code>jdbc:mdbc:tcp://host/path/to/db</code>) will probably not work, although MDBC will not stop you from trying.
+ * </p>
+ *
+ * <h3>To Define a Tomcat DataSource Resource</h3>
+ * <p>The following code snippet can be used as a guide when setting up a Tomcat DataSource Resource.
+ * This snippet goes in the <i>server.xml</i> file. The items in <b>bold</b> indicate changed or new items:</p>
+ * <pre>
+ * &lt;Resource name="jdbc/ProcessEngine"
+ * auth="Container"
+ * type="javax.sql.DataSource"
+ * factory="org.apache.tomcat.jdbc.pool.DataSourceFactory"
+ * uniqueResourceName="process-engine"
+ * driverClassName="<b>com.att.research.mdbc.ProxyDriver</b>"
+ * url="jdbc:<b>mdbc</b>:./camunda-h2-dbs/process-engine;MVCC=TRUE;TRACE_LEVEL_FILE=0;DB_CLOSE_ON_EXIT=FALSE"
+ * <b>connectionProperties="myid=0;replicas=0,1,2;music_keyspace=camunda;music_address=localhost"</b>
+ * username="sa"
+ * password="sa"
+ * maxActive="20"
+ * minIdle="5" /&gt;
+ * </pre>
+ *
+ * <h3>To Define a JBoss DataSource</h3>
+ * <p>The following code snippet can be used as a guide when setting up a JBoss DataSource.
+ * This snippet goes in the <i>service.xml</i> file. The items in <b>bold</b> indicate changed or new items:</p>
+ * <pre>
+ * &lt;datasources&gt;
+ * &lt;datasource jta="true" jndi-name="java:jboss/datasources/ProcessEngine" pool-name="ProcessEngine" enabled="true" use-java-context="true" use-ccm="true"&gt;
+ * &lt;connection-url&gt;jdbc:<b>mdbc</b>:/opt/jboss-eap-6.2.4/standalone/camunda-h2-dbs/process-engine;DB_CLOSE_DELAY=-1;MVCC=TRUE;DB_CLOSE_ON_EXIT=FALSE&lt;/connection-url&gt;
+ * <b>&lt;connection-property name="music_keyspace"&gt;
+ * camunda
+ * &lt;/connection-property&gt;</b>
+ * &lt;driver&gt;mdbc&lt;/driver&gt;
+ * &lt;security&gt;
+ * &lt;user-name&gt;sa&lt;/user-name&gt;
+ * &lt;password&gt;sa&lt;/password&gt;
+ * &lt;/security&gt;
+ * &lt;/datasource&gt;
+ * &lt;drivers&gt;
+ * <b>&lt;driver name="mdbc" module="com.att.research.mdbc"&gt;
+ * &lt;driver-class&gt;com.att.research.mdbc.ProxyDriver&lt;/driver-class&gt;
+ * &lt;/driver&gt;</b>
+ * &lt;/drivers&gt;
+ * &lt;/datasources&gt;
+ * </pre>
+ * <p>Note: This assumes that you have built and installed the <b>com.att.research.mdbc</b> module within JBoss.
+ */
+package com.att.research.mdbc;
diff --git a/src/main/java/com/att/research/mdbc/tests/ConnectionTest.java b/src/main/java/com/att/research/mdbc/tests/ConnectionTest.java
new file mode 100644
index 0000000..721b389
--- /dev/null
+++ b/src/main/java/com/att/research/mdbc/tests/ConnectionTest.java
@@ -0,0 +1,419 @@
+package com.att.research.mdbc.tests;
+
+//import java.sql.Connection;
+//import java.sql.DriverManager;
+//import java.sql.PreparedStatement;
+//import java.sql.ResultSet;
+//import java.sql.SQLException;
+//import java.sql.Statement;
+//import java.util.HashSet;
+//import java.util.Properties;
+//import java.util.Set;
+//
+//import org.h2.tools.Server;
+//import org.junit.After;
+//import org.junit.AfterClass;
+//import org.junit.Before;
+//import org.junit.BeforeClass;
+//import org.junit.Test;
+//import org.slf4j.Logger;
+//import org.slf4j.LoggerFactory;
+//
+//import com.mysql.jdbc.jdbc2.optional.MysqlDataSource;
+
+
+//@FixMethodOrder(MethodSorters.NAME_ASCENDING)
+//@RunWith(ConcurrentTestRunner.class)
+/**
+ * Placeholder for MDBC connection tests (Cassandra connectivity, plain JDBC,
+ * the MDBC proxy driver, and MysqlDataSource/MdbcDataSource scenarios).
+ *
+ * <p>The entire original body of this class was commented out, so it compiled
+ * to an empty class while carrying several hundred lines of dead code.  The
+ * commented-out scaffold has been removed — dead code belongs in version
+ * control history, not in source files.  Reintroduce the scenarios as real
+ * test methods when the required local services (MySQL on 3306, Cassandra on
+ * 9042, H2 server) are available to the build.
+ */
+public class ConnectionTest {
+	// Intentionally empty: all tests were disabled (commented out) in the
+	// original source; see repository history for the disabled test bodies.
+}
diff --git a/src/main/java/com/att/research/mdbc/tests/MAIN.java b/src/main/java/com/att/research/mdbc/tests/MAIN.java
new file mode 100755
index 0000000..164b088
--- /dev/null
+++ b/src/main/java/com/att/research/mdbc/tests/MAIN.java
@@ -0,0 +1,106 @@
+package com.att.research.mdbc.tests;
+
+import java.io.FileInputStream;
+import java.io.InputStream;
+import java.lang.reflect.Constructor;
+import java.util.ArrayList;
+import java.util.List;
+
+import org.apache.log4j.Logger;
+import org.json.JSONArray;
+import org.json.JSONObject;
+import org.json.JSONTokener;
+
+/**
+ * Run all the tests against all the configurations specified in /tests.json.
+ *
+ * @author Robert Eby
+ */
+public class MAIN {
+ public static final String CONFIG = "/tests.json";
+
+ /**
+ * This class runs all the tests against all the configurations specified in /tests.json.
+ * It assumes that a copy of Cassandra is running locally on port 9042, that a copy of H2
+ * server is is running locally on port 8082, and that a copy of MySQL is running locally
+ * on port 3306. These can be adjusted by editing the /tests.json file.
+ *
+ * @param args command line arguments
+ * @throws Exception if anything goes wrong
+ */
+ public static void main(String[] args) throws Exception {
+ new MAIN(args).run();
+ System.exit(0);
+ }
+
+ private JSONArray configs;
+ private List<Test> tests;
+ private int total_success, total_failure;
+
+ public MAIN(String[] args) throws Exception {
+ configs = null;
+ tests = new ArrayList<Test>();
+ total_success = total_failure = 0;
+
+ InputStream is = null;
+ if (args.length == 0) {
+ is = this.getClass().getResourceAsStream(CONFIG);
+ } else {
+ is = new FileInputStream(args[0]);
+ }
+ if (is != null) {
+ JSONObject jo = new JSONObject(new JSONTokener(is));
+ is.close();
+ configs = jo.getJSONArray("configs");
+
+ JSONArray ja = jo.getJSONArray("tests");
+ for (int i = 0; i < ja.length(); i++) {
+ Class<?> cl = Class.forName(ja.getString(i).trim());
+ if (cl != null) {
+ Constructor<?> con = cl.getConstructor();
+ tests.add((Test) con.newInstance());
+ }
+ }
+ } else {
+ String conf = (args.length == 0) ? CONFIG : args[0];
+ throw new Exception("Cannot find configuration resource: "+conf);
+ }
+ }
+ public void run() {
+ Logger logger = Logger.getLogger(this.getClass());
+ for (int ix = 0; ix < configs.length(); ix++) {
+ JSONObject config = configs.getJSONObject(ix);
+ int succ = 0, fail = 0;
+ logger.info("*** Testing with configuration: "+config.getString("description"));
+ System.out.println("Testing with configuration: "+config.getString("description"));
+ for (Test t : tests) {
+ String nm = t.getName() + " ............................................................";
+ System.out.print(" Test: "+nm.substring(0, 60));
+ try {
+ List<String> msgs = t.run(config);
+ if (msgs == null || msgs.size() == 0) {
+ succ++;
+ System.out.println(" OK!");
+ } else {
+ fail++;
+ System.out.println(" Fail!");
+ System.out.flush();
+ for (String m : msgs) {
+ System.out.println(" "+m);
+ }
+ System.out.flush();
+ }
+ } catch (Exception x) {
+ fail++;
+ System.out.println(" Fail!");
+ }
+ }
+ System.out.println();
+ total_success += succ;
+ total_failure += fail;
+ }
+ String m = "Testing completed: "+total_success+" successful tests, "+total_failure+": failures.";
+ logger.info(m);
+ System.out.println(m);
+ }
+}
diff --git a/src/main/java/com/att/research/mdbc/tests/Test.java b/src/main/java/com/att/research/mdbc/tests/Test.java
new file mode 100755
index 0000000..0b8c0ab
--- /dev/null
+++ b/src/main/java/com/att/research/mdbc/tests/Test.java
@@ -0,0 +1,105 @@
+package com.att.research.mdbc.tests;
+
+import java.sql.Connection;
+import java.sql.DriverManager;
+import java.sql.ResultSet;
+import java.sql.SQLException;
+import java.sql.Statement;
+import java.util.List;
+import java.util.Properties;
+
+import org.json.JSONArray;
+import org.json.JSONObject;
+
+/**
+ * Provides the abstract interface for a Test, as well as some common functions.
+ *
+ * @author Robert Eby
+ */
+public abstract class Test {
+	/** Fully qualified class name of the MDBC JDBC driver; loaded before opening test connections. */
+	public static final String MDBC_DRIVER = "com.att.research.mdbc.ProxyDriver";
+
+	/**
+	 * Each test derived from this class must implement this method,
+	 * which runs the test and produces a list of error messages.
+	 *
+	 * @param config a JSONObject describing the configuration to use for this run of the test
+	 * @return the list of messages. If the list is empty, the test is considered to have run
+	 * 		successfully.
+	 */
+	abstract public List<String> run(JSONObject config);
+
+	/**
+	 * Get a short display name for this test: the class name with the package prefix removed.
+	 *
+	 * @return the unqualified test name
+	 */
+	public String getName() {
+		String s = this.getClass().getName();
+		// Use replace() (literal substring) rather than replaceAll() (regex):
+		// with replaceAll() each '.' is a regex wildcard matching any character.
+		return s.replace("com.att.research.mdbc.tests.", "");
+	}
+
+	/**
+	 * Build the Properties used to open connection number <i>i</i>: every top-level key
+	 * of the config except "connections", overlaid with the keys of connections[i].
+	 *
+	 * @param config the test configuration
+	 * @param i index into the config's "connections" array
+	 * @return the merged Properties
+	 */
+	public Properties buildProperties(JSONObject config, int i) {
+		Properties p = new Properties();
+		for (String key : config.keySet()) {
+			if (key.equals("connections")) {
+				JSONArray ja = config.getJSONArray("connections");
+				JSONObject connection = ja.getJSONObject(i);
+				for (String key2 : connection.keySet()) {
+					p.setProperty(key2, connection.getString(key2));
+				}
+			} else {
+				p.setProperty(key, config.getString(key));
+			}
+		}
+		return p;
+	}
+
+	/**
+	 * Open a JDBC connection through the MDBC driver using the "url" property.
+	 *
+	 * @param pr connection properties; must contain a "url" entry
+	 * @return the newly opened Connection
+	 * @throws SQLException if the connection cannot be opened
+	 * @throws ClassNotFoundException if the MDBC driver class is not on the classpath
+	 */
+	public Connection getDBConnection(Properties pr) throws SQLException, ClassNotFoundException {
+		Class.forName(MDBC_DRIVER);
+		String url = pr.getProperty("url");
+		return DriverManager.getConnection(url, pr);
+	}
+
+	/**
+	 * Assert that an object is not null.
+	 *
+	 * @param o the object to check
+	 * @throws Exception if o is null
+	 */
+	public void assertNotNull(Object o) throws Exception {
+		if (o == null)
+			throw new Exception("Object is null");
+	}
+
+	/**
+	 * Assert that table tbl (read through conn) contains at least one row matching
+	 * the column/value pairs in kv.
+	 *
+	 * @param connid connection index, used only in the failure message
+	 * @param conn the connection to query through
+	 * @param tbl the table name
+	 * @param kv alternating column names and expected values
+	 * @throws Exception if no matching row exists
+	 */
+	public void assertTableContains(int connid, Connection conn, String tbl, Object... kv) throws Exception {
+		ResultSet rs = getRow(conn, tbl, kv);
+		boolean throwit = !rs.next();
+		rs.close();
+		if (throwit) {
+			throw new Exception("Conn id "+connid+" Table "+tbl+" does not have a row with "+catkeys(kv));
+		}
+	}
+	/**
+	 * Assert that table tbl (read through conn) contains NO row matching the
+	 * column/value pairs in kv.
+	 * NOTE(review): any exception from assertTableContains (including SQL errors
+	 * such as a missing table) is treated as "row absent" — confirm this is intended.
+	 *
+	 * @param connid connection index, used only in the failure message
+	 * @param conn the connection to query through
+	 * @param tbl the table name
+	 * @param kv alternating column names and values
+	 * @throws Exception if a matching row exists
+	 */
+	public void assertTableDoesNotContain(int connid, Connection conn, String tbl, Object... kv) throws Exception {
+		boolean throwit = true;
+		try {
+			assertTableContains(connid, conn, tbl, kv);
+		} catch (Exception x) {
+			throwit = false;
+		}
+		if (throwit) {
+			throw new Exception("Conn id "+connid+" Table "+tbl+" does have a row with "+catkeys(kv));
+		}
+	}
+	/**
+	 * Run a SELECT * against tbl with a WHERE clause built from kv.
+	 * The caller is responsible for closing the returned ResultSet.
+	 *
+	 * @param conn the connection to query through
+	 * @param tbl the table name
+	 * @param kv alternating column names and values for the WHERE clause
+	 * @return the open ResultSet
+	 * @throws SQLException on any database error
+	 */
+	public ResultSet getRow(Connection conn, String tbl, Object... kv) throws SQLException {
+		Statement stmt = conn.createStatement();
+		// Close the Statement automatically when the caller closes the ResultSet;
+		// previously the Statement was never closed and leaked on every call.
+		stmt.closeOnCompletion();
+		StringBuilder sql = new StringBuilder("SELECT * FROM ")
+			.append(tbl)
+			.append(" WHERE ")
+			.append(catkeys(kv));
+		return stmt.executeQuery(sql.toString());
+	}
+	/**
+	 * Concatenate alternating column/value pairs into an "A='x' AND B=2" style clause.
+	 * NOTE: this builds SQL by string concatenation; acceptable only because the
+	 * inputs are hard-coded test fixtures, never untrusted data.
+	 *
+	 * @param kv alternating column names and values; String values are single-quoted
+	 * @return the WHERE-clause fragment
+	 */
+	public String catkeys(Object... kv) {
+		StringBuilder sql = new StringBuilder();
+		String pfx = "";
+		for (int i = 0; (i+1) < kv.length; i += 2) {
+			sql.append(pfx).append(kv[i]).append("=");
+			if (kv[i+1] instanceof String) {
+				sql.append("'").append(kv[i+1]).append("'");
+			} else {
+				sql.append(kv[i+1].toString());
+			}
+			pfx = " AND ";
+		}
+		return sql.toString();
+	}
+}
diff --git a/src/main/java/com/att/research/mdbc/tests/Test_Delete.java b/src/main/java/com/att/research/mdbc/tests/Test_Delete.java
new file mode 100755
index 0000000..8017cb3
--- /dev/null
+++ b/src/main/java/com/att/research/mdbc/tests/Test_Delete.java
@@ -0,0 +1,70 @@
+package com.att.research.mdbc.tests;
+
+import java.sql.Connection;
+import java.sql.Statement;
+import java.util.ArrayList;
+import java.util.List;
+
+import org.json.JSONArray;
+import org.json.JSONObject;
+
+/**
+ * Test that DELETEs work on the original DB, and are correctly copied to replica DBs.
+ *
+ * @author Robert Eby
+ */
+public class Test_Delete extends Test {
+	// Table used by this test
+	private final String TBL = "DELTABLE";
+
+	/**
+	 * Execute the DELETE test: create DELTABLE on every replica, insert three rows
+	 * through the first connection, verify they replicate to all connections, then
+	 * delete one row and verify the deletion replicates as well.
+	 *
+	 * @param config the test configuration, including the "connections" array
+	 * @return a list of failure messages; empty on success
+	 */
+	@Override
+	public List<String> run(JSONObject config) {
+		List<String> msgs = new ArrayList<String>();
+		JSONArray connections = config.getJSONArray("connections");
+		final int ncons = connections.length();
+		Connection[] conn = new Connection[ncons];
+		Statement[] stmt = new Statement[ncons];
+		try {
+			// Open one connection (and one statement) per configured replica
+			for (int ix = 0; ix < ncons; ix++) {
+				conn[ix] = getDBConnection(buildProperties(config, ix));
+				assertNotNull(conn[ix]);
+				stmt[ix] = conn[ix].createStatement();
+				assertNotNull(stmt[ix]);
+			}
+
+			try {
+				for (int ix = 0; ix < ncons; ix++) {
+					conn[ix].setAutoCommit(true);
+					stmt[ix].execute("CREATE TABLE IF NOT EXISTS DELTABLE(ID_ varchar(255), RANDOMTXT varchar(255), primary key (ID_))");
+				}
+				// Insert through the first connection only; replication should copy the rows
+				stmt[0].execute("INSERT INTO DELTABLE(ID_, RANDOMTXT) VALUES('1', 'Everything''s Negotiable Except Cutting Medicaid')");
+				stmt[0].execute("INSERT INTO DELTABLE(ID_, RANDOMTXT) VALUES('2', 'Can a Sideways Elevator Help Designers Build Taller Skyscrapers?')");
+				stmt[0].execute("INSERT INTO DELTABLE(ID_, RANDOMTXT) VALUES('3', 'Can a Bernie Sanders Ally Win the Maryland Governor''s Mansion?')");
+				for (int ix = 0; ix < ncons; ix++) {
+					for (String id : new String[] { "1", "2", "3" }) {
+						assertTableContains(ix, conn[ix], TBL, "ID_", id);
+					}
+				}
+
+				// Delete one row through the first connection; the deletion should replicate
+				stmt[0].execute("DELETE FROM DELTABLE WHERE ID_ = '1'");
+				for (int ix = 0; ix < ncons; ix++) {
+					assertTableDoesNotContain(ix, conn[ix], TBL, "ID_", "1");
+					assertTableContains(ix, conn[ix], TBL, "ID_", "2");
+					assertTableContains(ix, conn[ix], TBL, "ID_", "3");
+				}
+			} catch (Exception e) {
+				msgs.add(e.toString());
+			} finally {
+				// Close statements first, then their connections
+				for (Statement s : stmt) {
+					if (s != null)
+						s.close();
+				}
+				for (Connection c : conn) {
+					if (c != null)
+						c.close();
+				}
+			}
+		} catch (Exception e) {
+			msgs.add(e.toString());
+		}
+		return msgs;
+	}
+}
diff --git a/src/main/java/com/att/research/mdbc/tests/Test_Insert.java b/src/main/java/com/att/research/mdbc/tests/Test_Insert.java
new file mode 100755
index 0000000..4c19dbd
--- /dev/null
+++ b/src/main/java/com/att/research/mdbc/tests/Test_Insert.java
@@ -0,0 +1,94 @@
+package com.att.research.mdbc.tests;
+
+import java.sql.Connection;
+import java.sql.ResultSet;
+import java.sql.Statement;
+import java.util.ArrayList;
+import java.util.List;
+
+import org.json.JSONArray;
+import org.json.JSONObject;
+
+/**
+ * Test that INSERTs work to the original DB, and are correctly copied to replica DBs.
+ *
+ * @author Robert Eby
+ */
+public class Test_Insert extends Test {
+	// Tables used by this test
+	private final String PERSON = "PERSON";
+	private final String SONG = "SONG";
+
+	/**
+	 * Execute the INSERT test: insert rows into PERSON and SONG through the first
+	 * connection and verify they replicate to every connection; also verify that an
+	 * UPDATE on PERSON replicates. SONG uses a composite primary key (ID_, PREF).
+	 *
+	 * @param config the test configuration, including the "connections" array
+	 * @return a list of failure messages; empty on success
+	 */
+	@Override
+	public List<String> run(JSONObject config) {
+		List<String> msgs = new ArrayList<String>();
+		JSONArray connections = config.getJSONArray("connections");
+		Connection[] conn = new Connection[connections.length()];
+		Statement[] stmt = new Statement[conn.length];
+		try {
+			// Open one connection (and one statement) per configured replica
+			for (int i = 0; i < conn.length; i++) {
+				conn[i] = getDBConnection(buildProperties(config, i));
+				assertNotNull(conn[i]);
+				stmt[i] = conn[i].createStatement();
+				assertNotNull(stmt[i]);
+			}
+
+			try {
+				for (int i = 0; i < conn.length; i++) {
+					conn[i].setAutoCommit(true);
+					stmt[i].execute("CREATE TABLE IF NOT EXISTS PERSON(ID_ varchar(255), NAME varchar(255), SSN varchar(255), primary key (ID_))");
+				}
+				stmt[0].execute("INSERT INTO PERSON(ID_, NAME, SSN) VALUES('1', 'Zaphod', '111-22-3333')");
+				stmt[0].execute("INSERT INTO PERSON(ID_, NAME, SSN) VALUES('2', 'Ripley', '444-55-6666')");
+				stmt[0].execute("INSERT INTO PERSON(ID_, NAME, SSN) VALUES('3', 'Spock', '777-88-9999')");
+				for (int i = 0; i < conn.length; i++) {
+					assertTableContains(i, conn[i], PERSON, "ID_", "1");
+					assertTableContains(i, conn[i], PERSON, "ID_", "2");
+					assertTableContains(i, conn[i], PERSON, "ID_", "3");
+				}
+
+				// Verify an UPDATE through connection 0 is visible on every connection
+				stmt[0].execute("UPDATE PERSON SET NAME = 'Jabba' WHERE ID_ = '2'");
+				for (int i = 0; i < conn.length; i++) {
+					ResultSet rs = getRow(conn[i], PERSON, "ID_", "2");
+					// try/finally so rs is closed even when the checks below throw;
+					// previously rs leaked on the failure paths.
+					try {
+						if (rs.next()) {
+							String v = rs.getString("NAME");
+							if (!v.equals("Jabba"))
+								throw new Exception("Table PERSON, row with ID_ = '2' was not updated.");
+						} else {
+							throw new Exception("Table PERSON does not have a row with ID_ = '2'");
+						}
+					} finally {
+						rs.close();
+					}
+				}
+
+				// SONG has a composite primary key (ID_, PREF)
+				for (int i = 0; i < conn.length; i++) {
+					stmt[i].execute("CREATE TABLE IF NOT EXISTS SONG(ID_ varchar(255), PREF int, ARIA varchar(255), primary key (ID_, PREF))");
+				}
+				stmt[0].execute("INSERT INTO SONG(ID_, PREF, ARIA) VALUES('1', 1, 'Nessun Dorma')");
+				stmt[0].execute("INSERT INTO SONG(ID_, PREF, ARIA) VALUES('2', 5, 'O mio Bambino Caro')");
+				stmt[0].execute("INSERT INTO SONG(ID_, PREF, ARIA) VALUES('2', 2, 'Sweet Georgia Brown')");
+				stmt[0].execute("INSERT INTO SONG(ID_, PREF, ARIA) VALUES('3', 77, 'Mud Flats Blues')");
+				stmt[0].execute("INSERT INTO SONG(ID_, PREF, ARIA) VALUES('3', 69, 'Me & Mr Jones')");
+				for (int i = 0; i < conn.length; i++) {
+					assertTableContains(i, conn[i], SONG, "ID_", "1", "PREF", 1);
+					assertTableContains(i, conn[i], SONG, "ID_", "2", "PREF", 5);
+					assertTableContains(i, conn[i], SONG, "ID_", "2", "PREF", 2);
+					assertTableContains(i, conn[i], SONG, "ID_", "3", "PREF", 77);
+					assertTableContains(i, conn[i], SONG, "ID_", "3", "PREF", 69);
+				}
+			} catch (Exception e) {
+				msgs.add(e.toString());
+			} finally {
+				// Close statements first, then their connections
+				for (int i = 0; i < stmt.length; i++) {
+					if (stmt[i] != null)
+						stmt[i].close();
+				}
+				for (int i = 0; i < conn.length; i++) {
+					if (conn[i] != null)
+						conn[i].close();
+				}
+			}
+		} catch (Exception e) {
+			msgs.add(e.toString());
+		}
+		return msgs;
+	}
+}
diff --git a/src/main/java/com/att/research/mdbc/tests/Test_Transactions.java b/src/main/java/com/att/research/mdbc/tests/Test_Transactions.java
new file mode 100755
index 0000000..1153c9b
--- /dev/null
+++ b/src/main/java/com/att/research/mdbc/tests/Test_Transactions.java
@@ -0,0 +1,74 @@
+package com.att.research.mdbc.tests;
+
+import java.sql.Connection;
+import java.sql.Statement;
+import java.util.ArrayList;
+import java.util.List;
+
+import org.json.JSONArray;
+import org.json.JSONObject;
+
+/**
+ * Test that transactions work between the original DB, and replica DBs.
+ *
+ * @author Robert Eby
+ */
+public class Test_Transactions extends Test {
+	// Table used by this test
+	private final String TBL = "TRANSTEST";
+
+	/**
+	 * Execute the transaction test: with autocommit disabled, rows inserted through
+	 * connection 0 must be visible on connection 0 but NOT on the other replicas
+	 * until commit(); after commit() they must be visible everywhere.
+	 *
+	 * @param config the test configuration, including the "connections" array
+	 * @return a list of failure messages; empty on success
+	 */
+	@Override
+	public List<String> run(JSONObject config) {
+		List<String> msgs = new ArrayList<String>();
+		JSONArray connections = config.getJSONArray("connections");
+		Connection[] conn = new Connection[connections.length()];
+		Statement[] stmt = new Statement[conn.length];
+		try {
+			// Open one connection (and one statement) per configured replica
+			for (int i = 0; i < conn.length; i++) {
+				conn[i] = getDBConnection(buildProperties(config, i));
+				assertNotNull(conn[i]);
+				stmt[i] = conn[i].createStatement();
+				assertNotNull(stmt[i]);
+			}
+
+			try {
+				// Create the table with autocommit on, then turn autocommit off so the
+				// inserts below form an uncommitted transaction on connection 0
+				for (int i = 0; i < conn.length; i++) {
+					conn[i].setAutoCommit(true);
+					stmt[i].execute("CREATE TABLE IF NOT EXISTS TRANSTEST(ID_ varchar(12), STUFF varchar(255), primary key (ID_))");
+					conn[i].setAutoCommit(false);
+				}
+				stmt[0].execute("INSERT INTO TRANSTEST(ID_, STUFF) VALUES('1', 'CenturyLink Now Under Fire on All Sides For Fraudulent Billing')");
+				stmt[0].execute("INSERT INTO TRANSTEST(ID_, STUFF) VALUES('2', 'Netflix Now in Half of All Broadband Households, Study Says')");
+				stmt[0].execute("INSERT INTO TRANSTEST(ID_, STUFF) VALUES('3', 'Private Data Of 6 Million Verizon Customers Exposed')");
+				// Uncommitted rows: visible on connection 0 only
+				assertTableContains(0, conn[0], TBL, "ID_", "1");
+				assertTableContains(0, conn[0], TBL, "ID_", "2");
+				assertTableContains(0, conn[0], TBL, "ID_", "3");
+				for (int i = 1; i < conn.length; i++) {
+					assertTableDoesNotContain(i, conn[i], TBL, "ID_", "1");
+					assertTableDoesNotContain(i, conn[i], TBL, "ID_", "2");
+					assertTableDoesNotContain(i, conn[i], TBL, "ID_", "3");
+				}
+				// After commit the rows must be visible on every connection
+				conn[0].commit();
+				for (int i = 0; i < conn.length; i++) {
+					assertTableContains(i, conn[i], TBL, "ID_", "1");
+					assertTableContains(i, conn[i], TBL, "ID_", "2");
+					assertTableContains(i, conn[i], TBL, "ID_", "3");
+				}
+
+			} catch (Exception e) {
+				msgs.add(e.toString());
+			} finally {
+				// Close statements first, then their connections
+				for (int i = 0; i < stmt.length; i++) {
+					if (stmt[i] != null)
+						stmt[i].close();
+				}
+				for (int i = 0; i < conn.length; i++) {
+					if (conn[i] != null)
+						conn[i].close();
+				}
+			}
+		} catch (Exception e) {
+			msgs.add(e.toString());
+		}
+		return msgs;
+	}
+}
diff --git a/src/main/java/com/att/research/mdbc/tests/package-info.java b/src/main/java/com/att/research/mdbc/tests/package-info.java
new file mode 100755
index 0000000..ee993db
--- /dev/null
+++ b/src/main/java/com/att/research/mdbc/tests/package-info.java
@@ -0,0 +1,165 @@
+/**
+ * <p>
+ * This package provides a testing harness to test the various features of MDBC against
+ * multiple combinations of database and MUSIC mixins. The configurations (consisting of
+ * database information and mixin combinations) to test, as well as the specific tests to
+ * run are all defined in the configuration file <code>tests.json</code>.
+ * </p>
+ * <p>
+ * To run the tests against all the configurations specified in /tests.json, do the following:
+ * </p>
+ * <pre>
+ * java com.att.research.mdbc.tests.MAIN [ configfile ]
+ * </pre>
+ * <p>
+ * It is assumed that a copy of Cassandra is running locally on port 9042,
+ * that a copy of H2 server is running locally on port 8082,
+ * and that a copy of MySQL (or MariaDB) is running locally on port 3306.
+ * These can be adjusted by editing the /tests.json file.
+ * </p>
+ * <p>
+ * When building a copy of MDBC for production use, this package can be safely removed.
+ * </p>
+ * <p>
+ * The initial copy of <i>tests.json</i> is as follows:
+ * </p>
+ * <pre>
+ * {
+ * "tests": [
+ * "com.att.research.mdbc.tests.Test_Insert",
+ * "com.att.research.mdbc.tests.Test_Delete",
+ * "com.att.research.mdbc.tests.Test_Transactions"
+ * ],
+ * "configs": [
+ * {
+ * "description": "H2 with Cassandra with two connections",
+ * "MDBC_DB_MIXIN": "h2",
+ * "MDBC_MUSIC_MIXIN": "cassandra",
+ * "replicas": "0,1",
+ * "music_keyspace": "mdbctest1",
+ * "music_address": "localhost",
+ * "music_rfactor": "1",
+ * "connections": [
+ * {
+ * "name": "Connection 0",
+ * "url": "jdbc:mdbc:mem:db0",
+ * "user": "",
+ * "password": "",
+ * "myid": "0"
+ * },
+ * {
+ * "name": "Connection 1",
+ * "url": "jdbc:mdbc:mem:db1",
+ * "user": "",
+ * "password": "",
+ * "myid": "1"
+ * }
+ * ]
+ * },
+ * {
+ * "description": "H2 with Cassandra2 with three connections",
+ * "MDBC_DB_MIXIN": "h2",
+ * "MDBC_MUSIC_MIXIN": "cassandra2",
+ * "replicas": "0,1,2",
+ * "music_keyspace": "mdbctest2",
+ * "music_address": "localhost",
+ * "music_rfactor": "1",
+ * "user": "",
+ * "password": "",
+ * "connections": [
+ * {
+ * "name": "Connection 0",
+ * "url": "jdbc:mdbc:mem:db0",
+ * "myid": "0"
+ * },
+ * {
+ * "name": "Connection 1",
+ * "url": "jdbc:mdbc:mem:db1",
+ * "myid": "1"
+ * },
+ * {
+ * "name": "Connection 2",
+ * "url": "jdbc:mdbc:mem:db2",
+ * "myid": "2"
+ * }
+ * ]
+ * },
+ * {
+ * "description": "H2 Server with Cassandra2 with two connections",
+ * "MDBC_DB_MIXIN": "h2server",
+ * "MDBC_MUSIC_MIXIN": "cassandra2",
+ * "replicas": "0,1",
+ * "music_keyspace": "mdbctest3",
+ * "music_address": "localhost",
+ * "music_rfactor": "1",
+ * "connections": [
+ * {
+ * "name": "Connection 0",
+ * "url": "jdbc:mdbc:tcp://localhost/mdbc0",
+ * "user": "",
+ * "password": "",
+ * "myid": "0"
+ * },
+ * {
+ * "name": "Connection 1",
+ * "url": "jdbc:mdbc:tcp://localhost/mdbc1",
+ * "user": "",
+ * "password": "",
+ * "myid": "1"
+ * }
+ * ]
+ * },
+ * {
+ * "description": "MySQL with Cassandra2 with two connections",
+ * "MDBC_DB_MIXIN": "mysql",
+ * "MDBC_MUSIC_MIXIN": "cassandra2",
+ * "replicas": "0,1,2",
+ * "music_keyspace": "mdbctest4",
+ * "music_address": "localhost",
+ * "music_rfactor": "1",
+ * "user": "root",
+ * "password": "abc123",
+ * "connections": [
+ * {
+ * "name": "Connection 0",
+ * "url": "jdbc:mdbc://127.0.0.1:3306/mdbc",
+ * "myid": "0"
+ * },
+ * {
+ * "name": "Connection 1",
+ * "url": "jdbc:mdbc://127.0.0.1:3306/mdbc2",
+ * "myid": "1"
+ * }
+ * ]
+ * },
+ * {
+ * "description": "H2 (DB #1) and MySQL (DB #2) with Cassandra2",
+ * "MDBC_MUSIC_MIXIN": "cassandra2",
+ * "replicas": "0,1",
+ * "music_keyspace": "mdbctest5",
+ * "music_address": "localhost",
+ * "music_rfactor": "1",
+ * "connections": [
+ * {
+ * "name": "Connection 0",
+ * "MDBC_DB_MIXIN": "h2",
+ * "url": "jdbc:mdbc:mem:db9",
+ * "user": "",
+ * "password": "",
+ * "myid": "0"
+ * },
+ * {
+ * "name": "Connection 1",
+ * "MDBC_DB_MIXIN": "mysql",
+ * "url": "jdbc:mdbc://127.0.0.1:3306/mdbc3",
+ * "user": "root",
+ * "password": "abc123",
+ * "myid": "1"
+ * }
+ * ]
+ * }
+ * ]
+ * }
+ * </pre>
+ */
+package com.att.research.mdbc.tests;
diff --git a/src/main/java/com/att/research/mdbc/tools/CreateNodeConfigurations.java b/src/main/java/com/att/research/mdbc/tools/CreateNodeConfigurations.java
new file mode 100644
index 0000000..555b863
--- /dev/null
+++ b/src/main/java/com/att/research/mdbc/tools/CreateNodeConfigurations.java
@@ -0,0 +1,71 @@
+package com.att.research.mdbc.tools;
+
+import com.att.research.exceptions.MDBCServiceException;
+import com.att.research.logging.EELFLoggerDelegate;
+import com.att.research.mdbc.configurations.NodeConfiguration;
+import com.att.research.mdbc.configurations.TablesConfiguration;
+import com.beust.jcommander.JCommander;
+import com.beust.jcommander.Parameter;
+import org.onap.music.main.MusicPureCassaCore;
+
+import java.io.FileNotFoundException;
+import java.util.List;
+
+/**
+ * Command-line tool that reads a tables/partitions configuration file and writes
+ * one node-configuration JSON file per node into the given output directory.
+ */
+public class CreateNodeConfigurations {
+	public static final EELFLoggerDelegate LOG = EELFLoggerDelegate.getLogger(CreateNodeConfigurations.class);
+
+	// NOTE(review): this field is never read or written in this class — candidate for removal
+	private String tables;
+	@Parameter(names = { "-t", "--table-configurations" }, required = true,
+			description = "This is the input file that is going to have the configuration for all the tables and partitions")
+	private String tableConfigurationsFile;
+	@Parameter(names = { "-b", "--basename" }, required = true,
+			description = "This base name for all the outputs files that are going to be created")
+	private String basename;
+	@Parameter(names = { "-o", "--output-dir" }, required = true,
+			description = "This is the output directory that is going to contain all the configuration file to be generated")
+	private String outputDirectory;
+	@Parameter(names = { "-h", "-help", "--help" }, help = true,
+			description = "Print the help message")
+	private boolean help = false;
+
+	// Parsed contents of tableConfigurationsFile
+	private TablesConfiguration inputConfig;
+
+	public CreateNodeConfigurations(){}
+
+
+	/**
+	 * Parse the input file into inputConfig; exits the JVM if the file
+	 * cannot be found.
+	 */
+	public void readInput(){
+		try {
+			inputConfig = TablesConfiguration.readJsonFromFile(tableConfigurationsFile);
+		} catch (FileNotFoundException e) {
+			LOG.error("Input file is invalid or not found");
+			System.exit(1);
+		}
+	}
+
+	/**
+	 * Generate one NodeConfiguration per node and save each to
+	 * "outputDirectory/basename-NAME.json". Exits the JVM on failure.
+	 */
+	public void createAndSaveNodeConfigurations(){
+		List<NodeConfiguration> nodes;
+		try {
+			nodes = inputConfig.initializeAndCreateNodeConfigurations();
+		} catch (MDBCServiceException e) {
+			// Previously this only printed the stack trace and fell through with
+			// nodes == null, causing a NullPointerException in the loop below.
+			LOG.error("Error initializing node configurations: "+e);
+			System.exit(1);
+			return;
+		}
+		int counter = 0;
+		for(NodeConfiguration nodeConfig : nodes){
+			// Use the node's own name when present, otherwise a running counter
+			String name = (nodeConfig.nodeName==null||nodeConfig.nodeName.isEmpty())?Integer.toString(counter++): nodeConfig.nodeName;
+			nodeConfig.saveToFile(outputDirectory+"/"+basename+"-"+name+".json");
+		}
+	}
+
+	/**
+	 * Entry point: parse arguments, read the input file, and emit the
+	 * per-node configuration files.
+	 */
+	public static void main(String[] args) {
+		CreateNodeConfigurations configs = new CreateNodeConfigurations();
+		@SuppressWarnings("deprecation")
+		JCommander jc = new JCommander(configs, args);
+		if (configs.help) {
+			jc.usage();
+			System.exit(1);
+			return;
+		}
+		configs.readInput();
+		configs.createAndSaveNodeConfigurations();
+	}
+}
diff --git a/src/main/java/com/att/research/mdbc/tools/CreatePartition.java b/src/main/java/com/att/research/mdbc/tools/CreatePartition.java
new file mode 100644
index 0000000..09524cb
--- /dev/null
+++ b/src/main/java/com/att/research/mdbc/tools/CreatePartition.java
@@ -0,0 +1,66 @@
+package com.att.research.mdbc.tools;
+
+import com.att.research.logging.EELFLoggerDelegate;
+import com.att.research.mdbc.DatabasePartition;
+import com.att.research.mdbc.MDBCUtils;
+import com.att.research.mdbc.Range;
+import com.att.research.mdbc.configurations.NodeConfiguration;
+import com.beust.jcommander.JCommander;
+import com.beust.jcommander.Parameter;
+
+import java.io.IOException;
+import java.util.HashSet;
+import java.util.Set;
+
+/**
+ * Command-line tool that builds a single partition (node) configuration file
+ * from the given table, TiT, redo-records and partition-id parameters.
+ */
+public class CreatePartition {
+	public static final EELFLoggerDelegate LOG = EELFLoggerDelegate.getLogger(CreatePartition.class);
+
+	@Parameter(names = { "-t", "--tables" }, required = true,
+			description = "This is the tables that are assigned to this ")
+	private String tables;
+	@Parameter(names = { "-f", "--file" }, required = true,
+			description = "This is the output file that is going to have the configuration for the ranges")
+	private String file;
+	@Parameter(names = { "-i", "--tit-index" }, required = true,
+			description = "Index in the TiT Table")
+	private String titIndex;
+	@Parameter(names = { "-n", "--tit-table-name" }, required = true,
+			description = "Tit Table name")
+	private String titTable;
+	@Parameter(names = { "-r", "--redorecords-table-name" }, required = true,
+			description = "Redo Records Table name")
+	private String rrTable;
+	@Parameter(names = { "-p", "--partition-id" }, required = true,
+			description = "Partition Id")
+	private String partitionId;
+	@Parameter(names = { "-h", "-help", "--help" }, help = true,
+			description = "Print the help message")
+	private boolean help = false;
+
+	// Configuration built by convert() and written out by saveToFile()
+	NodeConfiguration config;
+
+	public CreatePartition(){
+	}
+
+	/** Build the NodeConfiguration from the parsed command-line parameters. */
+	public void convert(){
+		config = new NodeConfiguration(tables,titIndex,titTable,partitionId,"test","",rrTable);
+	}
+
+	/** Write the generated configuration to the requested output file. */
+	public void saveToFile(){
+		config.saveToFile(file);
+	}
+
+	/** Entry point: parse arguments, build the configuration, and save it. */
+	public static void main(String[] args) {
+		CreatePartition tool = new CreatePartition();
+		@SuppressWarnings("deprecation")
+		JCommander parser = new JCommander(tool, args);
+		if (tool.help) {
+			parser.usage();
+			System.exit(1);
+			return;
+		}
+		tool.convert();
+		tool.saveToFile();
+	}
+}
diff --git a/src/main/javadoc/overview.html b/src/main/javadoc/overview.html
new file mode 100755
index 0000000..162e7ec
--- /dev/null
+++ b/src/main/javadoc/overview.html
@@ -0,0 +1,37 @@
+<!DOCTYPE HTML>
+<html>
+<body>
+
+<p>
+MDBC is a JDBC driver that provides an application programmer a way to automatically copy
+data to and from a running copy of MUSIC/Cassandra.
+The driver does this by intercepting all DELETES, INSERTS, SELECTS, and UPDATES and copying
+modified table rows to and from Cassandra.
+</p>
+<p>
+MDBC can automatically adapt to the database in use, as well as the style of MUSIC interaction,
+by the use of "mixins". Each JDBC Connection via MDBC specifies two mixins to use:
+</p>
+<ul>
+<li>a Mixin that conforms to the {@link com.att.research.mdbc.mixins.MusicInterface} specification
+for communicating with MUSIC/Cassandra.</li>
+<li>a Mixin that conforms to the {@link com.att.research.mdbc.mixins.DBInterface} specification
+for communicating with the underlying database in use.</li>
+</ul>
+<p>
+More details are provided on the package pages for
+<code>com.att.research.mdbc</code> and
+<code>com.att.research.mdbc.mixins</code>.
+</p>
+<h2>Limitations</h2>
+There are several limitations to the use of MDBC:
+<ul>
+<li>The <code>java.sql.Statement.executeBatch()</code> method is not supported by MDBC.
+It is not prohibited either; your results will be unpredictable.</li>
+<li>When used with a DB server, there is some delay as dirty row information is copied
+from a table in the database, to the dirty table in Cassandra. This opens a window
+during which all sorts of mischief may occur.</li>
+</ul>
+
+</body>
+</html>
diff --git a/src/main/resources/META-INF/services/java.sql.Driver b/src/main/resources/META-INF/services/java.sql.Driver
new file mode 100755
index 0000000..7228fe7
--- /dev/null
+++ b/src/main/resources/META-INF/services/java.sql.Driver
@@ -0,0 +1 @@
+com.att.research.mdbc.ProxyDriver
diff --git a/src/main/resources/log4j.properties b/src/main/resources/log4j.properties
new file mode 100755
index 0000000..88ee415
--- /dev/null
+++ b/src/main/resources/log4j.properties
@@ -0,0 +1,14 @@
+log4j.rootLogger=DEBUG, stdout
+
+# Direct log messages to stdout
+log4j.appender.stdout=org.apache.log4j.ConsoleAppender
+log4j.appender.stdout.Target=System.out
+log4j.appender.stdout.layout=org.apache.log4j.PatternLayout
+log4j.appender.stdout.layout.ConversionPattern=%d{yyyy-MM-dd HH:mm:ss} %-5p %c{1}:%L - %m%n
+
+#log4j.appender.filelog=org.apache.log4j.DailyRollingFileAppender
+#log4j.appender.filelog.file=/tmp/log
+#log4j.appender.filelog.datePattern='.'yyyyMMdd
+#log4j.appender.filelog.append=true
+#log4j.appender.filelog.layout=org.apache.log4j.PatternLayout
+#log4j.appender.filelog.layout.ConversionPattern=%d %-5p [%t] - %m%n
diff --git a/src/main/resources/logback.xml b/src/main/resources/logback.xml
new file mode 100644
index 0000000..df02405
--- /dev/null
+++ b/src/main/resources/logback.xml
@@ -0,0 +1,370 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!--
+ ============LICENSE_START==========================================
+ mdbc
+ ===================================================================
+ Copyright © 2017 AT&T Intellectual Property. All rights reserved.
+ ===================================================================
+
+ Unless otherwise specified, all software contained herein is licensed
+ under the Apache License, Version 2.0 (the "License");
+ you may not use this software except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+
+ Unless otherwise specified, all documentation contained herein is licensed
+ under the Creative Commons License, Attribution 4.0 Intl. (the "License");
+ you may not use this documentation except in compliance with the License.
+ You may obtain a copy of the License at
+
+ https://creativecommons.org/licenses/by/4.0/
+
+ Unless required by applicable law or agreed to in writing, documentation
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+
+ ============LICENSE_END============================================
+
+
+ -->
+
+<configuration scan="true" scanPeriod="3 seconds" debug="true">
+ <!--
+ Logback files for the mdbc Driver "mdbc"
+ are created in directory ${catalina.base}/logs/mdbc;
+ e.g., apache-tomcat-8.0.35/logs/mdbc/application.log
+ -->
+ <!--<jmxConfigurator /> -->
+
+ <!-- specify the component name -->
+ <property name="componentName" value="mdbc"></property>
+
+ <!-- specify the base path of the log directory -->
+ <property name="logDirPrefix" value="${catalina.base}/logs"></property>
+
+ <!-- The directories where logs are written -->
+ <property name="logDirectory" value="${logDirPrefix}/${componentName}" />
+ <!-- Can easily relocate debug logs by modifying this path. -->
+ <property name="debugLogDirectory" value="${logDirPrefix}/${componentName}" />
+
+ <!-- log file names -->
+ <property name="generalLogName" value="application" />
+ <property name="errorLogName" value="error" />
+ <property name="metricsLogName" value="metrics" />
+ <property name="auditLogName" value="audit" />
+ <property name="debugLogName" value="debug" />
+ <!--
+ These loggers are not used in code (yet).
+ <property name="securityLogName" value="security" />
+ <property name="policyLogName" value="policy" />
+ <property name="performanceLogName" value="performance" />
+ <property name="serverLogName" value="server" />
+ -->
+
+ <!-- 1610 Logging Fields Format Revisions -->
+ <property name="auditLoggerPattern"
+ value="%X{AuditLogBeginTimestamp}|%X{AuditLogEndTimestamp}|%X{RequestId}|%X{ServiceInstanceId}|%thread|%X{VirtualServerName}|%X{ServiceName}|%X{PartnerName}|%X{StatusCode}|%X{ResponseCode}|%X{ResponseDescription}|%X{InstanceUUID}|%.-5level|%X{AlertSeverity}|%X{ServerIPAddress}|%X{Timer}|%X{ServerFQDN}|%X{ClientIPAddress}|%X{ClassName}|%X{Unused}|%X{ProcessKey}|%X{CustomField1}|%X{CustomField2}|%X{CustomField3}|%X{CustomField4}| %msg%n" />
+
+ <property name="metricsLoggerPattern"
+ value="%X{MetricsLogBeginTimestamp}|%X{MetricsLogEndTimestamp}|%X{RequestId}|%X{ServiceInstanceId}|%thread|%X{VirtualServerName}|%X{ServiceName}|%X{PartnerName}|%X{TargetEntity}|%X{TargetServiceName}|%X{StatusCode}|%X{ResponseCode}|%X{ResponseDescription}|%X{InstanceUUID}|%.-5level|%X{AlertSeverity}|%X{ServerIPAddress}|%X{Timer}|%X{ServerFQDN}|%X{ClientIPAddress}|%X{ClassName}|%X{Unused}|%X{ProcessKey}|%X{TargetVisualEntity}|%X{CustomField1}|%X{CustomField2}|%X{CustomField3}|%X{CustomField4}| %msg%n" />
+
+ <property name="errorLoggerPattern"
+ value="%date{yyyy-MM-dd'T'HH:mm:ss.SSSXXX}|%X{RequestId}|%thread|%X{ServiceName}|%X{PartnerName}|%X{TargetEntity}|%X{TargetServiceName}|%X{ClassName}|%X{AlertSeverity}|%X{ErrorCode}|%X{ErrorDescription}| %msg%n" />
+
+ <property name="defaultLoggerPattern"
+ value="%date{yyyy-MM-dd'T'HH:mm:ss.SSSXXX}|%X{RequestId}|%thread|%X{ClassName}| %msg%n" />
+
+ <!-- use %class so library logging calls yield their class name -->
+ <property name="applicationLoggerPattern"
+ value="%date{yyyy-MM-dd'T'HH:mm:ss.SSSXXX}|%X{RequestId}|%thread|%class{36}| %msg%n" />
+
+ <!-- Example evaluator filter applied against console appender -->
+ <appender name="STDOUT" class="ch.qos.logback.core.ConsoleAppender">
+ <encoder>
+ <pattern>${defaultLoggerPattern}</pattern>
+ </encoder>
+ </appender>
+
+ <!-- ============================================================================ -->
+ <!-- EELF Appenders -->
+ <!-- ============================================================================ -->
+
+ <!-- The EELFAppender is used to record events to the general application
+ log -->
+
+
+ <appender name="EELF"
+ class="ch.qos.logback.core.rolling.RollingFileAppender">
+ <file>${logDirectory}/${generalLogName}.log</file>
+ <rollingPolicy class="ch.qos.logback.core.rolling.TimeBasedRollingPolicy">
+ <!-- daily rollover -->
+ <fileNamePattern>${logDirectory}/${generalLogName}.%d{yyyy-MM-dd}.log.zip</fileNamePattern>
+
+ <!-- keep 30 days' worth of history capped at 3GB total size -->
+ <maxHistory>30</maxHistory>
+ <totalSizeCap>3GB</totalSizeCap>
+
+ </rollingPolicy>
+ <encoder>
+ <pattern>${applicationLoggerPattern}</pattern>
+ </encoder>
+ </appender>
+
+ <appender name="asyncEELF" class="ch.qos.logback.classic.AsyncAppender">
+ <queueSize>256</queueSize>
+ <!-- Class name is part of caller data -->
+ <includeCallerData>true</includeCallerData>
+ <appender-ref ref="EELF" />
+ </appender>
+
+ <!-- EELF Security Appender. This appender is used to record security events
+ to the security log file. Security events are separate from other loggers
+ in EELF so that security log records can be captured and managed in a secure
+ way separate from the other logs. This appender is set to never discard any
+ events. -->
+ <!--
+ <appender name="EELFSecurity"
+ class="ch.qos.logback.core.rolling.RollingFileAppender">
+ <file>${logDirectory}/${securityLogName}.log</file>
+ <rollingPolicy
+ class="ch.qos.logback.core.rolling.FixedWindowRollingPolicy">
+ <fileNamePattern>${logDirectory}/${securityLogName}.%i.log.zip
+ </fileNamePattern>
+ <minIndex>1</minIndex>
+ <maxIndex>9</maxIndex>
+ </rollingPolicy>
+ <triggeringPolicy
+ class="ch.qos.logback.core.rolling.SizeBasedTriggeringPolicy">
+ <maxFileSize>5MB</maxFileSize>
+ </triggeringPolicy>
+ <encoder>
+ <pattern>${defaultPattern}</pattern>
+ </encoder>
+ </appender>
+
+ <appender name="asyncEELFSecurity" class="ch.qos.logback.classic.AsyncAppender">
+ <queueSize>256</queueSize>
+ <discardingThreshold>0</discardingThreshold>
+ <appender-ref ref="EELFSecurity" />
+ </appender>
+ -->
+
+ <!-- EELF Performance Appender. This appender is used to record performance
+ records. -->
+ <!--
+ <appender name="EELFPerformance"
+ class="ch.qos.logback.core.rolling.RollingFileAppender">
+ <file>${logDirectory}/${performanceLogName}.log</file>
+ <rollingPolicy
+ class="ch.qos.logback.core.rolling.FixedWindowRollingPolicy">
+ <fileNamePattern>${logDirectory}/${performanceLogName}.%i.log.zip
+ </fileNamePattern>
+ <minIndex>1</minIndex>
+ <maxIndex>9</maxIndex>
+ </rollingPolicy>
+ <triggeringPolicy
+ class="ch.qos.logback.core.rolling.SizeBasedTriggeringPolicy">
+ <maxFileSize>5MB</maxFileSize>
+ </triggeringPolicy>
+ <encoder>
+ <outputPatternAsHeader>true</outputPatternAsHeader>
+ <pattern>${defaultPattern}</pattern>
+ </encoder>
+ </appender>
+ <appender name="asyncEELFPerformance" class="ch.qos.logback.classic.AsyncAppender">
+ <queueSize>256</queueSize>
+ <appender-ref ref="EELFPerformance" />
+ </appender>
+ -->
+
+ <!-- EELF Server Appender. This appender is used to record Server related
+ logging events. The Server logger and appender are specializations of the
+ EELF application root logger and appender. This can be used to segregate Server
+ events from other components, or it can be eliminated to record these events
+ as part of the application root log. -->
+ <!--
+ <appender name="EELFServer"
+ class="ch.qos.logback.core.rolling.RollingFileAppender">
+ <file>${logDirectory}/${serverLogName}.log</file>
+ <rollingPolicy
+ class="ch.qos.logback.core.rolling.FixedWindowRollingPolicy">
+ <fileNamePattern>${logDirectory}/${serverLogName}.%i.log.zip
+ </fileNamePattern>
+ <minIndex>1</minIndex>
+ <maxIndex>9</maxIndex>
+ </rollingPolicy>
+ <triggeringPolicy
+ class="ch.qos.logback.core.rolling.SizeBasedTriggeringPolicy">
+ <maxFileSize>5MB</maxFileSize>
+ </triggeringPolicy>
+ <encoder>
+ <pattern>${defaultPattern}</pattern>
+ </encoder>
+ </appender>
+ <appender name="asyncEELFServer" class="ch.qos.logback.classic.AsyncAppender">
+ <queueSize>256</queueSize>
+ <appender-ref ref="EELFServer" />
+ </appender>
+ -->
+
+ <!-- EELF Policy Appender. This appender is used to record Policy engine
+ related logging events. The Policy logger and appender are specializations
+ of the EELF application root logger and appender. This can be used to segregate
+ Policy engine events from other components, or it can be eliminated to record
+ these events as part of the application root log. -->
+ <!--
+ <appender name="EELFPolicy"
+ class="ch.qos.logback.core.rolling.RollingFileAppender">
+ <file>${logDirectory}/${policyLogName}.log</file>
+ <rollingPolicy
+ class="ch.qos.logback.core.rolling.FixedWindowRollingPolicy">
+ <fileNamePattern>${logDirectory}/${policyLogName}.%i.log.zip
+ </fileNamePattern>
+ <minIndex>1</minIndex>
+ <maxIndex>9</maxIndex>
+ </rollingPolicy>
+ <triggeringPolicy
+ class="ch.qos.logback.core.rolling.SizeBasedTriggeringPolicy">
+ <maxFileSize>5MB</maxFileSize>
+ </triggeringPolicy>
+ <encoder>
+ <pattern>${defaultPattern}</pattern>
+ </encoder>
+ </appender>
+ <appender name="asyncEELFPolicy" class="ch.qos.logback.classic.AsyncAppender">
+ <queueSize>256</queueSize>
+ <appender-ref ref="EELFPolicy" />
+ </appender>
+ -->
+
+ <!-- EELF Audit Appender. This appender is used to record audit engine
+ related logging events. The audit logger and appender are specializations
+ of the EELF application root logger and appender. This can be used to segregate
+ audit events from other components, or it can be eliminated to record
+ these events as part of the application root log. -->
+
+ <appender name="EELFAudit"
+ class="ch.qos.logback.core.rolling.RollingFileAppender">
+ <file>${logDirectory}/${auditLogName}.log</file>
+ <rollingPolicy class="ch.qos.logback.core.rolling.TimeBasedRollingPolicy">
+ <!-- daily rollover -->
+ <fileNamePattern>${logDirectory}/${auditLogName}.%d{yyyy-MM-dd}.log.zip</fileNamePattern>
+
+ <!-- keep 30 days' worth of history capped at 3GB total size -->
+ <maxHistory>30</maxHistory>
+ <totalSizeCap>3GB</totalSizeCap>
+
+ </rollingPolicy>
+ <encoder>
+ <pattern>${auditLoggerPattern}</pattern>
+ </encoder>
+ </appender>
+ <appender name="asyncEELFAudit" class="ch.qos.logback.classic.AsyncAppender">
+ <queueSize>256</queueSize>
+ <appender-ref ref="EELFAudit" />
+ </appender>
+
+ <appender name="EELFMetrics"
+ class="ch.qos.logback.core.rolling.RollingFileAppender">
+ <file>${logDirectory}/${metricsLogName}.log</file>
+ <rollingPolicy class="ch.qos.logback.core.rolling.TimeBasedRollingPolicy">
+ <!-- daily rollover -->
+ <fileNamePattern>${logDirectory}/${metricsLogName}.%d{yyyy-MM-dd}.log.zip</fileNamePattern>
+
+ <!-- keep 30 days' worth of history capped at 3GB total size -->
+ <maxHistory>30</maxHistory>
+ <totalSizeCap>3GB</totalSizeCap>
+
+ </rollingPolicy>
+ <encoder>
+ <pattern>${metricsLoggerPattern}</pattern>
+ </encoder>
+ </appender>
+
+
+ <appender name="asyncEELFMetrics" class="ch.qos.logback.classic.AsyncAppender">
+ <queueSize>256</queueSize>
+ <appender-ref ref="EELFMetrics"/>
+ </appender>
+
+ <appender name="EELFError"
+ class="ch.qos.logback.core.rolling.RollingFileAppender">
+ <file>${logDirectory}/${errorLogName}.log</file>
+ <rollingPolicy class="ch.qos.logback.core.rolling.TimeBasedRollingPolicy">
+ <!-- daily rollover -->
+ <fileNamePattern>${logDirectory}/${errorLogName}.%d{yyyy-MM-dd}.log.zip</fileNamePattern>
+
+ <!-- keep 30 days' worth of history capped at 3GB total size -->
+ <maxHistory>30</maxHistory>
+ <totalSizeCap>3GB</totalSizeCap>
+
+ </rollingPolicy>
+ <encoder>
+ <pattern>${errorLoggerPattern}</pattern>
+ </encoder>
+ </appender>
+
+ <appender name="asyncEELFError" class="ch.qos.logback.classic.AsyncAppender">
+ <queueSize>256</queueSize>
+ <appender-ref ref="EELFError"/>
+ </appender>
+
+ <appender name="EELFDebug"
+ class="ch.qos.logback.core.rolling.RollingFileAppender">
+ <file>${debugLogDirectory}/${debugLogName}.log</file>
+ <rollingPolicy class="ch.qos.logback.core.rolling.TimeBasedRollingPolicy">
+ <!-- daily rollover -->
+ <fileNamePattern>${logDirectory}/${debugLogName}.%d{yyyy-MM-dd}.log.zip</fileNamePattern>
+
+ <!-- keep 30 days' worth of history capped at 3GB total size -->
+ <maxHistory>30</maxHistory>
+ <totalSizeCap>3GB</totalSizeCap>
+
+ </rollingPolicy>
+ <encoder>
+ <pattern>${defaultLoggerPattern}</pattern>
+ </encoder>
+ </appender>
+
+ <appender name="asyncEELFDebug" class="ch.qos.logback.classic.AsyncAppender">
+ <queueSize>256</queueSize>
+ <appender-ref ref="EELFDebug" />
+ </appender>
+
+
+
+
+ <logger name="com.att.eelf" level="info" additivity="false">
+ <appender-ref ref="asyncEELF" />
+ </logger>
+
+ <logger name="com.att.eelf" level="info" additivity="false">
+ <appender-ref ref="asyncEELFAudit" />
+ </logger>
+
+ <logger name="com.att.eelf" level="debug" additivity="false">
+ <appender-ref ref="asyncEELFDebug" />
+ </logger>
+
+ <logger name="com.att.eelf.error" level="info" additivity="false">
+ <appender-ref ref="asyncEELFError" />
+ </logger>
+
+ <logger name="com.att.eelf.metrics" level="info" additivity="false">
+ <appender-ref ref="asyncEELFMetrics" />
+ </logger>
+
+ <root level="DEBUG">
+ <appender-ref ref="asyncEELF" />
+ </root>
+
+</configuration>
diff --git a/src/main/resources/mdbc.properties b/src/main/resources/mdbc.properties
new file mode 100755
index 0000000..f6e722c
--- /dev/null
+++ b/src/main/resources/mdbc.properties
@@ -0,0 +1,12 @@
+#
+# A list of all Mixins that should be checked by MDBC
+#
+MIXINS= \
+ com.att.research.mdbc.mixins.H2Mixin \
+ com.att.research.mdbc.mixins.H2ServerMixin \
+ com.att.research.mdbc.mixins.MySQLMixin \
+ com.att.research.mdbc.mixins.CassandraMixin \
+ com.att.research.mdbc.mixins.Cassandra2Mixin
+
+critical.tables= \
+ TEST \ No newline at end of file
diff --git a/src/main/resources/mdbc_driver.properties b/src/main/resources/mdbc_driver.properties
new file mode 100644
index 0000000..1549d5f
--- /dev/null
+++ b/src/main/resources/mdbc_driver.properties
@@ -0,0 +1,13 @@
+#
+# A list of all Mixins that should be checked by MDBC
+#
+MIXINS= \
+ com.att.research.mdbc.mixins.H2Mixin \
+ com.att.research.mdbc.mixins.H2ServerMixin \
+ com.att.research.mdbc.mixins.MySQLMixin \
+ com.att.research.mdbc.mixins.CassandraMixin \
+ com.att.research.mdbc.mixins.Cassandra2Mixin
+
+DEFAULT_DRIVERS=\
+ org.h2.Driver \
+ com.mysql.jdbc.Driver \ No newline at end of file
diff --git a/src/main/resources/music.properties b/src/main/resources/music.properties
new file mode 100644
index 0000000..204e3f6
--- /dev/null
+++ b/src/main/resources/music.properties
@@ -0,0 +1,8 @@
+cassandra.host =\
+ 143.215.128.49
+cassandra.user =\
+ cassandra
+cassandra.password =\
+ cassandra
+zookeeper.host =\
+ localhost \ No newline at end of file
diff --git a/src/main/resources/tests.json b/src/main/resources/tests.json
new file mode 100755
index 0000000..ac9221e
--- /dev/null
+++ b/src/main/resources/tests.json
@@ -0,0 +1,163 @@
+{
+ "tests": [
+ "com.att.research.mdbc.tests.Test_Insert",
+ "com.att.research.mdbc.tests.Test_Delete",
+ "com.att.research.mdbc.tests.Test_Transactions"
+ ],
+ "configs": [
+ {
+ "description": "H2 with Cassandra with two connections",
+ "MDBC_DB_MIXIN": "h2",
+ "MDBC_MUSIC_MIXIN": "cassandra",
+ "replicas": "0,1",
+ "music_keyspace": "mdbctest1",
+ "music_address": "localhost",
+ "music_rfactor": "1",
+ "connections": [
+ {
+ "name": "Connection 0",
+ "url": "jdbc:mdbc:mem:db0",
+ "user": "",
+ "password": "",
+ "myid": "0"
+ },
+ {
+ "name": "Connection 1",
+ "url": "jdbc:mdbc:mem:db1",
+ "user": "",
+ "password": "",
+ "myid": "1"
+ }
+ ]
+ },
+ {
+ "description": "H2 with Cassandra2 with three connections",
+ "MDBC_DB_MIXIN": "h2",
+ "MDBC_MUSIC_MIXIN": "cassandra2",
+ "replicas": "0,1,2",
+ "music_keyspace": "mdbctest2",
+ "music_address": "localhost",
+ "music_rfactor": "1",
+ "user": "",
+ "password": "",
+ "connections": [
+ {
+ "name": "Connection 0",
+ "url": "jdbc:mdbc:mem:db0",
+ "myid": "0"
+ },
+ {
+ "name": "Connection 1",
+ "url": "jdbc:mdbc:mem:db1",
+ "myid": "1"
+ },
+ {
+ "name": "Connection 2",
+ "url": "jdbc:mdbc:mem:db2",
+ "myid": "2"
+ }
+ ]
+ },
+ {
+ "description": "H2 Server with Cassandra2 with two connections",
+ "MDBC_DB_MIXIN": "h2server",
+ "MDBC_MUSIC_MIXIN": "cassandra2",
+ "replicas": "0,1",
+ "music_keyspace": "mdbctest3",
+ "music_address": "localhost",
+ "music_rfactor": "1",
+ "connections": [
+ {
+ "name": "Connection 0",
+ "url": "jdbc:mdbc:tcp://localhost/mdbc0",
+ "user": "",
+ "password": "",
+ "myid": "0"
+ },
+ {
+ "name": "Connection 1",
+ "url": "jdbc:mdbc:tcp://localhost/mdbc1",
+ "user": "",
+ "password": "",
+ "myid": "1"
+ }
+ ]
+ },
+ {
+ "description": "MySQL with Cassandra2 with two connections",
+ "MDBC_DB_MIXIN": "mysql",
+ "MDBC_MUSIC_MIXIN": "cassandra2",
+ "replicas": "0,1,2",
+ "music_keyspace": "mdbctest4",
+ "music_address": "localhost",
+ "music_rfactor": "1",
+ "user": "root",
+ "password": "abc123",
+ "connections": [
+ {
+ "name": "Connection 0",
+ "url": "jdbc:mdbc://127.0.0.1:3306/mdbc",
+ "myid": "0"
+ },
+ {
+ "name": "Connection 1",
+ "url": "jdbc:mdbc://127.0.0.1:3306/mdbc2",
+ "myid": "1"
+ }
+ ]
+ },
+ {
+ "description": "H2 (DB #1) and MySQL (DB #2) with Cassandra2",
+ "MDBC_MUSIC_MIXIN": "cassandra2",
+ "replicas": "0,1",
+ "music_keyspace": "mdbctest5",
+ "music_address": "localhost",
+ "music_rfactor": "1",
+ "connections": [
+ {
+ "name": "Connection 0",
+ "MDBC_DB_MIXIN": "h2",
+ "url": "jdbc:mdbc:mem:db9",
+ "user": "",
+ "password": "",
+ "myid": "0"
+ },
+ {
+ "name": "Connection 1",
+ "MDBC_DB_MIXIN": "mysql",
+ "url": "jdbc:mdbc://127.0.0.1:3306/mdbc3",
+ "user": "root",
+ "password": "abc123",
+ "myid": "1"
+ }
+ ]
+ }
+ ],
+ "zconfigs": [
+ {
+ "description": "H2 Server with Cassandra2 with two connections",
+ "MDBC_DB_MIXIN": "h2server",
+ "MDBC_MUSIC_MIXIN": "cassandra2",
+ "replicas": "0,1",
+ "music_keyspace": "mdbctest3",
+ "music_address": "localhost",
+ "music_rfactor": "1",
+ "connections": [
+ {
+ "name": "Connection 0",
+ "url": "jdbc:mdbc:tcp://localhost/mdbc0",
+ "user": "",
+ "password": "",
+ "myid": "0"
+ },
+ {
+ "name": "Connection 1",
+ "url": "jdbc:mdbc:tcp://localhost/mdbc1",
+ "user": "",
+ "password": "",
+ "myid": "1"
+ }
+ ]
+ }
+ ]
+}
diff --git a/src/main/shell/mk_jboss_module b/src/main/shell/mk_jboss_module
new file mode 100755
index 0000000..28d0540
--- /dev/null
+++ b/src/main/shell/mk_jboss_module
@@ -0,0 +1,57 @@
+#!/bin/bash
+#
+# mk_jboss_module -- This script builds a JBoss module for MDBC. It should be run directly
+# in the directory containing the MDBC code. It will build the MDBC jar file, and then
+# construct a tar file (under target/mdbc-jboss-module.tar) containing MDBC and all of
+# its dependencies, as well as other files needed for a JBoss module.
+#
+# To install the module: untar the tar file on the server in the JBOSS_DIR/modules directory.
+#
+
+if [ ! -f pom.xml ]
+then
+ echo mk_jboss_module: Where is pom.xml?
+ exit 1
+fi
+
+mvn -Dmaven.test.skip=true package
+if [ $? != 0 ]
+then
+ echo mk_jboss_module: maven failed...
+ exit 1
+fi
+
+T=/tmp/mk_jboss_module$$
+T2=$T/com/att/research/mdbc/main
+MODULE=$T2/module.xml
+TARGET=`pwd`/target/mdbc-jboss-module.tar
+JARS=$( mvn dependency:build-classpath | grep -v INFO | tr : '\012' )
+
+mkdir -p $T2
+cp $JARS $T2
+cp target/mdbc-0.0.1-SNAPSHOT.jar $T2
+JAR2=$( cd $T2; ls *.jar )
+
+cat > $MODULE <<EOF
+<?xml version="1.0" encoding="UTF-8"?>
+<!--
+ Note: This module includes a copy of the H2 database, rather than depending on the
+ com.h2database.h2 module included with JBoss, because I could not get that copy to work.
+-->
+<module xmlns="urn:jboss:module:1.1" name="com.att.research.mdbc">
+ <resources>
+EOF
+for i in $JAR2; do echo " <resource-root path=\"$i\"/>"; done >> $MODULE
+cat >> $MODULE <<EOF
+ </resources>
+ <dependencies>
+ <module name="javax.api"/>
+ <module name="sun.jdk"/>
+ </dependencies>
+</module>
+EOF
+chmod 444 $T2/*.jar $MODULE
+
+mkdir -p target
+(cd $T; tar cf $TARGET com )
+rm -fr $T
diff --git a/src/main/shell/run_h2_server b/src/main/shell/run_h2_server
new file mode 100755
index 0000000..9b14e63
--- /dev/null
+++ b/src/main/shell/run_h2_server
@@ -0,0 +1,27 @@
+#!/bin/bash
+#
+# run_h2_server -- This script starts the H2 DB server with all necessary jar files.
+# It should be run in the directory containing the MDBC code.
+#
+
+POM=pom-h2server.xml
+
+if [ ! -f $POM ]
+then
+ echo run_h2_server: Where is $POM?
+ exit 1
+fi
+
+mvn -f $POM -Dmaven.test.skip=true install
+if [ $? != 0 ]
+then
+ echo run_h2_server: maven failed...
+ exit 1
+fi
+
+export CLASSPATH=$( mvn -f $POM dependency:build-classpath | grep -v INFO )
+echo CLASSPATH is:
+echo $CLASSPATH | tr : '\012'
+
+cd /tmp
+java org.h2.tools.Server
diff --git a/src/test/java/com/att/research/mdbc/MDBCUtilsTest.java b/src/test/java/com/att/research/mdbc/MDBCUtilsTest.java
new file mode 100644
index 0000000..cdee078
--- /dev/null
+++ b/src/test/java/com/att/research/mdbc/MDBCUtilsTest.java
@@ -0,0 +1,71 @@
+package com.att.research.mdbc;
+
+import com.att.research.mdbc.mixins.OperationType;
+import com.att.research.mdbc.mixins.StagingTable;
+import org.json.JSONArray;
+import org.json.JSONObject;
+import org.junit.Test;
+
+import java.io.IOException;
+import java.util.HashMap;
+
+import static org.junit.Assert.*;
+
+public class MDBCUtilsTest {
+
+ @Test
+ public void toStringTest1() {
+ StagingTable table = new StagingTable();
+ table.addOperation("test",OperationType.INSERT,(new JSONObject(new String[]{"test1", "test2"})).toString(),(new JSONObject(new String[]{"test3", "Test4"})).toString());
+ String output=null;
+ try {
+ output = MDBCUtils.toString(table);
+ } catch (IOException e) {
+ e.printStackTrace();
+ fail();
+ }
+ assertTrue(output!=null);
+ assertTrue(!output.isEmpty());
+ }
+
+ @Test
+ public void toStringTest2() {
+ HashMap<String,StagingTable> mapToSerialize = new HashMap<>();
+ StagingTable table = new StagingTable();
+ table.addOperation("test",OperationType.INSERT,(new JSONObject(new String[]{"test1", "test2"})).toString(),(new JSONObject(new String[]{"test3", "Test4"})).toString());
+ mapToSerialize.put("table",table);
+ String output=null;
+ try {
+ output = MDBCUtils.toString(mapToSerialize);
+ } catch (IOException e) {
+ e.printStackTrace();
+ fail();
+ }
+ assertTrue(output!=null);
+ assertTrue(!output.isEmpty());
+ }
+
+ @Test
+ public void toStringTest3() {
+ String testStr = "test";
+ OperationType typeTest = OperationType.INSERT;
+ String output=null;
+ try {
+ output = MDBCUtils.toString(testStr);
+ } catch (IOException e) {
+ e.printStackTrace();
+ fail();
+ }
+ assertTrue(output!=null);
+ assertTrue(!output.isEmpty());
+ output=null;
+ try {
+ output = MDBCUtils.toString(typeTest);
+ } catch (IOException e) {
+ e.printStackTrace();
+ fail();
+ }
+ assertTrue(output!=null);
+ assertTrue(!output.isEmpty());
+ }
+} \ No newline at end of file
diff --git a/src/test/java/com/att/research/mdbc/test/ALLTESTS.java b/src/test/java/com/att/research/mdbc/test/ALLTESTS.java
new file mode 100755
index 0000000..c36e94c
--- /dev/null
+++ b/src/test/java/com/att/research/mdbc/test/ALLTESTS.java
@@ -0,0 +1,14 @@
+package com.att.research.mdbc.test;
+
+import org.junit.runner.RunWith;
+import org.junit.runners.Suite;
+
+@RunWith(Suite.class)
+@Suite.SuiteClasses({
+ //BasicTest.class,
+ //CrossSiteTest.class,
+ //TransactionTest.class
+})
+
+public class ALLTESTS {
+}
diff --git a/src/test/java/com/att/research/mdbc/test/BasicTest.java b/src/test/java/com/att/research/mdbc/test/BasicTest.java
new file mode 100755
index 0000000..2b17eba
--- /dev/null
+++ b/src/test/java/com/att/research/mdbc/test/BasicTest.java
@@ -0,0 +1,77 @@
+package com.att.research.mdbc.test;
+
+import static org.junit.Assert.*;
+
+import java.sql.Connection;
+import java.sql.ResultSet;
+import java.sql.SQLException;
+import java.sql.Statement;
+
+
+/**
+ * This is a basic test which creates some tables, does a few selects, and runs some joins.
+ * It is mainly intended to make sure that no exceptions are thrown in basic operation.
+ */
+public class BasicTest extends TestCommon {
+ private static final String DB_CONNECTION = "avatica://" + "mem:db1";
+ private static final String KEYSPACE = "Basic_Test";
+
+ //@Test
+ public void test() {
+ try {
+ Connection connection = getDBConnection(DB_CONNECTION, KEYSPACE, "0");
+ assertNotNull(connection);
+ System.out.println("GOT conn");
+ Statement stmt = connection.createStatement();
+ assertNotNull(stmt);
+ System.out.println("GOT stmt");
+
+ try {
+ connection.setAutoCommit(false);
+ stmt.execute("CREATE TABLE IF NOT EXISTS PERSON(ID_ varchar(255), NAME varchar(255), SSN varchar(255), primary key (ID_))");
+ stmt.execute("INSERT INTO PERSON(ID_, NAME, SSN) VALUES('1', 'Anju', '111-22-3333')");
+ stmt.execute("INSERT INTO PERSON(ID_, NAME, SSN) VALUES('2', 'Sonia', '111-22-4444')");
+ stmt.execute("INSERT INTO PERSON(ID_, NAME, SSN) VALUES('3', 'Asha', '111-55-6666')");
+ dumptable(connection);
+
+ stmt.execute("DELETE FROM PERSON WHERE ID_ = '1'");
+ dumptable(connection);
+
+ stmt.execute("UPDATE PERSON SET NAME = 'foobar' WHERE ID_ = '2'");
+ dumptable(connection);
+
+ stmt.execute("CREATE TABLE IF NOT EXISTS SONG(ID_ varchar(255), PREF int, ARIA varchar(255), primary key (ID_, PREF))");
+ stmt.execute("INSERT INTO SONG(ID_, PREF, ARIA) VALUES('1', 1, 'Nessun Dorma')");
+ stmt.execute("INSERT INTO SONG(ID_, PREF, ARIA) VALUES('2', 5, 'O mio Bambino Caro')");
+ stmt.execute("INSERT INTO SONG(ID_, PREF, ARIA) VALUES('2', 2, 'Sweet Georgia Brown')");
+ stmt.execute("INSERT INTO SONG(ID_, PREF, ARIA) VALUES('3', 77, 'Mud Flats Blues')");
+ stmt.execute("INSERT INTO SONG(ID_, PREF, ARIA) VALUES('3', 69, 'Me & Mr Jones')");
+ ResultSet rs = stmt.executeQuery("SELECT * FROM PERSON AS P, SONG AS S WHERE P.ID_ = S.ID_");
+ while (rs.next()) {
+ System.out.println("ID_ " + rs.getInt("ID_") + " Name: " + rs.getString("NAME") + " Aria: " + rs.getString("ARIA"));
+ }
+ rs.close();
+ stmt.close();
+ connection.commit();
+ } catch (Exception e) {
+ fail(e.toString());
+ } finally {
+ connection.close();
+ }
+ } catch (Exception e) {
+ e.printStackTrace();
+ fail(e.toString());
+ }
+ System.out.println("BasicTest.test OK");
+ }
+
+ private void dumptable(Connection connection) throws SQLException {
+ Statement stmt = connection.createStatement();
+ ResultSet rs = stmt.executeQuery("SELECT * FROM PERSON");
+ while (rs.next()) {
+ System.out.println("ID_ " + rs.getInt("ID_") + " Name " + rs.getString("name"));
+ }
+ stmt.close();
+ System.out.println("--");
+ }
+}
diff --git a/src/test/java/com/att/research/mdbc/test/CrossSiteTest.java b/src/test/java/com/att/research/mdbc/test/CrossSiteTest.java
new file mode 100755
index 0000000..71ac54e
--- /dev/null
+++ b/src/test/java/com/att/research/mdbc/test/CrossSiteTest.java
@@ -0,0 +1,447 @@
+package com.att.research.mdbc.test;
+
+import static org.junit.Assert.*;
+
+import java.io.Reader;
+import java.io.StringReader;
+import java.sql.CallableStatement;
+import java.sql.Connection;
+import java.sql.PreparedStatement;
+import java.sql.ResultSet;
+import java.sql.Statement;
+import java.sql.Timestamp;
+import java.util.Random;
+
+import org.apache.log4j.Logger;
+import org.junit.After;
+import org.junit.Before;
+import org.junit.BeforeClass;
+import org.junit.Test;
+
+
+/**
+ * This test tests a copy of data from DB1 to DB2. It tests the following H2 data types:
+ * VARCHAR, VARBINARY, INTEGER, BOOLEAN, DOUBLE, CLOB, TIMESTAMP.
+ */
+public class CrossSiteTest extends TestCommon {
+	// JDBC URLs for the two simulated sites (Avatica in-memory databases).
+	private static final String DB_CONNECTION1 = "avatica://" + "mem:db1";
+	private static final String DB_CONNECTION2 = "avatica://" + "mem:db2";
+	private static final String KEYSPACE = "CrossSite_Test";
+	private final static Logger logger = Logger.getLogger(CrossSiteTest.class);
+
+	// Connections to the two sites; opened in setUp(), closed in tearDown().
+	private Connection db1, db2;
+
+	// NOTE(review): the JUnit annotations below are commented out, so these
+	// tests only run when invoked explicitly -- presumably they need a live
+	// MUSIC/Cassandra backend; TODO confirm.
+	//@BeforeClass
+	public static void setUpBeforeClass() throws Exception {
+		// drop the keyspace
+	}
+
+	// Open one MDBC connection per simulated site (replica ids "0" and "1").
+	//@Before
+	public void setUp() throws Exception {
+		db1 = getDBConnection(DB_CONNECTION1, KEYSPACE, "0");
+		db2 = getDBConnection(DB_CONNECTION2, KEYSPACE, "1");
+	}
+
+	// Close both connections; a SQLException here propagates and fails the test.
+	//@After
+	public void tearDown() throws Exception {
+		db1.close();
+		db2.close();
+	}
+
+ //@Test
+ public void testCopyOneToTwo() {
+ String sql = "CREATE TABLE IF NOT EXISTS DATA(KEY VARCHAR(255), PRIMARY KEY (KEY))";
+ createTable(sql);
+
+ // Put data in DB1
+ try {
+ Statement s = db1.createStatement();
+ s.execute("INSERT INTO DATA(KEY) VALUES('AAA')");
+ s.execute("INSERT INTO DATA(KEY) VALUES('BBB')");
+ s.execute("INSERT INTO DATA(KEY) VALUES('CCC')");
+ s.execute("INSERT INTO DATA(KEY) VALUES('DDD')");
+ db1.commit();
+ s.close();
+ } catch (Exception e) {
+ fail("1: " + e.toString());
+ }
+ // Get data in DB2
+ logger.info(" Get data in DB2");
+ try {
+ Statement s = db2.createStatement();
+ ResultSet rs = s.executeQuery("SELECT COUNT(*) FROM DATA");
+ if (rs.next()) {
+ int n = rs.getInt(1);
+ assertEquals(4, n);
+ } else {
+ fail("SELECT COUNT(*) produced no result");
+ }
+ } catch (Exception e) {
+ logger.error(e);
+ e.printStackTrace();
+ fail("2: " + e.toString());
+ }
+ // Delete a row
+ try {
+ Statement s = db1.createStatement();
+ s.execute("DELETE FROM DATA WHERE KEY = 'CCC'");
+ db1.commit();
+ s.close();
+ } catch (Exception e) {
+ fail("1: " + e.toString());
+ }
+ // Recheck
+ logger.info(" Get data in DB2");
+ try {
+ Statement s = db2.createStatement();
+ ResultSet rs = s.executeQuery("SELECT COUNT(*) FROM DATA");
+ if (rs.next()) {
+ int n = rs.getInt(1);
+ assertEquals(3, n);
+ } else {
+ fail("SELECT COUNT(*) produced no result");
+ }
+ } catch (Exception e) {
+ logger.error(e);
+ e.printStackTrace();
+ fail("2: " + e.toString());
+ }
+ System.out.println("CrossSiteTest.testCopyOneToTwo OK");
+ }
+
+ //@Test
+ public void testCopyWithPreparedStatement() {
+ String sql = "CREATE TABLE IF NOT EXISTS DATA2(KEY VARCHAR(255), PRIMARY KEY (KEY))";
+ createTable(sql);
+
+ // Put data in DB1
+ try {
+ Statement s = db1.createStatement();
+ PreparedStatement ps = db1.prepareStatement("INSERT INTO DATA2(KEY) VALUES(?)");
+ for (String v : new String[] { "WWW", "XXX", "YYY", "ZZZ" } ) {
+ ps.setString(1, v);
+ ps.execute();
+ }
+ db1.commit();
+ s.close();
+ } catch (Exception e) {
+ fail("1: " + e.toString());
+ }
+ // Get data in DB2
+ logger.info(" Get data in DB2");
+ try {
+ Statement s = db2.createStatement();
+ ResultSet rs = s.executeQuery("SELECT COUNT(*) FROM DATA2");
+ if (rs.next()) {
+ int n = rs.getInt(1);
+ assertEquals(4, n);
+ } else {
+ fail("SELECT COUNT(*) produced no result");
+ }
+ } catch (Exception e) {
+ logger.error(e);
+ e.printStackTrace();
+ fail("2: " + e.toString());
+ }
+ System.out.println("CrossSiteTest.testCopyWithPreparedStatement OK");
+ }
+
+	/**
+	 * Round-trip several H2 column types (VARCHAR, INTEGER, BOOLEAN, DOUBLE)
+	 * through site 1 and verify the exact values are readable at site 2.
+	 */
+	//@Test
+	public void testDataTypes() {
+		String sql = "CREATE TABLE IF NOT EXISTS DATATYPES(KEY VARCHAR(255), I1 INTEGER, B1 BOOLEAN, D1 DOUBLE, S1 VARCHAR, PRIMARY KEY (KEY))";
+		createTable(sql);
+
+		String key = "ThIs Is ThE KeY";
+		String key2 = "ThIs Is another KeY";
+		String s1 = "The Rain in Spain";
+		int i1 = 696969;
+		boolean b1 = true;
+		double pi = Math.PI;
+		double e = Math.E;
+
+		// Put data in DB1
+		try {
+			PreparedStatement ps = db1.prepareStatement("INSERT INTO DATATYPES(KEY, I1, B1, D1, S1) VALUES(?, ?, ?, ?, ?)");
+			ps.setString(1, key);
+			ps.setInt(2, i1);
+			ps.setBoolean(3, b1);
+			ps.setDouble(4, pi);
+			ps.setString(5, s1);
+			ps.execute();
+
+			// second row with different values, so more than one row replicates
+			ps.setString(1, key2);
+			ps.setInt(2, 123456);
+			ps.setBoolean(3, false);
+			ps.setDouble(4, e);
+			ps.setString(5, "Fee fi fo fum!");
+			ps.execute();
+			db1.commit();
+			ps.close();
+		} catch (Exception ex) {
+			fail("1: " + ex.toString());
+		}
+		// Get data in DB2
+		logger.info(" Get data in DB2");
+		try {
+			// NOTE(review): Statement/ResultSet are not closed here; also only
+			// the first row is checked -- assumes PK ordering puts `key` first,
+			// TODO confirm.
+			Statement s = db2.createStatement();
+			ResultSet rs = s.executeQuery("SELECT * FROM DATATYPES");
+			if (rs.next()) {
+				assertEquals(key, rs.getString(1));
+				assertEquals(i1, rs.getInt(2));
+				assertEquals(b1, rs.getBoolean(3));
+				assertEquals(pi, rs.getDouble(4), 0.0);
+				assertEquals(s1, rs.getString(5));
+			} else {
+				fail("SELECT * FROM DATATYPES");
+			}
+		} catch (Exception ex) {
+			logger.error(ex);
+			ex.printStackTrace();
+			fail("2: " + ex.toString());
+		}
+		System.out.println("CrossSiteTest.testDataTypes OK");
+	}
+
+ //@Test
+ public void testIdentityColumn() {
+ String sql = "CREATE TABLE IF NOT EXISTS IDENTITYTEST(KEY IDENTITY, S1 VARCHAR, T1 TIMESTAMP, PRIMARY KEY (KEY))";
+ createTable(sql);
+
+ String s1 = "ThIs Is ThE IDENTITY test";
+ Timestamp ts = new Timestamp(-3535344000L);
+
+ // Put data in DB1
+ try {
+ PreparedStatement ps = db1.prepareStatement("INSERT INTO IDENTITYTEST(S1, T1) VALUES(?, ?)");
+ ps.setString(1, s1);
+ ps.setTimestamp(2, ts);
+ ps.execute();
+ db1.commit();
+ ps.close();
+ } catch (Exception ex) {
+ fail("testIdentity 1: " + ex.toString());
+ }
+ // Get data in DB2
+ logger.info(" Get data in DB2");
+ try {
+ Statement s = db2.createStatement();
+ ResultSet rs = s.executeQuery("SELECT * FROM IDENTITYTEST");
+ if (rs.next()) {
+ assertEquals(s1, rs.getString("s1"));
+ assertEquals(ts, rs.getTimestamp("t1"));
+ } else {
+ fail("SELECT * FROM DATATYPES");
+ }
+ } catch (Exception ex) {
+ logger.error(ex);
+ ex.printStackTrace();
+ fail("testIdentity 2: " + ex.toString());
+ }
+ System.out.println("CrossSiteTest.testIdentityColumn OK");
+ }
+
+	/**
+	 * Round-trip binary (VARBINARY) and character-large-object (CLOB) columns:
+	 * 4KB of random bytes plus a long text excerpt, written at site 1 and
+	 * compared byte-for-byte at site 2.
+	 */
+	//@Test
+	public void testBLOBColumn() {
+		String sql = "CREATE TABLE IF NOT EXISTS BLOBTEST (KEY VARCHAR, V1 VARBINARY, C1 CLOB, PRIMARY KEY (KEY))";// add
+		createTable(sql);
+
+		String key = "BLOB test";
+		byte[] v1 = new byte[4096];
+		new Random().nextBytes(v1);
+		String constitution =
+			"We the People of the United States, in Order to form a more perfect Union, establish Justice, insure domestic Tranquility, provide for the common defense, promote the "+
+			"general Welfare, and secure the Blessings of Liberty to ourselves and our Posterity, do ordain and establish this Constitution for the United States of America."+
+			"Section 1"+
+			"All legislative Powers herein granted shall be vested in a Congress of the United States, which shall consist of a Senate and House of Representatives."+
+			""+
+			"Section 2"+
+			"1: The House of Representatives shall be composed of Members chosen every second Year by the People of the several States, and the Electors in each State shall "+
+			"have the Qualifications requisite for Electors of the most numerous Branch of the State Legislature."+
+			""+
+			"2: No Person shall be a Representative who shall not have attained to the Age of twenty five Years, and been seven Years a Citizen of the United States, "+
+			"and who shall not, when elected, be an Inhabitant of that State in which he shall be chosen."+
+			""+
+			"3: Representatives and direct Taxes shall be apportioned among the several States which may be included within this Union, according to their respective Numbers, which shall be determined "+
+			"by adding to the whole Number of free Persons, including those bound to Service for a Term of Years, and excluding Indians not taxed, three fifths of all other Persons. "+
+			"2 The actual Enumeration shall be made within three Years after the first Meeting of the Congress of the United States, and within every subsequent Term of ten Years, in such Manner as "+
+			"they shall by Law direct. The Number of Representatives shall not exceed one for every thirty Thousand, but each State shall have at Least one Representative; and until such enumeration "+
+			"shall be made, the State of New Hampshire shall be entitled to chuse three, Massachusetts eight, Rhode-Island and Providence Plantations one, Connecticut five, New-York six, New Jersey four, "+
+			"Pennsylvania eight, Delaware one, Maryland six, Virginia ten, North Carolina five, South Carolina five, and Georgia three."+
+			""+
+			"4: When vacancies happen in the Representation from any State, the Executive Authority thereof shall issue Writs of Election to fill such Vacancies."+
+			""+
+			"5: The House of Representatives shall chuse their Speaker and other Officers; and shall have the sole Power of Impeachment."+
+			"etc., etc. ...";
+		Reader c1 = new StringReader(constitution);
+
+		// Put data in DB1 (uses prepareCall rather than prepareStatement --
+		// presumably to exercise MdbcCallableStatement; TODO confirm)
+		try {
+			CallableStatement ps = db1.prepareCall("INSERT INTO BLOBTEST(KEY, V1, C1) VALUES (?, ?, ?)");
+			ps.setString(1, key);
+			ps.setBytes(2, v1);
+			ps.setClob(3, c1);
+			ps.execute();
+			db1.commit();
+			ps.close();
+		} catch (Exception ex) {
+			ex.printStackTrace();
+			fail("testBLOBColumn 1: " + ex.toString());
+		}
+		// Get data in DB2
+		logger.info(" Get data in DB2");
+		try {
+			// NOTE(review): Statement/ResultSet are not closed here
+			Statement s = db2.createStatement();
+			ResultSet rs = s.executeQuery("SELECT * FROM BLOBTEST");
+			if (rs.next()) {
+				String v1s = new String(v1);
+				assertEquals(key, rs.getString("key"));
+				assertEquals(v1s, new String(rs.getBytes("v1")));
+				assertEquals(constitution, new String(rs.getBytes("c1")));
+			} else {
+				fail("SELECT * FROM BLOBTEST");
+			}
+		} catch (Exception ex) {
+			logger.error(ex);
+			ex.printStackTrace();
+			fail("testBLOBColumn 2: " + ex.toString());
+		}
+		System.out.println("CrossSiteTest.testBLOBColumn OK");
+	}
+
+	/**
+	 * Verify that a secondary index created at site 1 can be queried at
+	 * site 2, and that deletions are reflected in indexed queries.
+	 */
+	//@Test
+	public void testSecondaryIndex() {
+		String sql = "CREATE TABLE IF NOT EXISTS ARTISTS (ARTIST VARCHAR, GENRE VARCHAR, AGE INT, PRIMARY KEY (ARTIST))";
+		createTable(sql);
+
+		// Put data in DB1
+		try {
+			Statement s = db1.createStatement();
+			s.execute("INSERT INTO ARTISTS(ARTIST, GENRE, AGE) VALUES('Anne-Sophie', 'classical', 53)");
+			s.execute("INSERT INTO ARTISTS(ARTIST, GENRE, AGE) VALUES('Dizz', 'jazz', 99)");
+			s.execute("INSERT INTO ARTISTS(ARTIST, GENRE, AGE) VALUES('Esperanza', 'jazz', 32)");
+			s.execute("INSERT INTO ARTISTS(ARTIST, GENRE, AGE) VALUES('Miles', 'jazz', 90)");
+			s.execute("INSERT INTO ARTISTS(ARTIST, GENRE, AGE) VALUES('Yo-yo', 'classical', 61)");
+			s.execute("CREATE INDEX BYGENRE on ARTISTS(GENRE)");
+			db1.commit();
+			s.close();
+		} catch (Exception e) {
+			fail("1: " + e.toString());
+		}
+		// Get data in DB2 -- three of the five artists are 'jazz'
+		logger.info(" Get data in DB2");
+		try {
+			// NOTE(review): Statement/ResultSet are not closed in this block
+			Statement s = db2.createStatement();
+			ResultSet rs = s.executeQuery("SELECT COUNT(*) FROM ARTISTS WHERE GENRE = 'jazz'");
+			if (rs.next()) {
+				int n = rs.getInt(1);
+				assertEquals(3, n);
+			} else {
+				fail("SELECT COUNT(*) produced no result");
+			}
+		} catch (Exception e) {
+			logger.error(e);
+			e.printStackTrace();
+			fail("2: " + e.toString());
+		}
+		// Delete a row ('Miles' is one of the jazz rows)
+		try {
+			Statement s = db1.createStatement();
+			s.execute("DELETE FROM ARTISTS WHERE ARTIST = 'Miles'");
+			db1.commit();
+			s.close();
+		} catch (Exception e) {
+			fail("1: " + e.toString());
+		}
+		// Recheck -- the indexed count at site 2 must drop to two
+		logger.info(" Get data in DB2");
+		try {
+			Statement s = db2.createStatement();
+			ResultSet rs = s.executeQuery("SELECT COUNT(*) FROM ARTISTS WHERE GENRE = 'jazz'");
+			if (rs.next()) {
+				int n = rs.getInt(1);
+				assertEquals(2, n);
+			} else {
+				fail("SELECT COUNT(*) produced no result");
+			}
+		} catch (Exception e) {
+			logger.error(e);
+			e.printStackTrace();
+			fail("2: " + e.toString());
+		}
+		System.out.println("CrossSiteTest.testSecondaryIndex OK");
+	}
+
+	/**
+	 * Verify updates flow in the other direction too: rows written at site 1,
+	 * an UPDATE issued at site 2, and the new value read back at site 1.
+	 */
+	//@Test
+	public void testUpdate() {
+		String sql = "CREATE TABLE IF NOT EXISTS UPDATETEST(KEY VARCHAR(255), OTHER VARCHAR(255), PRIMARY KEY (KEY))";
+		createTable(sql);
+
+		// Put data in DB1
+		try {
+			Statement s = db1.createStatement();
+			s.execute("INSERT INTO UPDATETEST(KEY, OTHER) VALUES('foo', 'bar')");
+			s.execute("INSERT INTO UPDATETEST(KEY, OTHER) VALUES('bar', 'nixon')");
+			db1.commit();
+			s.close();
+		} catch (Exception e) {
+			fail("1: " + e.toString());
+		}
+		// Get data in DB2
+		logger.info(" Get data in DB2");
+		try {
+			// NOTE(review): Statement/ResultSet are not closed in this block
+			Statement s = db2.createStatement();
+			ResultSet rs = s.executeQuery("SELECT COUNT(*) FROM UPDATETEST");
+			if (rs.next()) {
+				int n = rs.getInt(1);
+				assertEquals(2, n);
+			} else {
+				fail("SELECT COUNT(*) produced no result");
+			}
+		} catch (Exception e) {
+			logger.error(e);
+			e.printStackTrace();
+			fail("2: " + e.toString());
+		}
+		// Update a row at site 2
+		try {
+			Statement s = db2.createStatement();
+			s.execute("UPDATE UPDATETEST SET OTHER = 'obama' WHERE KEY = 'bar'");
+			db2.commit();
+			s.close();
+		} catch (Exception e) {
+			fail("1: " + e.toString());
+		}
+		// Recheck -- the updated value must be visible back at site 1
+		logger.info(" Get data in DB2");
+		try {
+			Statement s = db1.createStatement();
+			ResultSet rs = s.executeQuery("SELECT OTHER FROM UPDATETEST WHERE KEY = 'bar'");
+			if (rs.next()) {
+				String str = rs.getString("OTHER");
+				assertEquals("obama", str);
+			} else {
+				fail("SELECT OTHER produced no result");
+			}
+		} catch (Exception e) {
+			logger.error(e);
+			e.printStackTrace();
+			fail("2: " + e.toString());
+		}
+		System.out.println("CrossSiteTest.testUpdate OK");
+	}
+
+	/**
+	 * Run the given DDL on both site connections, committing on each; any
+	 * failure fails the calling test immediately.
+	 */
+	private void createTable(String sql) {
+		Connection[] sites = new Connection[] { db1, db2 };
+		try {
+			for (int i = 0; i < sites.length; i++) {
+				Connection db = sites[i];
+				logger.info(" start: "+db);
+				Statement stmt = db.createStatement();
+				stmt.execute(sql);
+				db.commit();
+				stmt.close();
+				logger.info(" Tables created");
+			}
+		} catch (Exception e) {
+			fail(e.toString());
+		}
+	}
+}
diff --git a/src/test/java/com/att/research/mdbc/test/TestCommon.java b/src/test/java/com/att/research/mdbc/test/TestCommon.java
new file mode 100755
index 0000000..e5e85dc
--- /dev/null
+++ b/src/test/java/com/att/research/mdbc/test/TestCommon.java
@@ -0,0 +1,25 @@
+package com.att.research.mdbc.test;
+
+import java.sql.Connection;
+import java.sql.DriverManager;
+import java.sql.SQLException;
+import java.util.Properties;
+
+import com.att.research.mdbc.mixins.CassandraMixin;
+
+/**
+ * Shared base class for the MDBC tests: builds Avatica JDBC connections
+ * pre-configured with the MUSIC/Cassandra mixin properties.
+ */
+public class TestCommon {
+	public static final String DB_DRIVER = "avatica.Driver";
+	public static final String DB_USER = "";
+	public static final String DB_PASSWORD = "";
+
+	/**
+	 * Open a connection to {@code url}, identifying this node as replica
+	 * {@code id} among replicas "0,1,2", with MUSIC at localhost.
+	 * NOTE: {@code keyspace} is currently unused; kept for signature stability.
+	 */
+	public Connection getDBConnection(String url, String keyspace, String id) throws SQLException, ClassNotFoundException {
+		Class.forName(DB_DRIVER);
+		Properties info = new Properties();
+		info.put(CassandraMixin.KEY_MY_ID, id);
+		info.put(CassandraMixin.KEY_REPLICAS, "0,1,2");
+		info.put(CassandraMixin.KEY_MUSIC_ADDRESS, "localhost");
+		info.put("user", DB_USER);
+		info.put("password", DB_PASSWORD);
+		return DriverManager.getConnection(url, info);
+	}
+}
diff --git a/src/test/java/com/att/research/mdbc/test/TransactionTest.java b/src/test/java/com/att/research/mdbc/test/TransactionTest.java
new file mode 100755
index 0000000..9d50db5
--- /dev/null
+++ b/src/test/java/com/att/research/mdbc/test/TransactionTest.java
@@ -0,0 +1,164 @@
+package com.att.research.mdbc.test;
+
+import static org.junit.Assert.fail;
+
+import java.sql.Connection;
+import java.sql.ResultSet;
+import java.sql.SQLException;
+import java.sql.Statement;
+import java.util.Arrays;
+import java.util.HashSet;
+import java.util.Set;
+
+import org.apache.log4j.Logger;
+import org.junit.Test;
+
+
+public class TransactionTest extends TestCommon {
+	private static final String DB_CONNECTION1 = "avatica://" + "mem:db1";
+	private static final String DB_CONNECTION2 = "avatica://" + "mem:db2";
+	private static final String KEYSPACE = "CrossSite_Test";
+	// fixed copy/paste bug: the logger was created for CrossSiteTest.class
+	private final static Logger logger = Logger.getLogger(TransactionTest.class);
+
+	/**
+	 * With autocommit on, inserts at site 1 should be immediately visible at
+	 * site 2 with no explicit commit.
+	 */
+	//@Test
+	public void testWithAutocommitTrue() {
+		System.out.println("START TransactionTest.testWithAutocommitTrue");
+		Set<String> vals = new HashSet<String>(Arrays.asList("1", "2", "3"));
+		Connection db1 = null, db2 = null;
+		try {
+			db1 = getDBConnection(DB_CONNECTION1, KEYSPACE, "0");
+			db2 = getDBConnection(DB_CONNECTION2, KEYSPACE, "1");
+			createTable(new Connection[] { db1, db2 });
+			db1.setAutoCommit(true);
+			insert(db1, vals);
+			readcheck(db2, vals);
+		} catch (Exception e) {
+			fail("Unexpected exception: "+e);
+		} finally {
+			try {
+				if (db1 != null)
+					db1.close();
+				if (db2 != null)
+					db2.close();
+			} catch (SQLException e) {
+				// ignore
+			}
+		}
+	}
+	/**
+	 * With autocommit off, uncommitted changes are visible locally (db1) but
+	 * not remotely (db2) until commit(), after which db2 sees them.
+	 */
+	//@Test
+	public void testCommit() {
+		System.out.println("START TransactionTest.testCommit");
+		Set<String> vals = new HashSet<String>(Arrays.asList("1", "2", "3", "4"));
+		Set<String> val2 = new HashSet<String>(Arrays.asList("1", "2", "4"));
+		Connection db1 = null, db2 = null;
+		try {
+			db1 = getDBConnection(DB_CONNECTION1, KEYSPACE, "0");
+			db2 = getDBConnection(DB_CONNECTION2, KEYSPACE, "1");
+			createTable(new Connection[] { db1, db2 });
+			db1.setAutoCommit(false);
+			insert(db1, vals);
+			delete(db1, new HashSet<String>(Arrays.asList("3")));
+			readcheck(db1, val2);
+			readcheck(db2, new HashSet<String>());
+			db1.commit();
+			readcheck(db2, val2);
+		} catch (Exception e) {
+			fail("Unexpected exception: "+e);
+		} finally {
+			try {
+				if (db1 != null)
+					db1.close();
+				if (db2 != null)
+					db2.close();
+			} catch (SQLException e) {
+				// ignore
+			}
+		}
+	}
+	/**
+	 * With autocommit off, rollback() must discard the uncommitted inserts at
+	 * both sites.
+	 */
+	//@Test
+	public void testRollback() {
+		System.out.println("START TransactionTest.testRollback");
+		Set<String> vals = new HashSet<String>(Arrays.asList("1", "2", "3", "4"));
+		Connection db1 = null, db2 = null;
+		try {
+			db1 = getDBConnection(DB_CONNECTION1, KEYSPACE, "0");
+			db2 = getDBConnection(DB_CONNECTION2, KEYSPACE, "1");
+			createTable(new Connection[] { db1, db2 });
+			db1.setAutoCommit(false);
+			insert(db1, vals);
+			readcheck(db1, vals);
+			readcheck(db2, new HashSet<String>());
+			db1.rollback();
+			readcheck(db1, new HashSet<String>());
+			readcheck(db2, new HashSet<String>());
+		} catch (Exception e) {
+			fail("Unexpected exception: "+e);
+		} finally {
+			try {
+				if (db1 != null)
+					db1.close();
+				if (db2 != null)
+					db2.close();
+			} catch (SQLException e) {
+				// ignore
+			}
+		}
+	}
+	// Create the TRANSTEST table on every supplied connection.
+	// NOTE(review): unlike CrossSiteTest.createTable this does NOT commit --
+	// presumably it relies on the connection's default autocommit; TODO confirm.
+	private void createTable(Connection[] c) {
+		try {
+			for (Connection db : c) {
+				logger.info(" start: "+db);
+				Statement s = db.createStatement();
+				s.execute("CREATE TABLE IF NOT EXISTS TRANSTEST(KEY VARCHAR(255), PRIMARY KEY (KEY))");
+				s.close();
+				logger.info(" Tables created");
+			}
+		} catch (Exception e) {
+			fail(e.toString());
+		}
+	}
+	/** Insert each value in {@code vals} as one TRANSTEST row via {@code db}. */
+	private void insert(Connection db, Set<String> vals) {
+		try {
+			Statement stmt = db.createStatement();
+			for (String val : vals) {
+				stmt.execute("INSERT INTO TRANSTEST(KEY) VALUES('"+val+"')");
+			}
+			stmt.close();
+		} catch (Exception e) {
+			fail("1: " + e.toString());
+		}
+	}
+	/** Delete the TRANSTEST rows whose KEY is in {@code vals}, via {@code db}. */
+	private void delete(Connection db, Set<String> vals) {
+		try {
+			Statement stmt = db.createStatement();
+			for (String val : vals) {
+				stmt.execute("DELETE FROM TRANSTEST WHERE KEY = '"+val+"'");
+			}
+			stmt.close();
+		} catch (Exception e) {
+			fail("1: " + e.toString());
+		}
+	}
+	/**
+	 * Assert that the TRANSTEST rows visible through {@code db} are exactly
+	 * {@code vals} (same cardinality, same elements).
+	 */
+	private void readcheck(Connection db, Set<String> vals) {
+		// try-with-resources: the original leaked both the Statement and ResultSet
+		try (Statement s = db.createStatement();
+		     ResultSet rs = s.executeQuery("SELECT * FROM TRANSTEST")) {
+			Set<String> newset = new HashSet<String>();
+			while (rs.next()) {
+				newset.add(rs.getString(1));
+			}
+			if (vals.size() != newset.size()) {
+				fail("wrong number of elements, expected "+vals.size()+" got "+newset.size());
+			}
+			for (String t : vals) {
+				if (!newset.contains(t))
+					fail("missing element: "+t);
+			}
+		} catch (Exception e) {
+			logger.error(e);
+			e.printStackTrace();
+			fail("2: " + e.toString());
+		}
+	}
+}
diff --git a/src/test/java/org/openecomp/sdnc/sli/resource/dblib/CachedDataSource.java b/src/test/java/org/openecomp/sdnc/sli/resource/dblib/CachedDataSource.java
new file mode 100755
index 0000000..222dfb2
--- /dev/null
+++ b/src/test/java/org/openecomp/sdnc/sli/resource/dblib/CachedDataSource.java
@@ -0,0 +1,7 @@
+package org.openecomp.sdnc.sli.resource.dblib;
+
+// Test stub standing in for the real SDNC CachedDataSource; StressTest only
+// needs the connection-name accessor.
+public class CachedDataSource {
+	public String getDbConnectionName() {
+		return "name";
+	}
+}
diff --git a/src/test/java/org/openecomp/sdnc/sli/resource/dblib/DBResourceManager.java b/src/test/java/org/openecomp/sdnc/sli/resource/dblib/DBResourceManager.java
new file mode 100755
index 0000000..e54a5a2
--- /dev/null
+++ b/src/test/java/org/openecomp/sdnc/sli/resource/dblib/DBResourceManager.java
@@ -0,0 +1,87 @@
+package org.openecomp.sdnc.sli.resource.dblib;
+
+import java.sql.Connection;
+import java.sql.DriverManager;
+import java.sql.PreparedStatement;
+import java.sql.ResultSet;
+import java.sql.SQLException;
+import java.util.List;
+import java.util.Properties;
+import java.util.Queue;
+import java.util.concurrent.LinkedBlockingQueue;
+
+import javax.sql.rowset.CachedRowSet;
+import javax.sql.rowset.RowSetProvider;
+
+public class DBResourceManager {
+ public static final String DB_CONNECTION = "jdbc:mdbc:file:/tmp/stresstest"; // "jdbc:h2:mem:db1";
+ @SuppressWarnings("unused")
+ private Properties p;
+ private Queue<Connection> conns;
+
+ private DBResourceManager(Properties p) {
+ this.p = p;
+ this.conns = new LinkedBlockingQueue<Connection>();
+ }
+ public static DBResourceManager create(Properties props) throws Exception {
+ DBResourceManager dbmanager = new DBResourceManager(props);
+ return dbmanager;
+ }
+ public Connection getConnection() throws SQLException {
+ if (conns.size() > 0) {
+ return conns.remove();
+ } else {
+ Properties driver_info = new Properties();
+ return DriverManager.getConnection(DB_CONNECTION, driver_info);
+ }
+ }
+ public void cleanUp() {
+ try {
+ while (conns.size() > 0) {
+ Connection conn = conns.remove();
+ conn.close();
+ }
+ } catch (SQLException e) {
+ }
+ }
+ public boolean isActive() {
+ return true;
+ }
+ public boolean writeData(String statement, List<String> arguments, String preferredDS) throws SQLException {
+ Connection conn = getConnection();
+ PreparedStatement ps = conn.prepareStatement(statement);
+ for (int i = 1; i <= arguments.size(); i++) {
+ ps.setObject(i, arguments.get(i-1));
+ }
+ ps.executeUpdate();
+ ps.close();
+ conns.add(conn);
+ return true;
+ }
+ public CachedRowSet getData(String statement, List<String> arguments, String preferredDS) throws SQLException {
+ CachedRowSet data = null;
+ ResultSet rs = null;
+ Connection conn = null;
+ try {
+ data = RowSetProvider.newFactory().createCachedRowSet();
+ conn = getConnection();
+ PreparedStatement ps = conn.prepareStatement(statement);
+ if(arguments != null) {
+ for (int i = 0; i < arguments.size(); i++) {
+ ps.setObject(i+1, arguments.get(i));
+ }
+ }
+ rs = ps.executeQuery();
+ data.populate(rs);
+ } catch (Throwable exc) {
+ throw (SQLException)exc;
+ } finally {
+ if (conn != null)
+ conns.add(conn);
+ }
+ return data;
+ }
+ CachedDataSource findMaster() throws Exception {
+ return new CachedDataSource();
+ }
+}
diff --git a/src/test/java/org/openecomp/sdnc/sli/resource/dblib/StressTest.java b/src/test/java/org/openecomp/sdnc/sli/resource/dblib/StressTest.java
new file mode 100755
index 0000000..a1eb9a6
--- /dev/null
+++ b/src/test/java/org/openecomp/sdnc/sli/resource/dblib/StressTest.java
@@ -0,0 +1,225 @@
+package org.openecomp.sdnc.sli.resource.dblib;
+
+import static org.junit.Assert.*;
+
+import java.net.URL;
+import java.sql.Connection;
+import java.sql.PreparedStatement;
+import java.sql.ResultSet;
+import java.sql.SQLException;
+import java.sql.Statement;
+import java.util.ArrayList;
+import java.util.HashSet;
+import java.util.Properties;
+import java.util.Set;
+import java.util.concurrent.atomic.AtomicInteger;
+
+import javax.sql.rowset.CachedRowSet;
+
+import org.junit.After;
+import org.junit.AfterClass;
+import org.junit.Before;
+import org.junit.BeforeClass;
+import org.junit.Test;
+import org.junit.runner.RunWith;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import com.anarsoft.vmlens.concurrent.junit.ConcurrentTestRunner;
+import com.anarsoft.vmlens.concurrent.junit.ThreadCount;
+
+//@FixMethodOrder(MethodSorters.NAME_ASCENDING)
+@RunWith(ConcurrentTestRunner.class)
+public class StressTest {
+
+// static {
+// System.setProperty(org.slf4j.impl.SimpleLogger.DEFAULT_LOG_LEVEL_KEY, "INFO");
+// System.setProperty(org.slf4j.impl.SimpleLogger.LOG_FILE_KEY, String.format("ComparativeAnalysisTest-%d.log", System.currentTimeMillis()));
+// }
+	private static final Logger LOG = LoggerFactory.getLogger(StressTest.class);
+	private static Properties props;
+	private static DBResourceManager jdbcDataSource = null;
+	@SuppressWarnings("unused")
+	private static final int MAX_TREADS = 1;
+	@SuppressWarnings("unused")
+	private static final int MAX_ITERATIONS = 10;
+
+	// Per-thread test-id source; incremented once per concurrent test entry.
+	private final AtomicInteger count= new AtomicInteger();
+
+	Set<Thread> runningThreads = new HashSet<Thread>();
+
+	/**
+	 * One-time setup: load /dblib.properties, build the DBResourceManager,
+	 * and ensure the AIC_SITE table used by the stress threads exists.
+	 * Any failure during table creation is logged; a null data source then
+	 * fails the assertNotNull below.
+	 */
+	@BeforeClass
+	public static void setUpBeforeClass() throws Exception {
+		props = new Properties();
+		URL url = StressTest.class.getResource("/dblib.properties");
+		assertNotNull(url);
+		LOG.info("Property file is: " + url.toString());
+		props.load(url.openStream());
+
+		try {
+			jdbcDataSource = DBResourceManager.create(props);
+			Connection conn = jdbcDataSource.getConnection();
+			try {
+				// ---------------
+				// CREATE TABLE
+				String sql =
+					"CREATE TABLE IF NOT EXISTS `AIC_SITE` (" +
+					"`name` varchar(100) DEFAULT NULL, "+
+					"`aic_site_id` varchar(100) NOT NULL, "+
+					"`vcenter_url` varchar(200) DEFAULT NULL, "+
+					"`vcenter_username` varchar(40) DEFAULT NULL, "+
+					"`vcenter_passwd` varchar(255) DEFAULT NULL, "+
+					"`city` varchar(100) DEFAULT NULL, "+
+					"`state` varchar(2) DEFAULT NULL, "+
+					"`operational_status` varchar(20) DEFAULT NULL, "+
+					"`oam_gateway_addr` varchar(20) DEFAULT '', "+
+					"PRIMARY KEY (`aic_site_id`) "+
+					") ; ";
+				Statement stmt = conn.createStatement();
+				stmt.execute(sql);
+				stmt.close();	// the original never closed this Statement
+				// ---------------
+			} finally {
+				conn.close();	// always release, even if table creation throws
+			}
+		} catch (Throwable exc) {
+			LOG.error("", exc);
+		}
+		assertNotNull(jdbcDataSource);
+		if (jdbcDataSource.isActive()) {	// redundant (DBResourceManager) cast removed
+			LOG.warn("DBLIB: JDBC DataSource has been initialized.");
+		} else {
+			LOG.warn("DBLIB: JDBC DataSource did not initialize successfully.");
+		}
+	}
+
+	// Close every pooled connection once all threads are done.
+	@AfterClass
+	public static void tearDownAfterClass() throws Exception {
+		jdbcDataSource.cleanUp();
+	}
+
+	// No per-test setup/teardown needed; hooks kept as placeholders.
+	@Before
+	public void setUp() throws Exception {
+
+	}
+
+	@After
+	public void tearDown() throws Exception {
+
+	}
+
+	// Disabled: primary-DB selection check (see checkPrimaryDatabase below).
+//	@Test
+	public void test01() {
+		LOG.info("TEST 1: Verify primary db selection");
+		checkPrimaryDatabase();
+	}
+
+
+	/**
+	 * Concurrency stress entry point: run by 10 threads at once, each doing
+	 * 40 insert/query/delete cycles against its own unique site id.
+	 */
+	@Test
+	@ThreadCount(10)
+	public void test0X() {
+		int id = count.incrementAndGet();
+
+		String siteid = String.format("Councurrent-tester-%02d", id);
+		for(int i=0; i<40; i++){
+			String site = String.format("%s_%04d", siteid, i);
+			insertTestData(site);
+			queryTestData(site);
+			removeTestData(site);
+			try {
+				// sleep(0) is effectively a scheduler-yield hint only
+				Thread.sleep(0);
+			} catch (Exception e) {
+				LOG.warn("", e);
+			}
+		}
+	}
+
+	/** Delete the AIC_SITE row for {@code site}; SQL failures are logged, not fatal. */
+	private void removeTestData(String site) {
+		ArrayList<String> args = new ArrayList<String>();
+		args.add(site);
+		try {
+			long begin = System.currentTimeMillis();
+			boolean ok = jdbcDataSource.writeData("delete from AIC_SITE where aic_site_id=?", args, null);
+			logRequest(site, "DELETE", begin, System.currentTimeMillis() - begin);
+			assertTrue(ok);
+		} catch (SQLException e) {
+			LOG.warn("", e);
+		}
+	}
+
+	/**
+	 * Query AIC_SITE for {@code site} and report whether any row was found.
+	 * SQL failures are logged and reported as "not found".
+	 */
+	private boolean queryTestData(String site) {
+		ArrayList<String> identifier = new ArrayList<String>();
+		identifier.add(site);
+		try {
+			int rowcount = 0;
+			long startTime = System.currentTimeMillis();
+			CachedRowSet data = jdbcDataSource.getData("select * from AIC_SITE where aic_site_id=?", identifier, null);
+			logRequest(site, "QUERY", startTime, System.currentTimeMillis() - startTime);
+			while(data.next()) {
+				rowcount ++;
+			}
+			return rowcount!=0;
+//			assertTrue(success);
+		} catch (SQLException e) {
+			LOG.warn("", e);
+			return false;
+		}
+	}
+
+
+	/** Insert one AIC_SITE row keyed by {@code site}; SQL failures are logged, not fatal. */
+	private void insertTestData(String site) {
+		ArrayList<String> args = new ArrayList<String>();
+		args.add(site);
+		args.add(site);
+		args.add("Sample03");
+		args.add("Sample04");
+		args.add("Sample05");
+
+		try {
+			long begin = System.currentTimeMillis();
+			boolean ok = jdbcDataSource.writeData("insert into AIC_SITE (name, aic_site_id, vcenter_url, vcenter_username, vcenter_passwd) values (?,?,?,?,?)", args, null);
+			logRequest(site, "INSERT", begin, System.currentTimeMillis() - begin);
+			assertTrue(ok);
+		} catch (SQLException e) {
+			LOG.warn("", e);
+		}
+	}
+
+	/**
+	 * Sanity check: run "SELECT 1 FROM DUAL" on a pooled connection, then ask
+	 * the data source for its primary (master) data source and log its name.
+	 */
+	private void checkPrimaryDatabase() {
+		Connection conn = null;
+		PreparedStatement statement = null;
+		ResultSet rs = null;
+
+		try {
+			conn = jdbcDataSource.getConnection();
+			statement = conn.prepareStatement("SELECT 1 FROM DUAL");
+			rs = statement.executeQuery();
+			int value = -1;
+			while(rs.next()) {
+				value = rs.getInt(1);
+			}
+			LOG.info("Value returned is: " + value);
+		} catch (SQLException e) {
+			LOG.warn("transaction failed", e);
+		} finally {
+			try {
+				if(rs != null) { rs.close(); }
+				if(statement != null) { statement.close(); }	// was never closed
+				if(conn != null) { conn.close(); }	// original closed conn twice here
+			} catch (SQLException e) {
+				LOG.warn("transaction failed", e);
+			}
+		}
+		CachedDataSource ds = null;
+		try {
+			ds = jdbcDataSource.findMaster();
+		} catch (Throwable e) {
+			LOG.warn("", e);
+		}
+		// guard: ds is null when findMaster() threw (original NPE'd here)
+		if (ds != null) {
+			LOG.info("Primary DS is " + ds.getDbConnectionName());
+		}
+	}
+	/** Emit one pipe-delimited timing record: site|command|start-ms|duration-ms. */
+	private static void logRequest(String site, String command, long timestamp, long duration) {
+		LOG.info(site + "|" + command + "|" + timestamp + "|" + duration);
+	}
+}
diff --git a/src/test/resources/dblib.properties b/src/test/resources/dblib.properties
new file mode 100755
index 0000000..2fe520e
--- /dev/null
+++ b/src/test/resources/dblib.properties
@@ -0,0 +1,9 @@
+org.openecomp.sdnc.sli.dbtype=jdbc
+
+org.openecomp.sdnc.sli.jdbc.url=jdbc:mysql://dbhost:3306/sdnctl
+org.openecomp.sdnc.sli.jdbc.database=sdnctl
+org.openecomp.sdnc.sli.jdbc.user=dbuser
+org.openecomp.sdnc.sli.jdbc.password=dbpassword
+org.openecomp.sdnc.sli.jdbc.connection.name=sdnctldb
+
+
diff --git a/src/test/resources/simplelogger.properties b/src/test/resources/simplelogger.properties
new file mode 100755
index 0000000..384f97b
--- /dev/null
+++ b/src/test/resources/simplelogger.properties
@@ -0,0 +1,34 @@
+# SLF4J's SimpleLogger configuration file
+# Simple implementation of Logger that sends all enabled log messages, for all defined loggers, to System.err.
+
+# Default logging detail level for all instances of SimpleLogger.
+# Must be one of ("trace", "debug", "info", "warn", or "error").
+# If not specified, defaults to "info".
+org.slf4j.simpleLogger.defaultLogLevel=debug
+
+# Logging detail level for a SimpleLogger instance named "xxxxx".
+# Must be one of ("trace", "debug", "info", "warn", or "error").
+# If not specified, the default logging detail level is used.
+#org.slf4j.simpleLogger.log.xxxxx=
+
+# Set to true if you want the current date and time to be included in output messages.
+# Default is false, and will output the number of milliseconds elapsed since startup.
+#org.slf4j.simpleLogger.showDateTime=false
+
+# The date and time format to be used in the output messages.
+# The pattern describing the date and time format is the same that is used in java.text.SimpleDateFormat.
+# If the format is not specified or is invalid, the default format is used.
+# The default format is yyyy-MM-dd HH:mm:ss:SSS Z.
+#org.slf4j.simpleLogger.dateTimeFormat=yyyy-MM-dd HH:mm:ss:SSS Z
+
+# Set to true if you want to output the current thread name.
+# Defaults to true.
+#org.slf4j.simpleLogger.showThreadName=true
+
+# Set to true if you want the Logger instance name to be included in output messages.
+# Defaults to true.
+#org.slf4j.simpleLogger.showLogName=true
+
+# Set to true if you want the last component of the name to be included in output messages.
+# Defaults to false.
+#org.slf4j.simpleLogger.showShortLogName=false \ No newline at end of file