From 6b1bf7d48b3e0c0f775a1a13c082ee52cfda4756 Mon Sep 17 00:00:00 2001
From: Arthur Martella
Date: Mon, 29 Oct 2018 10:35:33 -0400
Subject: Migrate MDBC code to org.onap.music

Patch set 2 now includes moving files in src/test/java

Change-Id: Ic722bed9574f75d90b5b582247fec61084772cb8
Issue-ID: MUSIC-155
Signed-off-by: Arthur Martella
---
 README.md | 24 +-
 pom.xml | 2 +-
 .../research/exceptions/MDBCServiceException.java | 88 --
 .../att/research/exceptions/QueryException.java | 90 --
 .../att/research/logging/EELFLoggerDelegate.java | 339 ------
 .../att/research/logging/format/AppMessages.java | 156 ---
 .../att/research/logging/format/ErrorSeverity.java | 37 -
 .../att/research/logging/format/ErrorTypes.java | 44 -
 .../java/com/att/research/mdbc/ArchiveProcess.java | 42 -
 .../java/com/att/research/mdbc/Configuration.java | 18 -
 .../com/att/research/mdbc/DatabaseOperations.java | 465 --------
 .../com/att/research/mdbc/DatabasePartition.java | 189 ---
 src/main/java/com/att/research/mdbc/LockId.java | 46 -
 src/main/java/com/att/research/mdbc/MDBCUtils.java | 70 --
 .../att/research/mdbc/MdbcCallableStatement.java | 738 ------------
 .../java/com/att/research/mdbc/MdbcConnection.java | 419 -------
 .../att/research/mdbc/MdbcPreparedStatement.java | 743 ------------
 .../java/com/att/research/mdbc/MdbcServer.java | 162 ---
 .../com/att/research/mdbc/MdbcServerLogic.java | 312 -----
 .../java/com/att/research/mdbc/MdbcStatement.java | 416 -------
 .../com/att/research/mdbc/MusicSqlManager.java | 308 -----
 .../java/com/att/research/mdbc/ProxyStatement.java | 1262 --------------------
 src/main/java/com/att/research/mdbc/Range.java | 34 -
 src/main/java/com/att/research/mdbc/RedoRow.java | 29 -
 .../java/com/att/research/mdbc/StateManager.java | 209 ----
 src/main/java/com/att/research/mdbc/TableInfo.java | 75 --
 .../mdbc/configurations/NodeConfiguration.java | 71 --
 .../mdbc/configurations/TablesConfiguration.java | 179 ---
 .../att/research/mdbc/configurations/config-0.json | 16 -
 .../att/research/mdbc/configurations/ranges.json | 14 -
 .../mdbc/configurations/tableConfiguration.json | 19 -
 .../att/research/mdbc/examples/EtdbTestClient.java | 125 --
 .../att/research/mdbc/mixins/Cassandra2Mixin.java | 287 -----
 .../att/research/mdbc/mixins/CassandraMixin.java | 1261 -------------------
 .../com/att/research/mdbc/mixins/DBInterface.java | 92 --
 .../com/att/research/mdbc/mixins/MixinFactory.java | 125 --
 .../att/research/mdbc/mixins/MusicConnector.java | 124 --
 .../att/research/mdbc/mixins/MusicInterface.java | 173 ---
 .../com/att/research/mdbc/mixins/MusicMixin.java | 233 ----
 .../com/att/research/mdbc/mixins/MySQLMixin.java | 786 ------------
 .../java/com/att/research/mdbc/mixins/Utils.java | 220 ----
 .../com/att/research/mdbc/mixins/package-info.java | 47 -
 .../java/com/att/research/mdbc/package-info.java | 87 --
 .../com/att/research/mdbc/tables/MriReference.java | 14 -
 .../mdbc/tables/MusicRangeInformationRow.java | 16 -
 .../att/research/mdbc/tables/MusixTxDigestId.java | 15 -
 .../com/att/research/mdbc/tables/Operation.java | 28 -
 .../att/research/mdbc/tables/OperationType.java | 5 -
 .../research/mdbc/tables/PartitionInformation.java | 11 -
 .../com/att/research/mdbc/tables/StagingTable.java | 51 -
 .../att/research/mdbc/tables/TxCommitProgress.java | 206 ----
 .../att/research/mdbc/tests/ConnectionTest.java | 419 -------
 .../java/com/att/research/mdbc/tests/MAIN.java | 106 --
 .../java/com/att/research/mdbc/tests/Test.java | 105 --
 .../com/att/research/mdbc/tests/Test_Delete.java | 70 --
 .../com/att/research/mdbc/tests/Test_Insert.java | 94 --
 .../att/research/mdbc/tests/Test_Transactions.java | 74 --
 .../com/att/research/mdbc/tests/package-info.java | 165 ---
 .../mdbc/tools/CreateNodeConfigurations.java | 70 --
 .../att/research/mdbc/tools/CreatePartition.java | 59 -
 .../music/exceptions/MDBCServiceException.java | 88 ++
 .../org/onap/music/exceptions/QueryException.java | 90 ++
 .../org/onap/music/logging/EELFLoggerDelegate.java | 339 ++++++
 .../org/onap/music/logging/format/AppMessages.java | 156 +++
 .../onap/music/logging/format/ErrorSeverity.java | 37 +
 .../org/onap/music/logging/format/ErrorTypes.java | 44 +
 .../java/org/onap/music/mdbc/ArchiveProcess.java | 42 +
 .../java/org/onap/music/mdbc/Configuration.java | 18 +
 .../org/onap/music/mdbc/DatabaseOperations.java | 465 ++++++++
 .../org/onap/music/mdbc/DatabasePartition.java | 189 +++
 src/main/java/org/onap/music/mdbc/LockId.java | 46 +
 src/main/java/org/onap/music/mdbc/MDBCUtils.java | 70 ++
 .../org/onap/music/mdbc/MdbcCallableStatement.java | 738 ++++++++++++
 .../java/org/onap/music/mdbc/MdbcConnection.java | 419 +++++++
 .../org/onap/music/mdbc/MdbcPreparedStatement.java | 743 ++++++++++++
 src/main/java/org/onap/music/mdbc/MdbcServer.java | 162 +++
 .../java/org/onap/music/mdbc/MdbcServerLogic.java | 312 +++++
 .../java/org/onap/music/mdbc/MdbcStatement.java | 416 +++++++
 .../java/org/onap/music/mdbc/MusicSqlManager.java | 308 +++++
 .../java/org/onap/music/mdbc/ProxyStatement.java | 1262 ++++++++++++++++++++
 src/main/java/org/onap/music/mdbc/Range.java | 34 +
 src/main/java/org/onap/music/mdbc/RedoRow.java | 29 +
 .../java/org/onap/music/mdbc/StateManager.java | 209 ++++
 src/main/java/org/onap/music/mdbc/TableInfo.java | 75 ++
 .../mdbc/configurations/NodeConfiguration.java | 71 ++
 .../mdbc/configurations/TablesConfiguration.java | 179 +++
 .../onap/music/mdbc/configurations/config-0.json | 16 +
 .../org/onap/music/mdbc/configurations/ranges.json | 14 +
 .../mdbc/configurations/tableConfiguration.json | 19 +
 .../onap/music/mdbc/examples/EtdbTestClient.java | 125 ++
 .../onap/music/mdbc/mixins/Cassandra2Mixin.java | 287 +++++
 .../org/onap/music/mdbc/mixins/CassandraMixin.java | 1261 +++++++++++++++++++
 .../org/onap/music/mdbc/mixins/DBInterface.java | 92 ++
 .../org/onap/music/mdbc/mixins/MixinFactory.java | 125 ++
 .../org/onap/music/mdbc/mixins/MusicConnector.java | 124 ++
 .../org/onap/music/mdbc/mixins/MusicInterface.java | 173 +++
 .../org/onap/music/mdbc/mixins/MusicMixin.java | 233 ++++
 .../org/onap/music/mdbc/mixins/MySQLMixin.java | 786 ++++++++++++
 .../java/org/onap/music/mdbc/mixins/Utils.java | 220 ++++
 .../org/onap/music/mdbc/mixins/package-info.java | 47 +
 .../java/org/onap/music/mdbc/package-info.java | 87 ++
 .../org/onap/music/mdbc/tables/MriReference.java | 14 +
 .../mdbc/tables/MusicRangeInformationRow.java | 16 +
 .../onap/music/mdbc/tables/MusixTxDigestId.java | 15 +
 .../java/org/onap/music/mdbc/tables/Operation.java | 28 +
 .../org/onap/music/mdbc/tables/OperationType.java | 5 +
 .../music/mdbc/tables/PartitionInformation.java | 11 +
 .../org/onap/music/mdbc/tables/StagingTable.java | 51 +
 .../onap/music/mdbc/tables/TxCommitProgress.java | 206 ++++
 .../org/onap/music/mdbc/tests/ConnectionTest.java | 419 +++++++
 src/main/java/org/onap/music/mdbc/tests/MAIN.java | 106 ++
 src/main/java/org/onap/music/mdbc/tests/Test.java | 105 ++
 .../org/onap/music/mdbc/tests/Test_Delete.java | 70 ++
 .../org/onap/music/mdbc/tests/Test_Insert.java | 94 ++
 .../onap/music/mdbc/tests/Test_Transactions.java | 74 ++
 .../org/onap/music/mdbc/tests/package-info.java | 165 +++
 .../music/mdbc/tools/CreateNodeConfigurations.java | 70 ++
 .../org/onap/music/mdbc/tools/CreatePartition.java | 59 +
 src/main/javadoc/overview.html | 8 +-
 .../resources/META-INF/services/java.sql.Driver | 2 +-
 src/main/resources/mdbc.properties | 10 +-
 src/main/resources/mdbc_driver.properties | 10 +-
 src/main/resources/tests.json | 6 +-
 src/main/shell/mk_jboss_module | 4 +-
 .../java/com/att/research/mdbc/MDBCUtilsTest.java | 72 --
 .../java/com/att/research/mdbc/test/ALLTESTS.java | 14 -
 .../java/com/att/research/mdbc/test/BasicTest.java | 77 --
 .../com/att/research/mdbc/test/CrossSiteTest.java | 447 -------
 .../com/att/research/mdbc/test/TestCommon.java | 25 -
 .../att/research/mdbc/test/TransactionTest.java | 164 ---
 .../java/org/onap/music/mdbc/MDBCUtilsTest.java | 72 ++
 .../java/org/onap/music/mdbc/test/ALLTESTS.java | 14 +
 .../java/org/onap/music/mdbc/test/BasicTest.java | 77 ++
 .../org/onap/music/mdbc/test/CrossSiteTest.java | 447 +++++++
 .../java/org/onap/music/mdbc/test/TestCommon.java | 25 +
 .../org/onap/music/mdbc/test/TransactionTest.java | 164 +++
 136 files changed, 12490 insertions(+), 12490 deletions(-)
 delete mode 100644 src/main/java/com/att/research/exceptions/MDBCServiceException.java
 delete mode 100644 src/main/java/com/att/research/exceptions/QueryException.java
 delete mode 100644 src/main/java/com/att/research/logging/EELFLoggerDelegate.java
 delete mode 100644 src/main/java/com/att/research/logging/format/AppMessages.java
 delete mode 100644 src/main/java/com/att/research/logging/format/ErrorSeverity.java
 delete mode 100644 src/main/java/com/att/research/logging/format/ErrorTypes.java
 delete mode 100644 src/main/java/com/att/research/mdbc/ArchiveProcess.java
 delete mode 100644 src/main/java/com/att/research/mdbc/Configuration.java
 delete mode 100644 src/main/java/com/att/research/mdbc/DatabaseOperations.java
 delete mode 100644 src/main/java/com/att/research/mdbc/DatabasePartition.java
 delete mode 100644 src/main/java/com/att/research/mdbc/LockId.java
 delete mode 100644 src/main/java/com/att/research/mdbc/MDBCUtils.java
 delete mode 100644 src/main/java/com/att/research/mdbc/MdbcCallableStatement.java
 delete mode 100644 src/main/java/com/att/research/mdbc/MdbcConnection.java
 delete mode 100644 src/main/java/com/att/research/mdbc/MdbcPreparedStatement.java
 delete mode 100644 src/main/java/com/att/research/mdbc/MdbcServer.java
 delete mode 100644 src/main/java/com/att/research/mdbc/MdbcServerLogic.java
 delete mode 100644 src/main/java/com/att/research/mdbc/MdbcStatement.java
 delete mode 100755 src/main/java/com/att/research/mdbc/MusicSqlManager.java
 delete mode 100755 src/main/java/com/att/research/mdbc/ProxyStatement.java
 delete mode 100644 src/main/java/com/att/research/mdbc/Range.java
 delete mode 100644 src/main/java/com/att/research/mdbc/RedoRow.java
 delete mode 100644 src/main/java/com/att/research/mdbc/StateManager.java
 delete mode 100755 src/main/java/com/att/research/mdbc/TableInfo.java
 delete mode 100644 src/main/java/com/att/research/mdbc/configurations/NodeConfiguration.java
 delete mode 100644 src/main/java/com/att/research/mdbc/configurations/TablesConfiguration.java
 delete mode 100644 src/main/java/com/att/research/mdbc/configurations/config-0.json
 delete mode 100644 src/main/java/com/att/research/mdbc/configurations/ranges.json
 delete mode 100644 src/main/java/com/att/research/mdbc/configurations/tableConfiguration.json
 delete mode 100644 src/main/java/com/att/research/mdbc/examples/EtdbTestClient.java
delete mode 100755 src/main/java/com/att/research/mdbc/mixins/Cassandra2Mixin.java delete mode 100755 src/main/java/com/att/research/mdbc/mixins/CassandraMixin.java delete mode 100755 src/main/java/com/att/research/mdbc/mixins/DBInterface.java delete mode 100755 src/main/java/com/att/research/mdbc/mixins/MixinFactory.java delete mode 100755 src/main/java/com/att/research/mdbc/mixins/MusicConnector.java delete mode 100755 src/main/java/com/att/research/mdbc/mixins/MusicInterface.java delete mode 100644 src/main/java/com/att/research/mdbc/mixins/MusicMixin.java delete mode 100755 src/main/java/com/att/research/mdbc/mixins/MySQLMixin.java delete mode 100755 src/main/java/com/att/research/mdbc/mixins/Utils.java delete mode 100755 src/main/java/com/att/research/mdbc/mixins/package-info.java delete mode 100755 src/main/java/com/att/research/mdbc/package-info.java delete mode 100644 src/main/java/com/att/research/mdbc/tables/MriReference.java delete mode 100644 src/main/java/com/att/research/mdbc/tables/MusicRangeInformationRow.java delete mode 100644 src/main/java/com/att/research/mdbc/tables/MusixTxDigestId.java delete mode 100644 src/main/java/com/att/research/mdbc/tables/Operation.java delete mode 100644 src/main/java/com/att/research/mdbc/tables/OperationType.java delete mode 100644 src/main/java/com/att/research/mdbc/tables/PartitionInformation.java delete mode 100644 src/main/java/com/att/research/mdbc/tables/StagingTable.java delete mode 100644 src/main/java/com/att/research/mdbc/tables/TxCommitProgress.java delete mode 100644 src/main/java/com/att/research/mdbc/tests/ConnectionTest.java delete mode 100755 src/main/java/com/att/research/mdbc/tests/MAIN.java delete mode 100755 src/main/java/com/att/research/mdbc/tests/Test.java delete mode 100755 src/main/java/com/att/research/mdbc/tests/Test_Delete.java delete mode 100755 src/main/java/com/att/research/mdbc/tests/Test_Insert.java delete mode 100755 src/main/java/com/att/research/mdbc/tests/Test_Transactions.java delete mode 100755 src/main/java/com/att/research/mdbc/tests/package-info.java delete mode 100644 src/main/java/com/att/research/mdbc/tools/CreateNodeConfigurations.java delete mode 100644 src/main/java/com/att/research/mdbc/tools/CreatePartition.java create mode 100644 src/main/java/org/onap/music/exceptions/MDBCServiceException.java create mode 100644 src/main/java/org/onap/music/exceptions/QueryException.java create mode 100644 src/main/java/org/onap/music/logging/EELFLoggerDelegate.java create mode 100644 src/main/java/org/onap/music/logging/format/AppMessages.java create mode 100644 src/main/java/org/onap/music/logging/format/ErrorSeverity.java create mode 100644 src/main/java/org/onap/music/logging/format/ErrorTypes.java create mode 100644 src/main/java/org/onap/music/mdbc/ArchiveProcess.java create mode 100644 src/main/java/org/onap/music/mdbc/Configuration.java create mode 100644 src/main/java/org/onap/music/mdbc/DatabaseOperations.java create mode 100644 src/main/java/org/onap/music/mdbc/DatabasePartition.java create mode 100644 src/main/java/org/onap/music/mdbc/LockId.java create mode 100644 src/main/java/org/onap/music/mdbc/MDBCUtils.java create mode 100644 src/main/java/org/onap/music/mdbc/MdbcCallableStatement.java create mode 100644 src/main/java/org/onap/music/mdbc/MdbcConnection.java create mode 100644 src/main/java/org/onap/music/mdbc/MdbcPreparedStatement.java create mode 100644 src/main/java/org/onap/music/mdbc/MdbcServer.java create mode 100644 src/main/java/org/onap/music/mdbc/MdbcServerLogic.java create mode 
100644 src/main/java/org/onap/music/mdbc/MdbcStatement.java create mode 100755 src/main/java/org/onap/music/mdbc/MusicSqlManager.java create mode 100755 src/main/java/org/onap/music/mdbc/ProxyStatement.java create mode 100644 src/main/java/org/onap/music/mdbc/Range.java create mode 100644 src/main/java/org/onap/music/mdbc/RedoRow.java create mode 100644 src/main/java/org/onap/music/mdbc/StateManager.java create mode 100755 src/main/java/org/onap/music/mdbc/TableInfo.java create mode 100644 src/main/java/org/onap/music/mdbc/configurations/NodeConfiguration.java create mode 100644 src/main/java/org/onap/music/mdbc/configurations/TablesConfiguration.java create mode 100644 src/main/java/org/onap/music/mdbc/configurations/config-0.json create mode 100644 src/main/java/org/onap/music/mdbc/configurations/ranges.json create mode 100644 src/main/java/org/onap/music/mdbc/configurations/tableConfiguration.json create mode 100644 src/main/java/org/onap/music/mdbc/examples/EtdbTestClient.java create mode 100755 src/main/java/org/onap/music/mdbc/mixins/Cassandra2Mixin.java create mode 100755 src/main/java/org/onap/music/mdbc/mixins/CassandraMixin.java create mode 100755 src/main/java/org/onap/music/mdbc/mixins/DBInterface.java create mode 100755 src/main/java/org/onap/music/mdbc/mixins/MixinFactory.java create mode 100755 src/main/java/org/onap/music/mdbc/mixins/MusicConnector.java create mode 100755 src/main/java/org/onap/music/mdbc/mixins/MusicInterface.java create mode 100644 src/main/java/org/onap/music/mdbc/mixins/MusicMixin.java create mode 100755 src/main/java/org/onap/music/mdbc/mixins/MySQLMixin.java create mode 100755 src/main/java/org/onap/music/mdbc/mixins/Utils.java create mode 100755 src/main/java/org/onap/music/mdbc/mixins/package-info.java create mode 100755 src/main/java/org/onap/music/mdbc/package-info.java create mode 100644 src/main/java/org/onap/music/mdbc/tables/MriReference.java create mode 100644 src/main/java/org/onap/music/mdbc/tables/MusicRangeInformationRow.java create mode 100644 src/main/java/org/onap/music/mdbc/tables/MusixTxDigestId.java create mode 100644 src/main/java/org/onap/music/mdbc/tables/Operation.java create mode 100644 src/main/java/org/onap/music/mdbc/tables/OperationType.java create mode 100644 src/main/java/org/onap/music/mdbc/tables/PartitionInformation.java create mode 100644 src/main/java/org/onap/music/mdbc/tables/StagingTable.java create mode 100644 src/main/java/org/onap/music/mdbc/tables/TxCommitProgress.java create mode 100644 src/main/java/org/onap/music/mdbc/tests/ConnectionTest.java create mode 100755 src/main/java/org/onap/music/mdbc/tests/MAIN.java create mode 100755 src/main/java/org/onap/music/mdbc/tests/Test.java create mode 100755 src/main/java/org/onap/music/mdbc/tests/Test_Delete.java create mode 100755 src/main/java/org/onap/music/mdbc/tests/Test_Insert.java create mode 100755 src/main/java/org/onap/music/mdbc/tests/Test_Transactions.java create mode 100755 src/main/java/org/onap/music/mdbc/tests/package-info.java create mode 100644 src/main/java/org/onap/music/mdbc/tools/CreateNodeConfigurations.java create mode 100644 src/main/java/org/onap/music/mdbc/tools/CreatePartition.java delete mode 100644 src/test/java/com/att/research/mdbc/MDBCUtilsTest.java delete mode 100755 src/test/java/com/att/research/mdbc/test/ALLTESTS.java delete mode 100755 src/test/java/com/att/research/mdbc/test/BasicTest.java delete mode 100755 src/test/java/com/att/research/mdbc/test/CrossSiteTest.java delete mode 100755 
src/test/java/com/att/research/mdbc/test/TestCommon.java delete mode 100755 src/test/java/com/att/research/mdbc/test/TransactionTest.java create mode 100644 src/test/java/org/onap/music/mdbc/MDBCUtilsTest.java create mode 100755 src/test/java/org/onap/music/mdbc/test/ALLTESTS.java create mode 100755 src/test/java/org/onap/music/mdbc/test/BasicTest.java create mode 100755 src/test/java/org/onap/music/mdbc/test/CrossSiteTest.java create mode 100755 src/test/java/org/onap/music/mdbc/test/TestCommon.java create mode 100755 src/test/java/org/onap/music/mdbc/test/TransactionTest.java diff --git a/README.md b/README.md index 2d21c3b..8f21f2e 100755 --- a/README.md +++ b/README.md @@ -26,7 +26,7 @@ mvn install:install-file -Dfile=target/MUSIC.jar -DpomFile=./pom.xml 1) Create a configuration file using as a template: -src/main/java/com/att/research/mdbc/configurations/tableConfiguration.json +src/main/java/org/onap/music/mdbc/configurations/tableConfiguration.json The meaning of the fields is as follows: @@ -45,11 +45,11 @@ o replicationFactor: indicates the needs of replication for this partition (the 2) Create the configuration for each node using the command line program in the following location: -src/main/java/com/att/research/mdbc/tools/CreateNodeConfiguration.java +src/main/java/org/onap/music/mdbc/tools/CreateNodeConfiguration.java To run it, use the following parameters: --t ../ETDB/src/main/java/com/att/research/mdbc/configurations/tableConfiguration.json -b base -o /Users/quique/Desktop/ +-t ../ETDB/src/main/java/org/onap/music/mdbc/configurations/tableConfiguration.json -b base -o /Users/quique/Desktop/ This program is going to generate all the required configuration json for each ETDB node in the system and additionally initialize all the corresponding rows and tables for the system to correctly work. The meaning of the parameters is: • -t: the tableConfiguration.json explained in the step 1 @@ -63,11 +63,11 @@ Some notes about the limitations of this command line program: 3) Run each of the server in its corresponding node: The ETDB server can be found in the file: -src/main/java/com/att/research/mdbc/MdbcServer.java +src/main/java/org/onap/music/mdbc/MdbcServer.java It requires three parameters: - -c ../ETDB/src/main/java/com/att/research/mdbc/configurations/config-0.json -u jdbc:mysql://localhost -p 30000 + -c ../ETDB/src/main/java/org/onap/music/mdbc/configurations/config-0.json -u jdbc:mysql://localhost -p 30000 -c is a json with the configuration created in step 2. • -u is where the local mysql database is located (without the database name, just the url, see example) @@ -75,7 +75,7 @@ It requires three parameters: 4) Run the clients. A client example can be found in this folder: -src/main/java/com/att/research/mdbc/examples +src/main/java/org/onap/music/mdbc/examples ## Building METRIC @@ -113,7 +113,7 @@ Dirty rows will be copied, as needed back into the database from Cassandra befor 3. If you supply properties to the DriverManager.getConnection(String, Properties) call, use the properties defined below to control behavior of the proxy. 4. Load the driver using the following call: - Class.forName("com.att.research.mdbc.ProxyDriver"); + Class.forName("org.onap.music.mdbc.ProxyDriver"); The following properties can be passed to the JDBC DriverManager.getConnection(String, Properties) call to influence how METRIC works. @@ -160,14 +160,14 @@ these tags. 
- - com.att.research.mdbc.ProxyDriver + + org.onap.music.mdbc.ProxyDriver ``` -Note: This assumes that you have built and installed the com.att.research.mdbc module within JBoss. +Note: This assumes that you have built and installed the org.onap.music.mdbc module within JBoss. ### To Define a Tomcat DataSource Resource @@ -181,7 +181,7 @@ probably need to make changes to the _connectionProperties_ attribute. type="javax.sql.DataSource" factory="org.apache.tomcat.jdbc.pool.DataSourceFactory" uniqueResourceName="process-engine" - driverClassName="com.att.research.mdbc.ProxyDriver" + driverClassName="org.onap.music.mdbc.ProxyDriver" url="jdbc:mdbc:./camunda-h2-dbs/process-engine;MVCC=TRUE;TRACE_LEVEL_FILE=0;DB_CLOSE_ON_EXIT=FALSE" connectionProperties="myid=0;replicas=0,1,2;music_keyspace=camunda;music_address=localhost" username="sa" @@ -205,7 +205,7 @@ or local file based (`jdbc:h2:path_to_file`) database. ## Testing Mixin Combinations -The files under `src/main/java/com/att/research/mdbc/tests` can be used to test various METRIC +The files under `src/main/java/org/onap/music/mdbc/tests` can be used to test various METRIC operations with various combinations of Mixins. The tests are controlled via the file `src/main/resources/tests.json`. More details are available in the javadoc for this package. diff --git a/pom.xml b/pom.xml index d1f3ef7..05994f5 100755 --- a/pom.xml +++ b/pom.xml @@ -1,6 +1,6 @@ 4.0.0 - com.att.research.mdbc + org.onap.music.mdbc mdbc 0.0.1-SNAPSHOT mdbc diff --git a/src/main/java/com/att/research/exceptions/MDBCServiceException.java b/src/main/java/com/att/research/exceptions/MDBCServiceException.java deleted file mode 100644 index 46cc1f7..0000000 --- a/src/main/java/com/att/research/exceptions/MDBCServiceException.java +++ /dev/null @@ -1,88 +0,0 @@ -/* - * ============LICENSE_START========================================== - * org.onap.music - * =================================================================== - * Copyright (c) 2017 AT&T Intellectual Property - * =================================================================== - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- * - * ============LICENSE_END============================================= - * ==================================================================== - */ - -package com.att.research.exceptions; - -/** - * @author inam - * - */ -public class MDBCServiceException extends Exception { - - - /** - * - */ - private static final long serialVersionUID = 1L; - private int errorCode; - private String errorMessage; - - public int getErrorCode() { - return errorCode; - } - - - public void setErrorCode(int errorCode) { - this.errorCode = errorCode; - } - - - public String getErrorMessage() { - return errorMessage; - } - - - public void setErrorMessage(String errorMessage) { - this.errorMessage = errorMessage; - } - - - public MDBCServiceException() { - super(); - } - - - public MDBCServiceException(String message) { - super(message); - - } - - - public MDBCServiceException(Throwable cause) { - super(cause); - - } - - - public MDBCServiceException(String message, Throwable cause) { - super(message, cause); - - } - - - public MDBCServiceException(String message, Throwable cause, boolean enableSuppression, - boolean writableStackTrace) { - super(message, cause, enableSuppression, writableStackTrace); - - } - -} diff --git a/src/main/java/com/att/research/exceptions/QueryException.java b/src/main/java/com/att/research/exceptions/QueryException.java deleted file mode 100644 index 77445e5..0000000 --- a/src/main/java/com/att/research/exceptions/QueryException.java +++ /dev/null @@ -1,90 +0,0 @@ -/* - * ============LICENSE_START========================================== - * org.onap.music - * =================================================================== - * Copyright (c) 2017 AT&T Intellectual Property - * =================================================================== - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- * - * ============LICENSE_END============================================= - * ==================================================================== - */ -package com.att.research.exceptions; - - - -/** - * @author inam - * - */ -public class QueryException extends Exception { - - /** - * - */ - private static final long serialVersionUID = 1L; - @SuppressWarnings("unused") - private int errorCode; - - - /** - * - */ - public QueryException() { - super(); - } - - /** - * @param message - */ - public QueryException(String message) { - super(message); - } - - - - /** - * @param message - */ - public QueryException(String message, int errorCode) { - super(message); - this.errorCode = errorCode; - } - - /** - * @param cause - */ - public QueryException(Throwable cause) { - super(cause); - } - - /** - * @param message - * @param cause - */ - public QueryException(String message, Throwable cause) { - super(message, cause); - } - - /** - * @param message - * @param cause - * @param enableSuppression - * @param writableStackTrace - */ - public QueryException(String message, Throwable cause, boolean enableSuppression, - boolean writableStackTrace) { - super(message, cause, enableSuppression, writableStackTrace); - } - -} diff --git a/src/main/java/com/att/research/logging/EELFLoggerDelegate.java b/src/main/java/com/att/research/logging/EELFLoggerDelegate.java deleted file mode 100644 index 4e29a75..0000000 --- a/src/main/java/com/att/research/logging/EELFLoggerDelegate.java +++ /dev/null @@ -1,339 +0,0 @@ - -package com.att.research.logging; - -import static com.att.eelf.configuration.Configuration.MDC_SERVER_FQDN; -import static com.att.eelf.configuration.Configuration.MDC_SERVER_IP_ADDRESS; -import static com.att.eelf.configuration.Configuration.MDC_SERVICE_INSTANCE_ID; -import static com.att.eelf.configuration.Configuration.MDC_SERVICE_NAME; - -import java.net.InetAddress; -import java.text.MessageFormat; -import java.util.concurrent.ConcurrentHashMap; -import java.util.concurrent.ConcurrentMap; - -import javax.servlet.http.HttpServletRequest; - -import org.slf4j.MDC; - -import com.att.eelf.configuration.EELFLogger; -import com.att.eelf.configuration.EELFManager; -import com.att.eelf.configuration.SLF4jWrapper; - -public class EELFLoggerDelegate extends SLF4jWrapper implements EELFLogger { - - public static final EELFLogger errorLogger = EELFManager.getInstance().getErrorLogger(); - public static final EELFLogger applicationLogger = EELFManager.getInstance().getApplicationLogger(); - public static final EELFLogger auditLogger = EELFManager.getInstance().getAuditLogger(); - public static final EELFLogger metricsLogger = EELFManager.getInstance().getMetricsLogger(); - public static final EELFLogger debugLogger = EELFManager.getInstance().getDebugLogger(); - - private String className; - private static ConcurrentMap classMap = new ConcurrentHashMap<>(); - - public EELFLoggerDelegate(final String className) { - super(className); - this.className = className; - } - - /** - * Convenience method that gets a logger for the specified class. - * - * @see #getLogger(String) - * - * @param clazz - * @return Instance of EELFLoggerDelegate - */ - public static EELFLoggerDelegate getLogger(Class clazz) { - return getLogger(clazz.getName()); - } - - /** - * Gets a logger for the specified class name. If the logger does not already - * exist in the map, this creates a new logger. - * - * @param className - * If null or empty, uses EELFLoggerDelegate as the class name. 
- * @return Instance of EELFLoggerDelegate - */ - public static EELFLoggerDelegate getLogger(final String className) { - String classNameNeverNull = className == null || "".equals(className) ? EELFLoggerDelegate.class.getName() - : className; - EELFLoggerDelegate delegate = classMap.get(classNameNeverNull); - if (delegate == null) { - delegate = new EELFLoggerDelegate(className); - classMap.put(className, delegate); - } - return delegate; - } - - /** - * Logs a message at the lowest level: trace. - * - * @param logger - * @param msg - */ - public void trace(EELFLogger logger, String msg) { - if (logger.isTraceEnabled()) { - logger.trace(msg); - } - } - - /** - * Logs a message with parameters at the lowest level: trace. - * - * @param logger - * @param msg - * @param arguments - */ - public void trace(EELFLogger logger, String msg, Object... arguments) { - if (logger.isTraceEnabled()) { - logger.trace(msg, arguments); - } - } - - /** - * Logs a message and throwable at the lowest level: trace. - * - * @param logger - * @param msg - * @param th - */ - public void trace(EELFLogger logger, String msg, Throwable th) { - if (logger.isTraceEnabled()) { - logger.trace(msg, th); - } - } - - /** - * Logs a message at the second-lowest level: debug. - * - * @param logger - * @param msg - */ - public void debug(EELFLogger logger, String msg) { - if (logger.isDebugEnabled()) { - logger.debug(msg); - } - } - - /** - * Logs a message with parameters at the second-lowest level: debug. - * - * @param logger - * @param msg - * @param arguments - */ - public void debug(EELFLogger logger, String msg, Object... arguments) { - if (logger.isDebugEnabled()) { - logger.debug(msg, arguments); - } - } - - /** - * Logs a message and throwable at the second-lowest level: debug. - * - * @param logger - * @param msg - * @param th - */ - public void debug(EELFLogger logger, String msg, Throwable th) { - if (logger.isDebugEnabled()) { - logger.debug(msg, th); - } - } - - /** - * Logs a message at info level. - * - * @param logger - * @param msg - */ - public void info(EELFLogger logger, String msg) { - logger.info(className + " - "+msg); - } - - /** - * Logs a message with parameters at info level. - * - * @param logger - * @param msg - * @param arguments - */ - public void info(EELFLogger logger, String msg, Object... arguments) { - logger.info(msg, arguments); - } - - /** - * Logs a message and throwable at info level. - * - * @param logger - * @param msg - * @param th - */ - public void info(EELFLogger logger, String msg, Throwable th) { - logger.info(msg, th); - } - - /** - * Logs a message at warn level. - * - * @param logger - * @param msg - */ - public void warn(EELFLogger logger, String msg) { - logger.warn(msg); - } - - /** - * Logs a message with parameters at warn level. - * - * @param logger - * @param msg - * @param arguments - */ - public void warn(EELFLogger logger, String msg, Object... arguments) { - logger.warn(msg, arguments); - } - - /** - * Logs a message and throwable at warn level. - * - * @param logger - * @param msg - * @param th - */ - public void warn(EELFLogger logger, String msg, Throwable th) { - logger.warn(msg, th); - } - - /** - * Logs a message at error level. - * - * @param logger - * @param msg - */ - public void error(EELFLogger logger, String msg) { - logger.error(className+ " - " + msg); - } - - /** - * Logs a message with parameters at error level. - * - * @param logger - * @param msg - * @param arguments - */ - public void error(EELFLogger logger, String msg, Object... 
arguments) { - logger.error(msg, arguments); - } - - /** - * Logs a message and throwable at error level. - * - * @param logger - * @param msg - * @param th - */ - public void error(EELFLogger logger, String msg, Throwable th) { - logger.error(msg, th); - } - - /** - * Logs a message with the associated alarm severity at error level. - * - * @param logger - * @param msg - * @param severtiy - */ - public void error(EELFLogger logger, String msg, Object /*AlarmSeverityEnum*/ severtiy) { - logger.error(msg); - } - - /** - * Initializes the logger context. - */ - public void init() { - setGlobalLoggingContext(); - final String msg = "############################ Logging is started. ############################"; - // These loggers emit the current date-time without being told. - info(applicationLogger, msg); - error(errorLogger, msg); - debug(debugLogger, msg); - info(auditLogger, msg); - info(metricsLogger, msg); - } - - - /** - * Builds a message using a template string and the arguments. - * - * @param message - * @param args - * @return - */ - @SuppressWarnings("unused") - private String formatMessage(String message, Object... args) { - StringBuilder sbFormattedMessage = new StringBuilder(); - if (args != null && args.length > 0 && message != null && message != "") { - MessageFormat mf = new MessageFormat(message); - sbFormattedMessage.append(mf.format(args)); - } else { - sbFormattedMessage.append(message); - } - - return sbFormattedMessage.toString(); - } - - /** - * Loads all the default logging fields into the MDC context. - */ - private void setGlobalLoggingContext() { - MDC.put(MDC_SERVICE_INSTANCE_ID, ""); - try { - MDC.put(MDC_SERVER_FQDN, InetAddress.getLocalHost().getHostName()); - MDC.put(MDC_SERVER_IP_ADDRESS, InetAddress.getLocalHost().getHostAddress()); - } catch (Exception e) { - errorLogger.error("setGlobalLoggingContext failed", e); - } - } - - public static void mdcPut(String key, String value) { - MDC.put(key, value); - } - - public static String mdcGet(String key) { - return MDC.get(key); - } - - public static void mdcRemove(String key) { - MDC.remove(key); - } - - /** - * Loads the RequestId/TransactionId into the MDC which it should be receiving - * with an each incoming REST API request. Also, configures few other request - * based logging fields into the MDC context. - * - * @param req - * @param appName - */ - public void setRequestBasedDefaultsIntoGlobalLoggingContext(HttpServletRequest req, String appName) { - // Load the default fields - setGlobalLoggingContext(); - - // Load the request based fields - if (req != null) { - - - // Rest Path - MDC.put(MDC_SERVICE_NAME, req.getServletPath()); - - // Client IPAddress i.e. IPAddress of the remote host who is making - // this request. 
- String clientIPAddress = req.getHeader("X-FORWARDED-FOR"); - if (clientIPAddress == null) { - clientIPAddress = req.getRemoteAddr(); - } - } - } -} diff --git a/src/main/java/com/att/research/logging/format/AppMessages.java b/src/main/java/com/att/research/logging/format/AppMessages.java deleted file mode 100644 index a5de413..0000000 --- a/src/main/java/com/att/research/logging/format/AppMessages.java +++ /dev/null @@ -1,156 +0,0 @@ -/* - * ============LICENSE_START========================================== - * org.onap.music - * =================================================================== - * Copyright (c) 2017 AT&T Intellectual Property - * =================================================================== - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - * ============LICENSE_END============================================= - * ==================================================================== - */ - -package com.att.research.logging.format; - -/** - * @author inam - * - */ -public enum AppMessages { - - - - /* - * 100-199 Security/Permission Related - Authentication problems - * [ERR100E] Missing Information - * [ERR101E] Authentication error occured - * - * 200-299 Availability/Timeout Related/IO - connectivity error - connection timeout - * [ERR200E] Connectivity - * [ERR201E] Host not available - * [ERR202E] Error while connecting - * [ERR203E] IO Error has occured - * [ERR204E] Execution Interrupted - * [ERR205E] Session Expired - * - * - * - * 300-399 Data Access/Integrity Related - * [ERR300E] Incorrect data - * - * 400-499 - Cassandra Query Related - * - * - * 500-599 - Zookeepr/Locking Related - - * - * - * 600 - 699 - MDBC Service Errors - * [ERR600E] Error initializing the MDBC - * - * 700-799 Schema Interface Type/Validation - received Pay-load checksum is - * invalid - received JSON is not valid - * - * 800-899 Business/Flow Processing Related - check out to service is not - * allowed - Roll-back is done - failed to generate heat file - * - * - * 900-999 Unknown Errors - Unexpected exception - * [ERR900E] Unexpected error occured - * [ERR901E] Number format exception - * - * - * 1000-1099 Reserved - do not use - * - */ - - - - - MISSINGINFO("[ERR100E]", "Missing Information ","Details: NA", "Please check application credentials and/or headers"), - AUTHENTICATIONERROR("[ERR101E]", "Authentication error occured ","Details: NA", "Please verify application credentials"), - - CONNCECTIVITYERROR("[ERR200E]"," Connectivity error","Details: NA ","Please check connectivity to external resources"), - HOSTUNAVAILABLE("[ERR201E]","Host not available","Details: NA","Please verify the host details"), - IOERROR("[ERR203E]","IO Error has occured","","Please check IO"), - EXECUTIONINTERRUPTED("[ERR204E]"," Execution Interrupted","",""), - - - INCORRECTDATA("[ERR300E]"," Incorrect data",""," Please verify the request payload and try again"), - MULTIPLERECORDS("[ERR301E]"," Multiple records found",""," Please verify the request payload and try again"), - 
ALREADYEXIST("[ERR302E]"," Record already exist",""," Please verify the request payload and try again"), - MISSINGDATA("[ERR300E]"," Incorrect data",""," Please verify the request payload and try again"), - - QUERYERROR("[ERR400E]","Error while processing query",""," Please verify the query"), - - - UNKNOWNERROR("[ERR900E]"," Unexpected error occured",""," Please check logs for details"); - - - - ErrorTypes eType; - ErrorSeverity alarmSeverity; - ErrorSeverity errorSeverity; - String errorCode; - String errorDescription; - String details; - String resolution; - - - AppMessages(String errorCode, String errorDescription, String details,String resolution) { - - this.errorCode = errorCode; - this.errorDescription = errorDescription; - this.details = details; - this.resolution = resolution; - } - - - - - AppMessages(ErrorTypes eType, ErrorSeverity alarmSeverity, - ErrorSeverity errorSeverity, String errorCode, String errorDescription, String details, - String resolution) { - - this.eType = eType; - this.alarmSeverity = alarmSeverity; - this.errorSeverity = errorSeverity; - this.errorCode = errorCode; - this.errorDescription = errorDescription; - this.details = details; - this.resolution = resolution; - } - - public String getDetails() { - return this.details; - } - - public String getResolution() { - return this.resolution; - } - - public String getErrorCode() { - return this.errorCode; - } - - public String getErrorDescription() { - return this.errorDescription; - } - - - - - - - -} diff --git a/src/main/java/com/att/research/logging/format/ErrorSeverity.java b/src/main/java/com/att/research/logging/format/ErrorSeverity.java deleted file mode 100644 index dbe3e54..0000000 --- a/src/main/java/com/att/research/logging/format/ErrorSeverity.java +++ /dev/null @@ -1,37 +0,0 @@ -/* - * ============LICENSE_START========================================== - * org.onap.music - * =================================================================== - * Copyright (c) 2017 AT&T Intellectual Property - * =================================================================== - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- * - * ============LICENSE_END============================================= - * ==================================================================== - */ -package com.att.research.logging.format; - -/** - * @author inam - * - */ -public enum ErrorSeverity { - INFO, - WARN, - ERROR, - FATAL, - CRITICAL, - MAJOR, - MINOR, - NONE, -} diff --git a/src/main/java/com/att/research/logging/format/ErrorTypes.java b/src/main/java/com/att/research/logging/format/ErrorTypes.java deleted file mode 100644 index 620528d..0000000 --- a/src/main/java/com/att/research/logging/format/ErrorTypes.java +++ /dev/null @@ -1,44 +0,0 @@ -/* - * ============LICENSE_START========================================== - * org.onap.music - * =================================================================== - * Copyright (c) 2017 AT&T Intellectual Property - * =================================================================== - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - * ============LICENSE_END============================================= - * ==================================================================== - */ -package com.att.research.logging.format; - -import com.att.eelf.i18n.EELFResolvableErrorEnum; - -/** - * @author inam - * - */ -public enum ErrorTypes implements EELFResolvableErrorEnum { - - - CONNECTIONERROR, - SESSIONEXPIRED, - AUTHENTICATIONERROR, - SERVICEUNAVAILABLE, - QUERYERROR, - DATAERROR, - GENERALSERVICEERROR, - MUSICSERVICEERROR, - LOCKINGERROR, - UNKNOWN, - -} diff --git a/src/main/java/com/att/research/mdbc/ArchiveProcess.java b/src/main/java/com/att/research/mdbc/ArchiveProcess.java deleted file mode 100644 index 8290d66..0000000 --- a/src/main/java/com/att/research/mdbc/ArchiveProcess.java +++ /dev/null @@ -1,42 +0,0 @@ -package com.att.research.mdbc; - -import org.json.JSONObject; - -import com.att.research.mdbc.mixins.DBInterface; -import com.att.research.mdbc.mixins.MusicInterface; - -public class ArchiveProcess { - protected MusicInterface mi; - protected DBInterface dbi; - - //TODO: This is a place holder for taking snapshots and moving data from redo record into actual tables - - /** - * This method is called whenever there is a DELETE on the transaction digest and should be called when ownership changes, if required - * It updates the MUSIC/Cassandra tables (both dirty bits and actual data) corresponding to the SQL DELETE. - * Music propagates it to the other replicas. - * @param tableName This is the table on which the select is being performed - * @param oldRow This is information about the row that is being deleted - */ - @SuppressWarnings("unused") - private void deleteFromEntityTableInMusic(String tableName, JSONObject oldRow) { - TableInfo ti = dbi.getTableInfo(tableName); - mi.deleteFromEntityTableInMusic(ti,tableName, oldRow); - } - - /** - * This method is called whenever there is an INSERT or UPDATE to a the transaction digest, and should be called by an - * ownership chance. 
It updates the MUSIC/Cassandra tables (both dirty bits and actual data) corresponding to the SQL write. - * Music propagates it to the other replicas. If the local database is in the middle of a transaction, the updates to MUSIC are - * delayed until the transaction is either committed or rolled back. - * - * @param tableName This is the table that has changed. - * @param changedRow This is information about the row that has changed, an array of objects representing the data being inserted/updated - */ - @SuppressWarnings("unused") - private void updateDirtyRowAndEntityTableInMusic(String tableName, JSONObject changedRow) { - //TODO: is this right? should we be saving updates at the client? we should leverage JDBC to handle this - TableInfo ti = dbi.getTableInfo(tableName); - mi.updateDirtyRowAndEntityTableInMusic(ti,tableName, changedRow); - } -} diff --git a/src/main/java/com/att/research/mdbc/Configuration.java b/src/main/java/com/att/research/mdbc/Configuration.java deleted file mode 100644 index 23aa6af..0000000 --- a/src/main/java/com/att/research/mdbc/Configuration.java +++ /dev/null @@ -1,18 +0,0 @@ -package com.att.research.mdbc; - -public class Configuration { - /** The property name to use to connect to cassandra*/ - public static final String KEY_CASSANDRA_URL = "CASSANDRA_URL"; - /** The property name to use to enable/disable the MusicSqlManager entirely. */ - public static final String KEY_DISABLED = "disabled"; - /** The property name to use to select the DB 'mixin'. */ - public static final String KEY_DB_MIXIN_NAME = "MDBC_DB_MIXIN"; - /** The property name to use to select the MUSIC 'mixin'. */ - public static final String KEY_MUSIC_MIXIN_NAME = "MDBC_MUSIC_MIXIN"; - /** The name of the default mixin to use for the DBInterface. */ - public static final String DB_MIXIN_DEFAULT = "mysql";//"h2"; - /** The name of the default mixin to use for the MusicInterface. 
*/ - public static final String MUSIC_MIXIN_DEFAULT = "cassandra2";//"cassandra2"; - /** Default cassandra ulr*/ - public static final String CASSANDRA_URL_DEFAULT = "localhost";//"cassandra2"; -} diff --git a/src/main/java/com/att/research/mdbc/DatabaseOperations.java b/src/main/java/com/att/research/mdbc/DatabaseOperations.java deleted file mode 100644 index c896b84..0000000 --- a/src/main/java/com/att/research/mdbc/DatabaseOperations.java +++ /dev/null @@ -1,465 +0,0 @@ -package com.att.research.mdbc; - -import com.att.research.exceptions.MDBCServiceException; -import com.att.research.logging.EELFLoggerDelegate; -import org.onap.music.datastore.PreparedQueryObject; -import org.onap.music.exceptions.MusicLockingException; -import org.onap.music.exceptions.MusicQueryException; -import org.onap.music.exceptions.MusicServiceException; -import org.onap.music.main.MusicCore; -import org.onap.music.main.ResultType; -import org.onap.music.main.ReturnType; - -import java.util.*; - -public class DatabaseOperations { - private static EELFLoggerDelegate logger = EELFLoggerDelegate.getLogger(DatabaseOperations.class); - /** - * This functions is used to generate cassandra uuid - * @return a random UUID that can be used for fields of type uuid - */ - public static String generateUniqueKey() { - return UUID.randomUUID().toString(); - } - - /** - * This functions returns the primary key used to managed a specific row in the TableToPartition tables in Music - * @param namespace namespace where the TableToPartition resides - * @param tableToPartitionTableName name of the tableToPartition table - * @param tableName name of the application table that is being added to the system - * @return primary key to be used with MUSIC - */ - public static String getTableToPartitionPrimaryKey(String namespace, String tableToPartitionTableName, String tableName){ - return namespace+"."+tableToPartitionTableName+"."+tableName; - } - - /** - * Create a new row for a table, with not assigned partition - * @param namespace namespace where the TableToPartition resides - * @param tableToPartitionTableName name of the tableToPartition table - * @param tableName name of the application table that is being added to the system - * @param lockId if the lock for this key is already hold, this is the id of that lock. 
- * May be null if lock is not hold for the corresponding key - */ - public static void createNewTableToPartitionRow(String namespace, String tableToPartitionTableName, - String tableName,String lockId) throws MDBCServiceException { - final String primaryKey = getTableToPartitionPrimaryKey(namespace,tableToPartitionTableName,tableName); - StringBuilder insert = new StringBuilder("INSERT INTO ") - .append(namespace) - .append('.') - .append(tableToPartitionTableName) - .append(" (tablename) VALUES ") - .append("('") - .append(tableName) - .append("');"); - PreparedQueryObject query = new PreparedQueryObject(); - query.appendQueryString(insert.toString()); - try { - executedLockedPut(namespace,tableToPartitionTableName,tableName,query,lockId,null); - } catch (MDBCServiceException e) { - logger.error("Initialization error: Failure to create new row table to partition table "); - throw new MDBCServiceException("Initialization error: Failure to create new row table to partition table"); - } - } - - /** - * Update the partition to which a table belongs - * @param namespace namespace where the TableToPartition resides - * @param tableToPartitionTableName name of the tableToPartition table - * @param table name of the application table that is being added to the system - * @param newPartition partition to which the application table is assigned - * @param lockId if the lock for this key is already hold, this is the id of that lock. - * May be null if lock is not hold for the corresponding key - */ - public static void updateTableToPartition(String namespace, String tableToPartitionTableName, - String table, String newPartition, String lockId) throws MDBCServiceException { - final String primaryKey = getTableToPartitionPrimaryKey(namespace,tableToPartitionTableName,table); - PreparedQueryObject query = new PreparedQueryObject(); - StringBuilder update = new StringBuilder("UPDATE ") - .append(namespace) - .append('.') - .append(tableToPartitionTableName) - .append(" SET previouspartitions = previouspartitions + {") - .append(newPartition) - .append("}, partition = " ) - .append(newPartition) - .append(" WHERE tablename = '") - .append(table) - .append("';"); - query.appendQueryString(update.toString()); - try { - executedLockedPut(namespace,tableToPartitionTableName,table,query,lockId,null); - } catch (MDBCServiceException e) { - logger.error("Initialization error: Failure to update a row in table to partition table "); - throw new MDBCServiceException("Initialization error: Failure to update a row in table to partition table"); - } - } - - - public static String getPartitionInformationPrimaryKey(String namespace, String partitionInformationTable, String partition){ - return namespace+"."+partitionInformationTable+"."+partition; - } - - /** - * Create a new row, when a new partition is initialized - * @param namespace namespace to which the partition info table resides in Cassandra - * @param partitionInfoTableName name of the partition information table - * @param replicationFactor associated replicated factor for the partition (max of all the tables) - * @param tables list of tables that are within this partitoin - * @param lockId if the lock for this key is already hold, this is the id of that lock. 
May be null if lock is not hold for the corresponding key - * @return the partition uuid associated to the new row - */ - public static String createPartitionInfoRow(String namespace, String partitionInfoTableName, - int replicationFactor, List tables, String lockId) throws MDBCServiceException { - String id = generateUniqueKey(); - final String primaryKey = getPartitionInformationPrimaryKey(namespace,partitionInfoTableName,id); - StringBuilder insert = new StringBuilder("INSERT INTO ") - .append(namespace) - .append('.') - .append(partitionInfoTableName) - .append(" (partition,replicationfactor,tables) VALUES ") - .append("(") - .append(id) - .append(",") - .append(replicationFactor) - .append(",{"); - boolean first = true; - for(String table: tables){ - if(!first){ - insert.append(","); - } - first = false; - insert.append("'") - .append(table) - .append("'"); - } - insert.append("});"); - PreparedQueryObject query = new PreparedQueryObject(); - query.appendQueryString(insert.toString()); - try { - executedLockedPut(namespace,partitionInfoTableName,id,query,lockId,null); - } catch (MDBCServiceException e) { - logger.error("Initialization error: Failure to create new row in partition information table "); - throw new MDBCServiceException("Initialization error: Failure to create new row in partition information table"); - } - return id; - } - - /** - * Update the TIT row and table that currently handles the partition - * @param namespace namespace to which the partition info table resides in Cassandra - * @param partitionInfoTableName name of the partition information table - * @param partitionId row identifier for the partition being modiefd - * @param newTitRow new TIT row and table that are handling this partition - * @param owner owner that is handling the new tit row (url to the corresponding etdb nodej - * @param lockId if the lock for this key is already hold, this is the id of that lock. 
May be null if lock is not hold for the corresponding key - */ - public static void updateRedoRow(String namespace, String partitionInfoTableName, String partitionId, - RedoRow newTitRow, String owner, String lockId) throws MDBCServiceException { - final String primaryKey = getTableToPartitionPrimaryKey(namespace,partitionInfoTableName,partitionId); - PreparedQueryObject query = new PreparedQueryObject(); - String newOwner = (owner==null)?"":owner; - StringBuilder update = new StringBuilder("UPDATE ") - .append(namespace) - .append('.') - .append(partitionInfoTableName) - .append(" SET currentowner='") - .append(newOwner) - .append("', latesttitindex=") - .append(newTitRow.getRedoRowIndex()) - .append(", latesttittable='") - .append(newTitRow.getRedoTableName()) - .append("' WHERE partition = ") - .append(partitionId) - .append(";"); - query.appendQueryString(update.toString()); - try { - executedLockedPut(namespace,partitionInfoTableName,partitionId,query,lockId,null); - } catch (MDBCServiceException e) { - logger.error("Initialization error: Failure to add new owner to partition in music table "); - throw new MDBCServiceException("Initialization error:Failure to add new owner to partition in music table "); - } - } - - /** - * Create the first row in the history of the redo history table for a given partition - * @param namespace namespace to which the redo history table resides in Cassandra - * @param redoHistoryTableName name of the table where the row is being created - * @param firstTitRow first tit associated to the partition - * @param partitionId partition for which a history is created - */ - public static void createRedoHistoryBeginRow(String namespace, String redoHistoryTableName, - RedoRow firstTitRow, String partitionId, String lockId) throws MDBCServiceException { - createRedoHistoryRow(namespace,redoHistoryTableName,firstTitRow,partitionId, new ArrayList<>(),lockId); - } - - /** - * Create a new row on the history for a given partition - * @param namespace namespace to which the redo history table resides in Cassandra - * @param redoHistoryTableName name of the table where the row is being created - * @param currentRow new tit row associated to the partition - * @param partitionId partition for which a history is created - * @param parentsRows parent tit rows associated to this partition - */ - public static void createRedoHistoryRow(String namespace, String redoHistoryTableName, - RedoRow currentRow, String partitionId, List parentsRows, String lockId) throws MDBCServiceException { - final String primaryKey = partitionId+"-"+currentRow.getRedoTableName()+"-"+currentRow.getRedoRowIndex(); - StringBuilder insert = new StringBuilder("INSERT INTO ") - .append(namespace) - .append('.') - .append(redoHistoryTableName) - .append(" (partition,redotable,redoindex,previousredo) VALUES ") - .append("(") - .append(partitionId) - .append(",'") - .append(currentRow.getRedoTableName()) - .append("',") - .append(currentRow.getRedoRowIndex()) - .append(",{"); - boolean first = true; - for(RedoRow parent: parentsRows){ - if(!first){ - insert.append(","); - } - else{ - first = false; - } - insert.append("('") - .append(parent.getRedoTableName()) - .append("',") - .append(parent.getRedoRowIndex()) - .append("),"); - } - insert.append("});"); - PreparedQueryObject query = new PreparedQueryObject(); - query.appendQueryString(insert.toString()); - try { - executedLockedPut(namespace,redoHistoryTableName,primaryKey,query,lockId,null); - } catch (MDBCServiceException e) { - 
logger.error("Initialization error: Failure to add new row to redo history"); - throw new MDBCServiceException("Initialization error:Failure to add new row to redo history"); - } - } - - /** - * Creates a new empty tit row - * @param namespace namespace where the tit table is located - * @param titTableName name of the corresponding tit table where the new row is added - * @param partitionId partition to which the redo log is hold - * @return uuid associated to the new row - */ - public static String CreateEmptyTitRow(String namespace, String titTableName, - String partitionId, String lockId) throws MDBCServiceException { - String id = generateUniqueKey(); - StringBuilder insert = new StringBuilder("INSERT INTO ") - .append(namespace) - .append('.') - .append(titTableName) - .append(" (id,applied,latestapplied,partition,redo) VALUES ") - .append("(") - .append(id) - .append(",false,-1,") - .append(partitionId) - .append(",[]);"); - PreparedQueryObject query = new PreparedQueryObject(); - query.appendQueryString(insert.toString()); - try { - executedLockedPut(namespace,titTableName,id,query,lockId,null); - } catch (MDBCServiceException e) { - logger.error("Initialization error: Failure to add new row to transaction information"); - throw new MDBCServiceException("Initialization error:Failure to add new row to transaction information"); - } - return id; - } - - /** - * This function creates the Table To Partition table. It contain information related to - */ - public static void CreateTableToPartitionTable(String musicNamespace, String tableToPartitionTableName) - throws MDBCServiceException { - String tableName = tableToPartitionTableName; - String priKey = "tablename"; - StringBuilder fields = new StringBuilder(); - fields.append("tablename text, "); - fields.append("partition uuid, "); - fields.append("previouspartitions set "); - String cql = String.format("CREATE TABLE IF NOT EXISTS %s.%s (%s, PRIMARY KEY (%s));", - musicNamespace, tableName, fields, priKey); - try { - executeMusicWriteQuery(musicNamespace,tableName,cql); - } catch (MDBCServiceException e) { - logger.error("Initialization error: Failure to create table to partition table"); - throw(e); - } - } - - public static void CreatePartitionInfoTable(String musicNamespace, String partitionInformationTableName) - throws MDBCServiceException { - String tableName = partitionInformationTableName; - String priKey = "partition"; - StringBuilder fields = new StringBuilder(); - fields.append("partition uuid, "); - fields.append("latesttittable text, "); - fields.append("latesttitindex uuid, "); - fields.append("tables set, "); - fields.append("replicationfactor int, "); - fields.append("currentowner text"); - String cql = String.format("CREATE TABLE IF NOT EXISTS %s.%s (%s, PRIMARY KEY (%s));", - musicNamespace, tableName, fields, priKey); - try { - executeMusicWriteQuery(musicNamespace,tableName,cql); - } catch (MDBCServiceException e) { - logger.error("Initialization error: Failure to create partition information table"); - throw(e); - } - } - - public static void CreateRedoHistoryTable(String musicNamespace, String redoHistoryTableName) - throws MDBCServiceException { - String tableName = redoHistoryTableName; - String priKey = "partition,redotable,redoindex"; - StringBuilder fields = new StringBuilder(); - fields.append("partition uuid, "); - fields.append("redotable text, "); - fields.append("redoindex uuid, "); - //TODO: Frozen is only needed for old versions of cassandra, please update correspondingly - fields.append("previousredo 
set>>"); - String cql = String.format("CREATE TABLE IF NOT EXISTS %s.%s (%s, PRIMARY KEY (%s));", - musicNamespace, tableName, fields, priKey); - try { - executeMusicWriteQuery(musicNamespace,tableName,cql); - } catch (MDBCServiceException e) { - logger.error("Initialization error: Failure to create redo history table"); - throw(e); - } - } - - /** - * This method executes a write query in Music - * @param cql the CQL to be sent to Cassandra - */ - protected static void executeMusicWriteQuery(String keyspace, String table, String cql) - throws MDBCServiceException { - PreparedQueryObject pQueryObject = new PreparedQueryObject(); - pQueryObject.appendQueryString(cql); - ResultType rt = null; - try { - rt = MusicCore.createTable(keyspace,table,pQueryObject,"critical"); - } catch (MusicServiceException e) { - e.printStackTrace(); - } - if (rt.getResult().toLowerCase().equals("failure")) { - throw new MDBCServiceException("Music eventual put failed"); - } - } - - protected static void executedLockedPut(String namespace, String tableName, - String primaryKeyWithoutDomain, PreparedQueryObject queryObject, String lockId, - MusicCore.Condition conditionInfo) throws MDBCServiceException { - ReturnType rt ; - if(lockId==null) { - try { - rt = MusicCore.atomicPut(namespace, tableName, primaryKeyWithoutDomain, queryObject, conditionInfo); - } catch (MusicLockingException e) { - logger.error("Music locked put failed"); - throw new MDBCServiceException("Music locked put failed"); - } catch (MusicServiceException e) { - logger.error("Music service fail: Music locked put failed"); - throw new MDBCServiceException("Music service fail: Music locked put failed"); - } catch (MusicQueryException e) { - logger.error("Music query fail: locked put failed"); - throw new MDBCServiceException("Music query fail: Music locked put failed"); - } - } - else { - rt = MusicCore.criticalPut(namespace, tableName, primaryKeyWithoutDomain, queryObject, lockId, conditionInfo); - } - if (rt.getResult().getResult().toLowerCase().equals("failure")) { - throw new MDBCServiceException("Music locked put failed"); - } - } - - public static void createNamespace(String namespace, int replicationFactor) throws MDBCServiceException { - Map replicationInfo = new HashMap(); - replicationInfo.put("'class'", "'SimpleStrategy'"); - replicationInfo.put("'replication_factor'", replicationFactor); - - PreparedQueryObject queryObject = new PreparedQueryObject(); - queryObject.appendQueryString( - "CREATE KEYSPACE " + namespace + " WITH REPLICATION = " + replicationInfo.toString().replaceAll("=", ":")); - - try { - MusicCore.nonKeyRelatedPut(queryObject, "critical"); - } catch (MusicServiceException e) { - if (e.getMessage().equals("Keyspace "+namespace+" already exists")) { - // ignore - } else { - logger.error("Error creating namespace: "+namespace); - throw new MDBCServiceException("Error creating namespace: "+namespace+". Internal error:"+e.getErrorMessage()); - } - } - } - - - /** - * This function creates the MusicTxDigest table. 
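// Illustrative sketch: a minimal, self-contained example of the CQL string that the
// createNamespace(...) helper above assembles. Map.toString() renders the already-quoted
// keys/values as {'class'='SimpleStrategy', 'replication_factor'=3}, and the
// replaceAll("=", ":") call turns that into valid CQL replication options. The keyspace
// name "mdbc_keyspace" and replication factor 3 are illustrative assumptions.
import java.util.HashMap;
import java.util.Map;

public class CreateNamespaceCqlSketch {
    public static void main(String[] args) {
        String namespace = "mdbc_keyspace";   // hypothetical keyspace name
        int replicationFactor = 3;            // hypothetical replication factor
        Map<String, Object> replicationInfo = new HashMap<>();
        replicationInfo.put("'class'", "'SimpleStrategy'");
        replicationInfo.put("'replication_factor'", replicationFactor);
        String cql = "CREATE KEYSPACE " + namespace + " WITH REPLICATION = "
                + replicationInfo.toString().replaceAll("=", ":");
        // Prints (entry order may vary with HashMap):
        // CREATE KEYSPACE mdbc_keyspace WITH REPLICATION = {'class':'SimpleStrategy', 'replication_factor':3}
        System.out.println(cql);
    }
}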
It contain information related to each transaction committed - * * LeaseId: id associated with the lease, text - * * LeaseCounter: transaction number under this lease, bigint \TODO this may need to be a varint later - * * TransactionDigest: text that contains all the changes in the transaction - */ - public static void CreateMusicTxDigest(int musicTxDigestTableNumber, String musicNamespace, String musicTxDigestTableName) throws MDBCServiceException { - String tableName = musicTxDigestTableName; - if(musicTxDigestTableNumber >= 0) { - StringBuilder table = new StringBuilder(); - table.append(tableName); - table.append("-"); - table.append(Integer.toString(musicTxDigestTableNumber)); - tableName=table.toString(); - } - String priKey = "leaseid,leasecounter"; - StringBuilder fields = new StringBuilder(); - fields.append("leaseid text, "); - fields.append("leasecounter varint, "); - fields.append("transactiondigest text ");//notice lack of ',' - String cql = String.format("CREATE TABLE IF NOT EXISTS %s.%s (%s, PRIMARY KEY (%s));", musicNamespace, tableName, fields, priKey); - try { - executeMusicWriteQuery(musicNamespace,tableName,cql); - } catch (MDBCServiceException e) { - logger.error("Initialization error: Failure to create redo records table"); - throw(e); - } - } - - /** - * This function creates the TransactionInformation table. It contain information related - * to the transactions happening in a given partition. - * * The schema of the table is - * * Id, uiid. - * * Partition, uuid id of the partition - * * LatestApplied, int indicates which values from the redologtable wast the last to be applied to the data tables - * * Applied: boolean, indicates if all the values in this redo log table where already applied to data tables - * * Redo: list of uiids associated to the Redo Records Table - * - */ - public static void CreateMusicRangeInformationTable(String musicNamespace, String musicRangeInformationTableName) throws MDBCServiceException { - String tableName = musicRangeInformationTableName; - String priKey = "id"; - StringBuilder fields = new StringBuilder(); - fields.append("id uuid, "); - fields.append("partition uuid, "); - fields.append("latestapplied int, "); - fields.append("applied boolean, "); - //TODO: Frozen is only needed for old versions of cassandra, please update correspondingly - fields.append("redo list>>> "); - String cql = String.format("CREATE TABLE IF NOT EXISTS %s.%s (%s, PRIMARY KEY (%s));", musicNamespace, tableName, fields, priKey); - try { - executeMusicWriteQuery(musicNamespace,tableName,cql); - } catch (MDBCServiceException e) { - logger.error("Initialization error: Failure to create transaction information table"); - throw(e); - } - } - - - -} diff --git a/src/main/java/com/att/research/mdbc/DatabasePartition.java b/src/main/java/com/att/research/mdbc/DatabasePartition.java deleted file mode 100644 index a9b4f3e..0000000 --- a/src/main/java/com/att/research/mdbc/DatabasePartition.java +++ /dev/null @@ -1,189 +0,0 @@ -package com.att.research.mdbc; - -import java.io.BufferedReader; -import java.io.FileNotFoundException; -import java.io.FileReader; -import java.util.HashSet; -import java.util.Set; - -import com.att.research.logging.EELFLoggerDelegate; -import com.google.gson.Gson; -import com.google.gson.GsonBuilder; - -/** - * A database range contain information about what ranges should be hosted in the current MDBC instance - * A database range with an empty map, is supposed to contain all the tables in Music. 
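// Illustrative sketch: prints the CREATE TABLE statement built by the CreateMusicTxDigest(...)
// method above when no numeric table suffix is requested. The namespace "mdbc_keyspace" and
// table name "musictxdigest" are illustrative assumptions; the field list and primary key are
// the ones appended in that method.
public class MusicTxDigestDdlSketch {
    public static void main(String[] args) {
        String musicNamespace = "mdbc_keyspace";
        String tableName = "musictxdigest";
        String priKey = "leaseid,leasecounter";
        StringBuilder fields = new StringBuilder();
        fields.append("leaseid text, ");
        fields.append("leasecounter varint, ");
        fields.append("transactiondigest text ");   // no trailing comma on the last field
        String cql = String.format("CREATE TABLE IF NOT EXISTS %s.%s (%s, PRIMARY KEY (%s));",
                musicNamespace, tableName, fields, priKey);
        // Prints (shown wrapped here):
        //   CREATE TABLE IF NOT EXISTS mdbc_keyspace.musictxdigest
        //     (leaseid text, leasecounter varint, transactiondigest text , PRIMARY KEY (leaseid,leasecounter));
        System.out.println(cql);
    }
}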
- * @author Enrique Saurez - */ -public class DatabasePartition { - private transient static EELFLoggerDelegate logger = EELFLoggerDelegate.getLogger(DatabasePartition.class); - - private String musicRangeInformationTable;//Table that currently contains the REDO log for this partition - private String musicRangeInformationIndex;//Index that can be obtained either from - private String musicTxDigestTable; - private String partitionId; - private String lockId; - protected Set ranges; - - /** - * Each range represents a partition of the database, a database partition is a union of this partitions. - * The only requirement is that the ranges are not overlapping. - */ - - public DatabasePartition() { - ranges = new HashSet<>(); - } - - public DatabasePartition(Set knownRanges, String mriIndex, String mriTable, String partitionId, String lockId, String musicTxDigestTable) { - if(knownRanges != null) { - ranges = knownRanges; - } - else { - ranges = new HashSet<>(); - } - - if(musicTxDigestTable != null) { - this.setMusicTxDigestTable(musicTxDigestTable); - } - else{ - this.setMusicTxDigestTable(""); - } - - if(mriIndex != null) { - this.setMusicRangeInformationIndex(mriIndex); - } - else { - this.setMusicRangeInformationIndex(""); - } - - if(mriTable != null) { - this.setMusicRangeInformationTable(mriTable); - } - else { - this.setMusicRangeInformationTable(""); - } - - if(partitionId != null) { - this.setPartitionId(partitionId); - } - else { - this.setPartitionId(""); - } - - if(lockId != null) { - this.setLockId(lockId); - } - else { - this.setLockId(""); - } - } - - public String getMusicRangeInformationTable() { - return musicRangeInformationTable; - } - - public void setMusicRangeInformationTable(String musicRangeInformationTable) { - this.musicRangeInformationTable = musicRangeInformationTable; - } - - public String getMusicRangeInformationIndex() { - return musicRangeInformationIndex; - } - - public void setMusicRangeInformationIndex(String musicRangeInformationIndex) { - this.musicRangeInformationIndex = musicRangeInformationIndex; - } - - /** - * Add a new range to the ones own by the local MDBC - * @param newRange range that is being added - * @throws IllegalArgumentException - */ - public synchronized void addNewRange(Range newRange) { - //Check overlap - for(Range r : ranges) { - if(r.overlaps(newRange)) { - throw new IllegalArgumentException("Range is already contain by a previous range"); - } - } - if(!ranges.contains(newRange)) { - ranges.add(newRange); - } - } - - /** - * Delete a range that is being modified - * @param rangeToDel limits of the range - */ - public synchronized void deleteRange(Range rangeToDel) { - if(!ranges.contains(rangeToDel)) { - logger.error(EELFLoggerDelegate.errorLogger,"Range doesn't exist"); - throw new IllegalArgumentException("Invalid table"); - } - ranges.remove(rangeToDel); - } - - /** - * Get all the ranges that are currently owned - * @return ranges - */ - public synchronized Range[] getSnapshot() { - return (Range[]) ranges.toArray(); - } - - /** - * Serialize the ranges - * @return serialized ranges - */ - public String toJson() { - GsonBuilder builder = new GsonBuilder(); - builder.setPrettyPrinting().serializeNulls();; - Gson gson = builder.create(); - return gson.toJson(this); - } - - /** - * Function to obtain the configuration - * @param filepath path to the database range - * @return a new object of type DatabaseRange - * @throws FileNotFoundException - */ - - public static DatabasePartition readJsonFromFile( String filepath) throws 
FileNotFoundException { - BufferedReader br; - try { - br = new BufferedReader( - new FileReader(filepath)); - } catch (FileNotFoundException e) { - logger.error(EELFLoggerDelegate.errorLogger,"File was not found when reading json"+e); - throw e; - } - Gson gson = new Gson(); - DatabasePartition range = gson.fromJson(br, DatabasePartition.class); - return range; - } - - public String getPartitionId() { - return partitionId; - } - - public void setPartitionId(String partitionId) { - this.partitionId = partitionId; - } - - public String getLockId() { - return lockId; - } - - public void setLockId(String lockId) { - this.lockId = lockId; - } - - public String getMusicTxDigestTable() { - return musicTxDigestTable; - } - - public void setMusicTxDigestTable(String musicTxDigestTable) { - this.musicTxDigestTable = musicTxDigestTable; - } -} diff --git a/src/main/java/com/att/research/mdbc/LockId.java b/src/main/java/com/att/research/mdbc/LockId.java deleted file mode 100644 index a1de21a..0000000 --- a/src/main/java/com/att/research/mdbc/LockId.java +++ /dev/null @@ -1,46 +0,0 @@ -package com.att.research.mdbc; - -public class LockId { - private String primaryKey; - private String domain; - private String lockReference; - - public LockId(String primaryKey, String domain, String lockReference){ - this.primaryKey = primaryKey; - this.domain = domain; - if(lockReference == null) { - this.lockReference = ""; - } - else{ - this.lockReference = lockReference; - } - } - - public String getFullyQualifiedLockKey(){ - return this.domain+"."+this.primaryKey; - } - - public String getPrimaryKey() { - return primaryKey; - } - - public void setPrimaryKey(String primaryKey) { - this.primaryKey = primaryKey; - } - - public String getDomain() { - return domain; - } - - public void setDomain(String domain) { - this.domain = domain; - } - - public String getLockReference() { - return lockReference; - } - - public void setLockReference(String lockReference) { - this.lockReference = lockReference; - } -} diff --git a/src/main/java/com/att/research/mdbc/MDBCUtils.java b/src/main/java/com/att/research/mdbc/MDBCUtils.java deleted file mode 100644 index 34f4b10..0000000 --- a/src/main/java/com/att/research/mdbc/MDBCUtils.java +++ /dev/null @@ -1,70 +0,0 @@ -package com.att.research.mdbc; - -import java.io.*; -import java.util.Base64; -import java.util.Deque; -import java.util.HashMap; - -import com.att.research.logging.EELFLoggerDelegate; -import com.att.research.logging.format.AppMessages; -import com.att.research.logging.format.ErrorSeverity; -import com.att.research.logging.format.ErrorTypes; -import com.att.research.mdbc.tables.Operation; -import com.att.research.mdbc.tables.StagingTable; - -import javassist.bytecode.Descriptor.Iterator; - -import org.apache.commons.lang3.tuple.Pair; -import org.json.JSONObject; - -public class MDBCUtils { - /** Write the object to a Base64 string. 
*/ - public static String toString( Serializable o ) throws IOException { - //TODO We may want to also compress beside serialize - ByteArrayOutputStream baos = new ByteArrayOutputStream(); - try { - ObjectOutputStream oos = new ObjectOutputStream(baos); - oos.writeObject(o); - oos.close(); - return Base64.getEncoder().encodeToString(baos.toByteArray()); - } - finally{ - baos.close(); - } - } - - public static String toString( JSONObject o) throws IOException { - //TODO We may want to also compress beside serialize - ByteArrayOutputStream baos = new ByteArrayOutputStream(); - ObjectOutputStream oos = new ObjectOutputStream( baos ); - oos.writeObject( o ); - oos.close(); - return Base64.getEncoder().encodeToString(baos.toByteArray()); - } - - /** Read the object from Base64 string. */ - public static Object fromString( String s ) throws IOException , - ClassNotFoundException { - byte [] data = Base64.getDecoder().decode( s ); - ObjectInputStream ois = new ObjectInputStream( - new ByteArrayInputStream( data ) ); - Object o = ois.readObject(); - ois.close(); - return o; - } - - public static void saveToFile(String serializedContent, String filename, EELFLoggerDelegate logger) throws IOException { - try (PrintWriter fout = new PrintWriter(filename)) { - fout.println(serializedContent); - } catch (FileNotFoundException e) { - if(logger!=null){ - logger.error(EELFLoggerDelegate.errorLogger, e.getMessage(), AppMessages.IOERROR, ErrorTypes.UNKNOWN, ErrorSeverity.CRITICAL); - } - else { - e.printStackTrace(); - } - throw e; - } - } - -} diff --git a/src/main/java/com/att/research/mdbc/MdbcCallableStatement.java b/src/main/java/com/att/research/mdbc/MdbcCallableStatement.java deleted file mode 100644 index fefce21..0000000 --- a/src/main/java/com/att/research/mdbc/MdbcCallableStatement.java +++ /dev/null @@ -1,738 +0,0 @@ -package com.att.research.mdbc; - -import java.io.InputStream; -import java.io.Reader; -import java.math.BigDecimal; -import java.net.URL; -import java.sql.Array; -import java.sql.Blob; -import java.sql.CallableStatement; -import java.sql.Clob; -import java.sql.Date; -import java.sql.NClob; -import java.sql.ParameterMetaData; -import java.sql.Ref; -import java.sql.RowId; -import java.sql.SQLException; -import java.sql.SQLXML; -import java.sql.Statement; -import java.sql.Time; -import java.sql.Timestamp; -import java.util.Calendar; -import java.util.Map; - -import com.att.research.logging.EELFLoggerDelegate; - -/** - * ProxyStatement is a proxy Statement that front ends Statements from the underlying JDBC driver. It passes all operations through, - * and invokes the MusicSqlManager when there is the possibility that database tables have been created or dropped. 
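// Illustrative sketch: minimal usage of the MDBCUtils helpers above, round-tripping a
// Serializable object through a Base64 string. The HashMap payload is an illustrative
// assumption; toString(Serializable) and fromString(String) are the methods shown above.
import java.io.IOException;
import java.io.Serializable;
import java.util.HashMap;

import com.att.research.mdbc.MDBCUtils;

public class MdbcUtilsRoundTripSketch {
    public static void main(String[] args) throws IOException, ClassNotFoundException {
        HashMap<String, Integer> payload = new HashMap<>();
        payload.put("rowsChanged", 3);

        // Object -> Base64 text (e.g. suitable for storing in a Cassandra text column)
        String encoded = MDBCUtils.toString((Serializable) payload);

        // Base64 text -> Object (the caller is responsible for the cast)
        @SuppressWarnings("unchecked")
        HashMap<String, Integer> decoded = (HashMap<String, Integer>) MDBCUtils.fromString(encoded);

        System.out.println(decoded.get("rowsChanged"));   // prints 3
    }
}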
- * - * @author Robert Eby - */ -public class MdbcCallableStatement extends MdbcPreparedStatement implements CallableStatement { - private EELFLoggerDelegate logger = EELFLoggerDelegate.getLogger(MdbcCallableStatement.class); - @SuppressWarnings("unused") - private static final String DATASTAX_PREFIX = "com.datastax.driver"; - - public MdbcCallableStatement(Statement stmt, MusicSqlManager m) { - super(stmt, m); - } - - public MdbcCallableStatement(Statement stmt, String sql, MusicSqlManager mgr) { - super(stmt, sql, mgr); - } - - @Override - public T unwrap(Class iface) throws SQLException { - logger.error(EELFLoggerDelegate.errorLogger, "proxystatement unwrap: " + iface.getName()); - return stmt.unwrap(iface); - } - - @Override - public boolean isWrapperFor(Class iface) throws SQLException { - logger.error(EELFLoggerDelegate.errorLogger, "proxystatement isWrapperFor: " + iface.getName()); - return stmt.isWrapperFor(iface); - } - - @Override - public void setTimestamp(int parameterIndex, Timestamp x, Calendar cal) throws SQLException { - ((CallableStatement)stmt).setTimestamp(parameterIndex, x, cal); - } - - @Override - public void setNull(int parameterIndex, int sqlType, String typeName) throws SQLException { - ((CallableStatement)stmt).setNull(parameterIndex, sqlType, typeName); - } - - @Override - public void setURL(int parameterIndex, URL x) throws SQLException { - ((CallableStatement)stmt).setURL(parameterIndex, x); - } - - @Override - public ParameterMetaData getParameterMetaData() throws SQLException { - return ((CallableStatement)stmt).getParameterMetaData(); - } - - @Override - public void setRowId(int parameterIndex, RowId x) throws SQLException { - ((CallableStatement)stmt).setRowId(parameterIndex, x); - } - - @Override - public void setNString(int parameterIndex, String value) throws SQLException { - ((CallableStatement)stmt).setNString(parameterIndex, value); - } - - @Override - public void setNCharacterStream(int parameterIndex, Reader value, long length) throws SQLException { - ((CallableStatement)stmt).setNCharacterStream(parameterIndex, value, length); - } - - @Override - public void setNClob(int parameterIndex, NClob value) throws SQLException { - ((CallableStatement)stmt).setNClob(parameterIndex, value); - } - - @Override - public void setClob(int parameterIndex, Reader reader, long length) throws SQLException { - ((CallableStatement)stmt).setClob(parameterIndex, reader, length); - } - - @Override - public void setBlob(int parameterIndex, InputStream inputStream, long length) throws SQLException { - ((CallableStatement)stmt).setBlob(parameterIndex, inputStream, length); - } - - @Override - public void setNClob(int parameterIndex, Reader reader, long length) throws SQLException { - ((CallableStatement)stmt).setNClob(parameterIndex, reader, length); - } - - @Override - public void setSQLXML(int parameterIndex, SQLXML xmlObject) throws SQLException { - ((CallableStatement)stmt).setSQLXML(parameterIndex, xmlObject); - } - - @Override - public void setObject(int parameterIndex, Object x, int targetSqlType, int scaleOrLength) throws SQLException { - ((CallableStatement)stmt).setObject(parameterIndex, x, targetSqlType, scaleOrLength); - } - - @Override - public void setAsciiStream(int parameterIndex, InputStream x, long length) throws SQLException { - ((CallableStatement)stmt).setAsciiStream(parameterIndex, x, length); - } - - @Override - public void setBinaryStream(int parameterIndex, InputStream x, long length) throws SQLException { - 
((CallableStatement)stmt).setBinaryStream(parameterIndex, x, length); - } - - @Override - public void setCharacterStream(int parameterIndex, Reader reader, long length) throws SQLException { - ((CallableStatement)stmt).setCharacterStream(parameterIndex, reader, length); - } - - @Override - public void setAsciiStream(int parameterIndex, InputStream x) throws SQLException { - ((CallableStatement)stmt).setAsciiStream(parameterIndex, x); - } - - @Override - public void setBinaryStream(int parameterIndex, InputStream x) throws SQLException { - ((CallableStatement)stmt).setBinaryStream(parameterIndex, x); - } - - @Override - public void setCharacterStream(int parameterIndex, Reader reader) throws SQLException { - ((CallableStatement)stmt).setCharacterStream(parameterIndex, reader); - } - - @Override - public void setNCharacterStream(int parameterIndex, Reader value) throws SQLException { - ((CallableStatement)stmt).setNCharacterStream(parameterIndex, value); - } - - @Override - public void setClob(int parameterIndex, Reader reader) throws SQLException { - ((CallableStatement)stmt).setClob(parameterIndex, reader); - } - - @Override - public void setBlob(int parameterIndex, InputStream inputStream) throws SQLException { - ((CallableStatement)stmt).setBlob(parameterIndex, inputStream); - } - - @Override - public void setNClob(int parameterIndex, Reader reader) throws SQLException { - ((CallableStatement)stmt).setNClob(parameterIndex, reader); - } - - @Override - public void registerOutParameter(int parameterIndex, int sqlType) throws SQLException { - ((CallableStatement)stmt).registerOutParameter(parameterIndex, sqlType); - } - - @Override - public void registerOutParameter(int parameterIndex, int sqlType, int scale) throws SQLException { - ((CallableStatement)stmt).registerOutParameter(parameterIndex, sqlType, scale); - } - - @Override - public boolean wasNull() throws SQLException { - return ((CallableStatement)stmt).wasNull(); - } - - @Override - public String getString(int parameterIndex) throws SQLException { - return ((CallableStatement)stmt).getString(parameterIndex); - } - - @Override - public boolean getBoolean(int parameterIndex) throws SQLException { - return ((CallableStatement)stmt).getBoolean(parameterIndex); - } - - @Override - public byte getByte(int parameterIndex) throws SQLException { - return ((CallableStatement)stmt).getByte(parameterIndex); - } - - @Override - public short getShort(int parameterIndex) throws SQLException { - return ((CallableStatement)stmt).getShort(parameterIndex); - } - - @Override - public int getInt(int parameterIndex) throws SQLException { - return ((CallableStatement)stmt).getInt(parameterIndex); - } - - @Override - public long getLong(int parameterIndex) throws SQLException { - return ((CallableStatement)stmt).getLong(parameterIndex); - } - - @Override - public float getFloat(int parameterIndex) throws SQLException { - return ((CallableStatement)stmt).getFloat(parameterIndex); - } - - @Override - public double getDouble(int parameterIndex) throws SQLException { - return ((CallableStatement)stmt).getDouble(parameterIndex); - } - - @SuppressWarnings("deprecation") - @Override - public BigDecimal getBigDecimal(int parameterIndex, int scale) throws SQLException { - return ((CallableStatement)stmt).getBigDecimal(parameterIndex, scale); - } - - @Override - public byte[] getBytes(int parameterIndex) throws SQLException { - return ((CallableStatement)stmt).getBytes(parameterIndex); - } - - @Override - public Date getDate(int parameterIndex) throws SQLException 
{ - return ((CallableStatement)stmt).getDate(parameterIndex); - } - - @Override - public Time getTime(int parameterIndex) throws SQLException { - return ((CallableStatement)stmt).getTime(parameterIndex); - } - - @Override - public Timestamp getTimestamp(int parameterIndex) throws SQLException { - return ((CallableStatement)stmt).getTimestamp(parameterIndex); - } - - @Override - public Object getObject(int parameterIndex) throws SQLException { - return ((CallableStatement)stmt).getObject(parameterIndex); - } - - @Override - public BigDecimal getBigDecimal(int parameterIndex) throws SQLException { - return ((CallableStatement)stmt).getBigDecimal(parameterIndex); - } - - @Override - public Object getObject(int parameterIndex, Map> map) throws SQLException { - return ((CallableStatement)stmt).getObject(parameterIndex, map); - } - - @Override - public Ref getRef(int parameterIndex) throws SQLException { - return ((CallableStatement)stmt).getRef(parameterIndex); - } - - @Override - public Blob getBlob(int parameterIndex) throws SQLException { - return ((CallableStatement)stmt).getBlob(parameterIndex); - } - - @Override - public Clob getClob(int parameterIndex) throws SQLException { - return ((CallableStatement)stmt).getClob(parameterIndex); - } - - @Override - public Array getArray(int parameterIndex) throws SQLException { - return ((CallableStatement)stmt).getArray(parameterIndex); - } - - @Override - public Date getDate(int parameterIndex, Calendar cal) throws SQLException { - return ((CallableStatement)stmt).getDate(parameterIndex, cal); - } - - @Override - public Time getTime(int parameterIndex, Calendar cal) throws SQLException { - return ((CallableStatement)stmt).getTime(parameterIndex, cal); - } - - @Override - public Timestamp getTimestamp(int parameterIndex, Calendar cal) throws SQLException { - return ((CallableStatement)stmt).getTimestamp(parameterIndex, cal); - } - - @Override - public void registerOutParameter(int parameterIndex, int sqlType, String typeName) throws SQLException { - ((CallableStatement)stmt).registerOutParameter(parameterIndex, sqlType, typeName); - } - - @Override - public void registerOutParameter(String parameterName, int sqlType) throws SQLException { - ((CallableStatement)stmt).registerOutParameter(parameterName, sqlType); - } - - @Override - public void registerOutParameter(String parameterName, int sqlType, int scale) throws SQLException { - ((CallableStatement)stmt).registerOutParameter(parameterName, sqlType, scale); - } - - @Override - public void registerOutParameter(String parameterName, int sqlType, String typeName) throws SQLException { - ((CallableStatement)stmt).registerOutParameter(parameterName, sqlType, typeName); - } - - @Override - public URL getURL(int parameterIndex) throws SQLException { - return ((CallableStatement)stmt).getURL(parameterIndex); - } - - @Override - public void setURL(String parameterName, URL val) throws SQLException { - ((CallableStatement)stmt).setURL(parameterName, val); - } - - @Override - public void setNull(String parameterName, int sqlType) throws SQLException { - ((CallableStatement)stmt).setNull(parameterName, sqlType); - } - - @Override - public void setBoolean(String parameterName, boolean x) throws SQLException { - ((CallableStatement)stmt).setBoolean(parameterName, x); - } - - @Override - public void setByte(String parameterName, byte x) throws SQLException { - ((CallableStatement)stmt).setByte(parameterName, x); - } - - @Override - public void setShort(String parameterName, short x) throws SQLException { - 
((CallableStatement)stmt).setShort(parameterName, x); - } - - @Override - public void setInt(String parameterName, int x) throws SQLException { - ((CallableStatement)stmt).setInt(parameterName, x); - } - - @Override - public void setLong(String parameterName, long x) throws SQLException { - ((CallableStatement)stmt).setLong(parameterName, x); - } - - @Override - public void setFloat(String parameterName, float x) throws SQLException { - ((CallableStatement)stmt).setFloat(parameterName, x); - } - - @Override - public void setDouble(String parameterName, double x) throws SQLException { - ((CallableStatement)stmt).setDouble(parameterName, x); - } - - @Override - public void setBigDecimal(String parameterName, BigDecimal x) throws SQLException { - ((CallableStatement)stmt).setBigDecimal(parameterName, x); - } - - @Override - public void setString(String parameterName, String x) throws SQLException { - ((CallableStatement)stmt).setString(parameterName, x); - } - - @Override - public void setBytes(String parameterName, byte[] x) throws SQLException { - ((CallableStatement)stmt).setBytes(parameterName, x); - } - - @Override - public void setDate(String parameterName, Date x) throws SQLException { - ((CallableStatement)stmt).setDate(parameterName, x); - } - - @Override - public void setTime(String parameterName, Time x) throws SQLException { - ((CallableStatement)stmt).setTime(parameterName, x); - } - - @Override - public void setTimestamp(String parameterName, Timestamp x) throws SQLException { - ((CallableStatement)stmt).setTimestamp(parameterName, x); - } - - @Override - public void setAsciiStream(String parameterName, InputStream x, int length) throws SQLException { - ((CallableStatement)stmt).setAsciiStream(parameterName, x, length); - } - - @Override - public void setBinaryStream(String parameterName, InputStream x, int length) throws SQLException { - ((CallableStatement)stmt).setBinaryStream(parameterName, x, length); - } - - @Override - public void setObject(String parameterName, Object x, int targetSqlType, int scale) throws SQLException { - ((CallableStatement)stmt).setObject(parameterName, x, targetSqlType, scale); - } - - @Override - public void setObject(String parameterName, Object x, int targetSqlType) throws SQLException { - ((CallableStatement)stmt).setObject(parameterName, x, targetSqlType); - } - - @Override - public void setObject(String parameterName, Object x) throws SQLException { - ((CallableStatement)stmt).setObject(parameterName, x); - } - - @Override - public void setCharacterStream(String parameterName, Reader reader, int length) throws SQLException { - ((CallableStatement)stmt).setCharacterStream(parameterName, reader, length); - } - - @Override - public void setDate(String parameterName, Date x, Calendar cal) throws SQLException { - ((CallableStatement)stmt).setDate(parameterName, x, cal); - } - - @Override - public void setTime(String parameterName, Time x, Calendar cal) throws SQLException { - ((CallableStatement)stmt).setTime(parameterName, x, cal); - } - - @Override - public void setTimestamp(String parameterName, Timestamp x, Calendar cal) throws SQLException { - ((CallableStatement)stmt).setTimestamp(parameterName, x, cal); - } - - @Override - public void setNull(String parameterName, int sqlType, String typeName) throws SQLException { - ((CallableStatement)stmt).setNull(parameterName, sqlType, typeName); - } - - @Override - public String getString(String parameterName) throws SQLException { - return ((CallableStatement)stmt).getString(parameterName); - } - - 
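// Illustrative sketch: every JDBC call on MdbcCallableStatement simply delegates to the wrapped
// driver statement, so ordinary CallableStatement code works unchanged through the proxy. The
// stored procedure name "get_owner" and the surrounding connection handling are hypothetical.
import java.sql.CallableStatement;
import java.sql.Connection;
import java.sql.SQLException;
import java.sql.Types;

public class CallableProxyUsageSketch {
    static String lookUpOwner(Connection mdbcConnection, int partition) throws SQLException {
        // prepareCall(...) on an MDBC connection hands back an MdbcCallableStatement wrapper
        try (CallableStatement cs = mdbcConnection.prepareCall("{call get_owner(?, ?)}")) {
            cs.setInt(1, partition);                   // forwarded to the underlying driver
            cs.registerOutParameter(2, Types.VARCHAR); // forwarded to the underlying driver
            cs.execute();
            return cs.getString(2);                    // forwarded to the underlying driver
        }
    }
}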
@Override - public boolean getBoolean(String parameterName) throws SQLException { - return ((CallableStatement)stmt).getBoolean(parameterName); - } - - @Override - public byte getByte(String parameterName) throws SQLException { - return ((CallableStatement)stmt).getByte(parameterName); - } - - @Override - public short getShort(String parameterName) throws SQLException { - return ((CallableStatement)stmt).getShort(parameterName); - } - - @Override - public int getInt(String parameterName) throws SQLException { - return ((CallableStatement)stmt).getInt(parameterName); - } - - @Override - public long getLong(String parameterName) throws SQLException { - return ((CallableStatement)stmt).getLong(parameterName); - } - - @Override - public float getFloat(String parameterName) throws SQLException { - return ((CallableStatement)stmt).getFloat(parameterName); - } - - @Override - public double getDouble(String parameterName) throws SQLException { - return ((CallableStatement)stmt).getDouble(parameterName); - } - - @Override - public byte[] getBytes(String parameterName) throws SQLException { - return ((CallableStatement)stmt).getBytes(parameterName); - } - - @Override - public Date getDate(String parameterName) throws SQLException { - return ((CallableStatement)stmt).getDate(parameterName); - } - - @Override - public Time getTime(String parameterName) throws SQLException { - return ((CallableStatement)stmt).getTime(parameterName); - } - - @Override - public Timestamp getTimestamp(String parameterName) throws SQLException { - return ((CallableStatement)stmt).getTimestamp(parameterName); - } - - @Override - public Object getObject(String parameterName) throws SQLException { - return ((CallableStatement)stmt).getObject(parameterName); - } - - @Override - public BigDecimal getBigDecimal(String parameterName) throws SQLException { - return ((CallableStatement)stmt).getBigDecimal(parameterName); - } - - @Override - public Object getObject(String parameterName, Map> map) throws SQLException { - return ((CallableStatement)stmt).getObject(parameterName, map); - } - - @Override - public Ref getRef(String parameterName) throws SQLException { - return ((CallableStatement)stmt).getRef(parameterName); - } - - @Override - public Blob getBlob(String parameterName) throws SQLException { - return ((CallableStatement)stmt).getBlob(parameterName); - } - - @Override - public Clob getClob(String parameterName) throws SQLException { - return ((CallableStatement)stmt).getClob(parameterName); - } - - @Override - public Array getArray(String parameterName) throws SQLException { - return ((CallableStatement)stmt).getArray(parameterName); - } - - @Override - public Date getDate(String parameterName, Calendar cal) throws SQLException { - return ((CallableStatement)stmt).getDate(parameterName, cal); - } - - @Override - public Time getTime(String parameterName, Calendar cal) throws SQLException { - return ((CallableStatement)stmt).getTime(parameterName, cal); - } - - @Override - public Timestamp getTimestamp(String parameterName, Calendar cal) throws SQLException { - return ((CallableStatement)stmt).getTimestamp(parameterName, cal); - } - - @Override - public URL getURL(String parameterName) throws SQLException { - return ((CallableStatement)stmt).getURL(parameterName); - } - - @Override - public RowId getRowId(int parameterIndex) throws SQLException { - return ((CallableStatement)stmt).getRowId(parameterIndex); - } - - @Override - public RowId getRowId(String parameterName) throws SQLException { - return 
((CallableStatement)stmt).getRowId(parameterName); - } - - @Override - public void setRowId(String parameterName, RowId x) throws SQLException { - ((CallableStatement)stmt).setRowId(parameterName, x); - } - - @Override - public void setNString(String parameterName, String value) throws SQLException { - ((CallableStatement)stmt).setNString(parameterName, value); - } - - @Override - public void setNCharacterStream(String parameterName, Reader value, long length) throws SQLException { - ((CallableStatement)stmt).setNCharacterStream(parameterName, value, length); - } - - @Override - public void setNClob(String parameterName, NClob value) throws SQLException { - ((CallableStatement)stmt).setNClob(parameterName, value); - } - - @Override - public void setClob(String parameterName, Reader reader, long length) throws SQLException { - ((CallableStatement)stmt).setClob(parameterName, reader, length); - } - - @Override - public void setBlob(String parameterName, InputStream inputStream, long length) throws SQLException { - ((CallableStatement)stmt).setBlob(parameterName, inputStream, length); - } - - @Override - public void setNClob(String parameterName, Reader reader, long length) throws SQLException { - ((CallableStatement)stmt).setNClob(parameterName, reader, length); - } - - @Override - public NClob getNClob(int parameterIndex) throws SQLException { - return ((CallableStatement)stmt).getNClob(parameterIndex); - } - - @Override - public NClob getNClob(String parameterName) throws SQLException { - return ((CallableStatement)stmt).getNClob(parameterName); - } - - @Override - public void setSQLXML(String parameterName, SQLXML xmlObject) throws SQLException { - ((CallableStatement)stmt).setSQLXML(parameterName, xmlObject); - } - - @Override - public SQLXML getSQLXML(int parameterIndex) throws SQLException { - return ((CallableStatement)stmt).getSQLXML(parameterIndex); - } - - @Override - public SQLXML getSQLXML(String parameterName) throws SQLException { - return ((CallableStatement)stmt).getSQLXML(parameterName); - } - - @Override - public String getNString(int parameterIndex) throws SQLException { - return ((CallableStatement)stmt).getNString(parameterIndex); - } - - @Override - public String getNString(String parameterName) throws SQLException { - return ((CallableStatement)stmt).getNString(parameterName); - } - - @Override - public Reader getNCharacterStream(int parameterIndex) throws SQLException { - return ((CallableStatement)stmt).getNCharacterStream(parameterIndex); - } - - @Override - public Reader getNCharacterStream(String parameterName) throws SQLException { - return ((CallableStatement)stmt).getNCharacterStream(parameterName); - } - - @Override - public Reader getCharacterStream(int parameterIndex) throws SQLException { - return ((CallableStatement)stmt).getCharacterStream(parameterIndex); - } - - @Override - public Reader getCharacterStream(String parameterName) throws SQLException { - return ((CallableStatement)stmt).getCharacterStream(parameterName); - } - - @Override - public void setBlob(String parameterName, Blob x) throws SQLException { - ((CallableStatement)stmt).setBlob(parameterName, x); - } - - @Override - public void setClob(String parameterName, Clob x) throws SQLException { - ((CallableStatement)stmt).setClob(parameterName, x); - } - - @Override - public void setAsciiStream(String parameterName, InputStream x, long length) throws SQLException { - ((CallableStatement)stmt).setAsciiStream(parameterName, x, length); - } - - @Override - public void setBinaryStream(String 
parameterName, InputStream x, long length) throws SQLException { - ((CallableStatement)stmt).setBinaryStream(parameterName, x, length); - } - - @Override - public void setCharacterStream(String parameterName, Reader reader, long length) throws SQLException { - ((CallableStatement)stmt).setCharacterStream(parameterName, reader, length); - } - - @Override - public void setAsciiStream(String parameterName, InputStream x) throws SQLException { - ((CallableStatement)stmt).setAsciiStream(parameterName, x); - } - - @Override - public void setBinaryStream(String parameterName, InputStream x) throws SQLException { - ((CallableStatement)stmt).setBinaryStream(parameterName, x); - } - - @Override - public void setCharacterStream(String parameterName, Reader reader) throws SQLException { - ((CallableStatement)stmt).setCharacterStream(parameterName, reader); - } - - @Override - public void setNCharacterStream(String parameterName, Reader value) throws SQLException { - ((CallableStatement)stmt).setNCharacterStream(parameterName, value); - } - - @Override - public void setClob(String parameterName, Reader reader) throws SQLException { - ((CallableStatement)stmt).setClob(parameterName, reader); - } - - @Override - public void setBlob(String parameterName, InputStream inputStream) throws SQLException { - ((CallableStatement)stmt).setBlob(parameterName, inputStream); - } - - @Override - public void setNClob(String parameterName, Reader reader) throws SQLException { - ((CallableStatement)stmt).setNClob(parameterName, reader); - } - - @Override - public T getObject(int parameterIndex, Class type) throws SQLException { - return ((CallableStatement)stmt).getObject(parameterIndex, type); - } - - @Override - public T getObject(String parameterName, Class type) throws SQLException { - return ((CallableStatement)stmt).getObject(parameterName, type); - } - -} diff --git a/src/main/java/com/att/research/mdbc/MdbcConnection.java b/src/main/java/com/att/research/mdbc/MdbcConnection.java deleted file mode 100644 index 1e845fd..0000000 --- a/src/main/java/com/att/research/mdbc/MdbcConnection.java +++ /dev/null @@ -1,419 +0,0 @@ -package com.att.research.mdbc; - -import java.sql.Array; -import java.sql.Blob; -import java.sql.CallableStatement; -import java.sql.Clob; -import java.sql.Connection; -import java.sql.DatabaseMetaData; -import java.sql.NClob; -import java.sql.PreparedStatement; -import java.sql.SQLClientInfoException; -import java.sql.SQLException; -import java.sql.SQLWarning; -import java.sql.SQLXML; -import java.sql.Savepoint; -import java.sql.Statement; -import java.sql.Struct; -import java.util.Map; -import java.util.Properties; -import java.util.concurrent.Executor; - -import com.att.research.exceptions.MDBCServiceException; -import com.att.research.exceptions.QueryException; -import com.att.research.logging.EELFLoggerDelegate; -import com.att.research.logging.format.AppMessages; -import com.att.research.logging.format.ErrorSeverity; -import com.att.research.logging.format.ErrorTypes; -import com.att.research.mdbc.mixins.MusicInterface; -import com.att.research.mdbc.tables.TxCommitProgress; - - -/** - * ProxyConnection is a proxy to a JDBC driver Connection. It uses the MusicSqlManager to copy - * data to and from Cassandra and the underlying JDBC database as needed. It will notify the underlying - * MusicSqlManager of any calls to commit(), rollback() or setAutoCommit(). - * Otherwise it just forwards all requests to the underlying Connection of the 'real' database. 
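// Illustrative sketch: a condensed view of the commit sequencing that MdbcConnection.commit()
// below performs with TxCommitProgress: record the request, flush the transaction to MUSIC
// first, then commit on the SQL database, and recycle the progress entry once both halves are
// durable. Logging and error handling are omitted; the types and method names are the ones
// used in this file.
import java.sql.Connection;

import com.att.research.mdbc.DatabasePartition;
import com.att.research.mdbc.MusicSqlManager;
import com.att.research.mdbc.tables.TxCommitProgress;

public class CommitFlowSketch {
    static void commitSketch(String txId, MusicSqlManager mgr, Connection conn,
                             TxCommitProgress progressKeeper, DatabasePartition partition)
            throws Exception {
        progressKeeper.commitRequested(txId);            // 1. mark a commit as in flight
        mgr.commit(txId, progressKeeper, partition);     // 2. push staged changes (redo log) to MUSIC
        progressKeeper.setMusicDone(txId);               // 3. MUSIC half of the commit is durable
        conn.commit();                                   // 4. commit on the underlying JDBC database
        progressKeeper.setSQLDone(txId);                 // 5. SQL half is durable
        if (progressKeeper.isComplete(txId)) {
            progressKeeper.reinitializeTxProgress(txId); // 6. reset the per-transaction record
        }
    }
}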
- * - * @author Robert Eby - */ -public class MdbcConnection implements Connection { - private static EELFLoggerDelegate logger = EELFLoggerDelegate.getLogger(MdbcConnection.class); - - private final String id; // This is the transaction id, assigned to this connection. There is no need to change the id, if connection is reused - private final Connection conn; // the JDBC Connection to the actual underlying database - private final MusicSqlManager mgr; // there should be one MusicSqlManager in use per Connection - private final TxCommitProgress progressKeeper; - private final DatabasePartition partition; - - public MdbcConnection(String id, String url, Connection c, Properties info, MusicInterface mi, TxCommitProgress progressKeeper, DatabasePartition partition) throws MDBCServiceException { - this.id = id; - if (c == null) { - throw new MDBCServiceException("Connection is null"); - } - this.conn = c; - try { - this.mgr = new MusicSqlManager(url, c, info, mi); - } catch (MDBCServiceException e) { - logger.error("Failure in creating Music SQL Manager"); - logger.error(EELFLoggerDelegate.errorLogger, e.getMessage(), AppMessages.QUERYERROR, ErrorTypes.QUERYERROR, ErrorSeverity.CRITICAL); - throw e; - } - try { - this.mgr.setAutoCommit(c.getAutoCommit(),null,null,null); - } catch (SQLException e) { - logger.error("Failure in autocommit"); - logger.error(EELFLoggerDelegate.errorLogger, e.getMessage(), AppMessages.QUERYERROR, ErrorTypes.QUERYERROR, ErrorSeverity.CRITICAL); - } - - // Verify the tables in MUSIC match the tables in the database - // and create triggers on any tables that need them - //mgr.synchronizeTableData(); - if ( mgr != null ) try { - mgr.synchronizeTables(); - } catch (QueryException e) { - logger.error("Error syncrhonizing tables"); - logger.error(EELFLoggerDelegate.errorLogger, e.getMessage(), AppMessages.QUERYERROR, ErrorTypes.QUERYERROR, ErrorSeverity.CRITICAL); - } - else { - logger.error(EELFLoggerDelegate.errorLogger, "MusicSqlManager was not correctly created", AppMessages.UNKNOWNERROR, ErrorTypes.UNKNOWN, ErrorSeverity.FATAL); - throw new MDBCServiceException("Music SQL Manager object is null or invalid"); - } - this.progressKeeper = progressKeeper; - this.partition = partition; - logger.debug("Mdbc connection created with id: "+id); - } - - @Override - public T unwrap(Class iface) throws SQLException { - logger.error(EELFLoggerDelegate.errorLogger, "proxyconn unwrap: " + iface.getName()); - return conn.unwrap(iface); - } - - @Override - public boolean isWrapperFor(Class iface) throws SQLException { - logger.error(EELFLoggerDelegate.errorLogger, "proxystatement iswrapperfor: " + iface.getName()); - return conn.isWrapperFor(iface); - } - - @Override - public Statement createStatement() throws SQLException { - return new MdbcCallableStatement(conn.createStatement(), mgr); - } - - @Override - public PreparedStatement prepareStatement(String sql) throws SQLException { - //TODO: grab the sql call from here and all the other preparestatement calls - return new MdbcPreparedStatement(conn.prepareStatement(sql), sql, mgr); - } - - @Override - public CallableStatement prepareCall(String sql) throws SQLException { - return new MdbcCallableStatement(conn.prepareCall(sql), mgr); - } - - @Override - public String nativeSQL(String sql) throws SQLException { - return conn.nativeSQL(sql); - } - - @Override - public void setAutoCommit(boolean autoCommit) throws SQLException { - boolean b = conn.getAutoCommit(); - if (b != autoCommit) { - if(progressKeeper!=null) 
progressKeeper.commitRequested(id); - try { - mgr.setAutoCommit(autoCommit,id,progressKeeper,partition); - if(progressKeeper!=null) - progressKeeper.setMusicDone(id); - } catch (MDBCServiceException e) { - logger.error(EELFLoggerDelegate.errorLogger, "Commit to music failed", AppMessages.UNKNOWNERROR, ErrorTypes.UNKNOWN, ErrorSeverity.FATAL); - throw new SQLException("Failure commiting to MUSIC"); - } - conn.setAutoCommit(autoCommit); - if(progressKeeper!=null) { - progressKeeper.setSQLDone(id); - } - if(progressKeeper!=null&&progressKeeper.isComplete(id)){ - progressKeeper.reinitializeTxProgress(id); - } - } - } - - @Override - public boolean getAutoCommit() throws SQLException { - return conn.getAutoCommit(); - } - - @Override - public void commit() throws SQLException { - if(progressKeeper.isComplete(id)) { - return; - } - if(progressKeeper != null) { - progressKeeper.commitRequested(id); - } - - try { - mgr.commit(id,progressKeeper,partition); - } catch (MDBCServiceException e) { - //If the commit fail, then a new commitId should be used - logger.error(EELFLoggerDelegate.errorLogger, "Commit to music failed", AppMessages.UNKNOWNERROR, ErrorTypes.UNKNOWN, ErrorSeverity.FATAL); - throw new SQLException("Failure commiting to MUSIC"); - } - - if(progressKeeper != null) { - progressKeeper.setMusicDone(id); - } - - conn.commit(); - - if(progressKeeper != null) { - progressKeeper.setSQLDone(id); - } - //MusicMixin.releaseZKLocks(MusicMixin.currentLockMap.get(getConnID())); - if(progressKeeper.isComplete(id)){ - progressKeeper.reinitializeTxProgress(id); - } - } - - @Override - public void rollback() throws SQLException { - mgr.rollback(); - conn.rollback(); - progressKeeper.reinitializeTxProgress(id); - } - - @Override - public void close() throws SQLException { - logger.debug("Closing mdbc connection with id:"+id); - if (mgr != null) { - logger.debug("Closing mdbc manager with id:"+id); - mgr.close(); - } - if (conn != null && !conn.isClosed()) { - logger.debug("Closing jdbc from mdbc with id:"+id); - conn.close(); - logger.debug("Connection was closed for id:" + id); - } - } - - @Override - public boolean isClosed() throws SQLException { - return conn.isClosed(); - } - - @Override - public DatabaseMetaData getMetaData() throws SQLException { - return conn.getMetaData(); - } - - @Override - public void setReadOnly(boolean readOnly) throws SQLException { - conn.setReadOnly(readOnly); - } - - @Override - public boolean isReadOnly() throws SQLException { - return conn.isReadOnly(); - } - - @Override - public void setCatalog(String catalog) throws SQLException { - conn.setCatalog(catalog); - } - - @Override - public String getCatalog() throws SQLException { - return conn.getCatalog(); - } - - @Override - public void setTransactionIsolation(int level) throws SQLException { - conn.setTransactionIsolation(level); - } - - @Override - public int getTransactionIsolation() throws SQLException { - return conn.getTransactionIsolation(); - } - - @Override - public SQLWarning getWarnings() throws SQLException { - return conn.getWarnings(); - } - - @Override - public void clearWarnings() throws SQLException { - conn.clearWarnings(); - } - - @Override - public Statement createStatement(int resultSetType, int resultSetConcurrency) throws SQLException { - return new MdbcCallableStatement(conn.createStatement(resultSetType, resultSetConcurrency), mgr); - } - - @Override - public PreparedStatement prepareStatement(String sql, int resultSetType, int resultSetConcurrency) - throws SQLException { - return new 
MdbcCallableStatement(conn.prepareStatement(sql, resultSetType, resultSetConcurrency), sql, mgr); - } - - @Override - public CallableStatement prepareCall(String sql, int resultSetType, int resultSetConcurrency) throws SQLException { - return new MdbcCallableStatement(conn.prepareCall(sql, resultSetType, resultSetConcurrency), mgr); - } - - @Override - public Map> getTypeMap() throws SQLException { - return conn.getTypeMap(); - } - - @Override - public void setTypeMap(Map> map) throws SQLException { - conn.setTypeMap(map); - } - - @Override - public void setHoldability(int holdability) throws SQLException { - conn.setHoldability(holdability); - } - - @Override - public int getHoldability() throws SQLException { - return conn.getHoldability(); - } - - @Override - public Savepoint setSavepoint() throws SQLException { - return conn.setSavepoint(); - } - - @Override - public Savepoint setSavepoint(String name) throws SQLException { - return conn.setSavepoint(name); - } - - @Override - public void rollback(Savepoint savepoint) throws SQLException { - conn.rollback(savepoint); - } - - @Override - public void releaseSavepoint(Savepoint savepoint) throws SQLException { - conn.releaseSavepoint(savepoint); - } - - @Override - public Statement createStatement(int resultSetType, int resultSetConcurrency, int resultSetHoldability) - throws SQLException { - return new MdbcCallableStatement(conn.createStatement(resultSetType, resultSetConcurrency, resultSetHoldability), mgr); - } - - @Override - public PreparedStatement prepareStatement(String sql, int resultSetType, int resultSetConcurrency, - int resultSetHoldability) throws SQLException { - return new MdbcCallableStatement(conn.prepareStatement(sql, resultSetType, resultSetConcurrency, resultSetHoldability), sql, mgr); - } - - @Override - public CallableStatement prepareCall(String sql, int resultSetType, int resultSetConcurrency, - int resultSetHoldability) throws SQLException { - return new MdbcCallableStatement(conn.prepareCall(sql, resultSetType, resultSetConcurrency, resultSetHoldability), mgr); - } - - @Override - public PreparedStatement prepareStatement(String sql, int autoGeneratedKeys) throws SQLException { - return new MdbcPreparedStatement(conn.prepareStatement(sql, autoGeneratedKeys), sql, mgr); - } - - @Override - public PreparedStatement prepareStatement(String sql, int[] columnIndexes) throws SQLException { - return new MdbcPreparedStatement(conn.prepareStatement(sql, columnIndexes), sql, mgr); - } - - @Override - public PreparedStatement prepareStatement(String sql, String[] columnNames) throws SQLException { - return new MdbcPreparedStatement(conn.prepareStatement(sql, columnNames), sql, mgr); - } - - @Override - public Clob createClob() throws SQLException { - return conn.createClob(); - } - - @Override - public Blob createBlob() throws SQLException { - return conn.createBlob(); - } - - @Override - public NClob createNClob() throws SQLException { - return conn.createNClob(); - } - - @Override - public SQLXML createSQLXML() throws SQLException { - return conn.createSQLXML(); - } - - @Override - public boolean isValid(int timeout) throws SQLException { - return conn.isValid(timeout); - } - - @Override - public void setClientInfo(String name, String value) throws SQLClientInfoException { - conn.setClientInfo(name, value); - } - - @Override - public void setClientInfo(Properties properties) throws SQLClientInfoException { - conn.setClientInfo(properties); - } - - @Override - public String getClientInfo(String name) throws SQLException { 
- return conn.getClientInfo(name); - } - - @Override - public Properties getClientInfo() throws SQLException { - return conn.getClientInfo(); - } - - @Override - public Array createArrayOf(String typeName, Object[] elements) throws SQLException { - return conn.createArrayOf(typeName, elements); - } - - @Override - public Struct createStruct(String typeName, Object[] attributes) throws SQLException { - return conn.createStruct(typeName, attributes); - } - - @Override - public void setSchema(String schema) throws SQLException { - conn.setSchema(schema); - } - - @Override - public String getSchema() throws SQLException { - return conn.getSchema(); - } - - @Override - public void abort(Executor executor) throws SQLException { - conn.abort(executor); - } - - @Override - public void setNetworkTimeout(Executor executor, int milliseconds) throws SQLException { - conn.setNetworkTimeout(executor, milliseconds); - } - - @Override - public int getNetworkTimeout() throws SQLException { - return conn.getNetworkTimeout(); - } -} diff --git a/src/main/java/com/att/research/mdbc/MdbcPreparedStatement.java b/src/main/java/com/att/research/mdbc/MdbcPreparedStatement.java deleted file mode 100644 index d35a20a..0000000 --- a/src/main/java/com/att/research/mdbc/MdbcPreparedStatement.java +++ /dev/null @@ -1,743 +0,0 @@ -package com.att.research.mdbc; - -import java.io.InputStream; -import java.io.Reader; -import java.math.BigDecimal; -import java.net.URL; -import java.sql.Array; -import java.sql.Blob; -import java.sql.CallableStatement; -import java.sql.Clob; -import java.sql.Connection; -import java.sql.Date; -import java.sql.NClob; -import java.sql.ParameterMetaData; -import java.sql.PreparedStatement; -import java.sql.Ref; -import java.sql.ResultSet; -import java.sql.ResultSetMetaData; -import java.sql.RowId; -import java.sql.SQLException; -import java.sql.SQLWarning; -import java.sql.SQLXML; -import java.sql.Statement; -import java.sql.Time; -import java.sql.Timestamp; -import java.util.Calendar; - -import org.apache.commons.lang3.StringUtils; - -import com.att.research.logging.EELFLoggerDelegate; - -/** - * ProxyStatement is a proxy Statement that front ends Statements from the underlying JDBC driver. It passes all operations through, - * and invokes the MusicSqlManager when there is the possibility that database tables have been created or dropped. 
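// Illustrative sketch: a condensed view of the hook pattern every execute* method in
// MdbcPreparedStatement below follows: notify MusicSqlManager before the statement runs,
// execute it on the SQL database, notify MusicSqlManager afterwards so the dirty rows can be
// captured for the redo log, then resynchronize table metadata (omitted here). Exceptions
// whose class names start with "com.datastax.driver" are swallowed; everything else is rethrown.
import java.sql.Statement;

import com.att.research.mdbc.MusicSqlManager;

public class StatementHookSketch {
    private static final String DATASTAX_PREFIX = "com.datastax.driver";

    static int runUpdate(Statement stmt, MusicSqlManager mgr, String sql) throws Exception {
        int rows = 0;
        try {
            mgr.preStatementHook(sql);        // let MUSIC prepare for the statement
            rows = stmt.executeUpdate(sql);   // run against the underlying SQL database
            mgr.postStatementHook(sql);       // capture the changes for the redo log
        } catch (Exception e) {
            if (!e.getClass().getName().startsWith(DATASTAX_PREFIX)) {
                throw e;                      // only Cassandra-driver exceptions are ignored
            }
        }
        return rows;
    }
}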
- * - * @author Robert Eby - */ -public class MdbcPreparedStatement extends MdbcStatement implements PreparedStatement { - private EELFLoggerDelegate logger = EELFLoggerDelegate.getLogger(MdbcPreparedStatement.class); - private static final String DATASTAX_PREFIX = "com.datastax.driver"; - - final String sql; // holds the sql statement if prepared statement - String[] params; // holds the parameters if prepared statement, indexing starts at 1 - - - public MdbcPreparedStatement(Statement stmt, MusicSqlManager m) { - super(stmt, m); - this.sql = null; - } - - public MdbcPreparedStatement(Statement stmt, String sql, MusicSqlManager mgr) { - super(stmt, sql, mgr); - this.sql = sql; - //indexing starts at 1 - params = new String[StringUtils.countMatches(sql, "?")+1]; - } - - @Override - public T unwrap(Class iface) throws SQLException { - return stmt.unwrap(iface); - } - - @Override - public boolean isWrapperFor(Class iface) throws SQLException { - return stmt.isWrapperFor(iface); - } - - @Override - public ResultSet executeQuery(String sql) throws SQLException { - logger.debug(EELFLoggerDelegate.applicationLogger,"executeQuery: "+sql); - ResultSet r = null; - try { - mgr.preStatementHook(sql); - r = stmt.executeQuery(sql); - mgr.postStatementHook(sql); - synchronizeTables(sql); - } catch (Exception e) { - String nm = e.getClass().getName(); - logger.error(EELFLoggerDelegate.errorLogger, "executeQuery: exception "+nm); - if (!nm.startsWith(DATASTAX_PREFIX)) - throw e; - } - return r; - } - - @Override - public int executeUpdate(String sql) throws SQLException { - logger.debug(EELFLoggerDelegate.applicationLogger,"executeUpdate: "+sql); - - int n = 0; - try { - mgr.preStatementHook(sql); - n = stmt.executeUpdate(sql); - mgr.postStatementHook(sql); - synchronizeTables(sql); - } catch (Exception e) { - String nm = e.getClass().getName(); - logger.error(EELFLoggerDelegate.errorLogger, "executeUpdate: exception "+nm+" "+e); - if (!nm.startsWith(DATASTAX_PREFIX)) - throw e; - } - return n; - } - - @Override - public void close() throws SQLException { - logger.debug(EELFLoggerDelegate.applicationLogger,"Statement close: "); - stmt.close(); - } - - @Override - public int getMaxFieldSize() throws SQLException { - logger.debug(EELFLoggerDelegate.applicationLogger,"getMaxFieldSize"); - return stmt.getMaxFieldSize(); - } - - @Override - public void setMaxFieldSize(int max) throws SQLException { - stmt.setMaxFieldSize(max); - } - - @Override - public int getMaxRows() throws SQLException { - return stmt.getMaxRows(); - } - - @Override - public void setMaxRows(int max) throws SQLException { - stmt.setMaxRows(max); - } - - @Override - public void setEscapeProcessing(boolean enable) throws SQLException { - stmt.setEscapeProcessing(enable); - } - - @Override - public int getQueryTimeout() throws SQLException { - return stmt.getQueryTimeout(); - } - - @Override - public void setQueryTimeout(int seconds) throws SQLException { - logger.debug(EELFLoggerDelegate.applicationLogger,"setQueryTimeout seconds "+ seconds); - stmt.setQueryTimeout(seconds); - } - - @Override - public void cancel() throws SQLException { - stmt.cancel(); - } - - @Override - public SQLWarning getWarnings() throws SQLException { - return stmt.getWarnings(); - } - - @Override - public void clearWarnings() throws SQLException { - stmt.clearWarnings(); - } - - @Override - public void setCursorName(String name) throws SQLException { - stmt.setCursorName(name); - } - - @Override - public boolean execute(String sql) throws SQLException { - 
logger.debug(EELFLoggerDelegate.applicationLogger,"execute: "+sql); - boolean b = false; - try { - mgr.preStatementHook(sql); - b = stmt.execute(sql); - mgr.postStatementHook(sql); - synchronizeTables(sql); - } catch (Exception e) { - String nm = e.getClass().getName(); - logger.error(EELFLoggerDelegate.errorLogger, "execute: exception "+nm+" "+e); - // Note: this seems to be the only call Camunda uses, so it is the only one I am fixing for now. - boolean ignore = nm.startsWith(DATASTAX_PREFIX); -// ignore |= (nm.startsWith("org.h2.jdbc.JdbcSQLException") && e.getMessage().contains("already exists")); - if (ignore) { - logger.warn("execute: exception (IGNORED) "+nm); - } else { - logger.error(EELFLoggerDelegate.errorLogger, " Exception "+nm+" "+e); - throw e; - } - } - return b; - } - - @Override - public ResultSet getResultSet() throws SQLException { - return stmt.getResultSet(); - } - - @Override - public int getUpdateCount() throws SQLException { - return stmt.getUpdateCount(); - } - - @Override - public boolean getMoreResults() throws SQLException { - return stmt.getMoreResults(); - } - - @Override - public void setFetchDirection(int direction) throws SQLException { - stmt.setFetchDirection(direction); - } - - @Override - public int getFetchDirection() throws SQLException { - return stmt.getFetchDirection(); - } - - @Override - public void setFetchSize(int rows) throws SQLException { - stmt.setFetchSize(rows); - } - - @Override - public int getFetchSize() throws SQLException { - return stmt.getFetchSize(); - } - - @Override - public int getResultSetConcurrency() throws SQLException { - return stmt.getResultSetConcurrency(); - } - - @Override - public int getResultSetType() throws SQLException { - return stmt.getResultSetType(); - } - - @Override - public void addBatch(String sql) throws SQLException { - stmt.addBatch(sql); - } - - @Override - public void clearBatch() throws SQLException { - stmt.clearBatch(); - } - - @Override - public int[] executeBatch() throws SQLException { - logger.debug(EELFLoggerDelegate.applicationLogger,"executeBatch: "); - int[] n = null; - try { - logger.debug(EELFLoggerDelegate.applicationLogger,"executeBatch() is not supported by MDBC; your results may be incorrect as a result."); - n = stmt.executeBatch(); - synchronizeTables(null); - } catch (Exception e) { - String nm = e.getClass().getName(); - logger.error(EELFLoggerDelegate.errorLogger,"executeBatch: exception "+nm); - if (!nm.startsWith(DATASTAX_PREFIX)) - throw e; - } - return n; - } - - @Override - public Connection getConnection() throws SQLException { - return stmt.getConnection(); - } - - @Override - public boolean getMoreResults(int current) throws SQLException { - return stmt.getMoreResults(current); - } - - @Override - public ResultSet getGeneratedKeys() throws SQLException { - return stmt.getGeneratedKeys(); - } - - @Override - public int executeUpdate(String sql, int autoGeneratedKeys) throws SQLException { - logger.debug(EELFLoggerDelegate.applicationLogger,"executeUpdate: "+sql); - int n = 0; - try { - mgr.preStatementHook(sql); - n = stmt.executeUpdate(sql, autoGeneratedKeys); - mgr.postStatementHook(sql); - synchronizeTables(sql); - } catch (Exception e) { - String nm = e.getClass().getName(); - logger.error(EELFLoggerDelegate.errorLogger,"executeUpdate: exception "+nm); - if (!nm.startsWith(DATASTAX_PREFIX)) - throw e; - } - return n; - } - - @Override - public int executeUpdate(String sql, int[] columnIndexes) throws SQLException { - 
logger.debug(EELFLoggerDelegate.applicationLogger,"executeUpdate: "+sql); - int n = 0; - try { - mgr.preStatementHook(sql); - n = stmt.executeUpdate(sql, columnIndexes); - mgr.postStatementHook(sql); - synchronizeTables(sql); - } catch (Exception e) { - String nm = e.getClass().getName(); - logger.error(EELFLoggerDelegate.errorLogger,"executeUpdate: exception "+nm); - if (!nm.startsWith(DATASTAX_PREFIX)) - throw e; - } - return n; - } - - @Override - public int executeUpdate(String sql, String[] columnNames) throws SQLException { - logger.debug(EELFLoggerDelegate.applicationLogger,"executeUpdate: "+sql); - int n = 0; - try { - mgr.preStatementHook(sql); - n = stmt.executeUpdate(sql, columnNames); - mgr.postStatementHook(sql); - synchronizeTables(sql); - } catch (Exception e) { - String nm = e.getClass().getName(); - logger.error(EELFLoggerDelegate.errorLogger,"executeUpdate: exception "+nm); - if (!nm.startsWith(DATASTAX_PREFIX)) - throw e; - } - return n; - } - - @Override - public boolean execute(String sql, int autoGeneratedKeys) throws SQLException { - logger.debug(EELFLoggerDelegate.applicationLogger,"execute: "+sql); - boolean b = false; - try { - mgr.preStatementHook(sql); - b = stmt.execute(sql, autoGeneratedKeys); - mgr.postStatementHook(sql); - synchronizeTables(sql); - } catch (Exception e) { - String nm = e.getClass().getName(); - logger.error(EELFLoggerDelegate.errorLogger,"execute: exception "+nm); - if (!nm.startsWith(DATASTAX_PREFIX)) - throw e; - } - return b; - } - - @Override - public boolean execute(String sql, int[] columnIndexes) throws SQLException { - logger.debug(EELFLoggerDelegate.applicationLogger,"execute: "+sql); - boolean b = false; - try { - mgr.preStatementHook(sql); - b = stmt.execute(sql, columnIndexes); - mgr.postStatementHook(sql); - synchronizeTables(sql); - } catch (Exception e) { - String nm = e.getClass().getName(); - logger.error(EELFLoggerDelegate.errorLogger,"execute: exception "+nm); - if (!nm.startsWith(DATASTAX_PREFIX)) - throw e; - } - return b; - } - - @Override - public boolean execute(String sql, String[] columnNames) throws SQLException { - logger.debug(EELFLoggerDelegate.applicationLogger,"execute: "+sql); - boolean b = false; - try { - mgr.preStatementHook(sql); - b = stmt.execute(sql, columnNames); - mgr.postStatementHook(sql); - synchronizeTables(sql); - } catch (Exception e) { - String nm = e.getClass().getName(); - logger.error(EELFLoggerDelegate.errorLogger,"execute: exception "+nm); - if (!nm.startsWith(DATASTAX_PREFIX)) - throw e; - } - return b; - } - - @Override - public int getResultSetHoldability() throws SQLException { - return stmt.getResultSetHoldability(); - } - - @Override - public boolean isClosed() throws SQLException { - return stmt.isClosed(); - } - - @Override - public void setPoolable(boolean poolable) throws SQLException { - stmt.setPoolable(poolable); - } - - @Override - public boolean isPoolable() throws SQLException { - return stmt.isPoolable(); - } - - @Override - public void closeOnCompletion() throws SQLException { - stmt.closeOnCompletion(); - } - - @Override - public boolean isCloseOnCompletion() throws SQLException { - return stmt.isCloseOnCompletion(); - } - - @Override - public ResultSet executeQuery() throws SQLException { - logger.debug(EELFLoggerDelegate.applicationLogger,"executeQuery: "+sql); - ResultSet r = null; - try { - mgr.preStatementHook(sql); - r = ((PreparedStatement)stmt).executeQuery();; - mgr.postStatementHook(sql); - synchronizeTables(sql); - } catch (Exception e) { - 
e.printStackTrace(); - String nm = e.getClass().getName(); - logger.error(EELFLoggerDelegate.errorLogger,"executeQuery: exception "+nm); - if (!nm.startsWith(DATASTAX_PREFIX)) - throw e; - } - return r; - } - - @Override - public int executeUpdate() throws SQLException { - logger.debug(EELFLoggerDelegate.applicationLogger,"executeUpdate: "+sql); - int n = 0; - try { - mgr.preStatementHook(sql); - n = ((PreparedStatement)stmt).executeUpdate(); - mgr.postStatementHook(sql); - synchronizeTables(sql); - } catch (Exception e) { - e.printStackTrace(); - String nm = e.getClass().getName(); - logger.error(EELFLoggerDelegate.errorLogger,"executeUpdate: exception "+nm); - if (!nm.startsWith(DATASTAX_PREFIX)) - throw e; - } - return n; - } - - @Override - public void setNull(int parameterIndex, int sqlType) throws SQLException { - ((PreparedStatement)stmt).setNull(parameterIndex, sqlType); - } - - @Override - public void setBoolean(int parameterIndex, boolean x) throws SQLException { - ((PreparedStatement)stmt).setBoolean(parameterIndex, x); - } - - @Override - public void setByte(int parameterIndex, byte x) throws SQLException { - ((PreparedStatement)stmt).setByte(parameterIndex, x); - } - - @Override - public void setShort(int parameterIndex, short x) throws SQLException { - ((PreparedStatement)stmt).setShort(parameterIndex, x); - } - - @Override - public void setInt(int parameterIndex, int x) throws SQLException { - ((PreparedStatement)stmt).setInt(parameterIndex, x); - } - - @Override - public void setLong(int parameterIndex, long x) throws SQLException { - ((PreparedStatement)stmt).setLong(parameterIndex, x); - } - - @Override - public void setFloat(int parameterIndex, float x) throws SQLException { - ((PreparedStatement)stmt).setFloat(parameterIndex, x); - } - - @Override - public void setDouble(int parameterIndex, double x) throws SQLException { - ((PreparedStatement)stmt).setDouble(parameterIndex, x); - } - - @Override - public void setBigDecimal(int parameterIndex, BigDecimal x) throws SQLException { - ((PreparedStatement)stmt).setBigDecimal(parameterIndex, x); - } - - @Override - public void setString(int parameterIndex, String x) throws SQLException { - ((PreparedStatement)stmt).setString(parameterIndex, x); - params[parameterIndex] = x; - } - - @Override - public void setBytes(int parameterIndex, byte[] x) throws SQLException { - ((PreparedStatement)stmt).setBytes(parameterIndex, x); - } - - @Override - public void setDate(int parameterIndex, Date x) throws SQLException { - ((PreparedStatement)stmt).setDate(parameterIndex, x); - } - - @Override - public void setTime(int parameterIndex, Time x) throws SQLException { - ((PreparedStatement)stmt).setTime(parameterIndex, x); - } - - @Override - public void setTimestamp(int parameterIndex, Timestamp x) throws SQLException { - ((PreparedStatement)stmt).setTimestamp(parameterIndex, x); - } - - @Override - public void setAsciiStream(int parameterIndex, InputStream x, int length) throws SQLException { - ((PreparedStatement)stmt).setAsciiStream(parameterIndex, x, length); - } - - @SuppressWarnings("deprecation") - @Override - public void setUnicodeStream(int parameterIndex, InputStream x, int length) throws SQLException { - ((PreparedStatement)stmt).setUnicodeStream(parameterIndex, x, length); - } - - @Override - public void setBinaryStream(int parameterIndex, InputStream x, int length) throws SQLException { - ((PreparedStatement)stmt).setBinaryStream(parameterIndex, x, length); - } - - @Override - public void clearParameters() throws SQLException { 
- ((PreparedStatement)stmt).clearParameters(); - } - - @Override - public void setObject(int parameterIndex, Object x, int targetSqlType) throws SQLException { - ((PreparedStatement)stmt).setObject(parameterIndex, x, targetSqlType); - } - - @Override - public void setObject(int parameterIndex, Object x) throws SQLException { - ((PreparedStatement)stmt).setObject(parameterIndex, x); - } - - @Override - public boolean execute() throws SQLException { - logger.debug(EELFLoggerDelegate.applicationLogger,"execute: "+sql); - boolean b = false; - try { - mgr.preStatementHook(sql); - b = ((PreparedStatement)stmt).execute(); - mgr.postStatementHook(sql); - synchronizeTables(sql); - } catch (Exception e) { - e.printStackTrace(); - String nm = e.getClass().getName(); - // Note: this seems to be the only call Camunda uses, so it is the only one I am fixing for now. - boolean ignore = nm.startsWith(DATASTAX_PREFIX); -// ignore |= (nm.startsWith("org.h2.jdbc.JdbcSQLException") && e.getMessage().contains("already exists")); - if (ignore) { - logger.warn("execute: exception (IGNORED) "+nm); - } else { - logger.error(EELFLoggerDelegate.errorLogger,"execute: exception "+nm); - throw e; - } - } - return b; - } - - @Override - public void addBatch() throws SQLException { - ((PreparedStatement)stmt).addBatch(); - } - - @Override - public void setCharacterStream(int parameterIndex, Reader reader, int length) throws SQLException { - ((PreparedStatement)stmt).setCharacterStream(parameterIndex, reader, length); - } - - @Override - public void setRef(int parameterIndex, Ref x) throws SQLException { - ((PreparedStatement)stmt).setRef(parameterIndex, x); - } - - @Override - public void setBlob(int parameterIndex, Blob x) throws SQLException { - ((PreparedStatement)stmt).setBlob(parameterIndex, x); - } - - @Override - public void setClob(int parameterIndex, Clob x) throws SQLException { - ((PreparedStatement)stmt).setClob(parameterIndex, x); - } - - @Override - public void setArray(int parameterIndex, Array x) throws SQLException { - ((PreparedStatement)stmt).setArray(parameterIndex, x); - } - - @Override - public ResultSetMetaData getMetaData() throws SQLException { - return ((PreparedStatement)stmt).getMetaData(); - } - - @Override - public void setDate(int parameterIndex, Date x, Calendar cal) throws SQLException { - ((PreparedStatement)stmt).setDate(parameterIndex, x, cal); - } - - @Override - public void setTime(int parameterIndex, Time x, Calendar cal) throws SQLException { - ((PreparedStatement)stmt).setTime(parameterIndex, x, cal); - } - - @Override - public void setTimestamp(int parameterIndex, Timestamp x, Calendar cal) throws SQLException { - ((CallableStatement)stmt).setTimestamp(parameterIndex, x, cal); - } - - @Override - public void setNull(int parameterIndex, int sqlType, String typeName) throws SQLException { - ((CallableStatement)stmt).setNull(parameterIndex, sqlType, typeName); - } - - @Override - public void setURL(int parameterIndex, URL x) throws SQLException { - ((CallableStatement)stmt).setURL(parameterIndex, x); - } - - @Override - public ParameterMetaData getParameterMetaData() throws SQLException { - return ((CallableStatement)stmt).getParameterMetaData(); - } - - @Override - public void setRowId(int parameterIndex, RowId x) throws SQLException { - ((CallableStatement)stmt).setRowId(parameterIndex, x); - } - - @Override - public void setNString(int parameterIndex, String value) throws SQLException { - ((CallableStatement)stmt).setNString(parameterIndex, value); - } - - @Override - public void 
setNCharacterStream(int parameterIndex, Reader value, long length) throws SQLException { - ((CallableStatement)stmt).setNCharacterStream(parameterIndex, value, length); - } - - @Override - public void setNClob(int parameterIndex, NClob value) throws SQLException { - ((CallableStatement)stmt).setNClob(parameterIndex, value); - } - - @Override - public void setClob(int parameterIndex, Reader reader, long length) throws SQLException { - ((CallableStatement)stmt).setClob(parameterIndex, reader, length); - } - - @Override - public void setBlob(int parameterIndex, InputStream inputStream, long length) throws SQLException { - ((CallableStatement)stmt).setBlob(parameterIndex, inputStream, length); - } - - @Override - public void setNClob(int parameterIndex, Reader reader, long length) throws SQLException { - ((CallableStatement)stmt).setNClob(parameterIndex, reader, length); - } - - @Override - public void setSQLXML(int parameterIndex, SQLXML xmlObject) throws SQLException { - ((CallableStatement)stmt).setSQLXML(parameterIndex, xmlObject); - } - - @Override - public void setObject(int parameterIndex, Object x, int targetSqlType, int scaleOrLength) throws SQLException { - ((CallableStatement)stmt).setObject(parameterIndex, x, targetSqlType, scaleOrLength); - } - - @Override - public void setAsciiStream(int parameterIndex, InputStream x, long length) throws SQLException { - ((CallableStatement)stmt).setAsciiStream(parameterIndex, x, length); - } - - @Override - public void setBinaryStream(int parameterIndex, InputStream x, long length) throws SQLException { - ((CallableStatement)stmt).setBinaryStream(parameterIndex, x, length); - } - - @Override - public void setCharacterStream(int parameterIndex, Reader reader, long length) throws SQLException { - ((CallableStatement)stmt).setCharacterStream(parameterIndex, reader, length); - } - - @Override - public void setAsciiStream(int parameterIndex, InputStream x) throws SQLException { - ((CallableStatement)stmt).setAsciiStream(parameterIndex, x); - } - - @Override - public void setBinaryStream(int parameterIndex, InputStream x) throws SQLException { - ((CallableStatement)stmt).setBinaryStream(parameterIndex, x); - } - - @Override - public void setCharacterStream(int parameterIndex, Reader reader) throws SQLException { - ((CallableStatement)stmt).setCharacterStream(parameterIndex, reader); - } - - @Override - public void setNCharacterStream(int parameterIndex, Reader value) throws SQLException { - ((CallableStatement)stmt).setNCharacterStream(parameterIndex, value); - } - - @Override - public void setClob(int parameterIndex, Reader reader) throws SQLException { - ((CallableStatement)stmt).setClob(parameterIndex, reader); - } - - @Override - public void setBlob(int parameterIndex, InputStream inputStream) throws SQLException { - ((CallableStatement)stmt).setBlob(parameterIndex, inputStream); - } - - @Override - public void setNClob(int parameterIndex, Reader reader) throws SQLException { - ((CallableStatement)stmt).setNClob(parameterIndex, reader); - } - -} diff --git a/src/main/java/com/att/research/mdbc/MdbcServer.java b/src/main/java/com/att/research/mdbc/MdbcServer.java deleted file mode 100644 index 54accaa..0000000 --- a/src/main/java/com/att/research/mdbc/MdbcServer.java +++ /dev/null @@ -1,162 +0,0 @@ -package com.att.research.mdbc; - -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. 
- * The ASF licenses this file to you under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -import com.att.research.mdbc.configurations.NodeConfiguration; -import org.apache.calcite.avatica.remote.Driver.Serialization; -import org.apache.calcite.avatica.remote.LocalService; -import org.apache.calcite.avatica.server.HttpServer; -import org.apache.calcite.avatica.util.Unsafe; - -import com.att.research.logging.EELFLoggerDelegate; -import com.beust.jcommander.IStringConverter; -import com.beust.jcommander.JCommander; -import com.beust.jcommander.Parameter; - -import java.util.Locale; -import java.util.Properties; - -public class MdbcServer { - public static final EELFLoggerDelegate LOG = EELFLoggerDelegate.getLogger(MdbcStatement.class); - - @Parameter(names = { "-c", "--configuration" }, required = true, - description = "This is the file that contains the ranges that are assigned to this MDBC server") - private String configurationFile; - - @Parameter(names = { "-u", "--url" }, required = true, - description = "JDBC driver url for the server") - private String url; - - @Parameter(names = { "-p", "--port" }, required = true, - description = "Port the server should bind") - private int port; - - @Parameter(names = { "-s", "--user" }, required = true, - description = "Mysql usr") - private String user; - - @Parameter(names = { "-a", "--pass" }, required = true, - description = "Mysql password") - private String password; - - final private Serialization serialization = Serialization.PROTOBUF; - - @Parameter(names = { "-h", "-help", "--help" }, help = true, - description = "Print the help message") - private boolean help = false; - - private NodeConfiguration config; - private HttpServer server; - - public void start() { - if (null != server) { - LOG.error("The server was already started"); - Unsafe.systemExit(ExitCodes.ALREADY_STARTED.ordinal()); - return; - } - - try { - config = NodeConfiguration.readJsonFromFile(configurationFile); - //\TODO Add configuration file with Server Info - Properties connectionProps = new Properties(); - connectionProps.put("user", user); - connectionProps.put("password", password); - MdbcServerLogic meta = new MdbcServerLogic(url,connectionProps,config); - LocalService service = new LocalService(meta); - - // Construct the server - this.server = new HttpServer.Builder<>() - .withHandler(service, serialization) - .withPort(port) - .build(); - - // Then start it - server.start(); - - LOG.info("Started Avatica server on port {} with serialization {}", server.getPort(), - serialization); - } catch (Exception e) { - LOG.error("Failed to start Avatica server", e); - Unsafe.systemExit(ExitCodes.START_FAILED.ordinal()); - } - } - - public void stop() { - if (null != server) { - server.stop(); - server = null; - } - } - - public void join() throws InterruptedException { - server.join(); - } - - public static void main(String[] args) { - final MdbcServer server = new MdbcServer(); - @SuppressWarnings("deprecation") - JCommander jc = new JCommander(server, args); - if (server.help) 
{ - jc.usage(); - Unsafe.systemExit(ExitCodes.USAGE.ordinal()); - return; - } - - server.start(); - - // Try to clean up when the server is stopped. - Runtime.getRuntime().addShutdownHook( - new Thread(new Runnable() { - @Override public void run() { - LOG.info("Stopping server"); - server.stop(); - LOG.info("Server stopped"); - } - })); - - try { - server.join(); - } catch (InterruptedException e) { - // Reset interruption - Thread.currentThread().interrupt(); - // And exit now. - return; - } - } - - /** - * Converter from String to Serialization. Must be public for JCommander. - */ - public static class SerializationConverter implements IStringConverter { - @Override public Serialization convert(String value) { - return Serialization.valueOf(value.toUpperCase(Locale.ROOT)); - } - } - - /** - * Codes for exit conditions - */ - private enum ExitCodes { - NORMAL, - ALREADY_STARTED, // 1 - START_FAILED, // 2 - USAGE; // 3 - } -} - -// End StandaloneServer.java diff --git a/src/main/java/com/att/research/mdbc/MdbcServerLogic.java b/src/main/java/com/att/research/mdbc/MdbcServerLogic.java deleted file mode 100644 index 72cc73c..0000000 --- a/src/main/java/com/att/research/mdbc/MdbcServerLogic.java +++ /dev/null @@ -1,312 +0,0 @@ -package com.att.research.mdbc; - -import java.sql.Connection; -import java.sql.SQLException; -import java.util.List; -import java.util.Map; -import java.util.Properties; -import java.util.concurrent.ConcurrentMap; -import java.util.concurrent.TimeUnit; - -import com.att.research.exceptions.MDBCServiceException; -import com.att.research.mdbc.configurations.NodeConfiguration; -import com.google.common.cache.Cache; -import com.google.common.cache.CacheBuilder; -import com.google.common.cache.RemovalListener; -import com.google.common.cache.RemovalNotification; -import org.apache.calcite.avatica.MissingResultsException; -import org.apache.calcite.avatica.NoSuchStatementException; -import org.apache.calcite.avatica.jdbc.JdbcMeta; -import org.apache.calcite.avatica.remote.TypedValue; - -import com.att.research.logging.EELFLoggerDelegate; -import com.att.research.logging.format.AppMessages; -import com.att.research.logging.format.ErrorSeverity; -import com.att.research.logging.format.ErrorTypes; - -public class MdbcServerLogic extends JdbcMeta{ - - private static EELFLoggerDelegate logger = EELFLoggerDelegate.getLogger(MdbcServerLogic.class); - - StateManager manager; - DatabasePartition ranges; - String name; - String sqlDatabase; - - //TODO: Delete this properties after debugging - private final Properties info; - private final Cache connectionCache; - - public MdbcServerLogic(String Url, Properties info,NodeConfiguration config) throws SQLException, MDBCServiceException { - super(Url,info); - this.ranges = config.partition; - this.name = config.nodeName; - this.sqlDatabase = config.sqlDatabaseName; - this.manager = new StateManager(Url,info,this.ranges,this.sqlDatabase); - this.info = info; - int concurrencyLevel = Integer.parseInt( - info.getProperty(ConnectionCacheSettings.CONCURRENCY_LEVEL.key(), - ConnectionCacheSettings.CONCURRENCY_LEVEL.defaultValue())); - int initialCapacity = Integer.parseInt( - info.getProperty(ConnectionCacheSettings.INITIAL_CAPACITY.key(), - ConnectionCacheSettings.INITIAL_CAPACITY.defaultValue())); - long maxCapacity = Long.parseLong( - info.getProperty(ConnectionCacheSettings.MAX_CAPACITY.key(), - ConnectionCacheSettings.MAX_CAPACITY.defaultValue())); - long connectionExpiryDuration = Long.parseLong( - 
info.getProperty(ConnectionCacheSettings.EXPIRY_DURATION.key(), - ConnectionCacheSettings.EXPIRY_DURATION.defaultValue())); - TimeUnit connectionExpiryUnit = TimeUnit.valueOf( - info.getProperty(ConnectionCacheSettings.EXPIRY_UNIT.key(), - ConnectionCacheSettings.EXPIRY_UNIT.defaultValue())); - this.connectionCache = CacheBuilder.newBuilder() - .concurrencyLevel(concurrencyLevel) - .initialCapacity(initialCapacity) - .maximumSize(maxCapacity) - .expireAfterAccess(connectionExpiryDuration, connectionExpiryUnit) - .removalListener(new ConnectionExpiryHandler()) - .build(); - } - - @Override - protected Connection getConnection(String id) throws SQLException { - if (id == null) { - throw new NullPointerException("Connection id is null"); - } - //\TODO: don't use connectionCache, use this.manager internal state - Connection conn = connectionCache.getIfPresent(id); - if (conn == null) { - this.manager.CloseConnection(id); - logger.error(EELFLoggerDelegate.errorLogger,"Connection not found: invalid id, closed, or expired: " - + id); - throw new RuntimeException(" Connection not found: invalid id, closed, or expired: " + id); - } - return conn; - } - - @Override - public void openConnection(ConnectionHandle ch, Map information) { - Properties fullInfo = new Properties(); - fullInfo.putAll(this.info); - if (information != null) { - fullInfo.putAll(information); - } - - final ConcurrentMap cacheAsMap = this.connectionCache.asMap(); - if (cacheAsMap.containsKey(ch.id)) { - throw new RuntimeException("Connection already exists: " + ch.id); - } - // Avoid global synchronization of connection opening - try { - this.manager.OpenConnection(ch.id, info); - Connection conn = this.manager.GetConnection(ch.id); - if(conn == null) { - logger.error(EELFLoggerDelegate.errorLogger, "Connection created was null"); - throw new RuntimeException("Connection created was null for connection: " + ch.id); - } - Connection loadedConn = cacheAsMap.putIfAbsent(ch.id, conn); - logger.info("connection created with id {}", ch.id); - // Race condition: someone beat us to storing the connection in the cache. 
- if (loadedConn != null) { - //\TODO check if we added an additional race condition for this - this.manager.CloseConnection(ch.id); - conn.close(); - throw new RuntimeException("Connection already exists: " + ch.id); - } - } catch (SQLException e) { - logger.error(EELFLoggerDelegate.errorLogger, e.getMessage(), AppMessages.QUERYERROR, ErrorTypes.QUERYERROR, ErrorSeverity.CRITICAL); - throw new RuntimeException(e); - } - } - - @Override - public void closeConnection(ConnectionHandle ch) { - //\TODO use state connection instead - Connection conn = connectionCache.getIfPresent(ch.id); - if (conn == null) { - logger.debug("client requested close unknown connection {}", ch); - return; - } - logger.trace("closing connection {}", ch); - try { - conn.close(); - } catch (SQLException e) { - logger.error(EELFLoggerDelegate.errorLogger, e.getMessage(), AppMessages.QUERYERROR, ErrorTypes.QUERYERROR, ErrorSeverity.CRITICAL); - throw new RuntimeException(e.getMessage()); - } finally { - connectionCache.invalidate(ch.id); - this.manager.CloseConnection(ch.id); - logger.info("connection closed with id {}", ch.id); - } - } - - @Override - public void commit(ConnectionHandle ch) { - try { - super.commit(ch); - logger.debug("connection commited with id {}", ch.id); - } catch (Exception err ) { - logger.error(EELFLoggerDelegate.errorLogger, err.getMessage(), AppMessages.QUERYERROR, ErrorTypes.QUERYERROR, ErrorSeverity.CRITICAL); - throw(err); - } - } - - //\TODO All the following functions can be deleted - // Added for two reasons: debugging and logging - @Override - public StatementHandle prepare(ConnectionHandle ch, String sql, long maxRowCount) { - StatementHandle h; - try { - h = super.prepare(ch, sql, maxRowCount); - logger.debug("prepared statement {}", h); - } catch (Exception e ) { - logger.error(EELFLoggerDelegate.errorLogger, e.getMessage(), AppMessages.QUERYERROR, ErrorTypes.QUERYERROR, ErrorSeverity.CRITICAL); - throw(e); - } - return h; - } - - @Override - public ExecuteResult prepareAndExecute(StatementHandle h, String sql, long maxRowCount, int maxRowsInFirstFrame, - PrepareCallback callback) throws NoSuchStatementException { - ExecuteResult e; - try { - e = super.prepareAndExecute(h, sql, maxRowCount,maxRowsInFirstFrame,callback); - logger.debug("prepare and execute statement {}", h); - } catch (Exception err ) { - logger.error(EELFLoggerDelegate.errorLogger, err.getMessage(), AppMessages.QUERYERROR, ErrorTypes.QUERYERROR, ErrorSeverity.CRITICAL); - throw(err); - } - return e; - } - - @Override - public ExecuteBatchResult prepareAndExecuteBatch(StatementHandle h, List sqlCommands) - throws NoSuchStatementException { - ExecuteBatchResult e; - try { - e = super.prepareAndExecuteBatch(h, sqlCommands); - logger.debug("prepare and execute batch statement {}", h); - } catch (Exception err ) { - logger.error(EELFLoggerDelegate.errorLogger, err.getMessage(), AppMessages.QUERYERROR, ErrorTypes.QUERYERROR, ErrorSeverity.CRITICAL); - throw(err); - } - return e; - } - - @Override - public ExecuteBatchResult executeBatch(StatementHandle h, List> parameterValues) - throws NoSuchStatementException { - ExecuteBatchResult e; - try { - e = super.executeBatch(h, parameterValues); - logger.debug("execute batch statement {}", h); - } catch (Exception err ) { - logger.error(EELFLoggerDelegate.errorLogger, err.getMessage(), AppMessages.QUERYERROR, ErrorTypes.QUERYERROR, ErrorSeverity.CRITICAL); - throw(err); - } - return e; - } - - @Override - public Frame fetch(StatementHandle h, long offset, int fetchMaxRowCount) 
- throws NoSuchStatementException, MissingResultsException { - Frame f; - try { - f = super.fetch(h, offset, fetchMaxRowCount); - logger.debug("fetch statement {}", h); - } catch (Exception err ) { - logger.error(EELFLoggerDelegate.errorLogger, err.getMessage(), AppMessages.QUERYERROR, ErrorTypes.QUERYERROR, ErrorSeverity.CRITICAL); - throw(err); - } - return f; - } - - @Override - public ExecuteResult execute(StatementHandle h, List parameterValues, long maxRowCount) - throws NoSuchStatementException { - ExecuteResult e; - try { - e = super.execute(h, parameterValues, maxRowCount); - logger.debug("fetch statement {}", h); - } catch (Exception err ) { - logger.error(EELFLoggerDelegate.errorLogger, err.getMessage(), AppMessages.QUERYERROR, ErrorTypes.QUERYERROR, ErrorSeverity.CRITICAL); - throw(err); - } - return e; - } - - @Override - public ExecuteResult execute(StatementHandle h, List parameterValues, int maxRowsInFirstFrame) - throws NoSuchStatementException { - ExecuteResult e; - try { - e = super.execute(h, parameterValues, maxRowsInFirstFrame); - logger.debug("fetch statement {}", h); - } catch (Exception err ) { - logger.error(EELFLoggerDelegate.errorLogger, err.getMessage(), AppMessages.QUERYERROR, ErrorTypes.QUERYERROR, ErrorSeverity.CRITICAL); - throw(err); - } - return e; - } - - @Override - public StatementHandle createStatement(ConnectionHandle ch) { - StatementHandle h; - try { - h = super.createStatement(ch); - logger.debug("create statement {}", h); - } catch (Exception err ) { - logger.error(EELFLoggerDelegate.errorLogger, err.getMessage(), AppMessages.QUERYERROR, ErrorTypes.QUERYERROR, ErrorSeverity.CRITICAL); - throw(err); - } - return h; - } - - @Override - public void closeStatement(StatementHandle h) { - try { - super.closeStatement(h); - logger.debug("statement closed {}", h); - } catch (Exception err ) { - logger.error(EELFLoggerDelegate.errorLogger, err.getMessage(), AppMessages.QUERYERROR, ErrorTypes.QUERYERROR, ErrorSeverity.CRITICAL); - throw(err); - } - } - - - - - - - - @Override - public void rollback(ConnectionHandle ch) { - try { - super.rollback(ch); - logger.debug("connection rollback with id {}", ch.id); - } catch (Exception err ) { - logger.error(EELFLoggerDelegate.errorLogger, err.getMessage(), AppMessages.QUERYERROR, ErrorTypes.QUERYERROR, ErrorSeverity.CRITICAL); - throw(err); - } - } - - private class ConnectionExpiryHandler - implements RemovalListener { - - public void onRemoval(RemovalNotification notification) { - String connectionId = notification.getKey(); - Connection doomed = notification.getValue(); - logger.debug("Expiring connection {} because {}", connectionId, notification.getCause()); - try { - if (doomed != null) { - doomed.close(); - } - } catch (Throwable t) { - logger.warn("Exception thrown while expiring connection {}", connectionId, t); - } - } - } -} - - diff --git a/src/main/java/com/att/research/mdbc/MdbcStatement.java b/src/main/java/com/att/research/mdbc/MdbcStatement.java deleted file mode 100644 index e03fbda..0000000 --- a/src/main/java/com/att/research/mdbc/MdbcStatement.java +++ /dev/null @@ -1,416 +0,0 @@ -package com.att.research.mdbc; - -import java.sql.Connection; -import java.sql.ResultSet; -import java.sql.SQLException; -import java.sql.SQLWarning; -import java.sql.Statement; - -import com.att.research.exceptions.QueryException; -import com.att.research.logging.EELFLoggerDelegate; -import com.att.research.logging.format.AppMessages; -import com.att.research.logging.format.ErrorSeverity; -import 
com.att.research.logging.format.ErrorTypes; - -/** - * ProxyStatement is a proxy Statement that front ends Statements from the underlying JDBC driver. It passes all operations through, - * and invokes the MusicSqlManager when there is the possibility that database tables have been created or dropped. - * - * @author Robert Eby - */ -public class MdbcStatement implements Statement { - private EELFLoggerDelegate logger = EELFLoggerDelegate.getLogger(MdbcStatement.class); - private static final String DATASTAX_PREFIX = "com.datastax.driver"; - - final Statement stmt; // the Statement that we are proxying - final MusicSqlManager mgr; - //\TODO We may need to all pass the connection object to support autocommit - - public MdbcStatement(Statement s, MusicSqlManager m) { - this.stmt = s; - this.mgr = m; - } - - public MdbcStatement(Statement stmt, String sql, MusicSqlManager mgr) { - //\TODO why there is a constructor with a sql parameter in a not PreparedStatement - this.stmt = stmt; - this.mgr = mgr; - } - - @Override - public T unwrap(Class iface) throws SQLException { - logger.error(EELFLoggerDelegate.errorLogger, "proxystatement unwrap: " + iface.getName()); - return stmt.unwrap(iface); - } - - @Override - public boolean isWrapperFor(Class iface) throws SQLException { - logger.error(EELFLoggerDelegate.errorLogger, "proxystatement isWrapperFor: " + iface.getName()); - return stmt.isWrapperFor(iface); - } - - @Override - public ResultSet executeQuery(String sql) throws SQLException { - logger.debug(EELFLoggerDelegate.applicationLogger,"executeQuery: "+sql); - ResultSet r = null; - try { - mgr.preStatementHook(sql); - r = stmt.executeQuery(sql); - mgr.postStatementHook(sql); - synchronizeTables(sql); - } catch (Exception e) { - String nm = e.getClass().getName(); - logger.error(EELFLoggerDelegate.errorLogger, "executeQuery: exception "+nm); - if (!nm.startsWith(DATASTAX_PREFIX)) - throw e; - } - return r; - } - - @Override - public int executeUpdate(String sql) throws SQLException { - logger.debug(EELFLoggerDelegate.applicationLogger,"executeUpdate: "+sql); - - int n = 0; - try { - mgr.preStatementHook(sql); - n = stmt.executeUpdate(sql); - mgr.postStatementHook(sql); - synchronizeTables(sql); - } catch (Exception e) { - String nm = e.getClass().getName(); - logger.error(EELFLoggerDelegate.errorLogger, "executeUpdate: exception "+nm+" "+e); - if (!nm.startsWith(DATASTAX_PREFIX)) - throw e; - } - return n; - } - - @Override - public void close() throws SQLException { - logger.debug(EELFLoggerDelegate.applicationLogger,"Statement close: "); - stmt.close(); - } - - @Override - public int getMaxFieldSize() throws SQLException { - logger.debug(EELFLoggerDelegate.applicationLogger,"getMaxFieldSize"); - return stmt.getMaxFieldSize(); - } - - @Override - public void setMaxFieldSize(int max) throws SQLException { - stmt.setMaxFieldSize(max); - } - - @Override - public int getMaxRows() throws SQLException { - return stmt.getMaxRows(); - } - - @Override - public void setMaxRows(int max) throws SQLException { - stmt.setMaxRows(max); - } - - @Override - public void setEscapeProcessing(boolean enable) throws SQLException { - stmt.setEscapeProcessing(enable); - } - - @Override - public int getQueryTimeout() throws SQLException { - return stmt.getQueryTimeout(); - } - - @Override - public void setQueryTimeout(int seconds) throws SQLException { - //\TODO: we also need to implement a higher level timeout in MDBC - logger.debug(EELFLoggerDelegate.applicationLogger,"setQueryTimeout seconds "+ seconds); - 
stmt.setQueryTimeout(seconds); - } - - @Override - public void cancel() throws SQLException { - stmt.cancel(); - } - - @Override - public SQLWarning getWarnings() throws SQLException { - return stmt.getWarnings(); - } - - @Override - public void clearWarnings() throws SQLException { - stmt.clearWarnings(); - } - - @Override - public void setCursorName(String name) throws SQLException { - stmt.setCursorName(name); - } - - @Override - public boolean execute(String sql) throws SQLException { - logger.debug(EELFLoggerDelegate.applicationLogger,"execute: "+sql); - boolean b = false; - //\TODO Add the result of the postStatementHook to b - try { - mgr.preStatementHook(sql); - b = stmt.execute(sql); - mgr.postStatementHook(sql); - synchronizeTables(sql); - } catch (Exception e) { - String nm = e.getClass().getName(); - logger.error(EELFLoggerDelegate.errorLogger, "execute: exception "+nm+" "+e); - // Note: this seems to be the only call Camunda uses, so it is the only one I am fixing for now. - boolean ignore = nm.startsWith(DATASTAX_PREFIX); -// ignore |= (nm.startsWith("org.h2.jdbc.JdbcSQLException") && e.getMessage().contains("already exists")); - if (ignore) { - logger.warn("execute: exception (IGNORED) "+nm); - } else { - logger.error(EELFLoggerDelegate.errorLogger, " Exception "+nm+" "+e); - throw e; - } - } - return b; - } - - @Override - public ResultSet getResultSet() throws SQLException { - return stmt.getResultSet(); - } - - @Override - public int getUpdateCount() throws SQLException { - return stmt.getUpdateCount(); - } - - @Override - public boolean getMoreResults() throws SQLException { - return stmt.getMoreResults(); - } - - @Override - public void setFetchDirection(int direction) throws SQLException { - stmt.setFetchDirection(direction); - } - - @Override - public int getFetchDirection() throws SQLException { - return stmt.getFetchDirection(); - } - - @Override - public void setFetchSize(int rows) throws SQLException { - stmt.setFetchSize(rows); - } - - @Override - public int getFetchSize() throws SQLException { - return stmt.getFetchSize(); - } - - @Override - public int getResultSetConcurrency() throws SQLException { - return stmt.getResultSetConcurrency(); - } - - @Override - public int getResultSetType() throws SQLException { - return stmt.getResultSetType(); - } - - @Override - public void addBatch(String sql) throws SQLException { - stmt.addBatch(sql); - } - - @Override - public void clearBatch() throws SQLException { - stmt.clearBatch(); - } - - @Override - public int[] executeBatch() throws SQLException { - logger.debug(EELFLoggerDelegate.applicationLogger,"executeBatch: "); - int[] n = null; - try { - logger.debug(EELFLoggerDelegate.applicationLogger,"executeBatch() is not supported by MDBC; your results may be incorrect as a result."); - n = stmt.executeBatch(); - synchronizeTables(null); - } catch (Exception e) { - String nm = e.getClass().getName(); - logger.error(EELFLoggerDelegate.errorLogger,"executeBatch: exception "+nm); - if (!nm.startsWith(DATASTAX_PREFIX)) - throw e; - } - return n; - } - - @Override - public Connection getConnection() throws SQLException { - return stmt.getConnection(); - } - - @Override - public boolean getMoreResults(int current) throws SQLException { - return stmt.getMoreResults(current); - } - - @Override - public ResultSet getGeneratedKeys() throws SQLException { - return stmt.getGeneratedKeys(); - } - - @Override - public int executeUpdate(String sql, int autoGeneratedKeys) throws SQLException { - 
logger.debug(EELFLoggerDelegate.applicationLogger,"executeUpdate: "+sql); - int n = 0; - try { - mgr.preStatementHook(sql); - n = stmt.executeUpdate(sql, autoGeneratedKeys); - mgr.postStatementHook(sql); - synchronizeTables(sql); - } catch (Exception e) { - String nm = e.getClass().getName(); - logger.error(EELFLoggerDelegate.errorLogger,"executeUpdate: exception "+nm); - if (!nm.startsWith(DATASTAX_PREFIX)) - throw e; - } - return n; - } - - @Override - public int executeUpdate(String sql, int[] columnIndexes) throws SQLException { - logger.debug(EELFLoggerDelegate.applicationLogger,"executeUpdate: "+sql); - int n = 0; - try { - mgr.preStatementHook(sql); - n = stmt.executeUpdate(sql, columnIndexes); - mgr.postStatementHook(sql); - synchronizeTables(sql); - } catch (Exception e) { - String nm = e.getClass().getName(); - logger.error(EELFLoggerDelegate.errorLogger,"executeUpdate: exception "+nm); - if (!nm.startsWith(DATASTAX_PREFIX)) - throw e; - } - return n; - } - - @Override - public int executeUpdate(String sql, String[] columnNames) throws SQLException { - logger.debug(EELFLoggerDelegate.applicationLogger,"executeUpdate: "+sql); - int n = 0; - try { - mgr.preStatementHook(sql); - n = stmt.executeUpdate(sql, columnNames); - mgr.postStatementHook(sql); - synchronizeTables(sql); - } catch (Exception e) { - String nm = e.getClass().getName(); - logger.error(EELFLoggerDelegate.errorLogger,"executeUpdate: exception "+nm); - if (!nm.startsWith(DATASTAX_PREFIX)) - throw e; - } - return n; - } - - @Override - public boolean execute(String sql, int autoGeneratedKeys) throws SQLException { - logger.debug(EELFLoggerDelegate.applicationLogger,"execute: "+sql); - boolean b = false; - try { - mgr.preStatementHook(sql); - b = stmt.execute(sql, autoGeneratedKeys); - mgr.postStatementHook(sql); - synchronizeTables(sql); - } catch (Exception e) { - String nm = e.getClass().getName(); - logger.error(EELFLoggerDelegate.errorLogger,"execute: exception "+nm); - if (!nm.startsWith(DATASTAX_PREFIX)) - throw e; - } - return b; - } - - @Override - public boolean execute(String sql, int[] columnIndexes) throws SQLException { - logger.debug(EELFLoggerDelegate.applicationLogger,"execute: "+sql); - boolean b = false; - try { - mgr.preStatementHook(sql); - b = stmt.execute(sql, columnIndexes); - mgr.postStatementHook(sql); - synchronizeTables(sql); - } catch (Exception e) { - String nm = e.getClass().getName(); - logger.error(EELFLoggerDelegate.errorLogger,"execute: exception "+nm); - if (!nm.startsWith(DATASTAX_PREFIX)) - throw e; - } - return b; - } - - @Override - public boolean execute(String sql, String[] columnNames) throws SQLException { - logger.debug(EELFLoggerDelegate.applicationLogger,"execute: "+sql); - //\TODO Idem to the other execute without columnNames - boolean b = false; - try { - mgr.preStatementHook(sql); - b = stmt.execute(sql, columnNames); - mgr.postStatementHook(sql); - synchronizeTables(sql); - } catch (Exception e) { - String nm = e.getClass().getName(); - logger.error(EELFLoggerDelegate.errorLogger,"execute: exception "+nm); - if (!nm.startsWith(DATASTAX_PREFIX)) - throw e; - } - return b; - } - - @Override - public int getResultSetHoldability() throws SQLException { - return stmt.getResultSetHoldability(); - } - - @Override - public boolean isClosed() throws SQLException { - return stmt.isClosed(); - } - - @Override - public void setPoolable(boolean poolable) throws SQLException { - stmt.setPoolable(poolable); - } - - @Override - public boolean isPoolable() throws SQLException { - return 
stmt.isPoolable(); - } - - @Override - public void closeOnCompletion() throws SQLException { - stmt.closeOnCompletion(); - } - - @Override - public boolean isCloseOnCompletion() throws SQLException { - return stmt.isCloseOnCompletion(); - } - - protected void synchronizeTables(String sql) { - if (sql == null || sql.trim().toLowerCase().startsWith("create")) { - if (mgr != null) { - try { - mgr.synchronizeTables(); - } catch (QueryException e) { - logger.error(EELFLoggerDelegate.errorLogger, e.getMessage(),AppMessages.UNKNOWNERROR, ErrorSeverity.CRITICAL, ErrorTypes.QUERYERROR); - } - } - } - } -} diff --git a/src/main/java/com/att/research/mdbc/MusicSqlManager.java b/src/main/java/com/att/research/mdbc/MusicSqlManager.java deleted file mode 100755 index f73bdb6..0000000 --- a/src/main/java/com/att/research/mdbc/MusicSqlManager.java +++ /dev/null @@ -1,308 +0,0 @@ -package com.att.research.mdbc; - -import java.sql.Connection; -import java.util.*; - -import org.json.JSONObject; - -import com.att.research.mdbc.mixins.DBInterface; -import com.att.research.mdbc.mixins.MixinFactory; -import com.att.research.mdbc.mixins.MusicInterface; -import com.att.research.mdbc.mixins.Utils; -import com.att.research.mdbc.tables.StagingTable; -import com.att.research.mdbc.tables.TxCommitProgress; -import com.att.research.exceptions.MDBCServiceException; -import com.att.research.exceptions.QueryException; -import com.att.research.logging.*; -import com.att.research.logging.format.AppMessages; -import com.att.research.logging.format.ErrorSeverity; -import com.att.research.logging.format.ErrorTypes; - -/** -*

-* MUSIC SQL Manager - code that helps take data written to a SQL database and seamlessly integrates it -* with MUSIC that maintains data in a No-SQL data-store -* (Cassandra) and protects access to it with a distributed -* locking service (based on Zookeeper). -*

-*

-* This code will support transactions by taking note of the value of the autoCommit flag, and of calls -* to commit() and rollback(). These calls should be made by the user's JDBC -* client. -*
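The remark above about autoCommit, commit() and rollback() is easiest to read from the application's side. Below is a hedged sketch of that client usage with nothing but standard JDBC; the connection URL is a placeholder assumption, not something defined in this patch, since how the driver is reached depends on whether MDBC is embedded or fronted by the Avatica-based MdbcServer further down.

    import java.sql.Connection;
    import java.sql.DriverManager;
    import java.sql.SQLException;
    import java.sql.Statement;

    public class MdbcClientSketch {
        public static void main(String[] args) throws SQLException {
            String url = "jdbc:mdbc:...";                      // placeholder URL (assumption)
            try (Connection conn = DriverManager.getConnection(url)) {
                conn.setAutoCommit(false);                     // MusicSqlManager notes this flag
                try (Statement s = conn.createStatement()) {
                    s.executeUpdate("UPDATE persons SET name='x' WHERE id=1");
                    conn.commit();                             // delayed updates are copied into MUSIC
                } catch (SQLException e) {
                    conn.rollback();                           // delayed updates are discarded
                    throw e;
                }
            }
        }
    }
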

-* -* @author Bharath Balasubramanian, Robert Eby -*/ -public class MusicSqlManager { - - private static EELFLoggerDelegate logger = EELFLoggerDelegate.getLogger(MusicSqlManager.class); - - private final DBInterface dbi; - private final MusicInterface mi; - private final Set table_set; - private final HashMap transactionDigest; - private boolean autocommit; // a copy of the autocommit flag from the JDBC Connection - - /** - * Build a MusicSqlManager for a DB connection. This construct may only be called by getMusicSqlManager(), - * which will ensure that only one MusicSqlManager is created per URL. - * This is the location where the appropriate mixins to use for the MusicSqlManager should be determined. - * They should be picked based upon the URL and the properties passed to this constructor. - *

- * At the present time, we only support the use of the H2Mixin (for access to a local H2 database), - * with the CassandraMixin (for direct access to a Cassandra noSQL DB as the persistence layer). - *

- * - * @param url the JDBC URL which was used to connection to the database - * @param conn the actual connection to the database - * @param info properties passed from the initial JDBC connect() call - * @throws MDBCServiceException - */ - public MusicSqlManager(String url, Connection conn, Properties info, MusicInterface mi) throws MDBCServiceException { - try { - info.putAll(Utils.getMdbcProperties()); - String mixinDb = info.getProperty(Configuration.KEY_DB_MIXIN_NAME, Configuration.DB_MIXIN_DEFAULT); - this.dbi = MixinFactory.createDBInterface(mixinDb, this, url, conn, info); - this.mi = mi; - this.table_set = Collections.synchronizedSet(new HashSet()); - this.autocommit = true; - this.transactionDigest = new HashMap(); - - }catch(Exception e) { - throw new MDBCServiceException(e.getMessage()); - } - } - - public void setAutoCommit(boolean b,String txId, TxCommitProgress progressKeeper, DatabasePartition partition) throws MDBCServiceException { - if (b != autocommit) { - autocommit = b; - logger.debug(EELFLoggerDelegate.applicationLogger,"autocommit changed to "+b); - if (b) { - // My reading is that turning autoCOmmit ON should automatically commit any outstanding transaction - if(txId == null || txId.isEmpty()) { - logger.error(EELFLoggerDelegate.errorLogger, "Connection ID is null",AppMessages.UNKNOWNERROR, ErrorSeverity.CRITICAL, ErrorTypes.QUERYERROR); - throw new MDBCServiceException("tx id is null"); - } - commit(txId,progressKeeper,partition); - } - } - } - - /** - * Close this MusicSqlManager. - */ - public void close() { - if (dbi != null) { - dbi.close(); - } - } - - /** - * Code to be run within the DB driver before a SQL statement is executed. This is where tables - * can be synchronized before a SELECT, for those databases that do not support SELECT triggers. - * @param sql the SQL statement that is about to be executed - */ - public void preStatementHook(final String sql) { - dbi.preStatementHook(sql); - } - /** - * Code to be run within the DB driver after a SQL statement has been executed. This is where remote - * statement actions can be copied back to Cassandra/MUSIC. - * @param sql the SQL statement that was executed - */ - public void postStatementHook(final String sql) { - dbi.postStatementHook(sql,transactionDigest); - } - /** - * Synchronize the list of tables in SQL with the list in MUSIC. This function should be called when the - * proxy first starts, and whenever there is the possibility that tables were created or dropped. It is synchronized - * in order to prevent multiple threads from running this code in parallel. 
- */ - public synchronized void synchronizeTables() throws QueryException { - Set set1 = dbi.getSQLTableSet(); // set of tables in the database - logger.debug(EELFLoggerDelegate.applicationLogger, "synchronizing tables:" + set1); - for (String tableName : set1) { - // This map will be filled in if this table was previously discovered - if (!table_set.contains(tableName) && !dbi.getReservedTblNames().contains(tableName)) { - logger.info(EELFLoggerDelegate.applicationLogger, "New table discovered: "+tableName); - try { - TableInfo ti = dbi.getTableInfo(tableName); - mi.initializeMusicForTable(ti,tableName); - //\TODO Verify if table info can be modify in the previous step, if not this step can be deleted - ti = dbi.getTableInfo(tableName); - mi.createDirtyRowTable(ti,tableName); - dbi.createSQLTriggers(tableName); - table_set.add(tableName); - synchronizeTableData(tableName); - logger.debug(EELFLoggerDelegate.applicationLogger, "synchronized tables:" + - table_set.size() + "/" + set1.size() + "tables uploaded"); - } catch (Exception e) { - logger.error(EELFLoggerDelegate.errorLogger, e.getMessage(),AppMessages.UNKNOWNERROR, ErrorSeverity.CRITICAL, ErrorTypes.QUERYERROR); - //logger.error(EELFLoggerDelegate.errorLogger, "Exception synchronizeTables: "+e); - throw new QueryException(); - } - } - } - -// Set set2 = getMusicTableSet(music_ns); - // not working - fix later -// for (String tbl : set2) { -// if (!set1.contains(tbl)) { -// logger.debug("Old table dropped: "+tbl); -// dropSQLTriggers(tbl, conn); -// // ZZTODO drop camunda table ? -// } -// } - } - - /** - * On startup, copy dirty data from Cassandra to H2. May not be needed. - * @param tableName - */ - public void synchronizeTableData(String tableName) { - // TODO - copy MUSIC -> H2 - dbi.synchronizeData(tableName); - } - /** - * This method is called whenever there is a SELECT on a local SQL table, and should be called by the underlying databases - * triggering mechanism. It first checks the local dirty bits table to see if there are any keys in Cassandra whose value - * has not yet been sent to SQL. If there are, the appropriate values are copied from Cassandra to the local database. - * Under normal execution, this function behaves as a NOP operation. - * @param tableName This is the table on which the SELECT is being performed - */ - public void readDirtyRowsAndUpdateDb(String tableName) { - mi.readDirtyRowsAndUpdateDb(dbi,tableName); - } - - - - - /** - * This method gets the primary key that the music interfaces uses by default. - * If the front end uses a primary key, this will not match what is used in the MUSIC interface - * @return - */ - public String getMusicDefaultPrimaryKeyName() { - return mi.getMusicDefaultPrimaryKeyName(); - } - - /** - * Asks music interface to provide the function to create a primary key - * e.g. uuid(), 1, "unique_aksd419fjc" - * @return - */ - public String generateUniqueKey() { - // - return mi.generateUniqueKey(); - } - - - /** - * Perform a commit, as requested by the JDBC driver. If any row updates have been delayed, - * they are performed now and copied into MUSIC. 
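As the comment above describes, MusicSqlManager delays row updates and copies them into MUSIC only when the client commits; rollback simply discards them. A simplified, self-contained illustration of that delayed-write idea follows. The names stage() and RedoLog are illustrative assumptions; the real class buffers StagingTable entries in its transactionDigest and hands them to MusicInterface.commitLog().

    import java.util.ArrayList;
    import java.util.HashMap;
    import java.util.List;
    import java.util.Map;

    // Buffer per-transaction changes locally; flush to a shared log on commit, drop on rollback.
    class TransactionDigestSketch {
        private final Map<String, List<String>> digest = new HashMap<>(); // table -> staged row changes

        // Role of postStatementHook in the real code: record each statement's effect.
        void stage(String table, String rowChange) {
            digest.computeIfAbsent(table, t -> new ArrayList<>()).add(rowChange);
        }

        // JDBC commit(): push everything buffered so far to the shared redo log.
        void commit(RedoLog log, String txId) {
            log.append(txId, digest);
            digest.clear();
        }

        // JDBC rollback(): the buffered changes are simply discarded.
        void rollback() {
            digest.clear();
        }

        // Stand-in for the MUSIC-backed redo log (assumption, not part of this patch).
        interface RedoLog {
            void append(String txId, Map<String, List<String>> changes);
        }
    }
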
- * @throws MDBCServiceException - */ - public synchronized void commit(String txId, TxCommitProgress progressKeeper, DatabasePartition partition) throws MDBCServiceException { - logger.debug(EELFLoggerDelegate.applicationLogger, " commit "); - // transaction was committed -- add all the updates into the REDO-Log in MUSIC - try { - mi.commitLog(dbi, partition, transactionDigest, txId, progressKeeper); - }catch(MDBCServiceException e) { - logger.error(EELFLoggerDelegate.errorLogger, e.getMessage(), AppMessages.QUERYERROR, ErrorTypes.QUERYERROR, ErrorSeverity.CRITICAL); - throw e; - } - } - - /** - * Perform a rollback, as requested by the JDBC driver. If any row updates have been delayed, - * they are discarded. - */ - public synchronized void rollback() { - // transaction was rolled back - discard the updates - logger.debug(EELFLoggerDelegate.applicationLogger, "Rollback");; - transactionDigest.clear(); - } - - /** - * Get all - * @param table - * @param dbRow - * @return - */ - public String getMusicKeyFromRowWithoutPrimaryIndexes(String table, JSONObject dbRow) { - TableInfo ti = dbi.getTableInfo(table); - return mi.getMusicKeyFromRowWithoutPrimaryIndexes(ti,table, dbRow); - } - - public String getMusicKeyFromRow(String table, JSONObject dbRow) { - TableInfo ti = dbi.getTableInfo(table); - return mi.getMusicKeyFromRow(ti,table, dbRow); - } - - /** - * Returns all keys that matches the current sql statement, and not in already updated keys. - * - * @param sql the query that we are getting keys for - * @deprecated - */ - public ArrayList getMusicKeys(String sql) { - ArrayList musicKeys = new ArrayList(); - //\TODO See if this is required - /* - try { - net.sf.jsqlparser.statement.Statement stmt = CCJSqlParserUtil.parse(sql); - if (stmt instanceof Insert) { - Insert s = (Insert) stmt; - String tbl = s.getTable().getName(); - musicKeys.add(generatePrimaryKey()); - } else { - String tbl; - String where = ""; - if (stmt instanceof Update){ - Update u = (Update) stmt; - tbl = u.getTables().get(0).getName(); - where = u.getWhere().toString(); - } else if (stmt instanceof Delete) { - Delete d = (Delete) stmt; - tbl = d.getTable().getName(); - if (d.getWhere()!=null) { - where = d.getWhere().toString(); - } - } else { - System.err.println("Not recognized sql type"); - tbl = ""; - } - String dbiSelect = "SELECT * FROM " + tbl; - if (!where.equals("")) { - dbiSelect += "WHERE" + where; - } - ResultSet rs = dbi.executeSQLRead(dbiSelect); - musicKeys.addAll(getMusicKeysWhere(tbl, Utils.parseResults(dbi.getTableInfo(tbl), rs))); - rs.getStatement().close(); - } - } catch (JSQLParserException | SQLException e) { - - e.printStackTrace(); - } - System.err.print("MusicKeys:"); - for(String musicKey:musicKeys) { - System.out.print(musicKey + ","); - } - */ - return musicKeys; - } - - public void own(List ranges) { - throw new java.lang.UnsupportedOperationException("function not implemented yet"); - } - - public void appendRange(String rangeId, List ranges) { - throw new java.lang.UnsupportedOperationException("function not implemented yet"); - } - - public void relinquish(String ownerId, String rangeId) { - throw new java.lang.UnsupportedOperationException("function not implemented yet"); - } - - -} diff --git a/src/main/java/com/att/research/mdbc/ProxyStatement.java b/src/main/java/com/att/research/mdbc/ProxyStatement.java deleted file mode 100755 index 0b5edd8..0000000 --- a/src/main/java/com/att/research/mdbc/ProxyStatement.java +++ /dev/null @@ -1,1262 +0,0 @@ -package com.att.research.mdbc; - -import 
java.io.InputStream; -import java.io.Reader; -import java.math.BigDecimal; -import java.net.URL; -import java.sql.Array; -import java.sql.Blob; -import java.sql.CallableStatement; -import java.sql.Clob; -import java.sql.Connection; -import java.sql.Date; -import java.sql.NClob; -import java.sql.ParameterMetaData; -import java.sql.PreparedStatement; -import java.sql.Ref; -import java.sql.ResultSet; -import java.sql.ResultSetMetaData; -import java.sql.RowId; -import java.sql.SQLException; -import java.sql.SQLWarning; -import java.sql.SQLXML; -import java.sql.Statement; -import java.sql.Time; -import java.sql.Timestamp; -import java.util.Calendar; -import java.util.Map; - -import org.apache.log4j.Logger; - -import com.att.research.exceptions.QueryException; - -/** - * ProxyStatement is a proxy Statement that front ends Statements from the underlying JDBC driver. It passes all operations through, - * and invokes the MusicSqlManager when there is the possibility that database tables have been created or dropped. - * - * @author Robert Eby - */ -public class ProxyStatement implements CallableStatement { - private static final Logger logger = Logger.getLogger(ProxyStatement.class); - private static final String DATASTAX_PREFIX = "com.datastax.driver"; - - private final Statement stmt; // the Statement that we are proxying - private final MusicSqlManager mgr; - - public ProxyStatement(Statement s, MusicSqlManager m) { - this.stmt = s; - this.mgr = m; - } - - @Override - public T unwrap(Class iface) throws SQLException { - return stmt.unwrap(iface); - } - - @Override - public boolean isWrapperFor(Class iface) throws SQLException { - return stmt.isWrapperFor(iface); - } - - @Override - public ResultSet executeQuery(String sql) throws SQLException { - logger.debug("executeQuery: "+sql); - ResultSet r = null; - try { - mgr.preStatementHook(sql); - r = stmt.executeQuery(sql); - mgr.postStatementHook(sql); - synchronizeTables(sql); - } catch (Exception e) { - String nm = e.getClass().getName(); - logger.warn("executeQuery: exception "+nm); - if (!nm.startsWith(DATASTAX_PREFIX)) - throw e; - } - return r; - } - - @Override - public int executeUpdate(String sql) throws SQLException { - logger.debug("executeUpdate: "+sql); - int n = 0; - try { - mgr.preStatementHook(sql); - n = stmt.executeUpdate(sql); - mgr.postStatementHook(sql); - synchronizeTables(sql); - } catch (Exception e) { - String nm = e.getClass().getName(); - logger.warn("executeUpdate: exception "+nm); - if (!nm.startsWith(DATASTAX_PREFIX)) - throw e; - } - return n; - } - - @Override - public void close() throws SQLException { - stmt.close(); - } - - @Override - public int getMaxFieldSize() throws SQLException { - return stmt.getMaxFieldSize(); - } - - @Override - public void setMaxFieldSize(int max) throws SQLException { - stmt.setMaxFieldSize(max); - } - - @Override - public int getMaxRows() throws SQLException { - return stmt.getMaxRows(); - } - - @Override - public void setMaxRows(int max) throws SQLException { - stmt.setMaxRows(max); - } - - @Override - public void setEscapeProcessing(boolean enable) throws SQLException { - stmt.setEscapeProcessing(enable); - } - - @Override - public int getQueryTimeout() throws SQLException { - return stmt.getQueryTimeout(); - } - - @Override - public void setQueryTimeout(int seconds) throws SQLException { - stmt.setQueryTimeout(seconds); - } - - @Override - public void cancel() throws SQLException { - stmt.cancel(); - } - - @Override - public SQLWarning getWarnings() throws SQLException { - return 
stmt.getWarnings(); - } - - @Override - public void clearWarnings() throws SQLException { - stmt.clearWarnings(); - } - - @Override - public void setCursorName(String name) throws SQLException { - stmt.setCursorName(name); - } - - @Override - public boolean execute(String sql) throws SQLException { - logger.debug("execute: "+sql); - boolean b = false; - try { - mgr.preStatementHook(sql); - b = stmt.execute(sql); - mgr.postStatementHook(sql); - synchronizeTables(sql); - } catch (Exception e) { - String nm = e.getClass().getName(); - // Note: this seems to be the only call Camunda uses, so it is the only one I am fixing for now. - boolean ignore = nm.startsWith(DATASTAX_PREFIX); -// ignore |= (nm.startsWith("org.h2.jdbc.JdbcSQLException") && e.getMessage().contains("already exists")); - if (ignore) { - logger.warn("execute: exception (IGNORED) "+nm); - } else { - logger.warn("execute: exception "+nm); - throw e; - } - } - return b; - } - - @Override - public ResultSet getResultSet() throws SQLException { - return stmt.getResultSet(); - } - - @Override - public int getUpdateCount() throws SQLException { - return stmt.getUpdateCount(); - } - - @Override - public boolean getMoreResults() throws SQLException { - return stmt.getMoreResults(); - } - - @Override - public void setFetchDirection(int direction) throws SQLException { - stmt.setFetchDirection(direction); - } - - @Override - public int getFetchDirection() throws SQLException { - return stmt.getFetchDirection(); - } - - @Override - public void setFetchSize(int rows) throws SQLException { - stmt.setFetchSize(rows); - } - - @Override - public int getFetchSize() throws SQLException { - return stmt.getFetchSize(); - } - - @Override - public int getResultSetConcurrency() throws SQLException { - return stmt.getResultSetConcurrency(); - } - - @Override - public int getResultSetType() throws SQLException { - return stmt.getResultSetType(); - } - - @Override - public void addBatch(String sql) throws SQLException { - stmt.addBatch(sql); - } - - @Override - public void clearBatch() throws SQLException { - stmt.clearBatch(); - } - - @Override - public int[] executeBatch() throws SQLException { - logger.debug("executeBatch"); - int[] n = null; - try { - logger.warn("executeBatch() is not supported by MDBC; your results may be incorrect as a result."); - n = stmt.executeBatch(); - synchronizeTables(null); - } catch (Exception e) { - String nm = e.getClass().getName(); - logger.warn("executeBatch: exception "+nm); - if (!nm.startsWith(DATASTAX_PREFIX)) - throw e; - } - return n; - } - - @Override - public Connection getConnection() throws SQLException { - return stmt.getConnection(); - } - - @Override - public boolean getMoreResults(int current) throws SQLException { - return stmt.getMoreResults(current); - } - - @Override - public ResultSet getGeneratedKeys() throws SQLException { - return stmt.getGeneratedKeys(); - } - - @Override - public int executeUpdate(String sql, int autoGeneratedKeys) throws SQLException { - logger.debug("executeUpdate: "+sql); - int n = 0; - try { - mgr.preStatementHook(sql); - n = stmt.executeUpdate(sql, autoGeneratedKeys); - mgr.postStatementHook(sql); - synchronizeTables(sql); - } catch (Exception e) { - String nm = e.getClass().getName(); - logger.warn("executeUpdate: exception "+nm); - if (!nm.startsWith(DATASTAX_PREFIX)) - throw e; - } - return n; - } - - @Override - public int executeUpdate(String sql, int[] columnIndexes) throws SQLException { - logger.debug("executeUpdate: "+sql); - int n = 0; - try { - 
mgr.preStatementHook(sql); - n = stmt.executeUpdate(sql, columnIndexes); - mgr.postStatementHook(sql); - synchronizeTables(sql); - } catch (Exception e) { - String nm = e.getClass().getName(); - logger.warn("executeUpdate: exception "+nm); - if (!nm.startsWith(DATASTAX_PREFIX)) - throw e; - } - return n; - } - - @Override - public int executeUpdate(String sql, String[] columnNames) throws SQLException { - logger.debug("executeUpdate: "+sql); - int n = 0; - try { - mgr.preStatementHook(sql); - n = stmt.executeUpdate(sql, columnNames); - mgr.postStatementHook(sql); - synchronizeTables(sql); - } catch (Exception e) { - String nm = e.getClass().getName(); - logger.warn("executeUpdate: exception "+nm); - if (!nm.startsWith(DATASTAX_PREFIX)) - throw e; - } - return n; - } - - @Override - public boolean execute(String sql, int autoGeneratedKeys) throws SQLException { - logger.debug("execute: "+sql); - boolean b = false; - try { - mgr.preStatementHook(sql); - b = stmt.execute(sql, autoGeneratedKeys); - mgr.postStatementHook(sql); - synchronizeTables(sql); - } catch (Exception e) { - String nm = e.getClass().getName(); - logger.warn("execute: exception "+nm); - if (!nm.startsWith(DATASTAX_PREFIX)) - throw e; - } - return b; - } - - @Override - public boolean execute(String sql, int[] columnIndexes) throws SQLException { - logger.debug("execute: "+sql); - boolean b = false; - try { - mgr.preStatementHook(sql); - b = stmt.execute(sql, columnIndexes); - mgr.postStatementHook(sql); - synchronizeTables(sql); - } catch (Exception e) { - String nm = e.getClass().getName(); - logger.warn("execute: exception "+nm); - if (!nm.startsWith(DATASTAX_PREFIX)) - throw e; - } - return b; - } - - @Override - public boolean execute(String sql, String[] columnNames) throws SQLException { - logger.debug("execute: "+sql); - boolean b = false; - try { - mgr.preStatementHook(sql); - b = stmt.execute(sql, columnNames); - mgr.postStatementHook(sql); - synchronizeTables(sql); - } catch (Exception e) { - String nm = e.getClass().getName(); - logger.warn("execute: exception "+nm); - if (!nm.startsWith(DATASTAX_PREFIX)) - throw e; - } - return b; - } - - @Override - public int getResultSetHoldability() throws SQLException { - return stmt.getResultSetHoldability(); - } - - @Override - public boolean isClosed() throws SQLException { - return stmt.isClosed(); - } - - @Override - public void setPoolable(boolean poolable) throws SQLException { - stmt.setPoolable(poolable); - } - - @Override - public boolean isPoolable() throws SQLException { - return stmt.isPoolable(); - } - - @Override - public void closeOnCompletion() throws SQLException { - stmt.closeOnCompletion(); - } - - @Override - public boolean isCloseOnCompletion() throws SQLException { - return stmt.isCloseOnCompletion(); - } - - @Override - public ResultSet executeQuery() throws SQLException { - logger.debug("executeQuery"); - return ((PreparedStatement)stmt).executeQuery(); - } - - @Override - public int executeUpdate() throws SQLException { - logger.debug("executeUpdate"); - return ((PreparedStatement)stmt).executeUpdate(); - } - - @Override - public void setNull(int parameterIndex, int sqlType) throws SQLException { - ((PreparedStatement)stmt).setNull(parameterIndex, sqlType); - } - - @Override - public void setBoolean(int parameterIndex, boolean x) throws SQLException { - ((PreparedStatement)stmt).setBoolean(parameterIndex, x); - } - - @Override - public void setByte(int parameterIndex, byte x) throws SQLException { - ((PreparedStatement)stmt).setByte(parameterIndex, 
x); - } - - @Override - public void setShort(int parameterIndex, short x) throws SQLException { - ((PreparedStatement)stmt).setShort(parameterIndex, x); - } - - @Override - public void setInt(int parameterIndex, int x) throws SQLException { - ((PreparedStatement)stmt).setInt(parameterIndex, x); - } - - @Override - public void setLong(int parameterIndex, long x) throws SQLException { - ((PreparedStatement)stmt).setLong(parameterIndex, x); - } - - @Override - public void setFloat(int parameterIndex, float x) throws SQLException { - ((PreparedStatement)stmt).setFloat(parameterIndex, x); - } - - @Override - public void setDouble(int parameterIndex, double x) throws SQLException { - ((PreparedStatement)stmt).setDouble(parameterIndex, x); - } - - @Override - public void setBigDecimal(int parameterIndex, BigDecimal x) throws SQLException { - ((PreparedStatement)stmt).setBigDecimal(parameterIndex, x); - } - - @Override - public void setString(int parameterIndex, String x) throws SQLException { - ((PreparedStatement)stmt).setString(parameterIndex, x); - } - - @Override - public void setBytes(int parameterIndex, byte[] x) throws SQLException { - ((PreparedStatement)stmt).setBytes(parameterIndex, x); - } - - @Override - public void setDate(int parameterIndex, Date x) throws SQLException { - ((PreparedStatement)stmt).setDate(parameterIndex, x); - } - - @Override - public void setTime(int parameterIndex, Time x) throws SQLException { - ((PreparedStatement)stmt).setTime(parameterIndex, x); - } - - @Override - public void setTimestamp(int parameterIndex, Timestamp x) throws SQLException { - ((PreparedStatement)stmt).setTimestamp(parameterIndex, x); - } - - @Override - public void setAsciiStream(int parameterIndex, InputStream x, int length) throws SQLException { - ((PreparedStatement)stmt).setAsciiStream(parameterIndex, x, length); - } - - @SuppressWarnings("deprecation") - @Override - public void setUnicodeStream(int parameterIndex, InputStream x, int length) throws SQLException { - ((PreparedStatement)stmt).setUnicodeStream(parameterIndex, x, length); - } - - @Override - public void setBinaryStream(int parameterIndex, InputStream x, int length) throws SQLException { - ((PreparedStatement)stmt).setBinaryStream(parameterIndex, x, length); - } - - @Override - public void clearParameters() throws SQLException { - ((PreparedStatement)stmt).clearParameters(); - } - - @Override - public void setObject(int parameterIndex, Object x, int targetSqlType) throws SQLException { - ((PreparedStatement)stmt).setObject(parameterIndex, x, targetSqlType); - } - - @Override - public void setObject(int parameterIndex, Object x) throws SQLException { - ((PreparedStatement)stmt).setObject(parameterIndex, x); - } - - @Override - public boolean execute() throws SQLException { - return ((PreparedStatement)stmt).execute(); - } - - @Override - public void addBatch() throws SQLException { - ((PreparedStatement)stmt).addBatch(); - } - - @Override - public void setCharacterStream(int parameterIndex, Reader reader, int length) throws SQLException { - ((PreparedStatement)stmt).setCharacterStream(parameterIndex, reader, length); - } - - @Override - public void setRef(int parameterIndex, Ref x) throws SQLException { - ((PreparedStatement)stmt).setRef(parameterIndex, x); - } - - @Override - public void setBlob(int parameterIndex, Blob x) throws SQLException { - ((PreparedStatement)stmt).setBlob(parameterIndex, x); - } - - @Override - public void setClob(int parameterIndex, Clob x) throws SQLException { - 
((PreparedStatement)stmt).setClob(parameterIndex, x); - } - - @Override - public void setArray(int parameterIndex, Array x) throws SQLException { - ((PreparedStatement)stmt).setArray(parameterIndex, x); - } - - @Override - public ResultSetMetaData getMetaData() throws SQLException { - return ((PreparedStatement)stmt).getMetaData(); - } - - @Override - public void setDate(int parameterIndex, Date x, Calendar cal) throws SQLException { - ((PreparedStatement)stmt).setDate(parameterIndex, x, cal); - } - - @Override - public void setTime(int parameterIndex, Time x, Calendar cal) throws SQLException { - ((PreparedStatement)stmt).setTime(parameterIndex, x, cal); - } - - @Override - public void setTimestamp(int parameterIndex, Timestamp x, Calendar cal) throws SQLException { - ((CallableStatement)stmt).setTimestamp(parameterIndex, x, cal); - } - - @Override - public void setNull(int parameterIndex, int sqlType, String typeName) throws SQLException { - ((CallableStatement)stmt).setNull(parameterIndex, sqlType, typeName); - } - - @Override - public void setURL(int parameterIndex, URL x) throws SQLException { - ((CallableStatement)stmt).setURL(parameterIndex, x); - } - - @Override - public ParameterMetaData getParameterMetaData() throws SQLException { - return ((CallableStatement)stmt).getParameterMetaData(); - } - - @Override - public void setRowId(int parameterIndex, RowId x) throws SQLException { - ((CallableStatement)stmt).setRowId(parameterIndex, x); - } - - @Override - public void setNString(int parameterIndex, String value) throws SQLException { - ((CallableStatement)stmt).setNString(parameterIndex, value); - } - - @Override - public void setNCharacterStream(int parameterIndex, Reader value, long length) throws SQLException { - ((CallableStatement)stmt).setNCharacterStream(parameterIndex, value, length); - } - - @Override - public void setNClob(int parameterIndex, NClob value) throws SQLException { - ((CallableStatement)stmt).setNClob(parameterIndex, value); - } - - @Override - public void setClob(int parameterIndex, Reader reader, long length) throws SQLException { - ((CallableStatement)stmt).setClob(parameterIndex, reader, length); - } - - @Override - public void setBlob(int parameterIndex, InputStream inputStream, long length) throws SQLException { - ((CallableStatement)stmt).setBlob(parameterIndex, inputStream, length); - } - - @Override - public void setNClob(int parameterIndex, Reader reader, long length) throws SQLException { - ((CallableStatement)stmt).setNClob(parameterIndex, reader, length); - } - - @Override - public void setSQLXML(int parameterIndex, SQLXML xmlObject) throws SQLException { - ((CallableStatement)stmt).setSQLXML(parameterIndex, xmlObject); - } - - @Override - public void setObject(int parameterIndex, Object x, int targetSqlType, int scaleOrLength) throws SQLException { - ((CallableStatement)stmt).setObject(parameterIndex, x, targetSqlType, scaleOrLength); - } - - @Override - public void setAsciiStream(int parameterIndex, InputStream x, long length) throws SQLException { - ((CallableStatement)stmt).setAsciiStream(parameterIndex, x, length); - } - - @Override - public void setBinaryStream(int parameterIndex, InputStream x, long length) throws SQLException { - ((CallableStatement)stmt).setBinaryStream(parameterIndex, x, length); - } - - @Override - public void setCharacterStream(int parameterIndex, Reader reader, long length) throws SQLException { - ((CallableStatement)stmt).setCharacterStream(parameterIndex, reader, length); - } - - @Override - public void 
setAsciiStream(int parameterIndex, InputStream x) throws SQLException { - ((CallableStatement)stmt).setAsciiStream(parameterIndex, x); - } - - @Override - public void setBinaryStream(int parameterIndex, InputStream x) throws SQLException { - ((CallableStatement)stmt).setBinaryStream(parameterIndex, x); - } - - @Override - public void setCharacterStream(int parameterIndex, Reader reader) throws SQLException { - ((CallableStatement)stmt).setCharacterStream(parameterIndex, reader); - } - - @Override - public void setNCharacterStream(int parameterIndex, Reader value) throws SQLException { - ((CallableStatement)stmt).setNCharacterStream(parameterIndex, value); - } - - @Override - public void setClob(int parameterIndex, Reader reader) throws SQLException { - ((CallableStatement)stmt).setClob(parameterIndex, reader); - } - - @Override - public void setBlob(int parameterIndex, InputStream inputStream) throws SQLException { - ((CallableStatement)stmt).setBlob(parameterIndex, inputStream); - } - - @Override - public void setNClob(int parameterIndex, Reader reader) throws SQLException { - ((CallableStatement)stmt).setNClob(parameterIndex, reader); - } - - @Override - public void registerOutParameter(int parameterIndex, int sqlType) throws SQLException { - ((CallableStatement)stmt).registerOutParameter(parameterIndex, sqlType); - } - - @Override - public void registerOutParameter(int parameterIndex, int sqlType, int scale) throws SQLException { - ((CallableStatement)stmt).registerOutParameter(parameterIndex, sqlType, scale); - } - - @Override - public boolean wasNull() throws SQLException { - return ((CallableStatement)stmt).wasNull(); - } - - @Override - public String getString(int parameterIndex) throws SQLException { - return ((CallableStatement)stmt).getString(parameterIndex); - } - - @Override - public boolean getBoolean(int parameterIndex) throws SQLException { - return ((CallableStatement)stmt).getBoolean(parameterIndex); - } - - @Override - public byte getByte(int parameterIndex) throws SQLException { - return ((CallableStatement)stmt).getByte(parameterIndex); - } - - @Override - public short getShort(int parameterIndex) throws SQLException { - return ((CallableStatement)stmt).getShort(parameterIndex); - } - - @Override - public int getInt(int parameterIndex) throws SQLException { - return ((CallableStatement)stmt).getInt(parameterIndex); - } - - @Override - public long getLong(int parameterIndex) throws SQLException { - return ((CallableStatement)stmt).getLong(parameterIndex); - } - - @Override - public float getFloat(int parameterIndex) throws SQLException { - return ((CallableStatement)stmt).getFloat(parameterIndex); - } - - @Override - public double getDouble(int parameterIndex) throws SQLException { - return ((CallableStatement)stmt).getDouble(parameterIndex); - } - - @SuppressWarnings("deprecation") - @Override - public BigDecimal getBigDecimal(int parameterIndex, int scale) throws SQLException { - return ((CallableStatement)stmt).getBigDecimal(parameterIndex, scale); - } - - @Override - public byte[] getBytes(int parameterIndex) throws SQLException { - return ((CallableStatement)stmt).getBytes(parameterIndex); - } - - @Override - public Date getDate(int parameterIndex) throws SQLException { - return ((CallableStatement)stmt).getDate(parameterIndex); - } - - @Override - public Time getTime(int parameterIndex) throws SQLException { - return ((CallableStatement)stmt).getTime(parameterIndex); - } - - @Override - public Timestamp getTimestamp(int parameterIndex) throws SQLException { - 
return ((CallableStatement)stmt).getTimestamp(parameterIndex); - } - - @Override - public Object getObject(int parameterIndex) throws SQLException { - return ((CallableStatement)stmt).getObject(parameterIndex); - } - - @Override - public BigDecimal getBigDecimal(int parameterIndex) throws SQLException { - return ((CallableStatement)stmt).getBigDecimal(parameterIndex); - } - - @Override - public Object getObject(int parameterIndex, Map> map) throws SQLException { - return ((CallableStatement)stmt).getObject(parameterIndex, map); - } - - @Override - public Ref getRef(int parameterIndex) throws SQLException { - return ((CallableStatement)stmt).getRef(parameterIndex); - } - - @Override - public Blob getBlob(int parameterIndex) throws SQLException { - return ((CallableStatement)stmt).getBlob(parameterIndex); - } - - @Override - public Clob getClob(int parameterIndex) throws SQLException { - return ((CallableStatement)stmt).getClob(parameterIndex); - } - - @Override - public Array getArray(int parameterIndex) throws SQLException { - return ((CallableStatement)stmt).getArray(parameterIndex); - } - - @Override - public Date getDate(int parameterIndex, Calendar cal) throws SQLException { - return ((CallableStatement)stmt).getDate(parameterIndex, cal); - } - - @Override - public Time getTime(int parameterIndex, Calendar cal) throws SQLException { - return ((CallableStatement)stmt).getTime(parameterIndex, cal); - } - - @Override - public Timestamp getTimestamp(int parameterIndex, Calendar cal) throws SQLException { - return ((CallableStatement)stmt).getTimestamp(parameterIndex, cal); - } - - @Override - public void registerOutParameter(int parameterIndex, int sqlType, String typeName) throws SQLException { - ((CallableStatement)stmt).registerOutParameter(parameterIndex, sqlType, typeName); - } - - @Override - public void registerOutParameter(String parameterName, int sqlType) throws SQLException { - ((CallableStatement)stmt).registerOutParameter(parameterName, sqlType); - } - - @Override - public void registerOutParameter(String parameterName, int sqlType, int scale) throws SQLException { - ((CallableStatement)stmt).registerOutParameter(parameterName, sqlType, scale); - } - - @Override - public void registerOutParameter(String parameterName, int sqlType, String typeName) throws SQLException { - ((CallableStatement)stmt).registerOutParameter(parameterName, sqlType, typeName); - } - - @Override - public URL getURL(int parameterIndex) throws SQLException { - return ((CallableStatement)stmt).getURL(parameterIndex); - } - - @Override - public void setURL(String parameterName, URL val) throws SQLException { - ((CallableStatement)stmt).setURL(parameterName, val); - } - - @Override - public void setNull(String parameterName, int sqlType) throws SQLException { - ((CallableStatement)stmt).setNull(parameterName, sqlType); - } - - @Override - public void setBoolean(String parameterName, boolean x) throws SQLException { - ((CallableStatement)stmt).setBoolean(parameterName, x); - } - - @Override - public void setByte(String parameterName, byte x) throws SQLException { - ((CallableStatement)stmt).setByte(parameterName, x); - } - - @Override - public void setShort(String parameterName, short x) throws SQLException { - ((CallableStatement)stmt).setShort(parameterName, x); - } - - @Override - public void setInt(String parameterName, int x) throws SQLException { - ((CallableStatement)stmt).setInt(parameterName, x); - } - - @Override - public void setLong(String parameterName, long x) throws SQLException { - 
((CallableStatement)stmt).setLong(parameterName, x); - } - - @Override - public void setFloat(String parameterName, float x) throws SQLException { - ((CallableStatement)stmt).setFloat(parameterName, x); - } - - @Override - public void setDouble(String parameterName, double x) throws SQLException { - ((CallableStatement)stmt).setDouble(parameterName, x); - } - - @Override - public void setBigDecimal(String parameterName, BigDecimal x) throws SQLException { - ((CallableStatement)stmt).setBigDecimal(parameterName, x); - } - - @Override - public void setString(String parameterName, String x) throws SQLException { - ((CallableStatement)stmt).setString(parameterName, x); - } - - @Override - public void setBytes(String parameterName, byte[] x) throws SQLException { - ((CallableStatement)stmt).setBytes(parameterName, x); - } - - @Override - public void setDate(String parameterName, Date x) throws SQLException { - ((CallableStatement)stmt).setDate(parameterName, x); - } - - @Override - public void setTime(String parameterName, Time x) throws SQLException { - ((CallableStatement)stmt).setTime(parameterName, x); - } - - @Override - public void setTimestamp(String parameterName, Timestamp x) throws SQLException { - ((CallableStatement)stmt).setTimestamp(parameterName, x); - } - - @Override - public void setAsciiStream(String parameterName, InputStream x, int length) throws SQLException { - ((CallableStatement)stmt).setAsciiStream(parameterName, x, length); - } - - @Override - public void setBinaryStream(String parameterName, InputStream x, int length) throws SQLException { - ((CallableStatement)stmt).setBinaryStream(parameterName, x, length); - } - - @Override - public void setObject(String parameterName, Object x, int targetSqlType, int scale) throws SQLException { - ((CallableStatement)stmt).setObject(parameterName, x, targetSqlType, scale); - } - - @Override - public void setObject(String parameterName, Object x, int targetSqlType) throws SQLException { - ((CallableStatement)stmt).setObject(parameterName, x, targetSqlType); - } - - @Override - public void setObject(String parameterName, Object x) throws SQLException { - ((CallableStatement)stmt).setObject(parameterName, x); - } - - @Override - public void setCharacterStream(String parameterName, Reader reader, int length) throws SQLException { - ((CallableStatement)stmt).setCharacterStream(parameterName, reader, length); - } - - @Override - public void setDate(String parameterName, Date x, Calendar cal) throws SQLException { - ((CallableStatement)stmt).setDate(parameterName, x, cal); - } - - @Override - public void setTime(String parameterName, Time x, Calendar cal) throws SQLException { - ((CallableStatement)stmt).setTime(parameterName, x, cal); - } - - @Override - public void setTimestamp(String parameterName, Timestamp x, Calendar cal) throws SQLException { - ((CallableStatement)stmt).setTimestamp(parameterName, x, cal); - } - - @Override - public void setNull(String parameterName, int sqlType, String typeName) throws SQLException { - ((CallableStatement)stmt).setNull(parameterName, sqlType, typeName); - } - - @Override - public String getString(String parameterName) throws SQLException { - return ((CallableStatement)stmt).getString(parameterName); - } - - @Override - public boolean getBoolean(String parameterName) throws SQLException { - return ((CallableStatement)stmt).getBoolean(parameterName); - } - - @Override - public byte getByte(String parameterName) throws SQLException { - return ((CallableStatement)stmt).getByte(parameterName); - } - - 
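The delegation methods above and below all follow the same shape. The following is a condensed sketch of the pattern repeated by ProxyStatement's execute/executeUpdate variants; the class and helper names (ProxyPatternSketch, runThroughMusic, resyncIfCreate) are invented for illustration, while the hook calls, the CREATE-triggered table re-scan and the com.datastax.driver exception filter mirror the methods shown earlier in this file.

    import java.sql.SQLException;
    import java.sql.Statement;

    import com.att.research.exceptions.QueryException;
    import com.att.research.mdbc.MusicSqlManager;

    class ProxyPatternSketch {
        private static final String DATASTAX_PREFIX = "com.datastax.driver";

        int runThroughMusic(Statement stmt, MusicSqlManager mgr, String sql) throws SQLException {
            int n = 0;
            try {
                mgr.preStatementHook(sql);      // sync dirty rows before the statement runs
                n = stmt.executeUpdate(sql);    // delegate to the wrapped JDBC statement
                mgr.postStatementHook(sql);     // copy the effects into the transaction digest
                resyncIfCreate(mgr, sql);       // CREATE statements trigger a table re-scan
            } catch (Exception e) {
                // exceptions raised by the Cassandra driver are swallowed;
                // anything else is rethrown to the caller
                if (!e.getClass().getName().startsWith(DATASTAX_PREFIX)) {
                    throw e;
                }
            }
            return n;
        }

        private void resyncIfCreate(MusicSqlManager mgr, String sql) {
            if (sql == null || sql.trim().toLowerCase().startsWith("create")) {
                try {
                    mgr.synchronizeTables();    // pick up newly created tables and install triggers
                } catch (QueryException e) {
                    e.printStackTrace();
                }
            }
        }
    }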
@Override - public short getShort(String parameterName) throws SQLException { - return ((CallableStatement)stmt).getShort(parameterName); - } - - @Override - public int getInt(String parameterName) throws SQLException { - return ((CallableStatement)stmt).getInt(parameterName); - } - - @Override - public long getLong(String parameterName) throws SQLException { - return ((CallableStatement)stmt).getLong(parameterName); - } - - @Override - public float getFloat(String parameterName) throws SQLException { - return ((CallableStatement)stmt).getFloat(parameterName); - } - - @Override - public double getDouble(String parameterName) throws SQLException { - return ((CallableStatement)stmt).getDouble(parameterName); - } - - @Override - public byte[] getBytes(String parameterName) throws SQLException { - return ((CallableStatement)stmt).getBytes(parameterName); - } - - @Override - public Date getDate(String parameterName) throws SQLException { - return ((CallableStatement)stmt).getDate(parameterName); - } - - @Override - public Time getTime(String parameterName) throws SQLException { - return ((CallableStatement)stmt).getTime(parameterName); - } - - @Override - public Timestamp getTimestamp(String parameterName) throws SQLException { - return ((CallableStatement)stmt).getTimestamp(parameterName); - } - - @Override - public Object getObject(String parameterName) throws SQLException { - return ((CallableStatement)stmt).getObject(parameterName); - } - - @Override - public BigDecimal getBigDecimal(String parameterName) throws SQLException { - return ((CallableStatement)stmt).getBigDecimal(parameterName); - } - - @Override - public Object getObject(String parameterName, Map> map) throws SQLException { - return ((CallableStatement)stmt).getObject(parameterName, map); - } - - @Override - public Ref getRef(String parameterName) throws SQLException { - return ((CallableStatement)stmt).getRef(parameterName); - } - - @Override - public Blob getBlob(String parameterName) throws SQLException { - return ((CallableStatement)stmt).getBlob(parameterName); - } - - @Override - public Clob getClob(String parameterName) throws SQLException { - return ((CallableStatement)stmt).getClob(parameterName); - } - - @Override - public Array getArray(String parameterName) throws SQLException { - return ((CallableStatement)stmt).getArray(parameterName); - } - - @Override - public Date getDate(String parameterName, Calendar cal) throws SQLException { - return ((CallableStatement)stmt).getDate(parameterName, cal); - } - - @Override - public Time getTime(String parameterName, Calendar cal) throws SQLException { - return ((CallableStatement)stmt).getTime(parameterName, cal); - } - - @Override - public Timestamp getTimestamp(String parameterName, Calendar cal) throws SQLException { - return ((CallableStatement)stmt).getTimestamp(parameterName, cal); - } - - @Override - public URL getURL(String parameterName) throws SQLException { - return ((CallableStatement)stmt).getURL(parameterName); - } - - @Override - public RowId getRowId(int parameterIndex) throws SQLException { - return ((CallableStatement)stmt).getRowId(parameterIndex); - } - - @Override - public RowId getRowId(String parameterName) throws SQLException { - return ((CallableStatement)stmt).getRowId(parameterName); - } - - @Override - public void setRowId(String parameterName, RowId x) throws SQLException { - ((CallableStatement)stmt).setRowId(parameterName, x); - } - - @Override - public void setNString(String parameterName, String value) throws SQLException { - 
((CallableStatement)stmt).setNString(parameterName, value); - } - - @Override - public void setNCharacterStream(String parameterName, Reader value, long length) throws SQLException { - ((CallableStatement)stmt).setNCharacterStream(parameterName, value, length); - } - - @Override - public void setNClob(String parameterName, NClob value) throws SQLException { - ((CallableStatement)stmt).setNClob(parameterName, value); - } - - @Override - public void setClob(String parameterName, Reader reader, long length) throws SQLException { - ((CallableStatement)stmt).setClob(parameterName, reader, length); - } - - @Override - public void setBlob(String parameterName, InputStream inputStream, long length) throws SQLException { - ((CallableStatement)stmt).setBlob(parameterName, inputStream, length); - } - - @Override - public void setNClob(String parameterName, Reader reader, long length) throws SQLException { - ((CallableStatement)stmt).setNClob(parameterName, reader, length); - } - - @Override - public NClob getNClob(int parameterIndex) throws SQLException { - return ((CallableStatement)stmt).getNClob(parameterIndex); - } - - @Override - public NClob getNClob(String parameterName) throws SQLException { - return ((CallableStatement)stmt).getNClob(parameterName); - } - - @Override - public void setSQLXML(String parameterName, SQLXML xmlObject) throws SQLException { - ((CallableStatement)stmt).setSQLXML(parameterName, xmlObject); - } - - @Override - public SQLXML getSQLXML(int parameterIndex) throws SQLException { - return ((CallableStatement)stmt).getSQLXML(parameterIndex); - } - - @Override - public SQLXML getSQLXML(String parameterName) throws SQLException { - return ((CallableStatement)stmt).getSQLXML(parameterName); - } - - @Override - public String getNString(int parameterIndex) throws SQLException { - return ((CallableStatement)stmt).getNString(parameterIndex); - } - - @Override - public String getNString(String parameterName) throws SQLException { - return ((CallableStatement)stmt).getNString(parameterName); - } - - @Override - public Reader getNCharacterStream(int parameterIndex) throws SQLException { - return ((CallableStatement)stmt).getNCharacterStream(parameterIndex); - } - - @Override - public Reader getNCharacterStream(String parameterName) throws SQLException { - return ((CallableStatement)stmt).getNCharacterStream(parameterName); - } - - @Override - public Reader getCharacterStream(int parameterIndex) throws SQLException { - return ((CallableStatement)stmt).getCharacterStream(parameterIndex); - } - - @Override - public Reader getCharacterStream(String parameterName) throws SQLException { - return ((CallableStatement)stmt).getCharacterStream(parameterName); - } - - @Override - public void setBlob(String parameterName, Blob x) throws SQLException { - ((CallableStatement)stmt).setBlob(parameterName, x); - } - - @Override - public void setClob(String parameterName, Clob x) throws SQLException { - ((CallableStatement)stmt).setClob(parameterName, x); - } - - @Override - public void setAsciiStream(String parameterName, InputStream x, long length) throws SQLException { - ((CallableStatement)stmt).setAsciiStream(parameterName, x, length); - } - - @Override - public void setBinaryStream(String parameterName, InputStream x, long length) throws SQLException { - ((CallableStatement)stmt).setBinaryStream(parameterName, x, length); - } - - @Override - public void setCharacterStream(String parameterName, Reader reader, long length) throws SQLException { - 
((CallableStatement)stmt).setCharacterStream(parameterName, reader, length); - } - - @Override - public void setAsciiStream(String parameterName, InputStream x) throws SQLException { - ((CallableStatement)stmt).setAsciiStream(parameterName, x); - } - - @Override - public void setBinaryStream(String parameterName, InputStream x) throws SQLException { - ((CallableStatement)stmt).setBinaryStream(parameterName, x); - } - - @Override - public void setCharacterStream(String parameterName, Reader reader) throws SQLException { - ((CallableStatement)stmt).setCharacterStream(parameterName, reader); - } - - @Override - public void setNCharacterStream(String parameterName, Reader value) throws SQLException { - ((CallableStatement)stmt).setNCharacterStream(parameterName, value); - } - - @Override - public void setClob(String parameterName, Reader reader) throws SQLException { - ((CallableStatement)stmt).setClob(parameterName, reader); - } - - @Override - public void setBlob(String parameterName, InputStream inputStream) throws SQLException { - ((CallableStatement)stmt).setBlob(parameterName, inputStream); - } - - @Override - public void setNClob(String parameterName, Reader reader) throws SQLException { - ((CallableStatement)stmt).setNClob(parameterName, reader); - } - - @Override - public T getObject(int parameterIndex, Class type) throws SQLException { - return ((CallableStatement)stmt).getObject(parameterIndex, type); - } - - @Override - public T getObject(String parameterName, Class type) throws SQLException { - return ((CallableStatement)stmt).getObject(parameterName, type); - } - - private void synchronizeTables(String sql) { - if (sql == null || sql.trim().toLowerCase().startsWith("create")) { - if (mgr != null) { - try { - mgr.synchronizeTables(); - } catch (QueryException e) { - - e.printStackTrace(); - } - } - } - } -} diff --git a/src/main/java/com/att/research/mdbc/Range.java b/src/main/java/com/att/research/mdbc/Range.java deleted file mode 100644 index 4d80a51..0000000 --- a/src/main/java/com/att/research/mdbc/Range.java +++ /dev/null @@ -1,34 +0,0 @@ -package com.att.research.mdbc; - -import java.io.Serializable; - - -/** - * This class represent a range of the whole database - * For now a range represents directly a table in Cassandra - * In the future we may decide to partition ranges differently - * @author Enrique Saurez - */ -public class Range implements Serializable { - - private static final long serialVersionUID = 1610744496930800088L; - - final public String table; - - public Range(String table) { - this.table = table; - } - - /** - * Compares to Range types - * @param other the other range against which this is compared - * @return the equality result - */ - public boolean equal(Range other) { - return (table == other.table); - } - - public boolean overlaps(Range other) { - return table == other.table; - } -} \ No newline at end of file diff --git a/src/main/java/com/att/research/mdbc/RedoRow.java b/src/main/java/com/att/research/mdbc/RedoRow.java deleted file mode 100644 index c024fe7..0000000 --- a/src/main/java/com/att/research/mdbc/RedoRow.java +++ /dev/null @@ -1,29 +0,0 @@ -package com.att.research.mdbc; - -public class RedoRow { - private String redoTableName; - private String redoRowIndex; - - public RedoRow(){} - - public RedoRow(String redoTableName, String redoRowIndex){ - this.redoRowIndex = redoRowIndex; - this.redoTableName = redoTableName; - } - - public String getRedoTableName() { - return redoTableName; - } - - public void setRedoTableName(String 
redoTableName) { - this.redoTableName = redoTableName; - } - - public String getRedoRowIndex() { - return redoRowIndex; - } - - public void setRedoRowIndex(String redoRowIndex) { - this.redoRowIndex = redoRowIndex; - } -} diff --git a/src/main/java/com/att/research/mdbc/StateManager.java b/src/main/java/com/att/research/mdbc/StateManager.java deleted file mode 100644 index 0a4a409..0000000 --- a/src/main/java/com/att/research/mdbc/StateManager.java +++ /dev/null @@ -1,209 +0,0 @@ -package com.att.research.mdbc; - -import com.att.research.exceptions.MDBCServiceException; -import com.att.research.logging.EELFLoggerDelegate; -import com.att.research.logging.format.AppMessages; -import com.att.research.logging.format.ErrorSeverity; -import com.att.research.logging.format.ErrorTypes; -import com.att.research.mdbc.mixins.MixinFactory; -import com.att.research.mdbc.mixins.MusicInterface; -import com.att.research.mdbc.mixins.MusicMixin; -import com.att.research.mdbc.tables.TxCommitProgress; - -import java.sql.Connection; -import java.sql.DriverManager; -import java.sql.SQLException; -import java.sql.Statement; -import java.util.HashMap; -import java.util.Map; -import java.util.Properties; - -/** - * \TODO Implement an interface for the server logic and a factory - * @author Enrique Saurez - */ -public class StateManager { - - //\TODO We need to fix the auto-commit mode and multiple transactions with the same connection - - private static EELFLoggerDelegate logger = EELFLoggerDelegate.getLogger(StateManager.class); - - /** - * This is the interface used by all the MusicSqlManagers, - * that are created by the MDBC Server - * @see MusicInterface - */ - private MusicInterface musicManager; - /** - * This is the Running Queries information table. - * It mainly contains information about the entities - * that have being committed so far. - */ - private TxCommitProgress transactionInfo; - - private Map mdbcConnections; - - private String sqlDatabase; - - private String url; - - private Properties info; - - @SuppressWarnings("unused") - private DatabasePartition ranges; - - public StateManager(String url, Properties info, DatabasePartition ranges, String sqlDatabase) throws MDBCServiceException { - this.sqlDatabase = sqlDatabase; - this.ranges = ranges; - this.url = url; - this.info = info; - this.transactionInfo = new TxCommitProgress(); - //\fixme this is not really used, delete! 
- String cassandraUrl = info.getProperty(Configuration.KEY_CASSANDRA_URL, Configuration.CASSANDRA_URL_DEFAULT); - String mixin = info.getProperty(Configuration.KEY_MUSIC_MIXIN_NAME, Configuration.MUSIC_MIXIN_DEFAULT); - init(mixin, cassandraUrl); - } - - protected void init(String mixin, String cassandraUrl) throws MDBCServiceException { - this.musicManager = MixinFactory.createMusicInterface(mixin, cassandraUrl, info,ranges); - this.musicManager.createKeyspace(); - try { - this.musicManager.initializeMetricDataStructures(); - } catch (MDBCServiceException e) { - logger.error(EELFLoggerDelegate.errorLogger, e.getMessage(),AppMessages.UNKNOWNERROR, ErrorSeverity.CRITICAL, ErrorTypes.GENERALSERVICEERROR); - throw(e); - } - MusicMixin.loadProperties(); - this.mdbcConnections = new HashMap<>(); - initSqlDatabase(); - } - - protected void initSqlDatabase() throws MDBCServiceException { - try { - //\TODO: pass the driver as a variable - Class.forName("org.mariadb.jdbc.Driver"); - } - catch (ClassNotFoundException e) { - logger.error(EELFLoggerDelegate.errorLogger, e.getMessage(),AppMessages.UNKNOWNERROR, ErrorSeverity.CRITICAL, ErrorTypes.GENERALSERVICEERROR); - return; - } - try { - Connection sqlConnection = DriverManager.getConnection(this.url, this.info); - StringBuilder sql = new StringBuilder("CREATE DATABASE IF NOT EXISTS ") - .append(sqlDatabase) - .append(";"); - Statement stmt = sqlConnection.createStatement(); - stmt.execute(sql.toString()); - } catch (SQLException e) { - logger.error(EELFLoggerDelegate.errorLogger, e.getMessage(),AppMessages.UNKNOWNERROR, ErrorSeverity.CRITICAL, ErrorTypes.GENERALSERVICEERROR); - throw new MDBCServiceException(e.getMessage()); - } - } - - public void CloseConnection(String connectionId){ - //\TODO check if there is a race condition - if(mdbcConnections.containsKey(connectionId)) { - transactionInfo.deleteTxProgress(connectionId); - try { - Connection conn = mdbcConnections.get(connectionId); - if(conn!=null) - conn.close(); - } catch (SQLException e) { - logger.error(EELFLoggerDelegate.errorLogger, e.getMessage(),AppMessages.UNKNOWNERROR, ErrorSeverity.CRITICAL, ErrorTypes.GENERALSERVICEERROR); - } - mdbcConnections.remove(connectionId); - } - } - - public void OpenConnection(String id, Properties information){ - if(!mdbcConnections.containsKey(id)){ - Connection sqlConnection; - MdbcConnection newConnection; - //Create connection to local SQL DB - //\TODO: create function to generate connection outside of open connection and get connection - try { - //\TODO: pass the driver as a variable - Class.forName("org.mariadb.jdbc.Driver"); - } - catch (ClassNotFoundException e) { - // TODO Auto-generated catch block - logger.error(EELFLoggerDelegate.errorLogger, e.getMessage(),AppMessages.UNKNOWNERROR, ErrorSeverity.CRITICAL, ErrorTypes.GENERALSERVICEERROR); - return; - } - try { - sqlConnection = DriverManager.getConnection(this.url+"/"+this.sqlDatabase, this.info); - } catch (SQLException e) { - logger.error(EELFLoggerDelegate.errorLogger, e.getMessage(),AppMessages.QUERYERROR, ErrorSeverity.CRITICAL, ErrorTypes.QUERYERROR); - sqlConnection = null; - } - //Create MDBC connection - try { - newConnection = new MdbcConnection(id, this.url+"/"+this.sqlDatabase, sqlConnection, info, this.musicManager, transactionInfo,ranges); - } catch (MDBCServiceException e) { - logger.error(EELFLoggerDelegate.errorLogger, e.getMessage(),AppMessages.UNKNOWNERROR, ErrorSeverity.CRITICAL, ErrorTypes.QUERYERROR); - newConnection = null; - return; - } - 
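A sketch of how a caller is expected to drive StateManager's per-transaction connection life cycle. The JDBC URL, database name and connection id below are placeholders; the constructor and the OpenConnection/GetConnection/CloseConnection signatures are the ones defined in this class.

    import java.sql.Connection;
    import java.util.Properties;

    import com.att.research.exceptions.MDBCServiceException;
    import com.att.research.mdbc.DatabasePartition;
    import com.att.research.mdbc.StateManager;

    class StateManagerUsageSketch {
        void handleOneTransaction() throws MDBCServiceException {
            Properties info = new Properties();
            DatabasePartition ranges = null;    // normally read from a partition/ranges JSON file
            StateManager mgr = new StateManager("jdbc:mariadb://localhost:3306", info, ranges, "test");

            String connectionId = "tx-0001";            // one MdbcConnection is tracked per id
            mgr.OpenConnection(connectionId, info);     // creates the SQL + MDBC connection pair
            Connection conn = mgr.GetConnection(connectionId);  // reuse (or lazily create) the tracked connection
            // ... run statements and commit through conn ...
            mgr.CloseConnection(connectionId);          // drops the transaction tracker and closes the connection
        }
    }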
logger.info(EELFLoggerDelegate.applicationLogger,"Connection created for connection: "+id); - transactionInfo.createNewTransactionTracker(id, sqlConnection); - if(newConnection != null) { - mdbcConnections.put(id,newConnection); - } - } - } - - /** - * This function returns the connection to the corresponding transaction - * @param id of the transaction, created using - * @return - */ - public Connection GetConnection(String id) { - if(mdbcConnections.containsKey(id)) { - //\TODO: Verify if this make sense - // Intent: reinitialize transaction progress, when it already completed the previous tx for the same connection - if(transactionInfo.isComplete(id)) { - transactionInfo.reinitializeTxProgress(id); - } - return mdbcConnections.get(id); - } - - Connection sqlConnection; - MdbcConnection newConnection; - try { - //TODO: pass the driver as a variable - Class.forName("org.mariadb.jdbc.Driver"); - } - catch (ClassNotFoundException e) { - // TODO Auto-generated catch block - logger.error(EELFLoggerDelegate.errorLogger, e.getMessage(),AppMessages.QUERYERROR, ErrorSeverity.CRITICAL, ErrorTypes.QUERYERROR); - } - - //Create connection to local SQL DB - try { - sqlConnection = DriverManager.getConnection(this.url+"/"+this.sqlDatabase, this.info); - } catch (SQLException e) { - logger.error("sql connection was not created correctly"); - logger.error(EELFLoggerDelegate.errorLogger, e.getMessage(),AppMessages.QUERYERROR, ErrorSeverity.CRITICAL, ErrorTypes.QUERYERROR); - sqlConnection = null; - } - //Create MDBC connection - try { - newConnection = new MdbcConnection(id,this.url+"/"+this.sqlDatabase, sqlConnection, info, this.musicManager, transactionInfo,ranges); - } catch (MDBCServiceException e) { - logger.error(EELFLoggerDelegate.errorLogger, e.getMessage(),AppMessages.UNKNOWNERROR, ErrorSeverity.CRITICAL, ErrorTypes.QUERYERROR); - newConnection = null; - } - logger.info(EELFLoggerDelegate.applicationLogger,"Connection created for connection: "+id); - - transactionInfo.createNewTransactionTracker(id, sqlConnection); - if(newConnection != null) { - mdbcConnections.put(id,newConnection); - } - return newConnection; - } - - public void InitializeSystem() { - //\TODO Prefetch data to system using the data ranges as guide - throw new UnsupportedOperationException("Function initialize system needs to be implemented id MdbcStateManager"); - } -} diff --git a/src/main/java/com/att/research/mdbc/TableInfo.java b/src/main/java/com/att/research/mdbc/TableInfo.java deleted file mode 100755 index 583ba73..0000000 --- a/src/main/java/com/att/research/mdbc/TableInfo.java +++ /dev/null @@ -1,75 +0,0 @@ -package com.att.research.mdbc; - -import java.sql.Types; -import java.util.ArrayList; -import java.util.List; - -/** - * Information about a table in the local database. It consists of three ordered list, which should all have the - * same length. A list of column names, a list of DB column types, and a list of booleans specifying which columns are keys. - * @author Robert P. Eby - */ -public class TableInfo { - /** An ordered list of the column names in this table */ - public List columns; - /** An ordered list of the column types in this table; the types are integers taken from {@link java.sql.Types}. */ - public List coltype; - /** An ordered list of booleans indicating if a column is a primary key column or not. */ - public List iskey; - - /** Construct an (initially) empty TableInfo. 
*/ - public TableInfo() { - columns = new ArrayList(); - coltype = new ArrayList(); - iskey = new ArrayList(); - } - /** - * Check whether the column whose name is name is a primary key column. - * @param name the column name - * @return true if it is, false otherwise - */ - public boolean iskey(String name) { - for (int i = 0; i < columns.size(); i++) { - if (this.columns.get(i).equalsIgnoreCase(name)) - return this.iskey.get(i); - } - return false; - } - /** - * Get the type of the column whose name is name. - * @param name the column name - * @return the column type or Types.NULL - */ - public int getColType(String name) { - for (int i = 0; i < columns.size(); i++) { - if (this.columns.get(i).equalsIgnoreCase(name)) - return this.coltype.get(i); - } - return Types.NULL; - } - - /** - * Checks if this table has a primary key - * @return - */ - public boolean hasKey() { - for (Boolean b: iskey) { - if (b) { - return true; - } - } - return false; - } - - public List getKeyColumns(){ - List keys = new ArrayList(); - int idx = 0; - for (Boolean b: iskey) { - if (b) { - keys.add(this.columns.get(idx)); - } - idx++; - } - return keys; - } -} diff --git a/src/main/java/com/att/research/mdbc/configurations/NodeConfiguration.java b/src/main/java/com/att/research/mdbc/configurations/NodeConfiguration.java deleted file mode 100644 index d74dafb..0000000 --- a/src/main/java/com/att/research/mdbc/configurations/NodeConfiguration.java +++ /dev/null @@ -1,71 +0,0 @@ -package com.att.research.mdbc.configurations; - -import com.att.research.logging.EELFLoggerDelegate; -import com.att.research.mdbc.DatabasePartition; -import com.att.research.mdbc.MDBCUtils; -import com.att.research.mdbc.Range; -import com.google.gson.Gson; -import com.google.gson.GsonBuilder; - -import java.io.BufferedReader; -import java.io.FileNotFoundException; -import java.io.FileReader; -import java.io.IOException; -import java.util.HashSet; -import java.util.Set; - -public class NodeConfiguration { - - private static transient final EELFLoggerDelegate LOG = EELFLoggerDelegate.getLogger(NodeConfiguration.class); - - public String sqlDatabaseName; - public DatabasePartition partition; - public String nodeName; - - public NodeConfiguration(String tables, String mriIndex, String mriTableName, String partitionId, String sqlDatabaseName, String node, String redoRecordsTable){ - partition = new DatabasePartition(toRanges(tables), mriIndex, mriTableName, partitionId, null, redoRecordsTable) ; - this.sqlDatabaseName = sqlDatabaseName; - this.nodeName = node; - } - - protected Set toRanges(String tables){ - Set newRange = new HashSet<>(); - String[] tablesArray=tables.split(","); - for(String table: tablesArray) { - newRange.add(new Range(table)); - } - return newRange; - } - - public String toJson() { - GsonBuilder builder = new GsonBuilder(); - builder.setPrettyPrinting().serializeNulls();; - Gson gson = builder.create(); - return gson.toJson(this); - } - - public void saveToFile(String file){ - try { - String serialized = this.toJson(); - MDBCUtils.saveToFile(serialized,file,LOG); - } catch (IOException e) { - e.printStackTrace(); - // Exit with error - System.exit(1); - } - } - - public static NodeConfiguration readJsonFromFile( String filepath) throws FileNotFoundException { - BufferedReader br; - try { - br = new BufferedReader( - new FileReader(filepath)); - } catch (FileNotFoundException e) { - LOG.error(EELFLoggerDelegate.errorLogger,"File was not found when reading json"+e); - throw e; - } - Gson gson = new Gson(); - 
NodeConfiguration config = gson.fromJson(br, NodeConfiguration.class); - return config; - } -} diff --git a/src/main/java/com/att/research/mdbc/configurations/TablesConfiguration.java b/src/main/java/com/att/research/mdbc/configurations/TablesConfiguration.java deleted file mode 100644 index b86d058..0000000 --- a/src/main/java/com/att/research/mdbc/configurations/TablesConfiguration.java +++ /dev/null @@ -1,179 +0,0 @@ -package com.att.research.mdbc.configurations; - -import com.att.research.exceptions.MDBCServiceException; -import com.att.research.logging.EELFLoggerDelegate; -import com.att.research.mdbc.DatabaseOperations; -import com.att.research.mdbc.RedoRow; -import com.att.research.mdbc.mixins.CassandraMixin; -import com.google.gson.Gson; -import org.onap.music.datastore.PreparedQueryObject; -import org.onap.music.exceptions.MusicServiceException; -import org.onap.music.main.MusicCore; - -import java.io.BufferedReader; -import java.io.FileNotFoundException; -import java.io.FileReader; -import java.util.ArrayList; -import java.util.List; - -public class TablesConfiguration { - - private final String TIT_TABLE_NAME = "transactioninformation"; - private final String MUSIC_TX_DIGEST_TABLE_NAME = "musictxdigest"; - - private transient static EELFLoggerDelegate logger = EELFLoggerDelegate.getLogger(TablesConfiguration.class); - private List partitions; - private String internalNamespace; - private int internalReplicationFactor; - private String musicNamespace; - private String tableToPartitionName; - private String partitionInformationTableName; - private String redoHistoryTableName; - private String sqlDatabaseName; - - public TablesConfiguration(){} - - /** - * This functions initalize all the corresponding tables and rows - * @return a list of node configurations to be used when starting each of the servers - * @throws MDBCServiceException - * @apiNote This function assumes that when used, there is not associated redo history in the tables to the tables that are going to be managed by this configuration file - */ - public List initializeAndCreateNodeConfigurations() throws MDBCServiceException { - initInternalNamespace(); - DatabaseOperations.createNamespace(musicNamespace, internalReplicationFactor); - List nodeConfigs = new ArrayList<>(); - String ttpName = (tableToPartitionName==null || tableToPartitionName.isEmpty())?CassandraMixin.TABLE_TO_PARTITION_TABLE_NAME:tableToPartitionName; - DatabaseOperations.CreateTableToPartitionTable(musicNamespace,ttpName); - String pitName = (partitionInformationTableName==null || partitionInformationTableName.isEmpty())?CassandraMixin.PARTITION_INFORMATION_TABLE_NAME:partitionInformationTableName; - DatabaseOperations.CreatePartitionInfoTable(musicNamespace,pitName); - String rhName = (redoHistoryTableName==null || redoHistoryTableName.isEmpty())?CassandraMixin.REDO_HISTORY_TABLE_NAME:redoHistoryTableName; - DatabaseOperations.CreateRedoHistoryTable(musicNamespace,rhName); - if(partitions == null){ - logger.error("Partitions was not correctly initialized"); - throw new MDBCServiceException("Partition was not correctly initialized"); - } - for(PartitionInformation partitionInfo : partitions){ - String mriTableName = partitionInfo.mriTableName; - mriTableName = (mriTableName==null || mriTableName.isEmpty())?TIT_TABLE_NAME:mriTableName; - //0) Create the corresponding Music Range Information table - DatabaseOperations.CreateMusicRangeInformationTable(musicNamespace,mriTableName); - String musicTxDigestTableName = partitionInfo.mtxdTableName; - 
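A sketch of the intended bootstrap flow for these configuration classes, assuming a tableConfiguration.json shaped like the sample later in this patch; the class name and output file names are illustrative, while readJsonFromFile, initializeAndCreateNodeConfigurations and saveToFile are the methods defined here.

    import java.io.FileNotFoundException;
    import java.util.List;

    import com.att.research.exceptions.MDBCServiceException;
    import com.att.research.mdbc.configurations.NodeConfiguration;
    import com.att.research.mdbc.configurations.TablesConfiguration;

    class ConfigBootstrapSketch {
        void bootstrap() throws FileNotFoundException, MDBCServiceException {
            TablesConfiguration tablesConfig =
                    TablesConfiguration.readJsonFromFile("tableConfiguration.json");
            // creates the MUSIC namespace and bookkeeping tables, plus one partition row
            // per configured partition, and returns one NodeConfiguration per MDBC node
            List<NodeConfiguration> nodes = tablesConfig.initializeAndCreateNodeConfigurations();
            int i = 0;
            for (NodeConfiguration node : nodes) {
                node.saveToFile("config-" + (i++) + ".json");  // consumed by each node at startup
            }
        }
    }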
musicTxDigestTableName = (musicTxDigestTableName==null || musicTxDigestTableName.isEmpty())? MUSIC_TX_DIGEST_TABLE_NAME :musicTxDigestTableName; - DatabaseOperations.CreateMusicTxDigest(-1,musicNamespace,musicTxDigestTableName); - String partitionId; - if(partitionInfo.partitionId==null || partitionInfo.partitionId.isEmpty()){ - if(partitionInfo.replicationFactor==0){ - logger.error("Replication factor and partition id are both empty, and this is an invalid configuration" ); - throw new MDBCServiceException("Replication factor and partition id are both empty, and this is an invalid configuration"); - } - //1) Create a row in the partition info table - partitionId = DatabaseOperations.createPartitionInfoRow(musicNamespace,pitName,partitionInfo.replicationFactor,partitionInfo.tables,null); - - } - else{ - partitionId = partitionInfo.partitionId; - } - //2) Create a row in the transaction information table - String mriTableIndex = DatabaseOperations.CreateEmptyTitRow(musicNamespace,mriTableName,partitionId,null); - //3) Add owner and tit information to partition info table - RedoRow newRedoRow = new RedoRow(mriTableName,mriTableIndex); - DatabaseOperations.updateRedoRow(musicNamespace,pitName,partitionId,newRedoRow,partitionInfo.owner,null); - //4) Update ttp with the new partition - for(String table: partitionInfo.tables) { - DatabaseOperations.updateTableToPartition(musicNamespace, ttpName, table, partitionId, null); - } - //5) Add it to the redo history table - DatabaseOperations.createRedoHistoryBeginRow(musicNamespace,rhName,newRedoRow,partitionId,null); - //6) Create config for this node - nodeConfigs.add(new NodeConfiguration(String.join(",",partitionInfo.tables),mriTableIndex,mriTableName,partitionId,sqlDatabaseName,partitionInfo.owner,musicTxDigestTableName)); - } - return nodeConfigs; - } - - private void initInternalNamespace() throws MDBCServiceException { - DatabaseOperations.createNamespace(internalNamespace,internalReplicationFactor); - StringBuilder createKeysTableCql = new StringBuilder("CREATE TABLE IF NOT EXISTS ") - .append(internalNamespace) - .append(".unsynced_keys (key text PRIMARY KEY);"); - PreparedQueryObject queryObject = new PreparedQueryObject(); - queryObject.appendQueryString(createKeysTableCql.toString()); - try { - MusicCore.createTable(internalNamespace,"unsynced_keys", queryObject,"critical"); - } catch (MusicServiceException e) { - logger.error("Error creating unsynced keys table" ); - throw new MDBCServiceException("Error creating unsynced keys table"); - } - } - - public static TablesConfiguration readJsonFromFile(String filepath) throws FileNotFoundException { - BufferedReader br; - try { - br = new BufferedReader( - new FileReader(filepath)); - } catch (FileNotFoundException e) { - logger.error(EELFLoggerDelegate.errorLogger,"File was not found when reading json"+e); - throw e; - } - Gson gson = new Gson(); - TablesConfiguration config = gson.fromJson(br, TablesConfiguration.class); - return config; - } - - public class PartitionInformation{ - private List tables; - private String owner; - private String mriTableName; - private String mtxdTableName; - private String partitionId; - private int replicationFactor; - - public List getTables() { - return tables; - } - - public void setTables(List tables) { - this.tables = tables; - } - - public String getOwner() { - return owner; - } - - public void setOwner(String owner) { - this.owner = owner; - } - - public String getMriTableName() { - return mriTableName; - } - - public void setMriTableName(String 
mriTableName) { - this.mriTableName = mriTableName; - } - - public String getPartitionId() { - return partitionId; - } - - public void setPartitionId(String partitionId) { - this.partitionId = partitionId; - } - - public int getReplicationFactor() { - return replicationFactor; - } - - public void setReplicationFactor(int replicationFactor) { - this.replicationFactor = replicationFactor; - } - - public String getMtxdTableName(){ - return mtxdTableName; - } - - public void setMtxdTableName(String mtxdTableName) { - this.mtxdTableName = mtxdTableName; - } - } -} diff --git a/src/main/java/com/att/research/mdbc/configurations/config-0.json b/src/main/java/com/att/research/mdbc/configurations/config-0.json deleted file mode 100644 index 2207a52..0000000 --- a/src/main/java/com/att/research/mdbc/configurations/config-0.json +++ /dev/null @@ -1,16 +0,0 @@ -{ - "sqlDatabaseName": "test", - "partition": { - "musicRangeInformationTable": "transactioninformation", - "musicRangeInformationIndex": "259a7a7c-f741-44ae-8d6e-227a02ddc96e", - "musicTxDigestTable": "musictxdigest", - "partitionId": "ad766447-1adf-4800-aade-9f31a356ab4b", - "lockId": "", - "ranges": [ - { - "table": "table11" - } - ] - }, - "nodeName": "" -} diff --git a/src/main/java/com/att/research/mdbc/configurations/ranges.json b/src/main/java/com/att/research/mdbc/configurations/ranges.json deleted file mode 100644 index 2a792e8..0000000 --- a/src/main/java/com/att/research/mdbc/configurations/ranges.json +++ /dev/null @@ -1,14 +0,0 @@ -{ - "musicRangeInformationTable": "transactioninformation", - "musicRangeInformationIndex": "d0e8ef2e-aeca-4261-8d9d-1679f560b85b", - "partitionId": "798110cf-9c61-4db2-9446-cb2dbab5a143", - "lockId": "", - "ranges": [ - { - "table": "table1" - }, - { - "table": "table2" - } - ] -} diff --git a/src/main/java/com/att/research/mdbc/configurations/tableConfiguration.json b/src/main/java/com/att/research/mdbc/configurations/tableConfiguration.json deleted file mode 100644 index e67dd0b..0000000 --- a/src/main/java/com/att/research/mdbc/configurations/tableConfiguration.json +++ /dev/null @@ -1,19 +0,0 @@ -{ - "partitions" : [ - { - "tables":["table11"], - "owner":"", - "mriTableName":"musicrangeinformation", - "mtxdTableName":"musictxdigest", - "partitionId":"", - "replicationFactor":1 - } - ], - "musicNamespace":"namespace", - "tableToPartitionName":"tabletopartition", - "partitionInformationTableName":"partitioninfo", - "redoHistoryTableName":"redohistory", - "sqlDatabaseName":"test", - "internalNamespace":"music_internal", - "internalReplicationFactor":1 -} diff --git a/src/main/java/com/att/research/mdbc/examples/EtdbTestClient.java b/src/main/java/com/att/research/mdbc/examples/EtdbTestClient.java deleted file mode 100644 index cb43efe..0000000 --- a/src/main/java/com/att/research/mdbc/examples/EtdbTestClient.java +++ /dev/null @@ -1,125 +0,0 @@ -package com.att.research.mdbc.examples; - -import java.sql.*; -import org.apache.calcite.avatica.remote.Driver; - -public class EtdbTestClient { - - public static class Hr { - public final Employee[] emps = { - new Employee(100, "Bill"), - new Employee(200, "Eric"), - new Employee(150, "Sebastian"), - }; - } - - public static class Employee { - public final int empid; - public final String name; - - public Employee(int empid, String name) { - this.empid = empid; - this.name = name; - } - } - - public static void main(String[] args){ - try { - Class.forName("org.apache.calcite.avatica.remote.Driver"); - } catch (ClassNotFoundException e) { - 
e.printStackTrace(); - System.exit(1); - } - Connection connection; - try { - connection = DriverManager.getConnection("jdbc:avatica:remote:url=http://localhost:30000;serialization=protobuf"); - } catch (SQLException e) { - e.printStackTrace(); - return; - } - - try { - connection.setAutoCommit(false); - } catch (SQLException e) { - e.printStackTrace(); - return; - } - - - final String sql = "CREATE TABLE IF NOT EXISTS Persons (\n" + - " PersonID int,\n" + - " LastName varchar(255),\n" + - " FirstName varchar(255),\n" + - " Address varchar(255),\n" + - " City varchar(255)\n" + - ");"; - Statement stmt; - try { - stmt = connection.createStatement(); - } catch (SQLException e) { - e.printStackTrace(); - return; - } - - boolean execute; - try { - execute = stmt.execute(sql); - } catch (SQLException e) { - e.printStackTrace(); - return; - } - - if (execute) { - try { - connection.commit(); - } catch (SQLException e) { - e.printStackTrace(); - } - } - - try { - stmt.close(); - } catch (SQLException e) { - e.printStackTrace(); - } - - final String insertSQL = "INSERT INTO Persons VALUES (1, 'Martinez', 'Juan', 'KACB', 'ATLANTA');"; - Statement insertStmt; - try { - insertStmt = connection.createStatement(); - } catch (SQLException e) { - e.printStackTrace(); - return; - } - - try { - execute = insertStmt.execute(insertSQL); - } catch (SQLException e) { - e.printStackTrace(); - return; - } - - try { - connection.commit(); - } catch (SQLException e) { - e.printStackTrace(); - return; - } - - try { - stmt.close(); - insertStmt.close(); - } catch (SQLException e) { - e.printStackTrace(); - } - - try { - connection.commit(); - connection.close(); - } catch (SQLException e) { - e.printStackTrace(); - } - - - } -} diff --git a/src/main/java/com/att/research/mdbc/mixins/Cassandra2Mixin.java b/src/main/java/com/att/research/mdbc/mixins/Cassandra2Mixin.java deleted file mode 100755 index 1e57e60..0000000 --- a/src/main/java/com/att/research/mdbc/mixins/Cassandra2Mixin.java +++ /dev/null @@ -1,287 +0,0 @@ -package com.att.research.mdbc.mixins; - -import java.sql.Types; -import java.util.ArrayList; -import java.util.Date; -import java.util.HashMap; -import java.util.List; -import java.util.Map; -import java.util.Properties; - -import org.json.JSONObject; -import org.json.JSONTokener; -import org.onap.music.datastore.PreparedQueryObject; -import org.onap.music.exceptions.MusicServiceException; -import org.onap.music.main.MusicCore; -import org.onap.music.main.ReturnType; - -import com.att.research.logging.EELFLoggerDelegate; -import com.att.research.mdbc.DatabasePartition; -import com.att.research.mdbc.TableInfo; -import com.datastax.driver.core.ResultSet; -import com.datastax.driver.core.Row; - -/** - * This class provides the methods that MDBC needs to access Cassandra directly in order to provide persistence - * to calls to the user's DB. It stores dirty row references in one table (called DIRTY____) rather than one dirty - * table per real table (as {@link com.att.research.mdbc.mixins.CassandraMixin} does). - * - * @author Robert P. 
Eby - */ -public class Cassandra2Mixin extends CassandraMixin { - private static final String DIRTY_TABLE = "DIRTY____"; // it seems Cassandra won't allow __DIRTY__ - private boolean dirty_table_created = false; - - private EELFLoggerDelegate logger = EELFLoggerDelegate.getLogger(Cassandra2Mixin.class); - - public Cassandra2Mixin() { - super(); - } - - public Cassandra2Mixin(String url, Properties info, DatabasePartition ranges) throws MusicServiceException { - super(url, info,ranges); - } - - /** - * Get the name of this MusicInterface mixin object. - * @return the name - */ - @Override - public String getMixinName() { - return "cassandra2"; - } - /** - * Do what is needed to close down the MUSIC connection. - */ - @Override - public void close() { - super.close(); - } - - /** - * This method creates a keyspace in Music/Cassandra to store the data corresponding to the SQL tables. - * The keyspace name comes from the initialization properties passed to the JDBC driver. - */ - @Override - public void createKeyspace() { - super.createKeyspace(); - } - - /** - * This method performs all necessary initialization in Music/Cassandra to store the table tableName. - * @param tableName the table to initialize MUSIC for - */ - @Override - public void initializeMusicForTable(TableInfo ti, String tableName) { - super.initializeMusicForTable(ti, tableName); - } - - /** - * Create a dirty row table for the real table tableName. The primary keys columns from the real table are recreated in - * the dirty table, along with a "REPLICA__" column that names the replica that should update it's internal state from MUSIC. - * @param tableName the table to create a "dirty" table for - */ - @Override - public void createDirtyRowTable(TableInfo ti, String tableName) { - if (!dirty_table_created) { - String cql = String.format("CREATE TABLE IF NOT EXISTS %s.%s (tablename TEXT, replica TEXT, keyset TEXT, PRIMARY KEY(tablename, replica, keyset));", music_ns, DIRTY_TABLE); - executeMusicWriteQuery(cql); - dirty_table_created = true; - } - } - /** - * Drop the dirty row table for tableName from MUSIC. - * @param tableName the table being dropped - */ - @Override - public void dropDirtyRowTable(String tableName) { - // no-op - } - - private String buildJSON(TableInfo ti, String tableName, Object[] keys) { - // Build JSON string representing this keyset - JSONObject jo = new JSONObject(); - int j = 0; - for (int i = 0; i < ti.columns.size(); i++) { - if (ti.iskey.get(i)) { - jo.put(ti.columns.get(i), keys[j++]); - } - } - return jo.toString(); - } - /** - * Remove the entries from the dirty row (for this replica) that correspond to a set of primary keys - * @param tableName the table we are removing dirty entries from - * @param keys the primary key values to use in the DELETE. Note: this is *only* the primary keys, not a full table row. - */ - @Override - public void cleanDirtyRow(TableInfo ti, String tableName, JSONObject keys) { - String cql = String.format("DELETE FROM %s.%s WHERE tablename = ? AND replica = ? 
AND keyset = ?;", music_ns, DIRTY_TABLE); - //Session sess = getMusicSession(); - //PreparedStatement ps = getPreparedStatementFromCache(cql); - Object[] values = new Object[] { tableName, myId, keys }; - logger.debug(EELFLoggerDelegate.applicationLogger,"Executing MUSIC write:"+ cql + " with values " + values[0] + " " + values[1] + " " + values[2]); - - PreparedQueryObject pQueryObject = new PreparedQueryObject(); - pQueryObject.appendQueryString(cql); - pQueryObject.addValue(tableName); - pQueryObject.addValue(myId); - pQueryObject.addValue(keys); - ReturnType rt = MusicCore.eventualPut(pQueryObject); - if(rt.getResult().getResult().toLowerCase().equals("failure")) { - logger.error(EELFLoggerDelegate.errorLogger, "Failure while eventualPut...: "+rt.getMessage()); - } - /*BoundStatement bound = ps.bind(values); - bound.setReadTimeoutMillis(60000); - synchronized (sess) { - sess.execute(bound); - }*/ - } - /** - * Get a list of "dirty rows" for a table. The dirty rows returned apply only to this replica, - * and consist of a Map of primary key column names and values. - * @param tableName the table we are querying for - * @return a list of maps; each list item is a map of the primary key names and values for that "dirty row". - */ - @SuppressWarnings("deprecation") - @Override - public List> getDirtyRows(TableInfo ti, String tableName) { - String cql = String.format("SELECT keyset FROM %s.%s WHERE tablename = ? AND replica = ?;", music_ns, DIRTY_TABLE); - logger.debug(EELFLoggerDelegate.applicationLogger,"Executing MUSIC write:"+ cql + " with values " + tableName + " " + myId); - - PreparedQueryObject pQueryObject = new PreparedQueryObject(); - pQueryObject.appendQueryString(cql); - pQueryObject.addValue(tableName); - pQueryObject.addValue(myId); - ResultSet results = null; - try { - results = MusicCore.get(pQueryObject); - } catch (MusicServiceException e) { - e.printStackTrace(); - } - /*Session sess = getMusicSession(); - PreparedStatement ps = getPreparedStatementFromCache(cql); - BoundStatement bound = ps.bind(new Object[] { tableName, myId }); - bound.setReadTimeoutMillis(60000); - ResultSet results = null; - synchronized (sess) { - results = sess.execute(bound); - }*/ - List> list = new ArrayList>(); - for (Row row : results) { - String json = row.getString("keyset"); - JSONObject jo = new JSONObject(new JSONTokener(json)); - Map objs = new HashMap(); - for (String colname : jo.keySet()) { - int coltype = ti.getColType(colname); - switch (coltype) { - case Types.BIGINT: - objs.put(colname, jo.getLong(colname)); - break; - case Types.BOOLEAN: - objs.put(colname, jo.getBoolean(colname)); - break; - case Types.BLOB: - logger.error(EELFLoggerDelegate.errorLogger,"WE DO NOT SUPPORT BLOBS AS PRIMARY KEYS!! COLUMN NAME="+colname); - // throw an exception here??? - break; - case Types.DOUBLE: - objs.put(colname, jo.getDouble(colname)); - break; - case Types.INTEGER: - objs.put(colname, jo.getInt(colname)); - break; - case Types.TIMESTAMP: - objs.put(colname, new Date(jo.getString(colname))); - break; - case Types.VARCHAR: - default: - objs.put(colname, jo.getString(colname)); - break; - } - } - list.add(objs); - } - return list; - } - - /** - * Drops the named table and its dirty row table (for all replicas) from MUSIC. The dirty row table is dropped first. 
- * @param tableName This is the table that has been dropped - */ - @Override - public void clearMusicForTable(String tableName) { - super.clearMusicForTable(tableName); - } - /** - * This function is called whenever there is a DELETE to a row on a local SQL table, wherein it updates the - * MUSIC/Cassandra tables (both dirty bits and actual data) corresponding to the SQL write. MUSIC propagates - * it to the other replicas. - * - * @param tableName This is the table that has changed. - * @param oldRow This is a copy of the old row being deleted - */ - public void deleteFromEntityTableInMusic(TableInfo ti, String tableName, JSONObject oldRow) { - super.deleteFromEntityTableInMusic(ti, tableName, oldRow); - } - /** - * This method is called whenever there is a SELECT on a local SQL table, wherein it first checks the local - * dirty bits table to see if there are any keys in Cassandra whose value has not yet been sent to SQL - * @param tableName This is the table on which the select is being performed - */ - @Override - public void readDirtyRowsAndUpdateDb(DBInterface dbi, String tableName) { - super.readDirtyRowsAndUpdateDb(dbi, tableName); - } - - /** - * This method is called whenever there is an INSERT or UPDATE to a local SQL table, wherein it updates the - * MUSIC/Cassandra tables (both dirty bits and actual data) corresponding to the SQL write. Music propagates - * it to the other replicas. - * - * @param tableName This is the table that has changed. - * @param changedRow This is information about the row that has changed - */ - @Override - public void updateDirtyRowAndEntityTableInMusic(TableInfo ti, String tableName, JSONObject changedRow) { - super.updateDirtyRowAndEntityTableInMusic(ti, tableName, changedRow); - } - - /** - * Mark rows as "dirty" in the dirty rows table for tableName. Rows are marked for all replicas but - * this one (this replica already has the up to date data). - * @param tableName the table we are marking dirty - * @param keys an ordered list of the values being put into the table. The values that correspond to the tables' - * primary key are copied into the dirty row table. 
- */ - @Deprecated - public void markDirtyRow(TableInfo ti, String tableName, Object[] keys) { - String cql = String.format("INSERT INTO %s.%s (tablename, replica, keyset) VALUES (?, ?, ?);", music_ns, DIRTY_TABLE); - /*Session sess = getMusicSession(); - PreparedStatement ps = getPreparedStatementFromCache(cql);*/ - @SuppressWarnings("unused") - Object[] values = new Object[] { tableName, "", buildJSON(ti, tableName, keys) }; - PreparedQueryObject pQueryObject = null; - for (String repl : allReplicaIds) { - /*if (!repl.equals(myId)) { - values[1] = repl; - logger.info(EELFLoggerDelegate.applicationLogger,"Executing MUSIC write:"+ cql + " with values " + values[0] + " " + values[1] + " " + values[2]); - - BoundStatement bound = ps.bind(values); - bound.setReadTimeoutMillis(60000); - synchronized (sess) { - sess.execute(bound); - } - }*/ - pQueryObject = new PreparedQueryObject(); - pQueryObject.appendQueryString(cql); - pQueryObject.addValue(tableName); - pQueryObject.addValue(repl); - pQueryObject.addValue(buildJSON(ti, tableName, keys)); - ReturnType rt = MusicCore.eventualPut(pQueryObject); - if(rt.getResult().getResult().toLowerCase().equals("failure")) { - System.out.println("Failure while critical put..."+rt.getMessage()); - } - } - } -} diff --git a/src/main/java/com/att/research/mdbc/mixins/CassandraMixin.java b/src/main/java/com/att/research/mdbc/mixins/CassandraMixin.java deleted file mode 100755 index 63c95c2..0000000 --- a/src/main/java/com/att/research/mdbc/mixins/CassandraMixin.java +++ /dev/null @@ -1,1261 +0,0 @@ -package com.att.research.mdbc.mixins; - -import java.io.IOException; -import java.io.Reader; -import java.nio.ByteBuffer; -import java.sql.Types; -import java.util.ArrayList; -import java.util.Collections; -import java.util.HashMap; -import java.util.HashSet; -import java.util.List; -import java.util.Map; -import java.util.Properties; -import java.util.Set; -import java.util.TreeSet; -import java.util.UUID; - -import com.att.research.mdbc.*; -import com.att.research.mdbc.tables.PartitionInformation; -import com.att.research.mdbc.tables.MusixTxDigestId; -import com.att.research.mdbc.tables.StagingTable; -import com.att.research.mdbc.tables.MriReference; -import com.att.research.mdbc.tables.MusicRangeInformationRow; -import com.att.research.mdbc.tables.TxCommitProgress; - -import org.json.JSONObject; -import org.onap.music.datastore.CassaLockStore; -import org.onap.music.datastore.PreparedQueryObject; -import org.onap.music.exceptions.MusicLockingException; -import org.onap.music.exceptions.MusicQueryException; -import org.onap.music.exceptions.MusicServiceException; -import org.onap.music.main.MusicCore; -import org.onap.music.main.ResultType; -import org.onap.music.main.ReturnType; - -import com.att.research.exceptions.MDBCServiceException; -import com.att.research.logging.EELFLoggerDelegate; -import com.datastax.driver.core.BoundStatement; -import com.datastax.driver.core.ColumnDefinitions; -import com.datastax.driver.core.DataType; -import com.datastax.driver.core.PreparedStatement; -import com.datastax.driver.core.ResultSet; -import com.datastax.driver.core.Row; -import com.datastax.driver.core.Session; - -/** - * This class provides the methods that MDBC needs to access Cassandra directly in order to provide persistence - * to calls to the user's DB. It does not do any table or row locking. - * - *
- * This code only supports the following limited list of H2 and Cassandra data types:
- *
- *   H2 Data Type    Mapped to Cassandra Data Type
- *   BIGINT          BIGINT
- *   BOOLEAN         BOOLEAN
- *   CLOB            BLOB
- *   DOUBLE          DOUBLE
- *   INTEGER         INT
- *   TIMESTAMP       TIMESTAMP
- *   VARBINARY       BLOB
- *   VARCHAR         VARCHAR
- * - * @author Robert P. Eby - */ -public class CassandraMixin implements MusicInterface { - /** The property name to use to identify this replica to MusicSqlManager */ - public static final String KEY_MY_ID = "myid"; - /** The property name to use for the comma-separated list of replica IDs. */ - public static final String KEY_REPLICAS = "replica_ids"; - /** The property name to use to identify the IP address for Cassandra. */ - public static final String KEY_MUSIC_ADDRESS = "music_address"; - /** The property name to use to provide the replication factor for Cassandra. */ - public static final String KEY_MUSIC_RFACTOR = "music_rfactor"; - /** The property name to use to provide the replication factor for Cassandra. */ - public static final String KEY_MUSIC_NAMESPACE = "music_namespace"; - /** The default property value to use for the Cassandra keyspace. */ - public static final String DEFAULT_MUSIC_KEYSPACE = "mdbc"; - /** The default property value to use for the Cassandra IP address. */ - public static final String DEFAULT_MUSIC_ADDRESS = "localhost"; - /** The default property value to use for the Cassandra replication factor. */ - public static final int DEFAULT_MUSIC_RFACTOR = 1; - /** The default primary string column, if none is provided. */ - public static final String MDBC_PRIMARYKEY_NAME = "mdbc_cuid"; - /** Type of the primary key, if none is defined by the user */ - public static final String MDBC_PRIMARYKEY_TYPE = "uuid"; - /** Namespace for the tables in MUSIC (Cassandra) */ - public static final String DEFAULT_MUSIC_NAMESPACE = "namespace"; - - /** Name of the tables required for MDBC */ - public static final String TABLE_TO_PARTITION_TABLE_NAME = "tabletopartition"; - public static final String PARTITION_INFORMATION_TABLE_NAME = "partitioninfo"; - public static final String REDO_HISTORY_TABLE_NAME= "redohistory"; - //\TODO Add logic to change the names when required and create the tables when necessary - private String musicTxDigestTableName = "musictxdigest"; - private String musicRangeInformationTableName = "musicrangeinformation"; - - private EELFLoggerDelegate logger = EELFLoggerDelegate.getLogger(CassandraMixin.class); - - private static final Map typemap = new HashMap<>(); - static { - // We only support the following type mappings currently (from DB -> Cassandra). - // Anything else will likely cause a NullPointerException - typemap.put(Types.BIGINT, "BIGINT"); // aka. 
IDENTITY - typemap.put(Types.BLOB, "VARCHAR"); - typemap.put(Types.BOOLEAN, "BOOLEAN"); - typemap.put(Types.CLOB, "BLOB"); - typemap.put(Types.DATE, "VARCHAR"); - typemap.put(Types.DOUBLE, "DOUBLE"); - typemap.put(Types.DECIMAL, "DECIMAL"); - typemap.put(Types.INTEGER, "INT"); - //typemap.put(Types.TIMESTAMP, "TIMESTAMP"); - typemap.put(Types.SMALLINT, "SMALLINT"); - typemap.put(Types.TIMESTAMP, "VARCHAR"); - typemap.put(Types.VARBINARY, "BLOB"); - typemap.put(Types.VARCHAR, "VARCHAR"); - typemap.put(Types.CHAR, "VARCHAR"); - //The "Hacks", these don't have a direct mapping - //typemap.put(Types.DATE, "VARCHAR"); - //typemap.put(Types.DATE, "TIMESTAMP"); - } - - protected DatabasePartition ranges; - protected final String music_ns; - protected final String myId; - protected final String[] allReplicaIds; - private final String musicAddress; - private final int music_rfactor; - private MusicConnector mCon = null; - private Session musicSession = null; - private boolean keyspace_created = false; - private Map ps_cache = new HashMap<>(); - private Set in_progress = Collections.synchronizedSet(new HashSet()); - - public CassandraMixin() { - //this.logger = null; - this.musicAddress = null; - this.music_ns = null; - this.music_rfactor = 0; - this.myId = null; - this.allReplicaIds = null; - } - - public CassandraMixin(String url, Properties info, DatabasePartition ranges) throws MusicServiceException { - this.ranges = ranges; - // Default values -- should be overridden in the Properties - // Default to using the host_ids of the various peers as the replica IDs (this is probably preferred) - this.musicAddress = info.getProperty(KEY_MUSIC_ADDRESS, DEFAULT_MUSIC_ADDRESS); - logger.info(EELFLoggerDelegate.applicationLogger,"MusicSqlManager: musicAddress="+musicAddress); - - String s = info.getProperty(KEY_MUSIC_RFACTOR); - this.music_rfactor = (s == null) ? DEFAULT_MUSIC_RFACTOR : Integer.parseInt(s); - - this.myId = info.getProperty(KEY_MY_ID, getMyHostId()); - logger.info(EELFLoggerDelegate.applicationLogger,"MusicSqlManager: myId="+myId); - - - this.allReplicaIds = info.getProperty(KEY_REPLICAS, getAllHostIds()).split(","); - logger.info(EELFLoggerDelegate.applicationLogger,"MusicSqlManager: allReplicaIds="+info.getProperty(KEY_REPLICAS, this.myId)); - - this.music_ns = info.getProperty(KEY_MUSIC_NAMESPACE,DEFAULT_MUSIC_NAMESPACE); - logger.info(EELFLoggerDelegate.applicationLogger,"MusicSqlManager: music_ns="+music_ns); - musicRangeInformationTableName = "musicrangeinformation"; - createMusicKeyspace(); - } - - private void createMusicKeyspace() throws MusicServiceException { - - Map replicationInfo = new HashMap<>(); - replicationInfo.put("'class'", "'SimpleStrategy'"); - replicationInfo.put("'replication_factor'", music_rfactor); - - PreparedQueryObject queryObject = new PreparedQueryObject(); - queryObject.appendQueryString( - "CREATE KEYSPACE " + this.music_ns + " WITH REPLICATION = " + replicationInfo.toString().replaceAll("=", ":")); - - try { - MusicCore.nonKeyRelatedPut(queryObject, "eventual"); - } catch (MusicServiceException e) { - if (e.getMessage().equals("Keyspace "+this.music_ns+" already exists")) { - // ignore - } else { - throw(e); - } - } - } - - private String getMyHostId() { - ResultSet rs = executeMusicRead("SELECT HOST_ID FROM SYSTEM.LOCAL"); - Row row = rs.one(); - return (row == null) ? 
"UNKNOWN" : row.getUUID("HOST_ID").toString(); - } - private String getAllHostIds() { - ResultSet results = executeMusicRead("SELECT HOST_ID FROM SYSTEM.PEERS"); - StringBuilder sb = new StringBuilder(myId); - for (Row row : results) { - sb.append(","); - sb.append(row.getUUID("HOST_ID").toString()); - } - return sb.toString(); - } - - /** - * Get the name of this MusicInterface mixin object. - * @return the name - */ - @Override - public String getMixinName() { - return "cassandra"; - } - /** - * Do what is needed to close down the MUSIC connection. - */ - @Override - public void close() { - if (musicSession != null) { - musicSession.close(); - musicSession = null; - } - } - @Override - public void initializeMetricDataStructures() throws MDBCServiceException { - try { - DatabaseOperations.CreateMusicTxDigest(-1, music_ns, musicTxDigestTableName);//\TODO If we start partitioning the data base, we would need to use the redotable number - DatabaseOperations.CreateMusicRangeInformationTable(music_ns, musicRangeInformationTableName); - DatabaseOperations.CreateTableToPartitionTable(music_ns, TABLE_TO_PARTITION_TABLE_NAME); - DatabaseOperations.CreatePartitionInfoTable(music_ns, PARTITION_INFORMATION_TABLE_NAME); - DatabaseOperations.CreateRedoHistoryTable(music_ns, REDO_HISTORY_TABLE_NAME); - } - catch(MDBCServiceException e){ - logger.error(EELFLoggerDelegate.errorLogger,"Error creating tables in MUSIC"); - } - } - - /** - * This method creates a keyspace in Music/Cassandra to store the data corresponding to the SQL tables. - * The keyspace name comes from the initialization properties passed to the JDBC driver. - */ - @Override - public void createKeyspace() { - if (keyspace_created == false) { - String cql = String.format("CREATE KEYSPACE IF NOT EXISTS %s WITH REPLICATION = { 'class' : 'SimpleStrategy', 'replication_factor' : %d };", music_ns, music_rfactor); - executeMusicWriteQuery(cql); - keyspace_created = true; - } - } - - /** - * This method performs all necessary initialization in Music/Cassandra to store the table tableName. - * @param tableName the table to initialize MUSIC for - */ - @Override - public void initializeMusicForTable(TableInfo ti, String tableName) { - /** - * This code creates two tables for every table in SQL: - * (i) a table with the exact same name as the SQL table storing the SQL data. - * (ii) a "dirty bits" table that stores the keys in the Cassandra table that are yet to be - * updated in the SQL table (they were written by some other node). - */ - StringBuilder fields = new StringBuilder(); - StringBuilder prikey = new StringBuilder(); - String pfx = "", pfx2 = ""; - for (int i = 0; i < ti.columns.size(); i++) { - fields.append(pfx) - .append(ti.columns.get(i)) - .append(" ") - .append(typemap.get(ti.coltype.get(i))); - if (ti.iskey.get(i)) { - // Primary key column - prikey.append(pfx2).append(ti.columns.get(i)); - pfx2 = ", "; - } - pfx = ", "; - } - if (prikey.length()==0) { - fields.append(pfx).append(MDBC_PRIMARYKEY_NAME) - .append(" ") - .append(MDBC_PRIMARYKEY_TYPE); - prikey.append("mdbc_cuid"); - } - String cql = String.format("CREATE TABLE IF NOT EXISTS %s.%s (%s, PRIMARY KEY (%s));", music_ns, tableName, fields.toString(), prikey.toString()); - executeMusicWriteQuery(cql); - } - - // ************************************************** - // Dirty Tables (in MUSIC) methods - // ************************************************** - - /** - * Create a dirty row table for the real table tableName. 
The primary keys columns from the real table are recreated in - * the dirty table, along with a "REPLICA__" column that names the replica that should update it's internal state from MUSIC. - * @param tableName the table to create a "dirty" table for - */ - @Override - public void createDirtyRowTable(TableInfo ti, String tableName) { - // create dirtybitsTable at all replicas -// for (String repl : allReplicaIds) { -//// String dirtyRowsTableName = "dirty_"+tableName+"_"+allReplicaIds[i]; -//// String dirtyTableQuery = "CREATE TABLE IF NOT EXISTS "+music_ns+"."+ dirtyRowsTableName+" (dirtyRowKeys text PRIMARY KEY);"; -// cql = String.format("CREATE TABLE IF NOT EXISTS %s.DIRTY_%s_%s (dirtyRowKeys TEXT PRIMARY KEY);", music_ns, tableName, repl); -// executeMusicWriteQuery(cql); -// } - StringBuilder ddl = new StringBuilder("REPLICA__ TEXT"); - StringBuilder cols = new StringBuilder("REPLICA__"); - for (int i = 0; i < ti.columns.size(); i++) { - if (ti.iskey.get(i)) { - // Only use the primary keys columns in the "Dirty" table - ddl.append(", ") - .append(ti.columns.get(i)) - .append(" ") - .append(typemap.get(ti.coltype.get(i))); - cols.append(", ").append(ti.columns.get(i)); - } - } - if(cols.length()==0) { - //fixme - System.err.println("Create dirty row table found no primary key"); - } - ddl.append(", PRIMARY KEY(").append(cols).append(")"); - String cql = String.format("CREATE TABLE IF NOT EXISTS %s.DIRTY_%s (%s);", music_ns, tableName, ddl.toString()); - executeMusicWriteQuery(cql); - } - /** - * Drop the dirty row table for tableName from MUSIC. - * @param tableName the table being dropped - */ - @Override - public void dropDirtyRowTable(String tableName) { - String cql = String.format("DROP TABLE %s.DIRTY_%s;", music_ns, tableName); - executeMusicWriteQuery(cql); - } - /** - * Mark rows as "dirty" in the dirty rows table for tableName. Rows are marked for all replicas but - * this one (this replica already has the up to date data). - * @param tableName the table we are marking dirty - * @param keys an ordered list of the values being put into the table. The values that correspond to the tables' - * primary key are copied into the dirty row table. 
- */ - @Override - public void markDirtyRow(TableInfo ti, String tableName, JSONObject keys) { - Object[] keyObj = getObjects(ti,tableName, keys); - StringBuilder cols = new StringBuilder("REPLICA__"); - PreparedQueryObject pQueryObject = null; - StringBuilder vals = new StringBuilder("?"); - List vallist = new ArrayList(); - vallist.add(""); // placeholder for replica - for (int i = 0; i < ti.columns.size(); i++) { - if (ti.iskey.get(i)) { - cols.append(", ").append(ti.columns.get(i)); - vals.append(", ").append("?"); - vallist.add(keyObj[i]); - } - } - if(cols.length()==0) { - //FIXME - System.err.println("markDIrtyRow need to fix primary key"); - } - String cql = String.format("INSERT INTO %s.DIRTY_%s (%s) VALUES (%s);", music_ns, tableName, cols.toString(), vals.toString()); - /*Session sess = getMusicSession(); - PreparedStatement ps = getPreparedStatementFromCache(cql);*/ - String primaryKey; - if(ti.hasKey()) { - primaryKey = getMusicKeyFromRow(ti,tableName, keys); - } - else { - primaryKey = getMusicKeyFromRowWithoutPrimaryIndexes(ti,tableName, keys); - } - System.out.println("markDirtyRow: PK value: "+primaryKey); - - Object pkObj = null; - for (int i = 0; i < ti.columns.size(); i++) { - if (ti.iskey.get(i)) { - pkObj = keyObj[i]; - } - } - for (String repl : allReplicaIds) { - pQueryObject = new PreparedQueryObject(); - pQueryObject.appendQueryString(cql); - pQueryObject.addValue(tableName); - pQueryObject.addValue(repl); - pQueryObject.addValue(pkObj); - updateMusicDB(tableName, primaryKey, pQueryObject); - //if (!repl.equals(myId)) { - /*logger.info(EELFLoggerDelegate.applicationLogger,"Executing MUSIC write:"+ cql); - vallist.set(0, repl); - BoundStatement bound = ps.bind(vallist.toArray()); - bound.setReadTimeoutMillis(60000); - synchronized (sess) { - sess.execute(bound); - }*/ - //} - - } - } - /** - * Remove the entries from the dirty row (for this replica) that correspond to a set of primary keys - * @param tableName the table we are removing dirty entries from - * @param keys the primary key values to use in the DELETE. Note: this is *only* the primary keys, not a full table row. - */ - @Override - public void cleanDirtyRow(TableInfo ti, String tableName, JSONObject keys) { - Object[] keysObjects = getObjects(ti,tableName,keys); - PreparedQueryObject pQueryObject = new PreparedQueryObject(); - StringBuilder cols = new StringBuilder("REPLICA__=?"); - List vallist = new ArrayList(); - vallist.add(myId); - int n = 0; - for (int i = 0; i < ti.columns.size(); i++) { - if (ti.iskey.get(i)) { - cols.append(" AND ").append(ti.columns.get(i)).append("=?"); - vallist.add(keysObjects[n++]); - pQueryObject.addValue(keysObjects[n++]); - } - } - String cql = String.format("DELETE FROM %s.DIRTY_%s WHERE %s;", music_ns, tableName, cols.toString()); - logger.debug(EELFLoggerDelegate.applicationLogger,"Executing MUSIC write:"+ cql); - pQueryObject.appendQueryString(cql); - ReturnType rt = MusicCore.eventualPut(pQueryObject); - if(rt.getResult().getResult().toLowerCase().equals("failure")) { - System.out.println("Failure while cleanDirtyRow..."+rt.getMessage()); - } - /*Session sess = getMusicSession(); - PreparedStatement ps = getPreparedStatementFromCache(cql); - BoundStatement bound = ps.bind(vallist.toArray()); - bound.setReadTimeoutMillis(60000); - synchronized (sess) { - sess.execute(bound); - }*/ - } - /** - * Get a list of "dirty rows" for a table. The dirty rows returned apply only to this replica, - * and consist of a Map of primary key column names and values. 
- * @param tableName the table we are querying for - * @return a list of maps; each list item is a map of the primary key names and values for that "dirty row". - */ - @Override - public List> getDirtyRows(TableInfo ti, String tableName) { - String cql = String.format("SELECT * FROM %s.DIRTY_%s WHERE REPLICA__=?;", music_ns, tableName); - ResultSet results = null; - logger.debug(EELFLoggerDelegate.applicationLogger,"Executing MUSIC write:"+ cql); - - /*Session sess = getMusicSession(); - PreparedStatement ps = getPreparedStatementFromCache(cql); - BoundStatement bound = ps.bind(new Object[] { myId }); - bound.setReadTimeoutMillis(60000); - synchronized (sess) { - results = sess.execute(bound); - }*/ - PreparedQueryObject pQueryObject = new PreparedQueryObject(); - pQueryObject.appendQueryString(cql); - try { - results = MusicCore.get(pQueryObject); - } catch (MusicServiceException e) { - - e.printStackTrace(); - } - - ColumnDefinitions cdef = results.getColumnDefinitions(); - List> list = new ArrayList>(); - for (Row row : results) { - Map objs = new HashMap(); - for (int i = 0; i < cdef.size(); i++) { - String colname = cdef.getName(i).toUpperCase(); - String coltype = cdef.getType(i).getName().toString().toUpperCase(); - if (!colname.equals("REPLICA__")) { - switch (coltype) { - case "BIGINT": - objs.put(colname, row.getLong(colname)); - break; - case "BOOLEAN": - objs.put(colname, row.getBool(colname)); - break; - case "BLOB": - objs.put(colname, row.getString(colname)); - break; - case "DATE": - objs.put(colname, row.getString(colname)); - break; - case "DOUBLE": - objs.put(colname, row.getDouble(colname)); - break; - case "DECIMAL": - objs.put(colname, row.getDecimal(colname)); - break; - case "INT": - objs.put(colname, row.getInt(colname)); - break; - case "TIMESTAMP": - objs.put(colname, row.getTimestamp(colname)); - break; - case "VARCHAR": - default: - objs.put(colname, row.getString(colname)); - break; - } - } - } - list.add(objs); - } - return list; - } - - /** - * Drops the named table and its dirty row table (for all replicas) from MUSIC. The dirty row table is dropped first. - * @param tableName This is the table that has been dropped - */ - @Override - public void clearMusicForTable(String tableName) { - dropDirtyRowTable(tableName); - String cql = String.format("DROP TABLE %s.%s;", music_ns, tableName); - executeMusicWriteQuery(cql); - } - /** - * This function is called whenever there is a DELETE to a row on a local SQL table, wherein it updates the - * MUSIC/Cassandra tables (both dirty bits and actual data) corresponding to the SQL write. MUSIC propagates - * it to the other replicas. - * - * @param tableName This is the table that has changed. 
- * @param oldRow This is a copy of the old row being deleted - */ - @Override - public void deleteFromEntityTableInMusic(TableInfo ti, String tableName, JSONObject oldRow) { - Object[] objects = getObjects(ti,tableName,oldRow); - PreparedQueryObject pQueryObject = new PreparedQueryObject(); - if (ti.hasKey()) { - assert(ti.columns.size() == objects.length); - } else { - assert(ti.columns.size()+1 == objects.length); - } - - StringBuilder where = new StringBuilder(); - List vallist = new ArrayList(); - String pfx = ""; - for (int i = 0; i < ti.columns.size(); i++) { - if (ti.iskey.get(i)) { - where.append(pfx) - .append(ti.columns.get(i)) - .append("=?"); - vallist.add(objects[i]); - pQueryObject.addValue(objects[i]); - pfx = " AND "; - } - } - if (!ti.hasKey()) { - where.append(MDBC_PRIMARYKEY_NAME + "=?"); - //\FIXME this is wrong, old row is not going to contain the UUID, this needs to be fixed - vallist.add(UUID.fromString((String) objects[0])); - pQueryObject.addValue(UUID.fromString((String) objects[0])); - } - - String cql = String.format("DELETE FROM %s.%s WHERE %s;", music_ns, tableName, where.toString()); - logger.error(EELFLoggerDelegate.errorLogger,"Executing MUSIC write:"+ cql); - pQueryObject.appendQueryString(cql); - - /*PreparedStatement ps = getPreparedStatementFromCache(cql); - BoundStatement bound = ps.bind(vallist.toArray()); - bound.setReadTimeoutMillis(60000); - Session sess = getMusicSession(); - synchronized (sess) { - sess.execute(bound); - }*/ - String primaryKey = getMusicKeyFromRow(ti,tableName, oldRow); - if(MusicMixin.criticalTables.contains(tableName)) { - ReturnType rt = null; - try { - rt = MusicCore.atomicPut(music_ns, tableName, primaryKey, pQueryObject, null); - } catch (MusicLockingException e) { - e.printStackTrace(); - } catch (MusicServiceException e) { - e.printStackTrace(); - } catch (MusicQueryException e) { - e.printStackTrace(); - } - if(rt.getResult().getResult().toLowerCase().equals("failure")) { - System.out.println("Failure while critical put..."+rt.getMessage()); - } - } else { - ReturnType rt = MusicCore.eventualPut(pQueryObject); - if(rt.getResult().getResult().toLowerCase().equals("failure")) { - System.out.println("Failure while critical put..."+rt.getMessage()); - } - } - // Mark the dirty rows in music for all the replicas but us - markDirtyRow(ti,tableName, oldRow); - } - - public Set getMusicTableSet(String ns) { - Set set = new TreeSet(); - String cql = String.format("SELECT TABLE_NAME FROM SYSTEM_SCHEMA.TABLES WHERE KEYSPACE_NAME = '%s'", ns); - ResultSet rs = executeMusicRead(cql); - for (Row row : rs) { - set.add(row.getString("TABLE_NAME").toUpperCase()); - } - return set; - } - /** - * This method is called whenever there is a SELECT on a local SQL table, wherein it first checks the local - * dirty bits table to see if there are any keys in Cassandra whose value has not yet been sent to SQL - * @param tableName This is the table on which the select is being performed - */ - @Override - public void readDirtyRowsAndUpdateDb(DBInterface dbi, String tableName) { - // Read dirty rows of this table from Music - TableInfo ti = dbi.getTableInfo(tableName); - List> objlist = getDirtyRows(ti,tableName); - PreparedQueryObject pQueryObject = null; - String pre_cql = String.format("SELECT * FROM %s.%s WHERE ", music_ns, tableName); - List vallist = new ArrayList(); - StringBuilder sb = new StringBuilder(); - //\TODO Perform a batch operation instead of each row at a time - for (Map map : objlist) { - pQueryObject = new 
PreparedQueryObject(); - sb.setLength(0); - vallist.clear(); - String pfx = ""; - for (String key : map.keySet()) { - sb.append(pfx).append(key).append("=?"); - vallist.add(map.get(key)); - pQueryObject.addValue(map.get(key)); - pfx = " AND "; - } - - String cql = pre_cql + sb.toString(); - System.out.println("readDirtyRowsAndUpdateDb: cql: "+cql); - pQueryObject.appendQueryString(cql); - ResultSet dirtyRows = null; - try { - //\TODO Why is this an eventual put?, this should be an atomic - dirtyRows = MusicCore.get(pQueryObject); - } catch (MusicServiceException e) { - - e.printStackTrace(); - } - /* - Session sess = getMusicSession(); - PreparedStatement ps = getPreparedStatementFromCache(cql); - BoundStatement bound = ps.bind(vallist.toArray()); - bound.setReadTimeoutMillis(60000); - ResultSet dirtyRows = null; - synchronized (sess) { - dirtyRows = sess.execute(bound); - }*/ - List rows = dirtyRows.all(); - if (rows.isEmpty()) { - // No rows, the row must have been deleted - deleteRowFromSqlDb(dbi,tableName, map); - } else { - for (Row row : rows) { - writeMusicRowToSQLDb(dbi,tableName, row); - } - } - } - } - - private void deleteRowFromSqlDb(DBInterface dbi, String tableName, Map map) { - dbi.deleteRowFromSqlDb(tableName, map); - TableInfo ti = dbi.getTableInfo(tableName); - List vallist = new ArrayList(); - for (int i = 0; i < ti.columns.size(); i++) { - if (ti.iskey.get(i)) { - String col = ti.columns.get(i); - Object val = map.get(col); - vallist.add(val); - } - } - cleanDirtyRow(ti, tableName, new JSONObject(vallist)); - } - /** - * This functions copies the contents of a row in Music into the corresponding row in the SQL table - * @param tableName This is the name of the table in both Music and swl - * @param musicRow This is the row in Music that is being copied into SQL - */ - private void writeMusicRowToSQLDb(DBInterface dbi, String tableName, Row musicRow) { - // First construct the map of columns and their values - TableInfo ti = dbi.getTableInfo(tableName); - Map map = new HashMap(); - List vallist = new ArrayList(); - String rowid = tableName; - for (String col : ti.columns) { - Object val = getValue(musicRow, col); - map.put(col, val); - if (ti.iskey(col)) { - vallist.add(val); - rowid += "_" + val.toString(); - } - } - - logger.debug("Blocking rowid: "+rowid); - in_progress.add(rowid); // Block propagation of the following INSERT/UPDATE - - dbi.insertRowIntoSqlDb(tableName, map); - - logger.debug("Unblocking rowid: "+rowid); - in_progress.remove(rowid); // Unblock propagation - -// try { -// String sql = String.format("INSERT INTO %s (%s) VALUES (%s);", tableName, fields.toString(), values.toString()); -// executeSQLWrite(sql); -// } catch (SQLException e) { -// logger.debug("Insert failed because row exists, do an update"); -// // TODO - rewrite this UPDATE command should not update key fields -// String sql = String.format("UPDATE %s SET (%s) = (%s) WHERE %s", tableName, fields.toString(), values.toString(), where.toString()); -// try { -// executeSQLWrite(sql); -// } catch (SQLException e1) { -// e1.printStackTrace(); -// } -// } - - ti = dbi.getTableInfo(tableName); - cleanDirtyRow(ti, tableName, new JSONObject(vallist)); - -// String selectQuery = "select "+ primaryKeyName+" FROM "+tableName+" WHERE "+primaryKeyName+"="+primaryKeyValue+";"; -// java.sql.ResultSet rs = executeSQLRead(selectQuery); -// String dbWriteQuery=null; -// try { -// if(rs.next()){//this entry is there, do an update -// dbWriteQuery = "UPDATE "+tableName+" SET "+columnNameString+" = "+ 
valueString +"WHERE "+primaryKeyName+"="+primaryKeyValue+";"; -// }else -// dbWriteQuery = "INSERT INTO "+tableName+" VALUES"+valueString+";"; -// executeSQLWrite(dbWriteQuery); -// } catch (SQLException e) { -// // ZZTODO Auto-generated catch block -// e.printStackTrace(); -// } - - //clean the music dirty bits table -// String dirtyRowIdsTableName = music_ns+".DIRTY_"+tableName+"_"+myId; -// String deleteQuery = "DELETE FROM "+dirtyRowIdsTableName+" WHERE dirtyRowKeys=$$"+primaryKeyValue+"$$;"; -// executeMusicWriteQuery(deleteQuery); - } - private Object getValue(Row musicRow, String colname) { - ColumnDefinitions cdef = musicRow.getColumnDefinitions(); - DataType colType; - try { - colType= cdef.getType(colname); - } - catch(IllegalArgumentException e) { - logger.warn("Colname is not part of table metadata: "+e); - throw e; - } - String typeStr = colType.getName().toString().toUpperCase(); - switch (typeStr) { - case "BIGINT": - return musicRow.getLong(colname); - case "BOOLEAN": - return musicRow.getBool(colname); - case "BLOB": - return musicRow.getString(colname); - case "DATE": - return musicRow.getString(colname); - case "DECIMAL": - return musicRow.getDecimal(colname); - case "DOUBLE": - return musicRow.getDouble(colname); - case "SMALLINT": - case "INT": - return musicRow.getInt(colname); - case "TIMESTAMP": - return musicRow.getTimestamp(colname); - case "UUID": - return musicRow.getUUID(colname); - default: - logger.error(EELFLoggerDelegate.errorLogger, "UNEXPECTED COLUMN TYPE: columname="+colname+", columntype="+typeStr); - // fall thru - case "VARCHAR": - return musicRow.getString(colname); - } - } - - /** - * This method is called whenever there is an INSERT or UPDATE to a local SQL table, wherein it updates the - * MUSIC/Cassandra tables (both dirty bits and actual data) corresponding to the SQL write. Music propagates - * it to the other replicas. - * - * @param tableName This is the table that has changed. - * @param changedRow This is information about the row that has changed - */ - @Override - public void updateDirtyRowAndEntityTableInMusic(TableInfo ti, String tableName, JSONObject changedRow) { - // Build the CQL command - Object[] objects = getObjects(ti,tableName,changedRow); - StringBuilder fields = new StringBuilder(); - StringBuilder values = new StringBuilder(); - String rowid = tableName; - Object[] newrow = new Object[objects.length]; - PreparedQueryObject pQueryObject = new PreparedQueryObject(); - String pfx = ""; - int keyoffset=0; - for (int i = 0; i < objects.length; i++) { - if (!ti.hasKey() && i==0) { - //We need to tack on cassandra's uid in place of a primary key - fields.append(MDBC_PRIMARYKEY_NAME); - values.append("?"); - newrow[i] = UUID.fromString((String) objects[i]); - pQueryObject.addValue(newrow[i]); - keyoffset=-1; - pfx = ", "; - continue; - } - fields.append(pfx).append(ti.columns.get(i+keyoffset)); - values.append(pfx).append("?"); - pfx = ", "; - if (objects[i] instanceof byte[]) { - // Cassandra doesn't seem to have a Codec to translate a byte[] to a ByteBuffer - newrow[i] = ByteBuffer.wrap((byte[]) objects[i]); - pQueryObject.addValue(newrow[i]); - } else if (objects[i] instanceof Reader) { - // Cassandra doesn't seem to have a Codec to translate a Reader to a ByteBuffer either... 
- newrow[i] = ByteBuffer.wrap(readBytesFromReader((Reader) objects[i])); - pQueryObject.addValue(newrow[i]); - } else { - newrow[i] = objects[i]; - pQueryObject.addValue(newrow[i]); - } - if (i+keyoffset>=0 && ti.iskey.get(i+keyoffset)) { - rowid += "_" + newrow[i].toString(); - } - } - - if (in_progress.contains(rowid)) { - // This call to updateDirtyRowAndEntityTableInMusic() was called as a result of a Cassandra -> H2 update; ignore - logger.debug(EELFLoggerDelegate.applicationLogger, "updateDirtyRowAndEntityTableInMusic: bypassing MUSIC update on "+rowid); - - } else { - // Update local MUSIC node. Note: in Cassandra you can insert again on an existing key..it becomes an update - String cql = String.format("INSERT INTO %s.%s (%s) VALUES (%s);", music_ns, tableName, fields.toString(), values.toString()); - - pQueryObject.appendQueryString(cql); - String primaryKey = getMusicKeyFromRow(ti,tableName, changedRow); - updateMusicDB(tableName, primaryKey, pQueryObject); - - /*PreparedStatement ps = getPreparedStatementFromCache(cql); - BoundStatement bound = ps.bind(newrow); - bound.setReadTimeoutMillis(60000); - Session sess = getMusicSession(); - synchronized (sess) { - sess.execute(bound); - }*/ - // Mark the dirty rows in music for all the replicas but us - markDirtyRow(ti,tableName, changedRow); - } - } - - - - private byte[] readBytesFromReader(Reader rdr) { - StringBuilder sb = new StringBuilder(); - try { - int ch; - while ((ch = rdr.read()) >= 0) { - sb.append((char)ch); - } - } catch (IOException e) { - logger.warn("readBytesFromReader: "+e); - } - return sb.toString().getBytes(); - } - - protected PreparedStatement getPreparedStatementFromCache(String cql) { - // Note: have to hope that the Session never changes! - if (!ps_cache.containsKey(cql)) { - Session sess = getMusicSession(); - PreparedStatement ps = sess.prepare(cql); - ps_cache.put(cql, ps); - } - return ps_cache.get(cql); - } - - /** - * This method gets a connection to Music - * @return the Cassandra Session to use - */ - protected Session getMusicSession() { - // create cassandra session - if (musicSession == null) { - logger.info(EELFLoggerDelegate.applicationLogger, "Creating New Music Session"); - mCon = new MusicConnector(musicAddress); - musicSession = mCon.getSession(); - } - return musicSession; - } - - /** - * This method executes a write query in Music - * @param cql the CQL to be sent to Cassandra - */ - protected void executeMusicWriteQuery(String cql) { - logger.debug(EELFLoggerDelegate.applicationLogger, "Executing MUSIC write:"+ cql); - PreparedQueryObject pQueryObject = new PreparedQueryObject(); - pQueryObject.appendQueryString(cql); - ReturnType rt = MusicCore.eventualPut(pQueryObject); - if(rt.getResult().getResult().toLowerCase().equals("failure")) { - logger.error(EELFLoggerDelegate.errorLogger, "Failure while eventualPut...: "+rt.getMessage()); - } - /*Session sess = getMusicSession(); - SimpleStatement s = new SimpleStatement(cql); - s.setReadTimeoutMillis(60000); - synchronized (sess) { - sess.execute(s); - }*/ - } - - /** - * This method executes a read query in Music - * @param cql the CQL to be sent to Cassandra - * @return a ResultSet containing the rows returned from the query - */ - protected ResultSet executeMusicRead(String cql) { - logger.debug(EELFLoggerDelegate.applicationLogger, "Executing MUSIC write:"+ cql); - PreparedQueryObject pQueryObject = new PreparedQueryObject(); - pQueryObject.appendQueryString(cql); - ResultSet results = null; - try { - results = 
MusicCore.get(pQueryObject); - } catch (MusicServiceException e) { - - e.printStackTrace(); - } - return results; - /*Session sess = getMusicSession(); - synchronized (sess) { - return sess.execute(cql); - }*/ - } - - /** - * Returns the default primary key name that this mixin uses - */ - public String getMusicDefaultPrimaryKeyName() { - return MDBC_PRIMARYKEY_NAME; - } - - /** - * Return the function for cassandra's primary key generation - */ - public String generateUniqueKey() { - return UUID.randomUUID().toString(); - } - - @Override - public String getMusicKeyFromRowWithoutPrimaryIndexes(TableInfo ti, String table, JSONObject dbRow) { - //\TODO this operation is super expensive to perform, both latency and BW - // it is better to add additional where clauses, and have the primary key - // to be composed of known columns of the table - // Adding this primary indexes would be an additional burden to the developers, which spanner - // also does, but otherwise performance is really bad - // At least it should have a set of columns that are guaranteed to be unique - StringBuilder cqlOperation = new StringBuilder(); - cqlOperation.append("SELECT * FROM ") - .append(music_ns) - .append(".") - .append(table); - ResultSet musicResults = executeMusicRead(cqlOperation.toString()); - Object[] dbRowObjects = getObjects(ti,table,dbRow); - while (!musicResults.isExhausted()) { - Row musicRow = musicResults.one(); - if (rowIs(ti, musicRow, dbRowObjects)) { - return ((UUID)getValue(musicRow, MDBC_PRIMARYKEY_NAME)).toString(); - } - } - //should never reach here - return null; - } - - /** - * Checks to see if this row is in list of database entries - * @param ti - * @param musicRow - * @param dbRow - * @return - */ - private boolean rowIs(TableInfo ti, Row musicRow, Object[] dbRow) { - //System.out.println("Comparing " + musicRow.toString()); - boolean sameRow=true; - for (int i=0; i keyCols = ti.getKeyColumns(); - if(keyCols.isEmpty()){ - throw new IllegalArgumentException("Table doesn't have defined primary indexes "); - } - StringBuilder key = new StringBuilder(); - String pfx = ""; - for(String keyCol: keyCols) { - key.append(pfx); - key.append(row.getString(keyCol)); - pfx = ","; - } - String keyStr = key.toString(); - return keyStr; - } - - public void updateMusicDB(String tableName, String primaryKey, PreparedQueryObject pQObject) { - if(MusicMixin.criticalTables.contains(tableName)) { - ReturnType rt = null; - try { - rt = MusicCore.atomicPut(music_ns, tableName, primaryKey, pQObject, null); - } catch (MusicLockingException e) { - e.printStackTrace(); - } catch (MusicServiceException e) { - e.printStackTrace(); - } catch (MusicQueryException e) { - e.printStackTrace(); - } - if(rt.getResult().getResult().toLowerCase().equals("failure")) { - System.out.println("Failure while critical put..."+rt.getMessage()); - } - } else { - ReturnType rt = MusicCore.eventualPut(pQObject); - if(rt.getResult().getResult().toLowerCase().equals("failure")) { - System.out.println("Failure while critical put..."+rt.getMessage()); - } - } - } - - - private PreparedQueryObject createAppendMtxdIndexToMriQuery(String mriTable, String uuid, String table, UUID redoUuid){ - PreparedQueryObject query = new PreparedQueryObject(); - StringBuilder appendBuilder = new StringBuilder(); - appendBuilder.append("UPDATE ") - .append(music_ns) - .append(".") - .append(mriTable) - .append(" SET redo = redo +[('") - .append(table) - .append("',") - .append(redoUuid) - .append(")] WHERE id = ") - .append(uuid) - .append(";"); - 
query.appendQueryString(appendBuilder.toString()); - return query; - } - - protected String createAndAssignLock(String fullyQualifiedKey, DatabasePartition partition, String keyspace, String table, String key) throws MDBCServiceException { - String lockId; - lockId = MusicCore.createLockReference(fullyQualifiedKey); - ReturnType lockReturn; - try { - lockReturn = MusicCore.acquireLock(fullyQualifiedKey,lockId); - } catch (MusicLockingException e) { - logger.error(EELFLoggerDelegate.errorLogger, "Lock was not acquire correctly for key "+fullyQualifiedKey); - throw new MDBCServiceException("Lock was not acquire correctly for key "+fullyQualifiedKey); - } catch (MusicServiceException e) { - logger.error(EELFLoggerDelegate.errorLogger, "Error in music, when locking key: "+fullyQualifiedKey); - throw new MDBCServiceException("Error in music, when locking: "+fullyQualifiedKey); - } catch (MusicQueryException e) { - logger.error(EELFLoggerDelegate.errorLogger, "Error in executing query music, when locking key: "+fullyQualifiedKey); - throw new MDBCServiceException("Error in executing query music, when locking: "+fullyQualifiedKey); - } - //\TODO this is wrong, we should have a better way to obtain a lock forcefully, clean the queue and obtain the lock - if(lockReturn.getResult().compareTo(ResultType.SUCCESS) != 0 ) { - try { - MusicCore.forciblyReleaseLock(fullyQualifiedKey,lockId); - CassaLockStore lockingServiceHandle = MusicCore.getLockingServiceHandle(); - CassaLockStore.LockObject lockOwner = lockingServiceHandle.peekLockQueue(keyspace, table, key); - while(lockOwner.lockRef != lockId) { - MusicCore.forciblyReleaseLock(fullyQualifiedKey, lockOwner.lockRef); - try { - lockOwner = lockingServiceHandle.peekLockQueue(keyspace, table, key); - } catch(NullPointerException e){ - //Ignore null pointer exception - lockId = MusicCore.createLockReference(fullyQualifiedKey); - break; - } - } - lockReturn = MusicCore.acquireLock(fullyQualifiedKey,lockId); - - } catch (MusicLockingException e) { - throw new MDBCServiceException("Could not lock the corresponding lock"); - } catch (MusicServiceException e) { - logger.error(EELFLoggerDelegate.errorLogger, "Error in music, when locking key: "+fullyQualifiedKey); - throw new MDBCServiceException("Error in music, when locking: "+fullyQualifiedKey); - } catch (MusicQueryException e) { - logger.error(EELFLoggerDelegate.errorLogger, "Error in executing query music, when locking key: "+fullyQualifiedKey); - throw new MDBCServiceException("Error in executing query music, when locking: "+fullyQualifiedKey); - } - } - if(lockReturn.getResult().compareTo(ResultType.SUCCESS) != 0 ) { - throw new MDBCServiceException("Could not lock the corresponding lock"); - } - //TODO: Java newbie here, verify that this lockId is actually assigned to the global DatabasePartition in the StateManager instance - partition.setLockId(lockId); - return lockId; - } - - protected void pushRowToMtxd(UUID commitId, HashMap transactionDigest) throws MDBCServiceException{ - PreparedQueryObject query = new PreparedQueryObject(); - StringBuilder cqlQuery = new StringBuilder("INSERT INTO ") - .append(music_ns) - .append('.') - .append(musicTxDigestTableName) - .append(" (txid,transactiondigest) ") - .append("VALUES ('") - .append( commitId ).append(",'"); - try { - cqlQuery.append( MDBCUtils.toString(transactionDigest) ); - } catch (IOException e) { - logger.error(EELFLoggerDelegate.errorLogger, "Transaction Digest serialization was invalid for commit "+commitId); - throw new 
MDBCServiceException("Transaction Digest serialization was invalid for commit "+commitId); - } - cqlQuery.append("');"); - query.appendQueryString(cqlQuery.toString()); - //\TODO check if I am not shooting on my own foot - try { - MusicCore.nonKeyRelatedPut(query,"critical"); - } catch (MusicServiceException e) { - logger.error(EELFLoggerDelegate.errorLogger, "Transaction Digest serialization was invalid for commit "+commitId); - throw new MDBCServiceException("Transaction Digest serialization for commit "+commitId); - } - } - - protected void appendIndexToMri(String lockId, UUID commitId, String MriIndex) throws MDBCServiceException{ - PreparedQueryObject appendQuery = createAppendMtxdIndexToMriQuery(musicRangeInformationTableName, MriIndex, musicTxDigestTableName, commitId); - ReturnType returnType = MusicCore.criticalPut(music_ns, musicRangeInformationTableName, MriIndex, appendQuery, lockId, null); - if(returnType.getResult().compareTo(ResultType.SUCCESS) != 0 ){ - logger.error(EELFLoggerDelegate.errorLogger, "Error when executing append operation with return type: "+returnType.getMessage()); - throw new MDBCServiceException("Error when executing append operation with return type: "+returnType.getMessage()); - } - } - - @Override - public void commitLog(DBInterface dbi, DatabasePartition partition, HashMap transactionDigest, String txId ,TxCommitProgress progressKeeper) throws MDBCServiceException{ - String MriIndex = partition.getMusicRangeInformationIndex(); - if(MriIndex.isEmpty()) { - //\TODO Fetch MriIndex from the Range Information Table - throw new MDBCServiceException("TIT Index retrieval not yet implemented"); - } - String fullyQualifiedTitKey = music_ns+"."+ musicRangeInformationTableName +"."+MriIndex; - //0. See if reference to lock was already created - String lockId = partition.getLockId(); - if(lockId == null || lockId.isEmpty()) { - lockId = createAndAssignLock(fullyQualifiedTitKey,partition,music_ns, musicRangeInformationTableName,MriIndex); - } - - UUID commitId; - //Generate a local commit id - if(progressKeeper.containsTx(txId)) { - commitId = progressKeeper.getCommitId(txId); - } - else{ - logger.error(EELFLoggerDelegate.errorLogger, "Tx with id "+txId+" was not created in the TxCommitProgress "); - throw new MDBCServiceException("Tx with id "+txId+" was not created in the TxCommitProgress "); - } - //Add creation type of transaction digest - - //1. Push new row to RRT and obtain its index - pushRowToMtxd(commitId, transactionDigest); - - //2. Save RRT index to RQ - if(progressKeeper!= null) { - progressKeeper.setRecordId(txId,new MusixTxDigestId(commitId)); - } - //3. Append RRT index into the corresponding TIT row array - appendIndexToMri(lockId,commitId,MriIndex); - } - - /** - * @param tableName - * @param string - * @param rowValues - * @return - */ - @SuppressWarnings("unused") - private String getUid(String tableName, String string, Object[] rowValues) { - // - // Update local MUSIC node. 
Note: in Cassandra you can insert again on an existing key..it becomes an update - String cql = String.format("SELECT * FROM %s.%s;", music_ns, tableName); - PreparedStatement ps = getPreparedStatementFromCache(cql); - BoundStatement bound = ps.bind(); - bound.setReadTimeoutMillis(60000); - Session sess = getMusicSession(); - ResultSet rs; - synchronized (sess) { - rs = sess.execute(bound); - } - - // - //should never reach here - logger.error(EELFLoggerDelegate.errorLogger, "Could not find the row in the primary key"); - - return null; - } - - @Override - public Object[] getObjects(TableInfo ti, String tableName, JSONObject row) { - // \FIXME: we may need to add the primary key of the row if it was autogenerated by MUSIC - List cols = ti.columns; - int size = cols.size(); - boolean hasDefault = false; - if(row.has(getMusicDefaultPrimaryKeyName())) { - size++; - hasDefault = true; - } - - Object[] objects = new Object[size]; - int idx = 0; - if(hasDefault) { - objects[idx++] = row.getString(getMusicDefaultPrimaryKeyName()); - } - for(String col : ti.columns) { - objects[idx]=row.get(col); - } - return objects; - } - - @Override - - public MusicRangeInformationRow getMusicRangeInformation(UUID id){ - throw new UnsupportedOperationException(); - } - - @Override - public MriReference createMusicRangeInformation(MusicRangeInformationRow info){ - throw new UnsupportedOperationException(); - } - - @Override - public void appendToRedoLog(MriReference mriRowId, DatabasePartition partition, MusixTxDigestId newRecord){ - throw new UnsupportedOperationException(); - } - - @Override - public void addTxDigest(String musicTxDigestTable, MusixTxDigestId newId, String transactionDigest){ - throw new UnsupportedOperationException(); - } - - @Override - public List getPartitionInformation(DatabasePartition partition){ - throw new UnsupportedOperationException(); - } - - @Override - public HashMap getTransactionDigest(MusixTxDigestId id){ - throw new UnsupportedOperationException(); - } - - @Override - public void own(List ranges){ - throw new UnsupportedOperationException(); - } - - @Override - public void appendRange(String rangeId, List ranges){ - throw new UnsupportedOperationException(); - } - - @Override - public void relinquish(String ownerId, String rangeId){ - throw new UnsupportedOperationException(); - } -} diff --git a/src/main/java/com/att/research/mdbc/mixins/DBInterface.java b/src/main/java/com/att/research/mdbc/mixins/DBInterface.java deleted file mode 100755 index e2b2ad7..0000000 --- a/src/main/java/com/att/research/mdbc/mixins/DBInterface.java +++ /dev/null @@ -1,92 +0,0 @@ -package com.att.research.mdbc.mixins; - -import java.sql.ResultSet; -import java.util.List; -import java.util.Map; -import java.util.Set; - -import com.att.research.mdbc.Range; -import com.att.research.mdbc.TableInfo; -import com.att.research.mdbc.tables.StagingTable; - -/** - * This Interface defines the methods that MDBC needs in order to mirror data to/from a Database instance. - * - * @author Robert P. Eby - */ -public interface DBInterface { - /** - * Get the name of this DBnterface mixin object. - * @return the name - */ - String getMixinName(); - /** - * Do what is needed to close down the database connection. - */ - void close(); - /** - * Get a set of the table names in the database. The table names should be returned in UPPER CASE. 
- * @return the set - */ - Set getSQLTableSet(); - /** - * Return the name of the database that the driver is connected to - * @return - */ - String getDatabaseName(); - /** - * Return a TableInfo object for the specified table. - * @param tableName the table to look up - * @return a TableInfo object containing the info we need, or null if the table does not exist - */ - TableInfo getTableInfo(String tableName); - /** - * This method should create triggers in the database to be called for each row after every INSERT, - * UPDATE and DELETE, and before every SELECT. - * @param tableName this is the table on which triggers are being created. - */ - void createSQLTriggers(String tableName); - /** - * This method should drop all triggers previously created in the database for the table. - * @param tableName this is the table on which triggers are being dropped. - */ - void dropSQLTriggers(String tableName); - /** - * This method inserts a row into the SQL database, defined via a map of column names and values. - * @param tableName the table to insert the row into - * @param map map of column names → values to use for the keys when inserting the row - */ - void insertRowIntoSqlDb(String tableName, Map map); - /** - * This method deletes a row from the SQL database, defined via a map of column names and values. - * @param tableName the table to delete the row from - * @param map map of column names → values to use for the keys when deleting the row - */ - void deleteRowFromSqlDb(String tableName, Map map); - /** - * Code to be run within the DB driver before a SQL statement is executed. This is where tables - * can be synchronized before a SELECT, for those databases that do not support SELECT triggers. - * @param sql the SQL statement that is about to be executed - */ - void preStatementHook(final String sql); - /** - * Code to be run within the DB driver after a SQL statement has been executed. This is where remote - * statement actions can be copied back to Cassandra/MUSIC. - * @param sql the SQL statement that was executed - * @param transactionDigest - */ - void postStatementHook(final String sql,Map transactionDigest); - /** - * This method executes a read query in the SQL database. Methods that call this method should be sure - * to call resultset.getStatement().close() when done in order to free up resources. - * @param sql the query to run - * @return a ResultSet containing the rows returned from the query - */ - ResultSet executeSQLRead(String sql); - - void synchronizeData(String tableName); - - List getReservedTblNames(); - - String getPrimaryKey(String sql, String tableName); -} diff --git a/src/main/java/com/att/research/mdbc/mixins/MixinFactory.java b/src/main/java/com/att/research/mdbc/mixins/MixinFactory.java deleted file mode 100755 index 68d2986..0000000 --- a/src/main/java/com/att/research/mdbc/mixins/MixinFactory.java +++ /dev/null @@ -1,125 +0,0 @@ -package com.att.research.mdbc.mixins; - -import java.lang.reflect.Constructor; -import java.lang.reflect.InvocationTargetException; -import java.sql.Connection; -import java.util.Properties; - -import com.att.research.logging.EELFLoggerDelegate; -import com.att.research.mdbc.DatabasePartition; -import com.att.research.mdbc.MusicSqlManager; - -/** - * This class is used to construct instances of Mixins that implement either the {@link com.att.research.mdbc.mixins.DBInterface} - * interface, or the {@link com.att.research.mdbc.mixins.MusicInterface} interface. The Mixins are searched for in the CLASSPATH. - * - * @author Robert P. 
Eby - */ -public class MixinFactory { - private static EELFLoggerDelegate logger = EELFLoggerDelegate.getLogger(MixinFactory.class); - - // Only static methods... - private MixinFactory(){} - - /** - * Look for a class in CLASSPATH that implements the {@link DBInterface} interface, and has the mixin name name. - * If one is found, construct and return it, using the other arguments for the constructor. - * @param name the name of the Mixin - * @param msm the MusicSqlManager to use as an argument to the constructor - * @param url the URL to use as an argument to the constructor - * @param conn the underlying JDBC Connection - * @param info the Properties to use as an argument to the constructor - * @return the newly constructed DBInterface, or null if one cannot be found. - */ - public static DBInterface createDBInterface(String name, MusicSqlManager msm, String url, Connection conn, Properties info) { - for (Class cl : Utils.getClassesImplementing(DBInterface.class)) { - try { - Constructor con = cl.getConstructor(); - if (con != null) { - DBInterface dbi = (DBInterface) con.newInstance(); - String miname = dbi.getMixinName(); - logger.info(EELFLoggerDelegate.applicationLogger,"Checking "+miname); - if (miname.equalsIgnoreCase(name)) { - con = cl.getConstructor(MusicSqlManager.class, String.class, Connection.class, Properties.class); - if (con != null) { - logger.info(EELFLoggerDelegate.applicationLogger,"Found match: "+miname); - return (DBInterface) con.newInstance(msm, url, conn, info); - } - } - } - } catch (Exception e) { - logger.error(EELFLoggerDelegate.errorLogger,"createDBInterface: "+e); - } - } - return null; - } - /** - * Look for a class in CLASSPATH that implements the {@link MusicInterface} interface, and has the mixin name name. - * If one is found, construct and return it, using the other arguments for the constructor. - * @param name the name of the Mixin - * @param msm the MusicSqlManager to use as an argument to the constructor - * @param dbi the DBInterface to use as an argument to the constructor - * @param url the URL to use as an argument to the constructor - * @param info the Properties to use as an argument to the constructor - * @return the newly constructed MusicInterface, or null if one cannot be found. - */ - public static MusicInterface createMusicInterface(String name, String url, Properties info, DatabasePartition ranges) { - for (Class cl : Utils.getClassesImplementing(MusicInterface.class)) { - try { - Constructor con = cl.getConstructor(); - if (con != null) { //TODO: is this necessary? Don't think it could ever be null? 
- MusicInterface mi = (MusicInterface) con.newInstance(); - String miname = mi.getMixinName(); - logger.info(EELFLoggerDelegate.applicationLogger, "Checking "+miname); - if (miname.equalsIgnoreCase(name)) { - con = cl.getConstructor(String.class, Properties.class, DatabasePartition.class); - if (con != null) { - logger.info(EELFLoggerDelegate.applicationLogger,"Found match: "+miname); - return (MusicInterface) con.newInstance(url, info, ranges); - } - } - } - } catch (InvocationTargetException e) { - logger.error(EELFLoggerDelegate.errorLogger,"createMusicInterface: "+e.getCause().toString()); - } - catch (Exception e) { - logger.error(EELFLoggerDelegate.errorLogger,"createMusicInterface: "+e); - } - } - return null; - } - - // Unfortunately, this version does not work when MDBC is built as a JBoss module, - // where something funny is happening with the classloaders -// @SuppressWarnings("unused") -// private static List> getClassesImplementingOld(Class implx) { -// List> list = new ArrayList>(); -// try { -// ClassLoader cldr = MixinFactory.class.getClassLoader(); -// while (cldr != null) { -// ClassPath cp = ClassPath.from(cldr); -// for (ClassPath.ClassInfo x : cp.getAllClasses()) { -// if (x.toString().startsWith("com.att.")) { // mixins must have a package starting with com.att. -// Class cl = x.load(); -// if (impl(cl, implx)) { -// list.add(cl); -// } -// } -// } -// cldr = cldr.getParent(); -// } -// } catch (IOException e) { -// // ignore -// } -// return list; -// } - static boolean impl(Class cl, Class imp) { - for (Class c2 : cl.getInterfaces()) { - if (c2 == imp) { - return true; - } - } - Class c2 = cl.getSuperclass(); - return (c2 != null) ? impl(c2, imp) : false; - } -} diff --git a/src/main/java/com/att/research/mdbc/mixins/MusicConnector.java b/src/main/java/com/att/research/mdbc/mixins/MusicConnector.java deleted file mode 100755 index 8df6ed5..0000000 --- a/src/main/java/com/att/research/mdbc/mixins/MusicConnector.java +++ /dev/null @@ -1,124 +0,0 @@ -package com.att.research.mdbc.mixins; - -import java.net.InetAddress; -import java.net.NetworkInterface; -import java.net.SocketException; -import java.util.ArrayList; -import java.util.Enumeration; -import java.util.Iterator; -import java.util.List; - -import com.att.research.logging.EELFLoggerDelegate; -import com.datastax.driver.core.Cluster; -import com.datastax.driver.core.HostDistance; -import com.datastax.driver.core.Metadata; -import com.datastax.driver.core.PoolingOptions; -import com.datastax.driver.core.Session; -import com.datastax.driver.core.exceptions.NoHostAvailableException; -import org.onap.music.main.MusicCore; - -/** - * This class allows for management of the Cassandra Cluster and Session objects. - * - * @author Robert P. 
Eby - */ -public class MusicConnector { - - private EELFLoggerDelegate logger = EELFLoggerDelegate.getLogger(MusicConnector.class); - - private Session session; - private Cluster cluster; - - protected MusicConnector() { - //to defeat instantiation since this is a singleton - } - - public MusicConnector(String address) { -// connectToCassaCluster(address); - connectToMultipleAddresses(address); - } - - public Session getSession() { - return session; - } - - public void close() { - if (session != null) - session.close(); - session = null; - if (cluster != null) - cluster.close(); - cluster = null; - } - - private List getAllPossibleLocalIps(){ - ArrayList allPossibleIps = new ArrayList(); - try { - Enumeration en = NetworkInterface.getNetworkInterfaces(); - while(en.hasMoreElements()){ - NetworkInterface ni=(NetworkInterface) en.nextElement(); - Enumeration ee = ni.getInetAddresses(); - while(ee.hasMoreElements()) { - InetAddress ia= (InetAddress) ee.nextElement(); - allPossibleIps.add(ia.getHostAddress()); - } - } - } catch (SocketException e) { - e.printStackTrace(); - } - return allPossibleIps; - } - - private void connectToMultipleAddresses(String address) { - MusicCore.getDSHandle(address); - /* - PoolingOptions poolingOptions = - new PoolingOptions() - .setConnectionsPerHost(HostDistance.LOCAL, 4, 10) - .setConnectionsPerHost(HostDistance.REMOTE, 2, 4); - String[] music_hosts = address.split(","); - if (cluster == null) { - logger.info(EELFLoggerDelegate.applicationLogger,"Initializing MUSIC Client with endpoints "+address); - cluster = Cluster.builder() - .withPort(9042) - .withPoolingOptions(poolingOptions) - .withoutMetrics() - .addContactPoints(music_hosts) - .build(); - Metadata metadata = cluster.getMetadata(); - logger.info(EELFLoggerDelegate.applicationLogger,"Connected to cluster:"+metadata.getClusterName()+" at address:"+address); - - } - session = cluster.connect(); - */ - } - - @SuppressWarnings("unused") - private void connectToCassaCluster(String address) { - PoolingOptions poolingOptions = - new PoolingOptions() - .setConnectionsPerHost(HostDistance.LOCAL, 4, 10) - .setConnectionsPerHost(HostDistance.REMOTE, 2, 4); - Iterator it = getAllPossibleLocalIps().iterator(); - logger.info(EELFLoggerDelegate.applicationLogger,"Iterating through possible ips:"+getAllPossibleLocalIps()); - - while (it.hasNext()) { - try { - cluster = Cluster.builder() - .withPort(9042) - .withPoolingOptions(poolingOptions) - .withoutMetrics() - .addContactPoint(address) - .build(); - //cluster.getConfiguration().getSocketOptions().setReadTimeoutMillis(Integer.MAX_VALUE); - Metadata metadata = cluster.getMetadata(); - logger.info(EELFLoggerDelegate.applicationLogger,"Connected to cluster:"+metadata.getClusterName()+" at address:"+address); - - session = cluster.connect(); - break; - } catch (NoHostAvailableException e) { - address = it.next(); - } - } - } -} diff --git a/src/main/java/com/att/research/mdbc/mixins/MusicInterface.java b/src/main/java/com/att/research/mdbc/mixins/MusicInterface.java deleted file mode 100755 index 7fb6d18..0000000 --- a/src/main/java/com/att/research/mdbc/mixins/MusicInterface.java +++ /dev/null @@ -1,173 +0,0 @@ -package com.att.research.mdbc.mixins; - -import java.util.HashMap; -import java.util.List; -import java.util.Map; -import java.util.UUID; - -import org.json.JSONObject; - -import com.att.research.exceptions.MDBCServiceException; -import com.att.research.mdbc.DatabasePartition; -import com.att.research.mdbc.Range; -import com.att.research.mdbc.TableInfo; 
-import com.att.research.mdbc.tables.PartitionInformation; -import com.att.research.mdbc.tables.MusixTxDigestId; -import com.att.research.mdbc.tables.StagingTable; -import com.att.research.mdbc.tables.MriReference; -import com.att.research.mdbc.tables.MusicRangeInformationRow; -import com.att.research.mdbc.tables.TxCommitProgress; - -/** - * This Interface defines the methods that MDBC needs for a class to provide access to the persistence layer of MUSIC. - * - * @author Robert P. Eby - */ -public interface MusicInterface { - /** - * This function is used to created all the required data structures, both local - * \TODO Check if this function is required in the MUSIC interface or could be just created on the constructor - */ - void initializeMetricDataStructures() throws MDBCServiceException; - /** - * Get the name of this MusicInterface mixin object. - * @return the name - */ - String getMixinName(); - /** - * Gets the name of this MusicInterface mixin's default primary key name - * @return default primary key name - */ - String getMusicDefaultPrimaryKeyName(); - /** - * generates a key or placeholder for what is required for a primary key - * @return a primary key - */ - String generateUniqueKey(); - - /** - * Find the key used with Music for a table that was created without a primary index - * Name is long to avoid developers using it. For cassandra performance in this operation - * is going to be really bad - * @param ti information of the table in the SQL layer - * @param table name of the table - * @param dbRow row obtained from the SQL layer - * @return key associated with the row - */ - String getMusicKeyFromRowWithoutPrimaryIndexes(TableInfo ti, String table, JSONObject dbRow); - /** - * Do what is needed to close down the MUSIC connection. - */ - void close(); - /** - * This method creates a keyspace in Music/Cassandra to store the data corresponding to the SQL tables. - * The keyspace name comes from the initialization properties passed to the JDBC driver. - */ - void createKeyspace(); - /** - * This method performs all necessary initialization in Music/Cassandra to store the table tableName. - * @param tableName the table to initialize MUSIC for - */ - void initializeMusicForTable(TableInfo ti, String tableName); - /** - * Create a dirty row table for the real table tableName. The primary keys columns from the real table are recreated in - * the dirty table, along with a "REPLICA__" column that names the replica that should update it's internal state from MUSIC. - * @param tableName the table to create a "dirty" table for - */ - void createDirtyRowTable(TableInfo ti, String tableName); - /** - * Drop the dirty row table for tableName from MUSIC. - * @param tableName the table being dropped - */ - void dropDirtyRowTable(String tableName); - /** - * Drops the named table and its dirty row table (for all replicas) from MUSIC. The dirty row table is dropped first. - * @param tableName This is the table that has been dropped - */ - void clearMusicForTable(String tableName); - /** - * Mark rows as "dirty" in the dirty rows table for tableName. Rows are marked for all replicas but - * this one (this replica already has the up to date data). - * @param tableName the table we are marking dirty - * @param keys an ordered list of the values being put into the table. The values that correspond to the tables' - * primary key are copied into the dirty row table. 
- */ - void markDirtyRow(TableInfo ti, String tableName, JSONObject keys); - /** - * Remove the entries from the dirty row (for this replica) that correspond to a set of primary keys - * @param tableName the table we are removing dirty entries from - * @param keys the primary key values to use in the DELETE. Note: this is *only* the primary keys, not a full table row. - */ - void cleanDirtyRow(TableInfo ti, String tableName, JSONObject keys); - /** - * Get a list of "dirty rows" for a table. The dirty rows returned apply only to this replica, - * and consist of a Map of primary key column names and values. - * @param tableName the table we are querying for - * @return a list of maps; each list item is a map of the primary key names and values for that "dirty row". - */ - List> getDirtyRows(TableInfo ti, String tableName); - /** - * This method is called whenever there is a DELETE to a row on a local SQL table, wherein it updates the - * MUSIC/Cassandra tables (both dirty bits and actual data) corresponding to the SQL write. MUSIC propagates - * it to the other replicas. - * @param tableName This is the table that has changed. - * @param oldRow This is a copy of the old row being deleted - */ - void deleteFromEntityTableInMusic(TableInfo ti,String tableName, JSONObject oldRow); - /** - * This method is called whenever there is a SELECT on a local SQL table, wherein it first checks the local - * dirty bits table to see if there are any rows in Cassandra whose value needs to be copied to the local SQL DB. - * @param tableName This is the table on which the select is being performed - */ - void readDirtyRowsAndUpdateDb(DBInterface dbi, String tableName); - /** - * This method is called whenever there is an INSERT or UPDATE to a local SQL table, wherein it updates the - * MUSIC/Cassandra tables (both dirty bits and actual data) corresponding to the SQL write. Music propagates - * it to the other replicas. - * @param tableName This is the table that has changed. - * @param changedRow This is information about the row that has changed - */ - void updateDirtyRowAndEntityTableInMusic(TableInfo ti, String tableName, JSONObject changedRow); - - Object[] getObjects(TableInfo ti, String tableName, JSONObject row); - /** - * Returns the primary key associated with the given row - * @param ti info of the table that is associated with the row - * @param tableName name of the table that contains the row - * @param changedRow row that is going to contain the information associated with the primary key - * @return primary key of the row - */ - String getMusicKeyFromRow(TableInfo ti, String tableName, JSONObject changedRow); - - /** - * Commits the corresponding REDO-log into MUSIC - * - * @param dbi, the database interface use in the local SQL cache, where the music interface is being used - * @param partition - * @param transactionDigest digest of the transaction that is being committed into the Redo log in music. 
It has to be a HashMap, because it is required to be serializable - * @param txId id associated with the log being send - * @param progressKeeper data structure that is used to handle to detect failures, and know what to do - * @throws MDBCServiceException - */ - void commitLog(DBInterface dbi, DatabasePartition partition, HashMap transactionDigest, String txId,TxCommitProgress progressKeeper) throws MDBCServiceException; - - MusicRangeInformationRow getMusicRangeInformation(UUID id); - - MriReference createMusicRangeInformation(MusicRangeInformationRow info); - - void appendToRedoLog(MriReference mriRowId, DatabasePartition partition, MusixTxDigestId newRecord); - - void addTxDigest(String musicTxDigestTable, MusixTxDigestId newId, String transactionDigest); - - List getPartitionInformation(DatabasePartition partition); - - HashMap getTransactionDigest(MusixTxDigestId id); - - void own(List ranges); - - void appendRange(String rangeId, List ranges); - - void relinquish(String ownerId, String rangeId); - -} - diff --git a/src/main/java/com/att/research/mdbc/mixins/MusicMixin.java b/src/main/java/com/att/research/mdbc/mixins/MusicMixin.java deleted file mode 100644 index 630b75e..0000000 --- a/src/main/java/com/att/research/mdbc/mixins/MusicMixin.java +++ /dev/null @@ -1,233 +0,0 @@ -package com.att.research.mdbc.mixins; - -import java.io.IOException; -import java.io.InputStream; -import java.util.*; - -import com.att.research.mdbc.LockId; -import org.json.JSONObject; -import org.onap.music.exceptions.MusicLockingException; - -import com.att.research.exceptions.MDBCServiceException; -import com.att.research.mdbc.DatabasePartition; -import com.att.research.mdbc.Range; -import com.att.research.mdbc.TableInfo; -import com.att.research.mdbc.tables.PartitionInformation; -import com.att.research.mdbc.tables.MusixTxDigestId; -import com.att.research.mdbc.tables.StagingTable; -import com.att.research.mdbc.tables.MriReference; -import com.att.research.mdbc.tables.MusicRangeInformationRow; -import com.att.research.mdbc.tables.TxCommitProgress; - -import org.onap.music.main.MusicCore; - -/** - - * - */ -public class MusicMixin implements MusicInterface { - - public static Map> currentLockMap = new HashMap<>(); - public static List criticalTables = new ArrayList<>(); - - @Override - public String getMixinName() { - // - return null; - } - - @Override - public String getMusicDefaultPrimaryKeyName() { - // - return null; - } - - @Override - public String generateUniqueKey() { - // - return null; - } - - @Override - public String getMusicKeyFromRow(TableInfo ti, String table, JSONObject dbRow) { - // - return null; - } - - @Override - public void close() { - // - - } - - @Override - public void createKeyspace() { - // - - } - - @Override - public void initializeMusicForTable(TableInfo ti, String tableName) { - // - - } - - @Override - public void createDirtyRowTable(TableInfo ti, String tableName) { - // - - } - - @Override - public void dropDirtyRowTable(String tableName) { - // - - } - - @Override - public void clearMusicForTable(String tableName) { - // - - } - - @Override - public void markDirtyRow(TableInfo ti, String tableName, JSONObject keys) { - // - - } - - @Override - public void cleanDirtyRow(TableInfo ti, String tableName, JSONObject keys) { - // - - } - - @Override - public List> getDirtyRows(TableInfo ti, String tableName) { - // - return null; - } - - @Override - public void deleteFromEntityTableInMusic(TableInfo ti, String tableName, JSONObject oldRow) { - // - - } - - @Override - public 
void readDirtyRowsAndUpdateDb(DBInterface dbi, String tableName) { - // - - } - - @Override - public void updateDirtyRowAndEntityTableInMusic(TableInfo ti, String tableName, JSONObject changedRow) { - updateDirtyRowAndEntityTableInMusic(tableName, changedRow, false); - - } - - public void updateDirtyRowAndEntityTableInMusic(String tableName, JSONObject changedRow, boolean isCritical) { - } - - - public static void loadProperties() { - Properties prop = new Properties(); - InputStream input = null; - try { - input = MusicMixin.class.getClassLoader().getResourceAsStream("mdbc.properties"); - prop.load(input); - String crTable = prop.getProperty("critical.tables"); - String[] tableArr = crTable.split(","); - criticalTables = Arrays.asList(tableArr); - - } catch (Exception ex) { - ex.printStackTrace(); - } finally { - if (input != null) { - try { - input.close(); - } catch (IOException e) { - e.printStackTrace(); - } - } - } - } - - public static void releaseZKLocks(Set lockIds) { - for (LockId lockId : lockIds) { - System.out.println("Releasing lock: " + lockId); - try { - MusicCore.voluntaryReleaseLock(lockId.getFullyQualifiedLockKey(), lockId.getLockReference()); - MusicCore.destroyLockRef(lockId.getFullyQualifiedLockKey(), lockId.getLockReference()); - } catch (MusicLockingException e) { - e.printStackTrace(); - } - } - } - - @Override - public String getMusicKeyFromRowWithoutPrimaryIndexes(TableInfo ti, String tableName, JSONObject changedRow) { - // - return null; - } - - @Override - public void initializeMetricDataStructures() { - // - - } - - @Override - public Object[] getObjects(TableInfo ti, String tableName, JSONObject row) { - return null; - } - - @Override - public void commitLog(DBInterface dbi, DatabasePartition partition, HashMap transactionDigest, String txId, TxCommitProgress progressKeeper) - throws MDBCServiceException { - // TODO Auto-generated method stub - } - - @Override - public HashMap getTransactionDigest(MusixTxDigestId id) { - return null; - } - - @Override - public List getPartitionInformation(DatabasePartition partition) { - return null; - } - - @Override - public MriReference createMusicRangeInformation(MusicRangeInformationRow info) { - return null; - } - - @Override - public void appendToRedoLog(MriReference mriRowId, DatabasePartition partition, MusixTxDigestId newRecord) { - } - - @Override - public void addTxDigest(String musicTxDigestTable, MusixTxDigestId newId, String transactionDigest) { - } - - @Override - public void own(List ranges) { - throw new java.lang.UnsupportedOperationException("function not implemented yet"); - } - - @Override - public void appendRange(String rangeId, List ranges) { - throw new java.lang.UnsupportedOperationException("function not implemented yet"); - } - - @Override - public void relinquish(String ownerId, String rangeId) { - throw new java.lang.UnsupportedOperationException("function not implemented yet"); - } - - @Override - public MusicRangeInformationRow getMusicRangeInformation(UUID id){ - return null; - } -} diff --git a/src/main/java/com/att/research/mdbc/mixins/MySQLMixin.java b/src/main/java/com/att/research/mdbc/mixins/MySQLMixin.java deleted file mode 100755 index 5615ffb..0000000 --- a/src/main/java/com/att/research/mdbc/mixins/MySQLMixin.java +++ /dev/null @@ -1,786 +0,0 @@ -package com.att.research.mdbc.mixins; - -import java.sql.Connection; -import java.sql.PreparedStatement; -import java.sql.ResultSet; -import java.sql.SQLException; -import java.sql.Statement; -import java.sql.Types; -import 
java.util.ArrayList; -import java.util.HashMap; -import java.util.List; -import java.util.Map; -import java.util.Properties; -import java.util.Set; -import java.util.TreeSet; - -import org.json.JSONObject; -import org.json.JSONTokener; - -import com.att.research.logging.EELFLoggerDelegate; -import com.att.research.mdbc.MusicSqlManager; -import com.att.research.mdbc.Range; -import com.att.research.mdbc.TableInfo; -import com.att.research.mdbc.tables.OperationType; -import com.att.research.mdbc.tables.StagingTable; - -import net.sf.jsqlparser.JSQLParserException; -import net.sf.jsqlparser.parser.CCJSqlParserUtil; -import net.sf.jsqlparser.statement.delete.Delete; -import net.sf.jsqlparser.statement.insert.Insert; -import net.sf.jsqlparser.statement.update.Update; - -/** - * This class provides the methods that MDBC needs in order to mirror data to/from a - * MySQL or MariaDB database instance. - * This class uses the JSON_OBJECT() database function, which means it requires the following - * minimum versions of either database: - * - * - * - * - *
DATABASE   VERSION
MySQL      5.7.8
MariaDB    10.2.3 (Note: 10.2.3 is currently (July 2017) a beta release)
- * - * @author Robert P. Eby - */ -public class MySQLMixin implements DBInterface { - private EELFLoggerDelegate logger = EELFLoggerDelegate.getLogger(MySQLMixin.class); - - public static final String MIXIN_NAME = "mysql"; - public static final String TRANS_TBL = "MDBC_TRANSLOG"; - private static final String CREATE_TBL_SQL = - "CREATE TABLE IF NOT EXISTS "+TRANS_TBL+ - " (IX INT AUTO_INCREMENT, OP CHAR(1), TABLENAME VARCHAR(255), NEWROWDATA VARCHAR(1024), KEYDATA VARCHAR(1024), CONNECTION_ID INT,PRIMARY KEY (IX))"; - - private final MusicSqlManager msm; - private final int connId; - private final String dbName; - private final Connection dbConnection; - private final Map tables; - private boolean server_tbl_created = false; - - public MySQLMixin() { - this.msm = null; - this.connId = 0; - this.dbName = null; - this.dbConnection = null; - this.tables = null; - } - public MySQLMixin(MusicSqlManager msm, String url, Connection conn, Properties info) { - this.msm = msm; - this.connId = generateConnID(conn); - this.dbName = getDBName(conn); - this.dbConnection = conn; - this.tables = new HashMap(); - } - // This is used to generate a unique connId for this connection to the DB. - private int generateConnID(Connection conn) { - int rv = (int) System.currentTimeMillis(); // random-ish - try { - Statement stmt = conn.createStatement(); - ResultSet rs = stmt.executeQuery("SELECT CONNECTION_ID() AS IX"); - if (rs.next()) { - rv = rs.getInt("IX"); - } - stmt.close(); - } catch (SQLException e) { - logger.error(EELFLoggerDelegate.errorLogger,"generateConnID: problem generating a connection ID!"); - } - return rv; - } - - /** - * Get the name of this DBnterface mixin object. - * @return the name - */ - @Override - public String getMixinName() { - return MIXIN_NAME; - } - - @Override - public void close() { - // nothing yet - } - - /** - * Determines the db name associated with the connection - * This is the private/internal method that actually determines the name - * @param conn - * @return - */ - private String getDBName(Connection conn) { - String dbname = "mdbc"; //default name - try { - Statement stmt = conn.createStatement(); - ResultSet rs = stmt.executeQuery("SELECT DATABASE() AS DB"); - if (rs.next()) { - dbname = rs.getString("DB"); - } - stmt.close(); - } catch (SQLException e) { - logger.error(EELFLoggerDelegate.errorLogger, "getDBName: problem getting database name from mysql"); - } - return dbname; - } - - public String getDatabaseName() { - return this.dbName; - } - /** - * Get a set of the table names in the database. 
- * @return the set - */ - @Override - public Set getSQLTableSet() { - Set set = new TreeSet(); - String sql = "SELECT TABLE_NAME FROM INFORMATION_SCHEMA.TABLES WHERE TABLE_SCHEMA=DATABASE() AND TABLE_TYPE='BASE TABLE'"; - try { - Statement stmt = dbConnection.createStatement(); - ResultSet rs = stmt.executeQuery(sql); - while (rs.next()) { - String s = rs.getString("TABLE_NAME"); - set.add(s); - } - stmt.close(); - } catch (SQLException e) { - logger.error(EELFLoggerDelegate.errorLogger,"getSQLTableSet: "+e); - } - logger.debug(EELFLoggerDelegate.applicationLogger,"getSQLTableSet returning: "+ set); - return set; - } -/* -mysql> describe tables; -+-----------------+---------------------+------+-----+---------+-------+ -| Field | Type | Null | Key | Default | Extra | -+-----------------+---------------------+------+-----+---------+-------+ -| TABLE_CATALOG | varchar(512) | NO | | | | -| TABLE_SCHEMA | varchar(64) | NO | | | | -| TABLE_NAME | varchar(64) | NO | | | | -| TABLE_TYPE | varchar(64) | NO | | | | -| ENGINE | varchar(64) | YES | | NULL | | -| VERSION | bigint(21) unsigned | YES | | NULL | | -| ROW_FORMAT | varchar(10) | YES | | NULL | | -| TABLE_ROWS | bigint(21) unsigned | YES | | NULL | | -| AVG_ROW_LENGTH | bigint(21) unsigned | YES | | NULL | | -| DATA_LENGTH | bigint(21) unsigned | YES | | NULL | | -| MAX_DATA_LENGTH | bigint(21) unsigned | YES | | NULL | | -| INDEX_LENGTH | bigint(21) unsigned | YES | | NULL | | -| DATA_FREE | bigint(21) unsigned | YES | | NULL | | -| AUTO_INCREMENT | bigint(21) unsigned | YES | | NULL | | -| CREATE_TIME | datetime | YES | | NULL | | -| UPDATE_TIME | datetime | YES | | NULL | | -| CHECK_TIME | datetime | YES | | NULL | | -| TABLE_COLLATION | varchar(32) | YES | | NULL | | -| CHECKSUM | bigint(21) unsigned | YES | | NULL | | -| CREATE_OPTIONS | varchar(255) | YES | | NULL | | -| TABLE_COMMENT | varchar(2048) | NO | | | | -+-----------------+---------------------+------+-----+---------+-------+ - */ - /** - * Return a TableInfo object for the specified table. - * This method first looks in a cache of previously constructed TableInfo objects for the table. - * If not found, it queries the INFORMATION_SCHEMA.COLUMNS table to obtain the column names, types, and indexes of the table. - * It creates a new TableInfo object with the results. 
- * @param tableName the table to look up - * @return a TableInfo object containing the info we need, or null if the table does not exist - */ - @Override - public TableInfo getTableInfo(String tableName) { - TableInfo ti = tables.get(tableName); - if (ti == null) { - try { - String tbl = tableName;//.toUpperCase(); - String sql = "SELECT COLUMN_NAME, DATA_TYPE, COLUMN_KEY FROM INFORMATION_SCHEMA.COLUMNS WHERE TABLE_SCHEMA=DATABASE() AND TABLE_NAME='"+tbl+"'"; - ResultSet rs = executeSQLRead(sql); - if (rs != null) { - ti = new TableInfo(); - while (rs.next()) { - String name = rs.getString("COLUMN_NAME"); - String type = rs.getString("DATA_TYPE"); - String ckey = rs.getString("COLUMN_KEY"); - ti.columns.add(name); - ti.coltype.add(mapDatatypeNameToType(type)); - ti.iskey.add(ckey != null && !ckey.equals("")); - } - rs.getStatement().close(); - } else { - logger.error(EELFLoggerDelegate.errorLogger,"Cannot retrieve table info for table "+tableName+" from MySQL."); - } - } catch (SQLException e) { - logger.error(EELFLoggerDelegate.errorLogger,"Cannot retrieve table info for table "+tableName+" from MySQL: "+e); - return null; - } - tables.put(tableName, ti); - } - return ti; - } - // Map MySQL data type names to the java.sql.Types equivalent - private int mapDatatypeNameToType(String nm) { - switch (nm) { - case "tinyint": return Types.TINYINT; - case "smallint": return Types.SMALLINT; - case "mediumint": - case "int": return Types.INTEGER; - case "bigint": return Types.BIGINT; - case "decimal": - case "numeric": return Types.DECIMAL; - case "float": return Types.FLOAT; - case "double": return Types.DOUBLE; - case "date": - case "datetime": return Types.DATE; - case "time": return Types.TIME; - case "timestamp": return Types.TIMESTAMP; - case "char": return Types.CHAR; - case "text": - case "varchar": return Types.VARCHAR; - case "mediumblob": - case "blob": return Types.VARCHAR; - default: - logger.error(EELFLoggerDelegate.errorLogger,"unrecognized and/or unsupported data type "+nm); - return Types.VARCHAR; - } - } - @Override - public void createSQLTriggers(String tableName) { - // Don't create triggers for the table the triggers write into!!! - if (tableName.equals(TRANS_TBL)) - return; - try { - if (!server_tbl_created) { - try { - Statement stmt = dbConnection.createStatement(); - stmt.execute(CREATE_TBL_SQL); - stmt.close(); - logger.info(EELFLoggerDelegate.applicationLogger,"createSQLTriggers: Server side dirty table created."); - server_tbl_created = true; - } catch (SQLException e) { - logger.error(EELFLoggerDelegate.errorLogger,"createSQLTriggers: problem creating the "+TRANS_TBL+" table!"); - } - } - - // Give the triggers a way to find this MSM - for (String name : getTriggerNames(tableName)) { - logger.info(EELFLoggerDelegate.applicationLogger,"ADD trigger "+name+" to msm_map"); - //\TODO fix this is an error - //msm.register(name); - } - // No SELECT trigger - executeSQLWrite(generateTrigger(tableName, "INSERT")); - executeSQLWrite(generateTrigger(tableName, "UPDATE")); - executeSQLWrite(generateTrigger(tableName, "DELETE")); - } catch (SQLException e) { - if (e.getMessage().equals("Trigger already exists")) { - //only warn if trigger already exists - logger.warn(EELFLoggerDelegate.applicationLogger, "createSQLTriggers" + e); - } else { - logger.error(EELFLoggerDelegate.errorLogger,"createSQLTriggers: "+e); - } - } - } -/* -CREATE TRIGGER `triggername` BEFORE UPDATE ON `table` -FOR EACH ROW BEGIN -INSERT INTO `log_table` ( `field1` `field2`, ...) 
VALUES ( NEW.`field1`, NEW.`field2`, ...) ; -END; - -OLD.field refers to the old value -NEW.field refers to the new value -*/ - private String generateTrigger(String tableName, String op) { - boolean isdelete = op.equals("DELETE"); - boolean isinsert = op.equals("INSERT"); - TableInfo ti = getTableInfo(tableName); - StringBuilder newJson = new StringBuilder("JSON_OBJECT("); // JSON_OBJECT(key, val, key, val) page 1766 - StringBuilder keyJson = new StringBuilder("JSON_OBJECT("); - String pfx = ""; - String keypfx = ""; - for (String col : ti.columns) { - newJson.append(pfx) - .append("'").append(col).append("', ") - .append(isdelete ? "OLD." : "NEW.") - .append(col); - if (ti.iskey(col) || !ti.hasKey()) { - keyJson.append(keypfx) - .append("'").append(col).append("', ") - .append(isinsert ? "NEW." : "OLD.") - .append(col); - keypfx = ", "; - } - pfx = ", "; - } - newJson.append(")"); - keyJson.append(")"); - //\TODO check if using mysql driver, so instead check the exception - StringBuilder sb = new StringBuilder() - .append("CREATE TRIGGER ") // IF NOT EXISTS not supported by MySQL! - .append(String.format("%s_%s", op.substring(0, 1), tableName)) - .append(" AFTER ") - .append(op) - .append(" ON ") - .append(tableName) - .append(" FOR EACH ROW INSERT INTO ") - .append(TRANS_TBL) - .append(" (TABLENAME, OP, NEWROWDATA, KEYDATA, CONNECTION_ID) VALUES('") - .append(tableName) - .append("', ") - .append(isdelete ? "'D'" : (op.equals("INSERT") ? "'I'" : "'U'")) - .append(", ") - .append(newJson.toString()) - .append(", ") - .append(keyJson.toString()) - .append(", ") - .append("CONNECTION_ID()") - .append(")"); - return sb.toString(); - } - private String[] getTriggerNames(String tableName) { - return new String[] { - "I_" + tableName, // INSERT trigger - "U_" + tableName, // UPDATE trigger - "D_" + tableName // DELETE trigger - }; - } - - @Override - public void dropSQLTriggers(String tableName) { - try { - for (String name : getTriggerNames(tableName)) { - logger.info(EELFLoggerDelegate.applicationLogger,"REMOVE trigger "+name+" from msmmap"); - executeSQLWrite("DROP TRIGGER IF EXISTS " +name); - //\TODO Fix this is an error - //msm.unregister(name); - } - } catch (SQLException e) { - logger.error(EELFLoggerDelegate.errorLogger,"dropSQLTriggers: "+e); - } - } - - @Override - public void insertRowIntoSqlDb(String tableName, Map map) { - TableInfo ti = getTableInfo(tableName); - String sql = ""; - if (rowExists(tableName, ti, map)) { - // Update - Construct the what and where strings for the DB write - StringBuilder what = new StringBuilder(); - StringBuilder where = new StringBuilder(); - String pfx = ""; - String pfx2 = ""; - for (int i = 0; i < ti.columns.size(); i++) { - String col = ti.columns.get(i); - String val = Utils.getStringValue(map.get(col)); - if (ti.iskey.get(i)) { - where.append(pfx).append(col).append("=").append(val); - pfx = " AND "; - } else { - what.append(pfx2).append(col).append("=").append(val); - pfx2 = ", "; - } - } - sql = String.format("UPDATE %s SET %s WHERE %s", tableName, what.toString(), where.toString()); - } else { - // Construct the value string and column name string for the DB write - StringBuilder fields = new StringBuilder(); - StringBuilder values = new StringBuilder(); - String pfx = ""; - for (String col : ti.columns) { - fields.append(pfx).append(col); - values.append(pfx).append(Utils.getStringValue(map.get(col))); - pfx = ", "; - } - sql = String.format("INSERT INTO %s (%s) VALUES (%s);", tableName, fields.toString(), values.toString()); - } - try { 
- executeSQLWrite(sql); - } catch (SQLException e1) { - logger.error(EELFLoggerDelegate.errorLogger,"executeSQLWrite: "+e1); - } - // TODO - remove any entries from MDBC_TRANSLOG corresponding to this update - // SELECT IX, OP, KEYDATA FROM MDBC_TRANS_TBL WHERE CONNID = "+connId AND TABLENAME = tblname - } - - private boolean rowExists(String tableName, TableInfo ti, Map map) { - StringBuilder where = new StringBuilder(); - String pfx = ""; - for (int i = 0; i < ti.columns.size(); i++) { - if (ti.iskey.get(i)) { - String col = ti.columns.get(i); - String val = Utils.getStringValue(map.get(col)); - where.append(pfx).append(col).append("=").append(val); - pfx = " AND "; - } - } - String sql = String.format("SELECT * FROM %s WHERE %s", tableName, where.toString()); - ResultSet rs = executeSQLRead(sql); - try { - boolean rv = rs.next(); - rs.close(); - return rv; - } catch (SQLException e) { - return false; - } - } - - - @Override - public void deleteRowFromSqlDb(String tableName, Map map) { - TableInfo ti = getTableInfo(tableName); - StringBuilder where = new StringBuilder(); - String pfx = ""; - for (int i = 0; i < ti.columns.size(); i++) { - if (ti.iskey.get(i)) { - String col = ti.columns.get(i); - Object val = map.get(col); - where.append(pfx).append(col).append("=").append(Utils.getStringValue(val)); - pfx = " AND "; - } - } - try { - String sql = String.format("DELETE FROM %s WHERE %s", tableName, where.toString()); - executeSQLWrite(sql); - } catch (SQLException e) { - e.printStackTrace(); - } - } - - /** - * This method executes a read query in the SQL database. Methods that call this method should be sure - * to call resultset.getStatement().close() when done in order to free up resources. - * @param sql the query to run - * @return a ResultSet containing the rows returned from the query - */ - @Override - public ResultSet executeSQLRead(String sql) { - logger.debug(EELFLoggerDelegate.applicationLogger,"executeSQLRead"); - logger.debug("Executing SQL read:"+ sql); - ResultSet rs = null; - try { - Statement stmt = dbConnection.createStatement(); - rs = stmt.executeQuery(sql); - } catch (SQLException e) { - logger.error(EELFLoggerDelegate.errorLogger,"executeSQLRead"+e); - } - return rs; - } - - /** - * This method executes a write query in the sql database. - * @param sql the SQL to be sent to MySQL - * @throws SQLException if an underlying JDBC method throws an exception - */ - protected void executeSQLWrite(String sql) throws SQLException { - logger.debug(EELFLoggerDelegate.applicationLogger, "Executing SQL write:"+ sql); - - Statement stmt = dbConnection.createStatement(); - stmt.execute(sql); - stmt.close(); - } - - /** - * Code to be run within the DB driver before a SQL statement is executed. This is where tables - * can be synchronized before a SELECT, for those databases that do not support SELECT triggers. - * @param sql the SQL statement that is about to be executed - * @return list of keys that will be updated, if they can't be determined afterwards (i.e. 
sql table doesn't have primary key) - */ - @Override - public void preStatementHook(final String sql) { - if (sql == null) { - return; - } - String cmd = sql.trim().toLowerCase(); - if (cmd.startsWith("select")) { - String[] parts = sql.trim().split(" "); - Set set = getSQLTableSet(); - for (String part : parts) { - if (set.contains(part.toUpperCase())) { - // Found a candidate table name in the SELECT SQL -- update this table - //msm.readDirtyRowsAndUpdateDb(part); - } - } - } - } - - /** - * Code to be run within the DB driver after a SQL statement has been executed. This is where remote - * statement actions can be copied back to Cassandra/MUSIC. - * @param sql the SQL statement that was executed - */ - @Override - public void postStatementHook(final String sql,Map transactionDigest) { - if (sql != null) { - String[] parts = sql.trim().split(" "); - String cmd = parts[0].toLowerCase(); - if ("delete".equals(cmd) || "insert".equals(cmd) || "update".equals(cmd)) { - try { - this.updateStagingTable(transactionDigest); - } catch (NoSuchFieldException e) { - // TODO Auto-generated catch block - e.printStackTrace(); - } - } - } - } - - private OperationType toOpEnum(String operation) throws NoSuchFieldException { - switch (operation.toLowerCase()) { - case "i": - return OperationType.INSERT; - case "d": - return OperationType.DELETE; - case "u": - return OperationType.UPDATE; - case "s": - return OperationType.SELECT; - default: - logger.error(EELFLoggerDelegate.errorLogger,"Invalid operation selected: ["+operation+"]"); - throw new NoSuchFieldException("Invalid operation enum"); - } - - } - /** - * Copy data that is in transaction table into music interface - * @param transactionDigests - * @throws NoSuchFieldException - */ - private void updateStagingTable(Map transactionDigests) throws NoSuchFieldException { - // copy from DB.MDBC_TRANSLOG where connid == myconnid - // then delete from MDBC_TRANSLOG - String sql2 = "SELECT IX, TABLENAME, OP, KEYDATA, NEWROWDATA FROM "+TRANS_TBL +" WHERE CONNECTION_ID = " + this.connId; - try { - ResultSet rs = executeSQLRead(sql2); - Set rows = new TreeSet(); - while (rs.next()) { - int ix = rs.getInt("IX"); - String op = rs.getString("OP"); - OperationType opType = toOpEnum(op); - String tbl = rs.getString("TABLENAME"); - String keydataStr = rs.getString("KEYDATA"); - String newRowStr = rs.getString("NEWROWDATA"); - JSONObject newRow = new JSONObject(new JSONTokener(newRowStr)); - String musicKey; - TableInfo ti = getTableInfo(tbl); - if (!ti.hasKey()) { - //create music key - //\TODO fix, this is completely broken - //if (op.startsWith("I")) { - //\TODO Improve the generation of primary key, it should be generated using - // the actual columns, otherwise performance when doing range queries are going - // to be even worse (see the else bracket down) - // - musicKey = msm.generateUniqueKey(); - /*} else { - //get key from data - musicKey = msm.getMusicKeyFromRowWithoutPrimaryIndexes(tbl,newRow); - }*/ - newRow.put(msm.getMusicDefaultPrimaryKeyName(), musicKey); - } - else { - //Use the keys - musicKey = msm.getMusicKeyFromRow(tbl, newRow); - if(musicKey.isEmpty()) { - logger.error(EELFLoggerDelegate.errorLogger,"Primary key is invalid: ["+tbl+","+op+"]"); - throw new NoSuchFieldException("Invalid operation enum"); - } - } - Range range = new Range(tbl); - if(!transactionDigests.containsKey(range)) { - transactionDigests.put(range, new StagingTable()); - } - transactionDigests.get(range).addOperation(musicKey, opType, newRow.toString()); - rows.add(ix); - 
} - rs.getStatement().close(); - if (rows.size() > 0) { - sql2 = "DELETE FROM "+TRANS_TBL+" WHERE IX = ?"; - PreparedStatement ps = dbConnection.prepareStatement(sql2); - logger.debug("Executing: "+sql2); - logger.debug(" For ix = "+rows); - for (int ix : rows) { - ps.setInt(1, ix); - ps.execute(); - } - ps.close(); - } - } catch (SQLException e) { - logger.warn("Exception in postStatementHook: "+e); - e.printStackTrace(); - } - } - - - - /** - * Update music with data from MySQL table - * - * @param tableName - name of table to update in music - */ - @Override - public void synchronizeData(String tableName) { - ResultSet rs = null; - TableInfo ti = getTableInfo(tableName); - String query = "SELECT * FROM "+tableName; - - try { - rs = executeSQLRead(query); - if(rs==null) return; - while(rs.next()) { - - JSONObject jo = new JSONObject(); - if (!getTableInfo(tableName).hasKey()) { - String musicKey = msm.generateUniqueKey(); - jo.put(msm.getMusicDefaultPrimaryKeyName(), musicKey); - } - - for (String col : ti.columns) { - jo.put(col, rs.getString(col)); - } - - @SuppressWarnings("unused") - Object[] row = Utils.jsonToRow(ti,tableName, jo,msm.getMusicDefaultPrimaryKeyName()); - //\FIXME this is wrong now, update of the dirty row and entity is now handled by the archival process - //msm.updateDirtyRowAndEntityTableInMusic(ti,tableName, jo); - } - } catch (Exception e) { - logger.error(EELFLoggerDelegate.errorLogger, "synchronizing data " + tableName + - " -> " + e.getMessage()); - } - finally { - try { - rs.close(); - } catch (SQLException e) { - //continue - } - } - - } - - /** - * Return a list of "reserved" names, that should not be used by MySQL client/MUSIC - * These are reserved for mdbc - */ - @Override - public List getReservedTblNames() { - ArrayList rsvdTables = new ArrayList(); - rsvdTables.add(TRANS_TBL); - //Add others here as necessary - return rsvdTables; - } - @Override - public String getPrimaryKey(String sql, String tableName) { - // - return null; - } - - @SuppressWarnings("unused") - @Deprecated - private ArrayList getMusicKey(String sql) { - try { - net.sf.jsqlparser.statement.Statement stmt = CCJSqlParserUtil.parse(sql); - if (stmt instanceof Insert) { - Insert s = (Insert) stmt; - String tbl = s.getTable().getName(); - return getMusicKey(tbl, "INSERT", sql); - } else if (stmt instanceof Update){ - Update u = (Update) stmt; - String tbl = u.getTables().get(0).getName(); - return getMusicKey(tbl, "UPDATE", sql); - } else if (stmt instanceof Delete) { - Delete d = (Delete) stmt; - //TODO: IMPLEMENT - String tbl = d.getTable().getName(); - return getMusicKey(tbl, "DELETE", sql); - } else { - System.err.println("Not recognized sql type"); - } - - } catch (JSQLParserException e) { - - e.printStackTrace(); - } - //Something went wrong here - return new ArrayList(); - } - - /** - * Returns all keys that matches the current sql statement, and not in already updated keys. 
- * - * @param tbl - * @param cmd - * @param sql - */ - @Deprecated - private ArrayList getMusicKey(String tbl, String cmd, String sql) { - ArrayList musicKeys = new ArrayList(); - /* - if (cmd.equalsIgnoreCase("insert")) { - //create key, return key - musicKeys.add(msm.generatePrimaryKey()); - } else if (cmd.equalsIgnoreCase("update") || cmd.equalsIgnoreCase("delete")) { - try { - net.sf.jsqlparser.statement.Statement stmt = CCJSqlParserUtil.parse(sql); - String where; - if (stmt instanceof Update) { - where = ((Update) stmt).getWhere().toString(); - } else if (stmt instanceof Delete) { - where = ((Delete) stmt).getWhere().toString(); - } else { - System.err.println("Unknown type: " +stmt.getClass()); - where = ""; - } - ResultSet rs = executeSQLRead("SELECT * FROM " + tbl + " WHERE " + where); - musicKeys = msm.getMusicKeysWhere(tbl, Utils.parseResults(getTableInfo(tbl), rs)); - } catch (JSQLParserException e) { - - e.printStackTrace(); - } catch (SQLException e) { - //Not a valid sql query - e.printStackTrace(); - } - } - */ - return musicKeys; - } - - - @Deprecated - public void insertRowIntoSqlDbOLD(String tableName, Map map) { - // First construct the value string and column name string for the db write - TableInfo ti = getTableInfo(tableName); - StringBuilder fields = new StringBuilder(); - StringBuilder values = new StringBuilder(); - String pfx = ""; - for (String col : ti.columns) { - fields.append(pfx).append(col); - values.append(pfx).append(Utils.getStringValue(map.get(col))); - pfx = ", "; - } - - try { - String sql = String.format("INSERT INTO %s (%s) VALUES (%s);", tableName, fields.toString(), values.toString()); - executeSQLWrite(sql); - } catch (SQLException e) { - logger.error(EELFLoggerDelegate.errorLogger,"Insert failed because row exists, do an update"); - StringBuilder where = new StringBuilder(); - pfx = ""; - String pfx2 = ""; - fields.setLength(0); - for (int i = 0; i < ti.columns.size(); i++) { - String col = ti.columns.get(i); - String val = Utils.getStringValue(map.get(col)); - if (ti.iskey.get(i)) { - where.append(pfx).append(col).append("=").append(val); - pfx = " AND "; - } else { - fields.append(pfx2).append(col).append("=").append(val); - pfx2 = ", "; - } - } - String sql = String.format("UPDATE %s SET %s WHERE %s", tableName, fields.toString(), where.toString()); - try { - executeSQLWrite(sql); - } catch (SQLException e1) { - logger.error(EELFLoggerDelegate.errorLogger,"executeSQLWrite"+e1); - } - } - } -} diff --git a/src/main/java/com/att/research/mdbc/mixins/Utils.java b/src/main/java/com/att/research/mdbc/mixins/Utils.java deleted file mode 100755 index 22df08f..0000000 --- a/src/main/java/com/att/research/mdbc/mixins/Utils.java +++ /dev/null @@ -1,220 +0,0 @@ -package com.att.research.mdbc.mixins; - -import java.io.IOException; -import java.io.InputStream; -import java.math.BigDecimal; -import java.nio.ByteBuffer; -import java.sql.ResultSet; -import java.sql.SQLException; -import java.sql.Timestamp; -import java.sql.Types; -import java.util.ArrayList; -import java.util.Date; -import java.util.List; -import java.util.Properties; - -import org.json.JSONObject; - -import com.att.research.logging.EELFLoggerDelegate; -import com.att.research.mdbc.TableInfo; -import com.datastax.driver.core.utils.Bytes; - -/** - * Utility functions used by several of the mixins should go here. - * - * @author Robert P. 
Eby - */ -public class Utils { - private static EELFLoggerDelegate logger = EELFLoggerDelegate.getLogger(Utils.class); - - /** - * Transforms and JsonObject into an array of objects - * @param ti information related to the table - * @param tbl table that jo belong to - * @param jo object that represents a row in the table - * @param musicDefaultPrimaryKeyName contains the name of key associated with the default primary key used by MUSIC, it can be null, if not requird - * @return array with the objects in the row - */ - public static Object[] jsonToRow(TableInfo ti, String tbl, JSONObject jo, String musicDefaultPrimaryKeyName) { - int columnSize = ti.columns.size(); - ArrayList rv = new ArrayList(); - if (musicDefaultPrimaryKeyName!=null && jo.has(musicDefaultPrimaryKeyName)) { - rv.add(jo.getString(musicDefaultPrimaryKeyName)); - } - for (int i = 0; i < columnSize; i++) { - String colname = ti.columns.get(i); - switch (ti.coltype.get(i)) { - case Types.BIGINT: - rv.add(jo.optLong(colname, 0)); - break; - case Types.BOOLEAN: - rv.add(jo.optBoolean(colname, false)); - break; - case Types.BLOB: - rv.add(jo.optString(colname, "")); - break; - case Types.DECIMAL: - rv.add(jo.optBigDecimal(colname, BigDecimal.ZERO)); - break; - case Types.DOUBLE: - rv.add(jo.optDouble(colname, 0)); - break; - case Types.INTEGER: - rv.add(jo.optInt(colname, 0)); - break; - case Types.TIMESTAMP: - //rv[i] = new Date(jo.optString(colname, "")); - rv.add(jo.optString(colname, "")); - break; - case Types.DATE: - case Types.VARCHAR: - //Fall through - default: - rv.add(jo.optString(colname, "")); - break; - } - } - return rv.toArray(); - } - - /** - * Return a String equivalent of an Object. Useful for writing SQL. - * @param val the object to String-ify - * @return the String value - */ - public static String getStringValue(Object val) { - if (val == null) - return "NULL"; - if (val instanceof String) - return "'" + val.toString().replaceAll("'", "''") + "'"; // double any quotes - if (val instanceof Number) - return ""+val; - if (val instanceof ByteBuffer) - return "'" + Bytes.toHexString((ByteBuffer)val).substring(2) + "'"; // substring(2) is to remove the "0x" at front - if (val instanceof Date) - return "'" + (new Timestamp(((Date)val).getTime())).toString() + "'"; - // Boolean, and anything else - return val.toString(); - } - - /** - * Parse result set and put into object array - * @param tbl - * @param rs - * @return - * @throws SQLException - */ - public static ArrayList parseResults(TableInfo ti, ResultSet rs) throws SQLException { - ArrayList results = new ArrayList(); - while (rs.next()) { - Object[] row = new Object[ti.columns.size()]; - for (int i = 0; i < ti.columns.size(); i++) { - String colname = ti.columns.get(i); - switch (ti.coltype.get(i)) { - case Types.BIGINT: - row[i] = rs.getLong(colname); - break; - case Types.BOOLEAN: - row[i] = rs.getBoolean(colname); - break; - case Types.BLOB: - System.err.println("WE DO NOT SUPPORT BLOBS IN H2!! COLUMN NAME="+colname); - //logger.error("WE DO NOT SUPPORT BLOBS IN H2!! COLUMN NAME="+colname); - // throw an exception here??? 
- break; - case Types.DOUBLE: - row[i] = rs.getDouble(colname); - break; - case Types.INTEGER: - row[i] = rs.getInt(colname); - break; - case Types.TIMESTAMP: - //rv[i] = new Date(jo.optString(colname, "")); - row[i] = rs.getString(colname); - break; - case Types.VARCHAR: - //Fall through - default: - row[i] = rs.getString(colname); - break; - } - } - results.add(row); - } - return results; - } - - @SuppressWarnings("unused") - static List> getClassesImplementing(Class implx) { - Properties pr = null; - try { - pr = new Properties(); - pr.load(Utils.class.getResourceAsStream("/mdbc_driver.properties")); - } - catch (IOException e) { - logger.error(EELFLoggerDelegate.errorLogger, "Could not load property file > " + e.getMessage()); - } - - List> list = new ArrayList>(); - if (pr==null) { - return list; - } - String mixins = pr.getProperty("MIXINS"); - for (String className: mixins.split("[ ,]")) { - try { - Class cl = Class.forName(className.trim()); - if (MixinFactory.impl(cl, implx)) { - list.add(cl); - } - } catch (ClassNotFoundException e) { - logger.error(EELFLoggerDelegate.errorLogger,"Mixin class "+className+" not found."); - } - } - return list; - } - - public static void registerDefaultDrivers() { - Properties pr = null; - try { - pr = new Properties(); - pr.load(Utils.class.getResourceAsStream("/mdbc_driver.properties")); - } - catch (IOException e) { - logger.error("Could not load property file > " + e.getMessage()); - } - - @SuppressWarnings("unused") - List> list = new ArrayList>(); - String drivers = pr.getProperty("DEFAULT_DRIVERS"); - for (String driver: drivers.split("[ ,]")) { - logger.info(EELFLoggerDelegate.applicationLogger, "Registering jdbc driver '" + driver + "'"); - try { - @SuppressWarnings("unused") - Class cl = Class.forName(driver.trim()); - } catch (ClassNotFoundException e) { - logger.error(EELFLoggerDelegate.errorLogger,"Driver class "+driver+" not found."); - } - } - } - - public static Properties getMdbcProperties() { - Properties prop = new Properties(); - InputStream input = null; - try { - input = Utils.class.getClassLoader().getResourceAsStream("/mdbc.properties"); - prop.load(input); - } catch (Exception e) { - logger.warn(EELFLoggerDelegate.applicationLogger, "Could load mdbc.properties." - + "Proceeding with defaults " + e.getMessage()); - } finally { - if (input != null) { - try { - input.close(); - } catch (IOException e) { - logger.error(EELFLoggerDelegate.errorLogger, e.getMessage()); - } - } - } - return prop; - } -} diff --git a/src/main/java/com/att/research/mdbc/mixins/package-info.java b/src/main/java/com/att/research/mdbc/mixins/package-info.java deleted file mode 100755 index edad7e8..0000000 --- a/src/main/java/com/att/research/mdbc/mixins/package-info.java +++ /dev/null @@ -1,47 +0,0 @@ -/** - *

- * This package provides the "mixins" to use when constructing a MusicSqlManager. The mixins define how MusicSqlManager - * will interface both to the database being mirrored (via the {@link com.att.research.mdbc.mixins.DBInterface} interface), - * and how it will interface to the persistence layer provided by MUSIC (via the {@link com.att.research.mdbc.mixins.MusicInterface} - * interface). - *

- *

- * The choice of which mixins to use is determined by the MusicSqlManager constructor. - * It will decide based upon the URL and connection properties with which it is presented (from the - * {@link java.sql.DriverManager#getConnection(String, java.util.Properties)} call). - *

- *

- * The list of mixins that may be selected from is stored in the properties file mdbc.properties
- * under the name MIXINS. This implementation provides the following mixins:
- *

- * - * - * - * - * - * - * - *
- * <table>
- * <tr><th>Name</th><th>Class</th><th>Description</th></tr>
- * <tr><td>cassandra</td><td>c.a.r.m.m.CassandraMixin</td><td>A Cassandra based
- * persistence layer (without any of the table locking that MUSIC normally provides).</td></tr>
- * <tr><td>cassandra2</td><td>c.a.r.m.m.Cassandra2Mixin</td><td>Similar to the cassandra mixin, but stores all
- * dirty row information in one table, rather than one table per real table.</td></tr>
- * <tr><td>h2</td><td>c.a.r.m.m.H2Mixin</td><td>This mixin provides access to either an in-memory, or a local
- * (file-based) version of the H2 database.</td></tr>
- * <tr><td>h2server</td><td>c.a.r.m.m.H2ServerMixin</td><td>This mixin provides access to a copy of the H2 database
- * running as a server. Because the server needs special Java classes in order to handle certain TRIGGER actions, the
- * server must be set up in a special way (see below).</td></tr>
- * <tr><td>mysql</td><td>c.a.r.m.m.MySQLMixin</td><td>This mixin provides access to MySQL running on a remote server.</td></tr>
- * </table>
- *

Starting the H2 Server

- *

- * The H2 Server, when used with MDBC, must contain the MDBC Trigger class, and supporting libraries. - * This can be done as follows: - *

- *
- *	CLASSPATH=$PWD/target/mdbc-h2server-0.0.1-SNAPSHOT.jar
- *	CLASSPATH=$CLASSPATH:$HOME/.m2/repository/com/h2database/h2/1.3.168/h2-1.3.168.jar
- *	CLASSPATH=$CLASSPATH:$HOME/.m2/repository/log4j/log4j/1.2.17/log4j-1.2.17.jar
- *	CLASSPATH=$CLASSPATH:$HOME/.m2/repository/org/json/json/20160810/json-20160810.jar
- *	export CLASSPATH
- *	java org.h2.tools.Server
- * 
- *

- * The mdbc-h2server-0.0.1-SNAPSHOT.jar file is built with Maven using the pom-h2server.xml pom file. - *

- */ -package com.att.research.mdbc.mixins; diff --git a/src/main/java/com/att/research/mdbc/package-info.java b/src/main/java/com/att/research/mdbc/package-info.java deleted file mode 100755 index 5ad59c8..0000000 --- a/src/main/java/com/att/research/mdbc/package-info.java +++ /dev/null @@ -1,87 +0,0 @@ -/** - *

- * This package provides a JDBC driver that can be used to mirror the contents of a database to and from - * Cassandra. The mirroring occurs as a side effect of - * execute() statements against a JDBC connection, and triggers placed in the database to catch database modifications. - * The initial implementation is written to mirror an H2 database. - *

- *

- * This JDBC driver will intercept all table creations, SELECTs, INSERTs, DELETEs, and UPDATEs made to the underlying - * database, and make sure they are copied to Cassandra. In addition, for every table XX that is created, another table - * DIRTY_XX will be created to communicate the existence of dirty rows to other Cassandra replicas (with the - * Cassandra2 Mixin, the table is called DIRTY____ and there is only one table). Dirty rows - * will be copied, as needed back into the database from Cassandra before any SELECT. - *

- *

To use with JDBC

- *
    - *
  1. Add this jar, and all dependent jars to your CLASSPATH.
  2. - *
  3. Rewrite your JDBC URLs from jdbc:h2:... to jdbc:mdbc:.... - *
  4. If you supply properties to the {@link java.sql.DriverManager#getConnection(String, java.util.Properties)} call,
- * use the following optional properties to control behavior of the proxy:
- * <table>
- * <tr><th>Property Name</th><th>Property Value</th><th>Default Value</th></tr>
- * <tr><td>MDBC_DB_MIXIN</td><td>The mixin name to use to select the database mixin to use for this connection.</td><td></td></tr>
- * <tr><td>MDBC_MUSIC_MIXIN</td><td>The mixin name to use to select the MUSIC mixin to use for this connection.</td><td></td></tr>
- * <tr><td>myid</td><td>The ID of this replica in the collection of replicas sharing the same tables.</td><td>0</td></tr>
- * <tr><td>replicas</td><td>A comma-separated list of replica names for the collection of replicas sharing the same tables.</td><td>the value of myid</td></tr>
- * <tr><td>music_keyspace</td><td>The keyspace name to use in Cassandra for all tables created by this instance of MDBC.</td><td>mdbc</td></tr>
- * <tr><td>music_address</td><td>The IP address to use to connect to Cassandra.</td><td>localhost</td></tr>
- * <tr><td>music_rfactor</td><td>The replication factor to use for the new keyspace that is created.</td><td>2</td></tr>
- * <tr><td>disabled</td><td>If set to true the mirroring is completely disabled; this is the equivalent of using the database driver directly.</td><td>false</td></tr>
- * </table>
    - *
  5. - *
  6. Load the driver using the following call: - *
    - *	Class.forName("com.att.research.mdbc.ProxyDriver");
    - * 
  7. - *
- *

Because, under the current design, the MDBC driver must be running within the same JVM as the database, MDBC - * will only explicitly support in-memory databases (URL of jdbc:mdbc:mem:...), or local file - * databases (URL of jdbc:mdbc:/path/to/file). Attempts to access a remote H2 server (URL - * jdbc:mdbc:tcp://host/path/to/db) will probably not work, although MDBC will not stop you from trying. - *
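
Putting the steps above together, an illustrative sketch of a client program (not taken from the MDBC sources; the mixin names, replica settings, keyspace, and the EMPLOYEE table are assumed example values borrowed from elsewhere in this document) might look like this:

    import java.sql.Connection;
    import java.sql.DriverManager;
    import java.sql.ResultSet;
    import java.sql.Statement;
    import java.util.Properties;

    public class MdbcUsageSketch {
        public static void main(String[] args) throws Exception {
            // Step 6: register the MDBC proxy driver
            Class.forName("com.att.research.mdbc.ProxyDriver");

            // Step 4: optional properties controlling the proxy (values here are assumed examples)
            Properties props = new Properties();
            props.setProperty("MDBC_DB_MIXIN", "h2");             // database mixin
            props.setProperty("MDBC_MUSIC_MIXIN", "cassandra2");  // MUSIC mixin
            props.setProperty("myid", "0");                       // id of this replica
            props.setProperty("replicas", "0,1");                 // replicas sharing the tables
            props.setProperty("music_keyspace", "mdbctest");      // Cassandra keyspace
            props.setProperty("music_address", "localhost");      // Cassandra address

            // Step 3: URL rewritten from jdbc:h2:mem:db0 to jdbc:mdbc:mem:db0
            try (Connection conn = DriverManager.getConnection("jdbc:mdbc:mem:db0", props);
                 Statement stmt = conn.createStatement()) {
                stmt.execute("CREATE TABLE IF NOT EXISTS EMPLOYEE (ID INT PRIMARY KEY, NAME VARCHAR(64))");
                stmt.execute("INSERT INTO EMPLOYEE (ID, NAME) VALUES (1, 'Zaphod')");
                try (ResultSet rs = stmt.executeQuery("SELECT NAME FROM EMPLOYEE")) {
                    while (rs.next()) {
                        System.out.println(rs.getString("NAME"));
                    }
                }
            }
        }
    }
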

- * - *

To Define a Tomcat DataSource Resource

- *

The following code snippet can be used as a guide when setting up a Tomcat DataSource Resource. - * This snippet goes in the server.xml file. The items in bold indicate changed or new items:

- *
- * <Resource name="jdbc/ProcessEngine"
- *	auth="Container"
- *	type="javax.sql.DataSource"
- *	factory="org.apache.tomcat.jdbc.pool.DataSourceFactory"
- *	uniqueResourceName="process-engine"
- *	driverClassName="com.att.research.mdbc.ProxyDriver"
- *	url="jdbc:mdbc:./camunda-h2-dbs/process-engine;MVCC=TRUE;TRACE_LEVEL_FILE=0;DB_CLOSE_ON_EXIT=FALSE"
- *	connectionProperties="myid=0;replicas=0,1,2;music_keyspace=camunda;music_address=localhost"
- *	username="sa"
- *	password="sa"
- *	maxActive="20"
- *	minIdle="5" />
- * 
- * - *

To Define a JBoss DataSource

- *

The following code snippet can be used as a guide when setting up a JBoss DataSource. - * This snippet goes in the service.xml file. The items in bold indicate changed or new items:

- *
- * <datasources>
- *   <datasource jta="true" jndi-name="java:jboss/datasources/ProcessEngine" pool-name="ProcessEngine" enabled="true" use-java-context="true" use-ccm="true">
- *      <connection-url>jdbc:mdbc:/opt/jboss-eap-6.2.4/standalone/camunda-h2-dbs/process-engine;DB_CLOSE_DELAY=-1;MVCC=TRUE;DB_CLOSE_ON_EXIT=FALSE</connection-url>
- *      <connection-property name="music_keyspace">
- *        camunda
- *      </connection-property>
- *      <driver>mdbc</driver>
- *      <security>
- *        <user-name>sa</user-name>
- *        <password>sa</password>
- *      </security>
- *    </datasource>
- *    <drivers>
- *      <driver name="mdbc" module="com.att.research.mdbc">
- *        <driver-class>com.att.research.mdbc.ProxyDriver</driver-class>
- *      </driver>
- *    </drivers>
- *  </datasources>
- * 
- *

Note: This assumes that you have built and installed the com.att.research.mdbc module within JBoss. - */ -package com.att.research.mdbc; diff --git a/src/main/java/com/att/research/mdbc/tables/MriReference.java b/src/main/java/com/att/research/mdbc/tables/MriReference.java deleted file mode 100644 index a1d0b61..0000000 --- a/src/main/java/com/att/research/mdbc/tables/MriReference.java +++ /dev/null @@ -1,14 +0,0 @@ -package com.att.research.mdbc.tables; - -import java.util.UUID; - -public final class MriReference { - public final String table; - public final UUID index; - - public MriReference(String table, UUID index) { - this.table = table; - this.index= index; - } - -} diff --git a/src/main/java/com/att/research/mdbc/tables/MusicRangeInformationRow.java b/src/main/java/com/att/research/mdbc/tables/MusicRangeInformationRow.java deleted file mode 100644 index e069666..0000000 --- a/src/main/java/com/att/research/mdbc/tables/MusicRangeInformationRow.java +++ /dev/null @@ -1,16 +0,0 @@ -package com.att.research.mdbc.tables; - -import java.util.List; -import java.util.UUID; - -public final class MusicRangeInformationRow { - public final UUID index; - public final PartitionInformation partition; - public final List redoLog; - - public MusicRangeInformationRow(UUID index, List redoLog, PartitionInformation partition) { - this.index = index; - this.redoLog = redoLog; - this.partition = partition; - } -} diff --git a/src/main/java/com/att/research/mdbc/tables/MusixTxDigestId.java b/src/main/java/com/att/research/mdbc/tables/MusixTxDigestId.java deleted file mode 100644 index 71e715a..0000000 --- a/src/main/java/com/att/research/mdbc/tables/MusixTxDigestId.java +++ /dev/null @@ -1,15 +0,0 @@ -package com.att.research.mdbc.tables; - -import java.util.UUID; - -public final class MusixTxDigestId { - public final UUID tablePrimaryKey; - - public MusixTxDigestId(UUID primaryKey) { - this.tablePrimaryKey= primaryKey; - } - - public boolean isEmpty() { - return (this.tablePrimaryKey==null); - } -} diff --git a/src/main/java/com/att/research/mdbc/tables/Operation.java b/src/main/java/com/att/research/mdbc/tables/Operation.java deleted file mode 100644 index abb21c8..0000000 --- a/src/main/java/com/att/research/mdbc/tables/Operation.java +++ /dev/null @@ -1,28 +0,0 @@ -package com.att.research.mdbc.tables; - -import java.io.Serializable; - -import org.json.JSONObject; -import org.json.JSONTokener; - -public final class Operation implements Serializable{ - - private static final long serialVersionUID = -1215301985078183104L; - - final OperationType TYPE; - final String NEW_VAL; - - public Operation(OperationType type, String newVal) { - TYPE = type; - NEW_VAL = newVal; - } - - public JSONObject getNewVal(){ - JSONObject newRow = new JSONObject(new JSONTokener(NEW_VAL)); - return newRow; - } - - public OperationType getOperationType() { - return this.TYPE; - } -} diff --git a/src/main/java/com/att/research/mdbc/tables/OperationType.java b/src/main/java/com/att/research/mdbc/tables/OperationType.java deleted file mode 100644 index ae83485..0000000 --- a/src/main/java/com/att/research/mdbc/tables/OperationType.java +++ /dev/null @@ -1,5 +0,0 @@ -package com.att.research.mdbc.tables; - -public enum OperationType{ - DELETE, UPDATE, INSERT, SELECT -} diff --git a/src/main/java/com/att/research/mdbc/tables/PartitionInformation.java b/src/main/java/com/att/research/mdbc/tables/PartitionInformation.java deleted file mode 100644 index 0e4b80d..0000000 --- 
a/src/main/java/com/att/research/mdbc/tables/PartitionInformation.java +++ /dev/null @@ -1,11 +0,0 @@ -package com.att.research.mdbc.tables; - -import java.util.List; - -public class PartitionInformation { - public final List tables; - - public PartitionInformation(List tables) { - this.tables=tables; - } -} diff --git a/src/main/java/com/att/research/mdbc/tables/StagingTable.java b/src/main/java/com/att/research/mdbc/tables/StagingTable.java deleted file mode 100644 index f5e5101..0000000 --- a/src/main/java/com/att/research/mdbc/tables/StagingTable.java +++ /dev/null @@ -1,51 +0,0 @@ -package com.att.research.mdbc.tables; - -import java.io.Serializable; -import java.util.Deque; -import java.util.HashMap; -import java.util.LinkedList; -import java.util.Set; -import org.apache.commons.lang3.tuple.Pair; -import org.json.JSONObject; - -import com.att.research.logging.EELFLoggerDelegate; - -public class StagingTable implements Serializable{ - /** - * - */ - private static final long serialVersionUID = 7583182634761771943L; - private transient static EELFLoggerDelegate logger = EELFLoggerDelegate.getLogger(StagingTable.class); - //primary key -> Operation - private HashMap> operations; - - public StagingTable() { - operations = new HashMap<>(); - } - - synchronized public void addOperation(String key, OperationType type, String newVal) { - if(!operations.containsKey(key)) { - operations.put(key, new LinkedList<>()); - } - operations.get(key).add(new Operation(type,newVal)); - } - - synchronized public Deque> getIterableSnapshot() throws NoSuchFieldException{ - Deque> response=new LinkedList>(); - //\TODO: check if we can just return the last change to a given key - Set keys = operations.keySet(); - for(String key : keys) { - Deque ops = operations.get(key); - if(ops.isEmpty()) { - logger.error(EELFLoggerDelegate.errorLogger, "Invalid state of the Operation data structure when creating snapshot"); - throw new NoSuchFieldException("Invalid state of the operation data structure"); - } - response.add(Pair.of(key,ops.getLast())); - } - return response; - } - - synchronized public void clean() { - operations.clear(); - } -} diff --git a/src/main/java/com/att/research/mdbc/tables/TxCommitProgress.java b/src/main/java/com/att/research/mdbc/tables/TxCommitProgress.java deleted file mode 100644 index d87e33d..0000000 --- a/src/main/java/com/att/research/mdbc/tables/TxCommitProgress.java +++ /dev/null @@ -1,206 +0,0 @@ -package com.att.research.mdbc.tables; - -import java.math.BigInteger; -import java.util.Map; -import java.util.UUID; -import java.util.concurrent.ConcurrentHashMap; -import com.datastax.driver.core.utils.UUIDs; - -import com.att.research.logging.EELFLoggerDelegate; - -import java.sql.Connection; -import java.util.concurrent.atomic.AtomicReference; - - -public class TxCommitProgress{ - private EELFLoggerDelegate logger = EELFLoggerDelegate.getLogger(TxCommitProgress.class); - - private Map transactionInfo; - - public TxCommitProgress(){ - transactionInfo = new ConcurrentHashMap<>(); - } - - public boolean containsTx(String txId) { - return transactionInfo.containsKey(txId); - } - - public UUID getCommitId(String txId) { - CommitProgress prog = transactionInfo.get(txId); - if(prog.isCommitIdAssigned()) { - return prog.getCommitId(); - } - UUID commitId = UUIDs.random(); - prog.setCommitId(commitId); - return commitId; - } - - public void createNewTransactionTracker(String id, Connection conn) { - transactionInfo.put(id, new CommitProgress(id,conn)); - } - - public void 
commitRequested(String txId) { - CommitProgress prog = transactionInfo.get(txId); - if(prog == null){ - logger.error(EELFLoggerDelegate.errorLogger, "Transaction doesn't exist: [%l], failure when storing commit request",txId); - } - prog.setCommitRequested(); - } - - public void setSQLDone(String txId) { - CommitProgress prog = transactionInfo.get(txId); - if(prog == null){ - logger.error(EELFLoggerDelegate.errorLogger, "Transaction doesn't exist: [%l], failure when storing saving completion of SQL",txId); - } - prog.setSQLCompleted(); - } - - public void setMusicDone(String txId) { - CommitProgress prog = transactionInfo.get(txId); - if(prog == null){ - logger.error(EELFLoggerDelegate.errorLogger, "Transaction doesn't exist: [%l], failure when storing saving completion of Music",txId); - } - prog.setMusicCompleted(); - } - - public Connection getConnection(String txId){ - CommitProgress prog = transactionInfo.get(txId); - if(prog == null){ - logger.error(EELFLoggerDelegate.errorLogger, "Transaction doesn't exist: [%l], failure when retrieving statement",txId); - } - return prog.getConnection(); - } - - public void setRecordId(String txId, MusixTxDigestId recordId){ - CommitProgress prog = transactionInfo.get(txId); - if(prog == null){ - logger.error(EELFLoggerDelegate.errorLogger, "Transaction doesn't exist: [%l], failure when setting record Id",txId); - } - prog.setRecordId(recordId); - } - - public MusixTxDigestId getRecordId(String txId) { - CommitProgress prog = transactionInfo.get(txId); - if(prog == null){ - logger.error(EELFLoggerDelegate.errorLogger, "Transaction doesn't exist: [%l], failure when getting record Id",txId); - } - return prog.getRecordId(); - } - - public boolean isRecordIdAssigned(String txId) { - CommitProgress prog = transactionInfo.get(txId); - if(prog == null){ - logger.error(EELFLoggerDelegate.errorLogger, "Transaction doesn't exist: [%l], failure when checking record",txId); - } - return prog.isRedoRecordAssigned(); - } - - public boolean isComplete(String txId) { - CommitProgress prog = transactionInfo.get(txId); - if(prog == null){ - logger.error(EELFLoggerDelegate.errorLogger, "Transaction doesn't exist: [%l], failure when checking completion",txId); - } - return prog.isComplete(); - } - - public void reinitializeTxProgress(String txId) { - CommitProgress prog = transactionInfo.get(txId); - if(prog == null){ - logger.error(EELFLoggerDelegate.errorLogger, "Transaction doesn't exist: [%l], failure when reinitializing tx progress",txId); - } - prog.reinitialize(); - } - - public void deleteTxProgress(String txId){ - transactionInfo.remove(txId); - } -} - -final class CommitProgress{ - private String lTxId; // local transaction id - private UUID commitId; // commit id - private boolean commitRequested; //indicates if the user tried to commit the request already. - private boolean SQLDone; // indicates if SQL was already committed - private boolean MusicDone; // indicates if music commit was already performed, atomic bool - private Connection connection;// reference to a connection object. This is used to complete a commit if it failed in the original thread. 
- private Long timestamp; // last time this data structure was updated - private MusixTxDigestId musixTxDigestId;// record id for each partition - - public CommitProgress(String id,Connection conn){ - musixTxDigestId =null; - lTxId = id; - commitRequested = false; - SQLDone = false; - MusicDone = false; - connection = conn; - commitId = null; - timestamp = System.currentTimeMillis(); - } - - public synchronized boolean isComplete() { - return commitRequested && SQLDone && MusicDone; - } - - public synchronized void setCommitId(UUID commitId) { - this.commitId = commitId; - timestamp = System.currentTimeMillis(); - } - - public synchronized void reinitialize() { - commitId = null; - musixTxDigestId =null; - commitRequested = false; - SQLDone = false; - MusicDone = false; - timestamp = System.currentTimeMillis(); - } - - public synchronized void setCommitRequested() { - commitRequested = true; - timestamp = System.currentTimeMillis(); - } - - public synchronized void setSQLCompleted() { - SQLDone = true; - timestamp = System.currentTimeMillis(); - } - - public synchronized void setMusicCompleted() { - MusicDone = true; - timestamp = System.currentTimeMillis(); - } - - public Connection getConnection() { - timestamp = System.currentTimeMillis(); - return connection; - } - - public long getTimestamInMillis() { - return timestamp; - } - - public synchronized void setRecordId(MusixTxDigestId id) { - musixTxDigestId = id; - timestamp = System.currentTimeMillis(); - } - - public synchronized boolean isRedoRecordAssigned() { - return this.musixTxDigestId !=null; - } - - public synchronized MusixTxDigestId getRecordId() { - return musixTxDigestId; - } - - public synchronized UUID getCommitId() { - return commitId; - } - - public synchronized String getId() { - return this.lTxId; - } - - public synchronized boolean isCommitIdAssigned() { - return this.commitId!= null; - } -} \ No newline at end of file diff --git a/src/main/java/com/att/research/mdbc/tests/ConnectionTest.java b/src/main/java/com/att/research/mdbc/tests/ConnectionTest.java deleted file mode 100644 index 721b389..0000000 --- a/src/main/java/com/att/research/mdbc/tests/ConnectionTest.java +++ /dev/null @@ -1,419 +0,0 @@ -package com.att.research.mdbc.tests; - -//import java.sql.Connection; -//import java.sql.DriverManager; -//import java.sql.PreparedStatement; -//import java.sql.ResultSet; -//import java.sql.SQLException; -//import java.sql.Statement; -//import java.util.HashSet; -//import java.util.Properties; -//import java.util.Set; -// -//import org.h2.tools.Server; -//import org.junit.After; -//import org.junit.AfterClass; -//import org.junit.Before; -//import org.junit.BeforeClass; -//import org.junit.Test; -//import org.slf4j.Logger; -//import org.slf4j.LoggerFactory; -// -//import com.mysql.jdbc.jdbc2.optional.MysqlDataSource; - - -//@FixMethodOrder(MethodSorters.NAME_ASCENDING) -//@RunWith(ConcurrentTestRunner.class) -public class ConnectionTest { -// -//// static { -//// System.setProperty(org.slf4j.impl.SimpleLogger.DEFAULT_LOG_LEVEL_KEY, "INFO"); -//// System.setProperty(org.slf4j.impl.SimpleLogger.LOG_FILE_KEY, String.format("ComparativeAnalysisTest-%d.log", System.currentTimeMillis())); -//// } -// private static final Logger LOG = LoggerFactory.getLogger(ConnectionTest.class); -// -// Set runningThreads = new HashSet(); -// -// @BeforeClass -// public static void setUpBeforeClass() throws Exception { -// -// } -// -// @AfterClass -// public static void tearDownAfterClass() throws Exception { -// -// } -// -// @Before -// 
public void setUp() throws Exception { -// -// } -// -// @After -// public void tearDown() throws Exception { -// -// } -// -// //@Test -// public void test01() { -// System.out.println("TEST 1: Getting ready for testing connection to Cassandra"); -// -// final CassandraConnector client = new CassandraConnector(); -// final String ipAddress = "localhost"; -// final int port = 9042; -// LOG.info("Connecting to IP Address " + ipAddress + ":" + port + "..."); -// client.connect(ipAddress, port); -// client.close(); -// System.out.println(); -// } -// -// /** -// * Tests for using jdbc as well as mdbc. In order to use, must have mysql and -// * running locally. Must have a database EMP created in the -// * mysql db. Uses "Driver.getConnection(com.mysql.jdbc.Driver)" for jdbc connection -// * -// */ -// //@Test -// public void test02() { -// System.out.println("TEST 2: Getting ready for testing connection via jdbc"); -// // JDBC driver name and database URL -// final String JDBC_DRIVER = "com.mysql.jdbc.Driver"; -// final String DB_URL = "jdbc:mysql://localhost/EMP"; -// -// // Database credentials -// final String USER = "alice"; -// final String PASS = "bob"; -// Properties connectionProps = new Properties(); -// connectionProps.put("user", USER); -// connectionProps.put("password", PASS); -// -// System.out.println("Connecting directly to database..."); -// connectViaDriverManager(JDBC_DRIVER, DB_URL, connectionProps); -// System.out.println(); -// } -// -// /** -// * Performs same test as @test02() except this test uses mdbc. -// * -// * In order to use, must have mysql and Cassandra services running locally. Must -// * have a database EMP created in the mysql db. Uses -// * "Driver.getConnection(com.att.research.mdbc.ProxyDriver)" for mdbc -// * connection -// */ -// //@Test -// public void test03() { -// System.out.println("TEST 3: Getting ready for testing connection via mdbc"); -// // Database credentials -// final String USER = "alice"; -// final String PASS = "bob"; -// Properties connectionProps = new Properties(); -// connectionProps.put("user", USER); -// connectionProps.put("password", PASS); -// -// final String MDBC_DRIVER = "com.att.research.mdbc.ProxyDriver"; -// final String MDBC_DB_URL = "jdbc:mdbc://localhost/TEST"; -// final String MDBC_DB_MIXIN = "mysql"; -// connectionProps.put("MDBC_DB_MIXIN", MDBC_DB_MIXIN); -// -// System.out.println("Connecting to database via mdbc"); -// connectViaDriverManager(MDBC_DRIVER, MDBC_DB_URL, connectionProps); -// System.out.println(); -// } -// -// /** -// * Performs same test as @test02() except this test uses mdbc. -// * -// * In order to use, must have mysql and Cassandra services running locally. Must -// * have a database EMP created in the mysql db. 
Uses -// * "Driver.getConnection(com.att.research.mdbc.ProxyDriver)" for mdbc -// * connection -// * -// * Uses preparedStatements -// */ -// //@Test -// public void test03point5() { -// System.out.println("TEST 3.5: Getting ready for testing connection via mdbc w/ PreparedStatement"); -// // Database credentials -// final String USER = "alice"; -// final String PASS = "bob"; -// Properties connectionProps = new Properties(); -// connectionProps.put("user", USER); -// connectionProps.put("password", PASS); -// -// final String MDBC_DRIVER = "com.att.research.mdbc.ProxyDriver"; -// final String MDBC_DB_URL = "jdbc:mdbc://localhost/EMP"; -// //final String MDBC_DRIVER = "org.h2.Driver"; -// //final String MDBC_DB_URL = "jdbc:h2:tcp://localhost:9092/~/test"; -// final String MDBC_DB_MIXIN = "mysql"; -// connectionProps.put("MDBC_DB_MIXIN", MDBC_DB_MIXIN); -// -// System.out.println("Connecting to database via mdbc"); -// Connection conn = null; -// PreparedStatement stmt = null; -// try { -// //STEP 2: Register JDBC driver -// Class.forName(MDBC_DRIVER); -// -// //STEP 3: Open a connection -// conn = DriverManager.getConnection(MDBC_DB_URL, connectionProps); -// conn.setAutoCommit(false); -// -// //STEP 4: Execute a query -// System.out.println("Inserting into DB"); -// stmt = conn.prepareStatement("INSERT INTO EMPLOYEE (id, first, last, age) VALUES (?, ?, ?, ?)"); -// stmt.setString(1, null); -// stmt.setString(2, "John"); -// stmt.setString(3, "Smith"); -// stmt.setInt(4, 20); -// stmt.execute(); -// -// System.out.println("Inserting again into DB"); -// stmt.setString(2, "Jane"); -// stmt.setInt(4, 30); -// stmt.execute(); -// -// stmt.close(); -// -// conn.commit(); -// -// System.out.println("Querying the DB"); -// stmt = conn.prepareStatement("SELECT id, first, last, age FROM EMPLOYEE WHERE age < ?"); -// stmt.setInt(1, 25); -// ResultSet rs = stmt.executeQuery(); -// //STEP 5: Extract data from result set -// while(rs.next()) { -// //Retrieve by column name -// int id = rs.getInt("id"); -// int age = rs.getInt("age"); -// String first = rs.getString("first"); -// String last = rs.getString("last"); -// -// //Display values -// //* -// System.out.print("ID: " + id); -// System.out.print(", Age: " + age); -// System.out.print(", First: " + first); -// System.out.println(", Last: " + last); -// //*/ -// } -// -// System.out.println("Querying again"); -// stmt.setInt(1, 35); -// rs = stmt.executeQuery(); -// //STEP 5: Extract data from result set -// while(rs.next()) { -// //Retrieve by column name -// int id = rs.getInt("id"); -// int age = rs.getInt("age"); -// String first = rs.getString("first"); -// String last = rs.getString("last"); -// -// //Display values -// //* -// System.out.print("ID: " + id); -// System.out.print(", Age: " + age); -// System.out.print(", First: " + first); -// System.out.println(", Last: " + last); -// //*/ -// } -// -// -// //sql = "DELETE FROM EMPLOYEE WHERE first = \"John\" and last = \"Smith\""; -// //stmt.execute(sql); -// -// //sql = "DROP TABLE IF EXISTS EMPLOYEE"; -// //stmt.execute(sql); -// -// //STEP 6: Clean-up environment -// rs.close(); -// stmt.close(); -// conn.close(); -// } catch(SQLException se) { -// //Handle errors for JDBC -// se.printStackTrace(); -// } catch (Exception e) { -// //Handle errors for Class.forName -// e.printStackTrace(); -// } finally { -// //finally block used to close resources -// try { -// if(stmt!=null) -// stmt.close(); -// } catch(SQLException se2) { -// } -// try { -// if(conn!=null) -// conn.close(); -// } 
catch(SQLException se) { -// se.printStackTrace(); -// } -// } -// System.out.println("Done"); -// } -// -// -// /** -// * Connects to a generic database. Can be used for mdbc or jdbc -// * @param DBC_DRIVER the driver for which to register (Class.forName(DBC_DRIVER)) -// * @param DB_URL the URL for the database we are testing -// * @param connectionProps -// */ -// private void connectViaDriverManager(final String DBC_DRIVER, final String DB_URL, Properties connectionProps) { -// Connection conn = null; -// Statement stmt = null; -// try { -// -// //Server server = Server.createTcpServer("-tcpAllowOthers").start(); -// //STEP 2: Register JDBC driver -// Class.forName(DBC_DRIVER); -// -// //STEP 3: Open a connection -// conn = DriverManager.getConnection(DB_URL, connectionProps); -// conn.setAutoCommit(false); -// -// //STEP 4: Execute a query -// stmt = conn.createStatement(); -// String sql; -// -// //sql = "DROP TABLE EMPLOYEE"; -// //stmt.execute(sql); -// -// sql = "CREATE TABLE IF NOT EXISTS EMPLOYEE (id INT primary key, first VARCHAR(20), last VARCHAR(20), age INT);"; -// stmt.execute(sql); -// -// sql = "INSERT INTO EMPLOYEE (id, first, last, age) VALUES (\"34\", \"Jane4\", \"Doe4\", \"40\")"; -// stmt.execute(sql); -// -// sql = "SELECT id, first, last, age FROM EMPLOYEE"; -// ResultSet rs = stmt.executeQuery(sql); -// -// //STEP 5: Extract data from result set -// while(rs.next()) { -// //Retrieve by column name -// int id = rs.getInt("id"); -// int age = rs.getInt("age"); -// String first = rs.getString("first"); -// String last = rs.getString("last"); -// -// //Display values -// //* -// System.out.print("ID: " + id); -// System.out.print(", Age: " + age); -// System.out.print(", First: " + first); -// System.out.println(", Last: " + last); -// //*/ -// -// } -// //sql = "DELETE FROM EMPLOYEE WHERE first = \"John\" and last = \"Smith\""; -// //stmt.execute(sql); -// -// //sql = "DROP TABLE IF EXISTS EMPLOYEE"; -// //stmt.execute(sql); -// -// conn.commit(); -// -// //STEP 6: Clean-up environment -// rs.close(); -// stmt.close(); -// conn.close(); -// } catch(SQLException se) { -// //Handle errors for JDBC -// se.printStackTrace(); -// } catch (Exception e) { -// //Handle errors for Class.forName -// e.printStackTrace(); -// } finally { -// //finally block used to close resources -// try { -// if(stmt!=null) -// stmt.close(); -// } catch(SQLException se2) { -// } -// try { -// if(conn!=null) -// conn.close(); -// } catch(SQLException se) { -// se.printStackTrace(); -// } -// } -// } -// -// -// -// /** -// * Must be mysql datasource -// * @throws Exception -// */ -// //@Test -// public void test04() throws Exception { -// String dbConnectionName = "testing"; -// String dbUserId = "alice"; -// String dbPasswd = "bob"; -// String db_url = "jdbc:mysql://localhost/EMP"; -// MysqlDataSource dataSource = new MysqlDataSource(); -// dataSource.setUser(dbUserId); -// dataSource.setPassword(dbPasswd); -// dataSource.setURL(db_url); -// -// -// Connection con = dataSource.getConnection(); -// Statement st = con.createStatement(); -// ResultSet rs = null; -// -// //FIXME CREATE EMPLOYEE TABLE -// -// if (st.execute("insert into EMPLOYEE values (\"John Doe\");")) { -// rs = st.getResultSet(); -// } -// -// rs = st.executeQuery("select * from EMPLOYEE;"); -// while (rs.next()) { -// System.out.println(rs.getString("name")); -// } -// -// if (st.execute("DELETE FROM EMPLOYEE")) { -// rs = st.getResultSet(); -// } -// rs.close(); -// st.close(); -// con.close(); -// } -// -// /** -// * Test 
connection to mysql datasource class -// * @throws Exception -// */ -// @Test -// public void test05() throws Exception { -// String dbConnectionName = "testing"; -// String dbUserId = "alice"; -// String dbPasswd = "bob"; -// String db_url = "jdbc:mdbc://localhost/EMP"; -// String db_type = "mysql"; -// MdbcDataSource dataSource = new MdbcDataSource(); -// dataSource.setUser(dbUserId); -// dataSource.setPassword(dbPasswd); -// dataSource.setURL(db_url); -// dataSource.setDBType(db_type); -// -// Connection con = dataSource.getConnection(); -// Statement st = con.createStatement(); -// ResultSet rs = null; -// -// if (st.execute("insert into EMPLOYEE values (\"John Doe\");")) { -// rs = st.getResultSet(); -// } -// -// rs = st.executeQuery("select * from EMPLOYEE;"); -// while (rs.next()) { -// System.out.println(rs.getString("name")); -// } -// -// if (st.execute("DELETE FROM EMPLOYEE")) { -// rs = st.getResultSet(); -// } -// rs.close(); -// st.close(); -// con.close(); -// } -} diff --git a/src/main/java/com/att/research/mdbc/tests/MAIN.java b/src/main/java/com/att/research/mdbc/tests/MAIN.java deleted file mode 100755 index 164b088..0000000 --- a/src/main/java/com/att/research/mdbc/tests/MAIN.java +++ /dev/null @@ -1,106 +0,0 @@ -package com.att.research.mdbc.tests; - -import java.io.FileInputStream; -import java.io.InputStream; -import java.lang.reflect.Constructor; -import java.util.ArrayList; -import java.util.List; - -import org.apache.log4j.Logger; -import org.json.JSONArray; -import org.json.JSONObject; -import org.json.JSONTokener; - -/** - * Run all the tests against all the configurations specified in /tests.json. - * - * @author Robert Eby - */ -public class MAIN { - public static final String CONFIG = "/tests.json"; - - /** - * This class runs all the tests against all the configurations specified in /tests.json. - * It assumes that a copy of Cassandra is running locally on port 9042, that a copy of H2 - * server is is running locally on port 8082, and that a copy of MySQL is running locally - * on port 3306. These can be adjusted by editing the /tests.json file. - * - * @param args command line arguments - * @throws Exception if anything goes wrong - */ - public static void main(String[] args) throws Exception { - new MAIN(args).run(); - System.exit(0); - } - - private JSONArray configs; - private List tests; - private int total_success, total_failure; - - public MAIN(String[] args) throws Exception { - configs = null; - tests = new ArrayList(); - total_success = total_failure = 0; - - InputStream is = null; - if (args.length == 0) { - is = this.getClass().getResourceAsStream(CONFIG); - } else { - is = new FileInputStream(args[0]); - } - if (is != null) { - JSONObject jo = new JSONObject(new JSONTokener(is)); - is.close(); - configs = jo.getJSONArray("configs"); - - JSONArray ja = jo.getJSONArray("tests"); - for (int i = 0; i < ja.length(); i++) { - Class cl = Class.forName(ja.getString(i).trim()); - if (cl != null) { - Constructor con = cl.getConstructor(); - tests.add((Test) con.newInstance()); - } - } - } else { - String conf = (args.length == 0) ? 
CONFIG : args[0]; - throw new Exception("Cannot find configuration resource: "+conf); - } - } - public void run() { - Logger logger = Logger.getLogger(this.getClass()); - for (int ix = 0; ix < configs.length(); ix++) { - JSONObject config = configs.getJSONObject(ix); - int succ = 0, fail = 0; - logger.info("*** Testing with configuration: "+config.getString("description")); - System.out.println("Testing with configuration: "+config.getString("description")); - for (Test t : tests) { - String nm = t.getName() + " ............................................................"; - System.out.print(" Test: "+nm.substring(0, 60)); - try { - List msgs = t.run(config); - if (msgs == null || msgs.size() == 0) { - succ++; - System.out.println(" OK!"); - } else { - fail++; - System.out.println(" Fail!"); - System.out.flush(); - for (String m : msgs) { - System.out.println(" "+m); - } - System.out.flush(); - } - } catch (Exception x) { - fail++; - System.out.println(" Fail!"); - } - } - System.out.println(); - total_success += succ; - total_failure += fail; - } - String m = "Testing completed: "+total_success+" successful tests, "+total_failure+": failures."; - logger.info(m); - System.out.println(m); - } -} diff --git a/src/main/java/com/att/research/mdbc/tests/Test.java b/src/main/java/com/att/research/mdbc/tests/Test.java deleted file mode 100755 index 0b8c0ab..0000000 --- a/src/main/java/com/att/research/mdbc/tests/Test.java +++ /dev/null @@ -1,105 +0,0 @@ -package com.att.research.mdbc.tests; - -import java.sql.Connection; -import java.sql.DriverManager; -import java.sql.ResultSet; -import java.sql.SQLException; -import java.sql.Statement; -import java.util.List; -import java.util.Properties; - -import org.json.JSONArray; -import org.json.JSONObject; - -/** - * Provides the abstract interface for a Test, as well as some common functions. - * - * @author Robert Eby - */ -public abstract class Test { - public static final String MDBC_DRIVER = "com.att.research.mdbc.ProxyDriver"; - - /** - * Each test derived from this class must implement this method, - * which runs the test and produces a list of error messages. - * - * @param config a JSONObject describing the configuration to use for this run of the test - * @return the list of messages. If the list is empty, the test is considered to have run - * successfully. - */ - abstract public List run(JSONObject config); - - public String getName() { - String s = this.getClass().getName(); - return s.replaceAll("com.att.research.mdbc.tests.", ""); - } - - public Properties buildProperties(JSONObject config, int i) { - Properties p = new Properties(); - for (String key : config.keySet()) { - if (key.equals("connections")) { - JSONArray ja = config.getJSONArray("connections"); - JSONObject connection = ja.getJSONObject(i); - for (String key2 : connection.keySet()) { - p.setProperty(key2, connection.getString(key2)); - } - } else { - p.setProperty(key, config.getString(key)); - } - } - return p; - } - - public Connection getDBConnection(Properties pr) throws SQLException, ClassNotFoundException { - Class.forName(MDBC_DRIVER); - String url = pr.getProperty("url"); - return DriverManager.getConnection(url, pr); - } - - public void assertNotNull(Object o) throws Exception { - if (o == null) - throw new Exception("Object is null"); - } - - public void assertTableContains(int connid, Connection conn, String tbl, Object... 
kv) throws Exception { - ResultSet rs = getRow(conn, tbl, kv); - boolean throwit = !rs.next(); - rs.close(); - if (throwit) { - throw new Exception("Conn id "+connid+" Table "+tbl+" does not have a row with "+catkeys(kv)); - } - } - public void assertTableDoesNotContain(int connid, Connection conn, String tbl, Object... kv) throws Exception { - boolean throwit = true; - try { - assertTableContains(connid, conn, tbl, kv); - } catch (Exception x) { - throwit = false; - } - if (throwit) { - throw new Exception("Conn id "+connid+" Table "+tbl+" does have a row with "+catkeys(kv)); - } - } - public ResultSet getRow(Connection conn, String tbl, Object... kv) throws SQLException { - Statement stmt = conn.createStatement(); - StringBuilder sql = new StringBuilder("SELECT * FROM ") - .append(tbl) - .append(" WHERE ") - .append(catkeys(kv)); - return stmt.executeQuery(sql.toString()); - } - public String catkeys(Object... kv) { - StringBuilder sql = new StringBuilder(); - String pfx = ""; - for (int i = 0; (i+1) < kv.length; i += 2) { - sql.append(pfx).append(kv[i]).append("="); - if (kv[i+1] instanceof String) { - sql.append("'").append(kv[i+1]).append("'"); - } else { - sql.append(kv[i+1].toString()); - } - pfx = " AND "; - } - return sql.toString(); - } -} diff --git a/src/main/java/com/att/research/mdbc/tests/Test_Delete.java b/src/main/java/com/att/research/mdbc/tests/Test_Delete.java deleted file mode 100755 index 8017cb3..0000000 --- a/src/main/java/com/att/research/mdbc/tests/Test_Delete.java +++ /dev/null @@ -1,70 +0,0 @@ -package com.att.research.mdbc.tests; - -import java.sql.Connection; -import java.sql.Statement; -import java.util.ArrayList; -import java.util.List; - -import org.json.JSONArray; -import org.json.JSONObject; - -/** - * Test that DELETEs work on the original DB, and are correctly copied to replica DBs. 
- * - * @author Robert Eby - */ -public class Test_Delete extends Test { - private final String TBL = "DELTABLE"; - - @Override - public List run(JSONObject config) { - List msgs = new ArrayList(); - JSONArray connections = config.getJSONArray("connections"); - Connection[] conn = new Connection[connections.length()]; - Statement[] stmt = new Statement[conn.length]; - try { - for (int i = 0; i < conn.length; i++) { - conn[i] = getDBConnection(buildProperties(config, i)); - assertNotNull(conn[i]); - stmt[i] = conn[i].createStatement(); - assertNotNull(stmt[i]); - } - - try { - for (int i = 0; i < conn.length; i++) { - conn[i].setAutoCommit(true); - stmt[i].execute("CREATE TABLE IF NOT EXISTS DELTABLE(ID_ varchar(255), RANDOMTXT varchar(255), primary key (ID_))"); - } - stmt[0].execute("INSERT INTO DELTABLE(ID_, RANDOMTXT) VALUES('1', 'Everything''s Negotiable Except Cutting Medicaid')"); - stmt[0].execute("INSERT INTO DELTABLE(ID_, RANDOMTXT) VALUES('2', 'Can a Sideways Elevator Help Designers Build Taller Skyscrapers?')"); - stmt[0].execute("INSERT INTO DELTABLE(ID_, RANDOMTXT) VALUES('3', 'Can a Bernie Sanders Ally Win the Maryland Governor''s Mansion?')"); - for (int i = 0; i < conn.length; i++) { - assertTableContains(i, conn[i], TBL, "ID_", "1"); - assertTableContains(i, conn[i], TBL, "ID_", "2"); - assertTableContains(i, conn[i], TBL, "ID_", "3"); - } - - stmt[0].execute("DELETE FROM DELTABLE WHERE ID_ = '1'"); - for (int i = 0; i < conn.length; i++) { - assertTableDoesNotContain(i, conn[i], TBL, "ID_", "1"); - assertTableContains(i, conn[i], TBL, "ID_", "2"); - assertTableContains(i, conn[i], TBL, "ID_", "3"); - } - } catch (Exception e) { - msgs.add(e.toString()); - } finally { - for (int i = 0; i < stmt.length; i++) { - if (stmt[i] != null) - stmt[i].close(); - } - for (int i = 0; i < conn.length; i++) { - if (conn[i] != null) - conn[i].close(); - } - } - } catch (Exception e) { - msgs.add(e.toString()); - } - return msgs; - } -} diff --git a/src/main/java/com/att/research/mdbc/tests/Test_Insert.java b/src/main/java/com/att/research/mdbc/tests/Test_Insert.java deleted file mode 100755 index 4c19dbd..0000000 --- a/src/main/java/com/att/research/mdbc/tests/Test_Insert.java +++ /dev/null @@ -1,94 +0,0 @@ -package com.att.research.mdbc.tests; - -import java.sql.Connection; -import java.sql.ResultSet; -import java.sql.Statement; -import java.util.ArrayList; -import java.util.List; - -import org.json.JSONArray; -import org.json.JSONObject; - -/** - * Test that INSERTs work to the original DB, and are correctly copied to replica DBs. 
- * - * @author Robert Eby - */ -public class Test_Insert extends Test { - private final String PERSON = "PERSON"; - private final String SONG = "SONG"; - - @Override - public List run(JSONObject config) { - List msgs = new ArrayList(); - JSONArray connections = config.getJSONArray("connections"); - Connection[] conn = new Connection[connections.length()]; - Statement[] stmt = new Statement[conn.length]; - try { - for (int i = 0; i < conn.length; i++) { - conn[i] = getDBConnection(buildProperties(config, i)); - assertNotNull(conn[i]); - stmt[i] = conn[i].createStatement(); - assertNotNull(stmt[i]); - } - - try { - for (int i = 0; i < conn.length; i++) { - conn[i].setAutoCommit(true); - stmt[i].execute("CREATE TABLE IF NOT EXISTS PERSON(ID_ varchar(255), NAME varchar(255), SSN varchar(255), primary key (ID_))"); - } - stmt[0].execute("INSERT INTO PERSON(ID_, NAME, SSN) VALUES('1', 'Zaphod', '111-22-3333')"); - stmt[0].execute("INSERT INTO PERSON(ID_, NAME, SSN) VALUES('2', 'Ripley', '444-55-6666')"); - stmt[0].execute("INSERT INTO PERSON(ID_, NAME, SSN) VALUES('3', 'Spock', '777-88-9999')"); - for (int i = 0; i < conn.length; i++) { - assertTableContains(i, conn[i], PERSON, "ID_", "1"); - assertTableContains(i, conn[i], PERSON, "ID_", "2"); - assertTableContains(i, conn[i], PERSON, "ID_", "3"); - } - - stmt[0].execute("UPDATE PERSON SET NAME = 'Jabba' WHERE ID_ = '2'"); - for (int i = 0; i < conn.length; i++) { - ResultSet rs = getRow(conn[i], PERSON, "ID_", "2"); - if (rs.next()) { - String v = rs.getString("NAME"); - if (!v.equals("Jabba")) - throw new Exception("Table PERSON, row with ID_ = '2' was not updated."); - } else { - throw new Exception("Table PERSON does not have a row with ID_ = '2'"); - } - rs.close(); - } - - for (int i = 0; i < conn.length; i++) { - stmt[i].execute("CREATE TABLE IF NOT EXISTS SONG(ID_ varchar(255), PREF int, ARIA varchar(255), primary key (ID_, PREF))"); - } - stmt[0].execute("INSERT INTO SONG(ID_, PREF, ARIA) VALUES('1', 1, 'Nessun Dorma')"); - stmt[0].execute("INSERT INTO SONG(ID_, PREF, ARIA) VALUES('2', 5, 'O mio Bambino Caro')"); - stmt[0].execute("INSERT INTO SONG(ID_, PREF, ARIA) VALUES('2', 2, 'Sweet Georgia Brown')"); - stmt[0].execute("INSERT INTO SONG(ID_, PREF, ARIA) VALUES('3', 77, 'Mud Flats Blues')"); - stmt[0].execute("INSERT INTO SONG(ID_, PREF, ARIA) VALUES('3', 69, 'Me & Mr Jones')"); - for (int i = 0; i < conn.length; i++) { - assertTableContains(i, conn[i], SONG, "ID_", "1", "PREF", 1); - assertTableContains(i, conn[i], SONG, "ID_", "2", "PREF", 5); - assertTableContains(i, conn[i], SONG, "ID_", "2", "PREF", 2); - assertTableContains(i, conn[i], SONG, "ID_", "3", "PREF", 77); - assertTableContains(i, conn[i], SONG, "ID_", "3", "PREF", 69); - } - } catch (Exception e) { - msgs.add(e.toString()); - } finally { - for (int i = 0; i < stmt.length; i++) { - if (stmt[i] != null) - stmt[i].close(); - } - for (int i = 0; i < conn.length; i++) { - if (conn[i] != null) - conn[i].close(); - } - } - } catch (Exception e) { - msgs.add(e.toString()); - } - return msgs; - } -} diff --git a/src/main/java/com/att/research/mdbc/tests/Test_Transactions.java b/src/main/java/com/att/research/mdbc/tests/Test_Transactions.java deleted file mode 100755 index 1153c9b..0000000 --- a/src/main/java/com/att/research/mdbc/tests/Test_Transactions.java +++ /dev/null @@ -1,74 +0,0 @@ -package com.att.research.mdbc.tests; - -import java.sql.Connection; -import java.sql.Statement; -import java.util.ArrayList; -import java.util.List; - -import org.json.JSONArray; -import 
org.json.JSONObject; - -/** - * Test that transactions work between the original DB, and replica DBs. - * - * @author Robert Eby - */ -public class Test_Transactions extends Test { - private final String TBL = "TRANSTEST"; - - @Override - public List run(JSONObject config) { - List msgs = new ArrayList(); - JSONArray connections = config.getJSONArray("connections"); - Connection[] conn = new Connection[connections.length()]; - Statement[] stmt = new Statement[conn.length]; - try { - for (int i = 0; i < conn.length; i++) { - conn[i] = getDBConnection(buildProperties(config, i)); - assertNotNull(conn[i]); - stmt[i] = conn[i].createStatement(); - assertNotNull(stmt[i]); - } - - try { - for (int i = 0; i < conn.length; i++) { - conn[i].setAutoCommit(true); - stmt[i].execute("CREATE TABLE IF NOT EXISTS TRANSTEST(ID_ varchar(12), STUFF varchar(255), primary key (ID_))"); - conn[i].setAutoCommit(false); - } - stmt[0].execute("INSERT INTO TRANSTEST(ID_, STUFF) VALUES('1', 'CenturyLink Now Under Fire on All Sides For Fraudulent Billing')"); - stmt[0].execute("INSERT INTO TRANSTEST(ID_, STUFF) VALUES('2', 'Netflix Now in Half of All Broadband Households, Study Says')"); - stmt[0].execute("INSERT INTO TRANSTEST(ID_, STUFF) VALUES('3', 'Private Data Of 6 Million Verizon Customers Exposed')"); - assertTableContains(0, conn[0], TBL, "ID_", "1"); - assertTableContains(0, conn[0], TBL, "ID_", "2"); - assertTableContains(0, conn[0], TBL, "ID_", "3"); - for (int i = 1; i < conn.length; i++) { - assertTableDoesNotContain(i, conn[i], TBL, "ID_", "1"); - assertTableDoesNotContain(i, conn[i], TBL, "ID_", "2"); - assertTableDoesNotContain(i, conn[i], TBL, "ID_", "3"); - } - conn[0].commit(); - for (int i = 0; i < conn.length; i++) { - assertTableContains(i, conn[i], TBL, "ID_", "1"); - assertTableContains(i, conn[i], TBL, "ID_", "2"); - assertTableContains(i, conn[i], TBL, "ID_", "3"); - } - - } catch (Exception e) { - msgs.add(e.toString()); - } finally { - for (int i = 0; i < stmt.length; i++) { - if (stmt[i] != null) - stmt[i].close(); - } - for (int i = 0; i < conn.length; i++) { - if (conn[i] != null) - conn[i].close(); - } - } - } catch (Exception e) { - msgs.add(e.toString()); - } - return msgs; - } -} diff --git a/src/main/java/com/att/research/mdbc/tests/package-info.java b/src/main/java/com/att/research/mdbc/tests/package-info.java deleted file mode 100755 index ee993db..0000000 --- a/src/main/java/com/att/research/mdbc/tests/package-info.java +++ /dev/null @@ -1,165 +0,0 @@ -/** - *

- * This package provides a testing harness to test the various features of MDBC against - * multiple combinations of database and MUSIC mixins. The configurations (consisting of - * database information and mixin combinations) to test, as well as the specific tests to - * run are all defined in the configuration file tests.json. - *

- *

- * To run the tests against all the configurations specified in /tests.json, do the following: - *

- *
- * 	java com.att.research.mdbc.tests.MAIN [ configfile ]
- * 
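- *
- * For example, assuming the build produces a jar named mdbc.jar containing this package and
- * its dependencies (the jar name here is illustrative, not defined by the project), a concrete
- * invocation could look like:
- *
- * 	java -cp mdbc.jar com.att.research.mdbc.tests.MAIN /tests.json
- *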
- *

- * It is assumed that a copy of Cassandra is running locally on port 9042, - * that a copy of H2 server is running locally on port 8082, - * and that a copy of MySQL (or MariaDB) is running locally on port 3306. - * These can be adjusted by editing the /tests.json file. - *

- *

- * When building a copy of MDBC for production use, this package can be safely removed. - *

- *

- * The initial copy of tests.json is as follows: - *

- *
- * {
- *	"tests": [
- *		"com.att.research.mdbc.tests.Test_Insert",
- *		"com.att.research.mdbc.tests.Test_Delete",
- *		"com.att.research.mdbc.tests.Test_Transactions"
- *	],
- *	"configs": [
- *		{
- *			"description": "H2 with Cassandra with two connections",
- *			"MDBC_DB_MIXIN": "h2",
- *			"MDBC_MUSIC_MIXIN": "cassandra",
- *			"replicas": "0,1",
- *			"music_keyspace": "mdbctest1",
- *			"music_address": "localhost",
- *			"music_rfactor": "1",
- *			"connections": [
- *				{
- *					"name": "Connection 0",
- *					"url": "jdbc:mdbc:mem:db0",
- *					"user": "",
- *					"password": "",
- *					"myid": "0"
- *				},
- *				{
- *					"name": "Connection 1",
- *					"url": "jdbc:mdbc:mem:db1",
- *					"user": "",
- *					"password": "",
- *					"myid": "1"
- *				}
- *			]
- *		},
- *		{
- *			"description": "H2 with Cassandra2 with three connections",
- *			"MDBC_DB_MIXIN": "h2",
- *			"MDBC_MUSIC_MIXIN": "cassandra2",
- *			"replicas": "0,1,2",
- *			"music_keyspace": "mdbctest2",
- *			"music_address": "localhost",
- *			"music_rfactor": "1",
- *			"user": "",
- *			"password": "",
- *			"connections": [
- *				{
- *					"name": "Connection 0",
- *					"url": "jdbc:mdbc:mem:db0",
- *					"myid": "0"
- *				},
- *				{
- *					"name": "Connection 1",
- *					"url": "jdbc:mdbc:mem:db1",
- *					"myid": "1"
- *				},
- *				{
- *					"name": "Connection 2",
- *					"url": "jdbc:mdbc:mem:db2",
- *					"myid": "2"
- *				}
- *			]
- *		},
- *		{
- *			"description": "H2 Server with Cassandra2 with two connections",
- *			"MDBC_DB_MIXIN": "h2server",
- *			"MDBC_MUSIC_MIXIN": "cassandra2",
- *			"replicas": "0,1",
- *			"music_keyspace": "mdbctest3",
- *			"music_address": "localhost",
- *			"music_rfactor": "1",
- *			"connections": [
- *				{
- *					"name": "Connection 0",
- *					"url": "jdbc:mdbc:tcp://localhost/mdbc0",
- *					"user": "",
- *					"password": "",
- *					"myid": "0"
- *				},
- *				{
- *					"name": "Connection 1",
- *					"url": "jdbc:mdbc:tcp://localhost/mdbc1",
- *					"user": "",
- *					"password": "",
- *					"myid": "1"
- *				}
- *			]
- *		},
- *		{
- *			"description": "MySQL with Cassandra2 with two connections",
- *			"MDBC_DB_MIXIN": "mysql",
- *			"MDBC_MUSIC_MIXIN": "cassandra2",
- *			"replicas": "0,1,2",
- *			"music_keyspace": "mdbctest4",
- *			"music_address": "localhost",
- *			"music_rfactor": "1",
- *			"user": "root",
- *			"password": "abc123",
- *			"connections": [
- *				{
- *					"name": "Connection 0",
- *					"url": "jdbc:mdbc://127.0.0.1:3306/mdbc",
- *					"myid": "0"
- *				},
- *				{
- *					"name": "Connection 1",
- *					"url": "jdbc:mdbc://127.0.0.1:3306/mdbc2",
- *					"myid": "1"
- *				}
- *			]
- *		},
- *		{
- *			"description": "H2 (DB #1) and MySQL (DB #2) with Cassandra2",
- *			"MDBC_MUSIC_MIXIN": "cassandra2",
- *			"replicas": "0,1",
- *			"music_keyspace": "mdbctest5",
- *			"music_address": "localhost",
- *			"music_rfactor": "1",
- *			"connections": [
- *				{
- *					"name": "Connection 0",
- *					"MDBC_DB_MIXIN": "h2",
- *					"url": "jdbc:mdbc:mem:db9",
- *					"user": "",
- *					"password": "",
- *					"myid": "0"
- *				},
- *				{
- *					"name": "Connection 1",
- *					"MDBC_DB_MIXIN": "mysql",
- *					"url": "jdbc:mdbc://127.0.0.1:3306/mdbc3",
- *					"user": "root",
- *					"password": "abc123",
- *					"myid": "1"
- *				}
- *			]
- *		}
- *	]
- * }
- * 
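- *
- * Each "connections" entry becomes one JDBC connection opened by the test being run; a minimal
- * sketch of that loop, based on the Test_Insert class above (getDBConnection and buildProperties
- * are helpers provided by the Test base class), is:
- *
- * 	JSONArray connections = config.getJSONArray("connections");
- * 	Connection[] conn = new Connection[connections.length()];
- * 	Statement[] stmt = new Statement[conn.length];
- * 	for (int i = 0; i < conn.length; i++) {
- * 		// each entry yields its own MDBC connection (one per replica)
- * 		conn[i] = getDBConnection(buildProperties(config, i));
- * 		stmt[i] = conn[i].createStatement();
- * 	}
- *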
- */ -package com.att.research.mdbc.tests; diff --git a/src/main/java/com/att/research/mdbc/tools/CreateNodeConfigurations.java b/src/main/java/com/att/research/mdbc/tools/CreateNodeConfigurations.java deleted file mode 100644 index f0eca5b..0000000 --- a/src/main/java/com/att/research/mdbc/tools/CreateNodeConfigurations.java +++ /dev/null @@ -1,70 +0,0 @@ -package com.att.research.mdbc.tools; - -import com.att.research.exceptions.MDBCServiceException; -import com.att.research.logging.EELFLoggerDelegate; -import com.att.research.mdbc.configurations.NodeConfiguration; -import com.att.research.mdbc.configurations.TablesConfiguration; -import com.beust.jcommander.JCommander; -import com.beust.jcommander.Parameter; - -import java.io.FileNotFoundException; -import java.util.List; - -public class CreateNodeConfigurations { - public static final EELFLoggerDelegate LOG = EELFLoggerDelegate.getLogger(CreateNodeConfigurations.class); - - private String tables; - @Parameter(names = { "-t", "--table-configurations" }, required = true, - description = "This is the input file that is going to have the configuration for all the tables and partitions") - private String tableConfigurationsFile; - @Parameter(names = { "-b", "--basename" }, required = true, - description = "This base name for all the outputs files that are going to be created") - private String basename; - @Parameter(names = { "-o", "--output-dir" }, required = true, - description = "This is the output directory that is going to contain all the configuration file to be generated") - private String outputDirectory; - @Parameter(names = { "-h", "-help", "--help" }, help = true, - description = "Print the help message") - private boolean help = false; - - private TablesConfiguration inputConfig; - - public CreateNodeConfigurations(){} - - - public void readInput(){ - try { - inputConfig = TablesConfiguration.readJsonFromFile(tableConfigurationsFile); - } catch (FileNotFoundException e) { - LOG.error("Input file is invalid or not found"); - System.exit(1); - } - } - - public void createAndSaveNodeConfigurations(){ - List nodes = null; - try { - nodes = inputConfig.initializeAndCreateNodeConfigurations(); - } catch (MDBCServiceException e) { - e.printStackTrace(); - } - int counter = 0; - for(NodeConfiguration nodeConfig : nodes){ - String name = (nodeConfig.nodeName==null||nodeConfig.nodeName.isEmpty())?Integer.toString(counter++): nodeConfig.nodeName; - nodeConfig.saveToFile(outputDirectory+"/"+basename+"-"+name+".json"); - } - } - - public static void main(String[] args) { - CreateNodeConfigurations configs = new CreateNodeConfigurations(); - @SuppressWarnings("deprecation") - JCommander jc = new JCommander(configs, args); - if (configs.help) { - jc.usage(); - System.exit(1); - return; - } - configs.readInput(); - configs.createAndSaveNodeConfigurations(); - } -} diff --git a/src/main/java/com/att/research/mdbc/tools/CreatePartition.java b/src/main/java/com/att/research/mdbc/tools/CreatePartition.java deleted file mode 100644 index a38274b..0000000 --- a/src/main/java/com/att/research/mdbc/tools/CreatePartition.java +++ /dev/null @@ -1,59 +0,0 @@ -package com.att.research.mdbc.tools; - -import com.att.research.logging.EELFLoggerDelegate; -import com.att.research.mdbc.configurations.NodeConfiguration; -import com.beust.jcommander.JCommander; -import com.beust.jcommander.Parameter; - -public class CreatePartition { - public static final EELFLoggerDelegate LOG = EELFLoggerDelegate.getLogger(CreatePartition.class); - - @Parameter(names = { "-t", 
"--tables" }, required = true, - description = "This is the tables that are assigned to this ") - private String tables; - @Parameter(names = { "-f", "--file" }, required = true, - description = "This is the output file that is going to have the configuration for the ranges") - private String file; - @Parameter(names = { "-i", "--mri-index" }, required = true, - description = "Index in the Mri Table") - private String mriIndex; - @Parameter(names = { "-m", "--mri-table-name" }, required = true, - description = "Mri Table name") - private String mriTable; - @Parameter(names = { "-r", "--music-tx-digest-table-name" }, required = true, - description = "Music Transaction Digest Table name") - private String mtxdTable; - @Parameter(names = { "-p", "--partition-id" }, required = true, - description = "Partition Id") - private String partitionId; - @Parameter(names = { "-h", "-help", "--help" }, help = true, - description = "Print the help message") - private boolean help = false; - - NodeConfiguration config; - - public CreatePartition(){ - } - - public void convert(){ - config = new NodeConfiguration(tables, mriIndex,mriTable,partitionId,"test","", mtxdTable); - } - - public void saveToFile(){ - config.saveToFile(file); - } - - public static void main(String[] args) { - - CreatePartition newPartition = new CreatePartition(); - @SuppressWarnings("deprecation") - JCommander jc = new JCommander(newPartition, args); - if (newPartition.help) { - jc.usage(); - System.exit(1); - return; - } - newPartition.convert(); - newPartition.saveToFile(); - } -} diff --git a/src/main/java/org/onap/music/exceptions/MDBCServiceException.java b/src/main/java/org/onap/music/exceptions/MDBCServiceException.java new file mode 100644 index 0000000..9be84e5 --- /dev/null +++ b/src/main/java/org/onap/music/exceptions/MDBCServiceException.java @@ -0,0 +1,88 @@ +/* + * ============LICENSE_START========================================== + * org.onap.music + * =================================================================== + * Copyright (c) 2017 AT&T Intellectual Property + * =================================================================== + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + * ============LICENSE_END============================================= + * ==================================================================== + */ + +package org.onap.music.exceptions; + +/** + * @author inam + * + */ +public class MDBCServiceException extends Exception { + + + /** + * + */ + private static final long serialVersionUID = 1L; + private int errorCode; + private String errorMessage; + + public int getErrorCode() { + return errorCode; + } + + + public void setErrorCode(int errorCode) { + this.errorCode = errorCode; + } + + + public String getErrorMessage() { + return errorMessage; + } + + + public void setErrorMessage(String errorMessage) { + this.errorMessage = errorMessage; + } + + + public MDBCServiceException() { + super(); + } + + + public MDBCServiceException(String message) { + super(message); + + } + + + public MDBCServiceException(Throwable cause) { + super(cause); + + } + + + public MDBCServiceException(String message, Throwable cause) { + super(message, cause); + + } + + + public MDBCServiceException(String message, Throwable cause, boolean enableSuppression, + boolean writableStackTrace) { + super(message, cause, enableSuppression, writableStackTrace); + + } + +} diff --git a/src/main/java/org/onap/music/exceptions/QueryException.java b/src/main/java/org/onap/music/exceptions/QueryException.java new file mode 100644 index 0000000..72a7cee --- /dev/null +++ b/src/main/java/org/onap/music/exceptions/QueryException.java @@ -0,0 +1,90 @@ +/* + * ============LICENSE_START========================================== + * org.onap.music + * =================================================================== + * Copyright (c) 2017 AT&T Intellectual Property + * =================================================================== + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + * ============LICENSE_END============================================= + * ==================================================================== + */ +package org.onap.music.exceptions; + + + +/** + * @author inam + * + */ +public class QueryException extends Exception { + + /** + * + */ + private static final long serialVersionUID = 1L; + @SuppressWarnings("unused") + private int errorCode; + + + /** + * + */ + public QueryException() { + super(); + } + + /** + * @param message + */ + public QueryException(String message) { + super(message); + } + + + + /** + * @param message + */ + public QueryException(String message, int errorCode) { + super(message); + this.errorCode = errorCode; + } + + /** + * @param cause + */ + public QueryException(Throwable cause) { + super(cause); + } + + /** + * @param message + * @param cause + */ + public QueryException(String message, Throwable cause) { + super(message, cause); + } + + /** + * @param message + * @param cause + * @param enableSuppression + * @param writableStackTrace + */ + public QueryException(String message, Throwable cause, boolean enableSuppression, + boolean writableStackTrace) { + super(message, cause, enableSuppression, writableStackTrace); + } + +} diff --git a/src/main/java/org/onap/music/logging/EELFLoggerDelegate.java b/src/main/java/org/onap/music/logging/EELFLoggerDelegate.java new file mode 100644 index 0000000..16a70dd --- /dev/null +++ b/src/main/java/org/onap/music/logging/EELFLoggerDelegate.java @@ -0,0 +1,339 @@ + +package org.onap.music.logging; + +import static com.att.eelf.configuration.Configuration.MDC_SERVER_FQDN; +import static com.att.eelf.configuration.Configuration.MDC_SERVER_IP_ADDRESS; +import static com.att.eelf.configuration.Configuration.MDC_SERVICE_INSTANCE_ID; +import static com.att.eelf.configuration.Configuration.MDC_SERVICE_NAME; + +import java.net.InetAddress; +import java.text.MessageFormat; +import java.util.concurrent.ConcurrentHashMap; +import java.util.concurrent.ConcurrentMap; + +import javax.servlet.http.HttpServletRequest; + +import org.slf4j.MDC; + +import com.att.eelf.configuration.EELFLogger; +import com.att.eelf.configuration.EELFManager; +import com.att.eelf.configuration.SLF4jWrapper; + +public class EELFLoggerDelegate extends SLF4jWrapper implements EELFLogger { + + public static final EELFLogger errorLogger = EELFManager.getInstance().getErrorLogger(); + public static final EELFLogger applicationLogger = EELFManager.getInstance().getApplicationLogger(); + public static final EELFLogger auditLogger = EELFManager.getInstance().getAuditLogger(); + public static final EELFLogger metricsLogger = EELFManager.getInstance().getMetricsLogger(); + public static final EELFLogger debugLogger = EELFManager.getInstance().getDebugLogger(); + + private String className; + private static ConcurrentMap classMap = new ConcurrentHashMap<>(); + + public EELFLoggerDelegate(final String className) { + super(className); + this.className = className; + } + + /** + * Convenience method that gets a logger for the specified class. + * + * @see #getLogger(String) + * + * @param clazz + * @return Instance of EELFLoggerDelegate + */ + public static EELFLoggerDelegate getLogger(Class clazz) { + return getLogger(clazz.getName()); + } + + /** + * Gets a logger for the specified class name. If the logger does not already + * exist in the map, this creates a new logger. + * + * @param className + * If null or empty, uses EELFLoggerDelegate as the class name. 
+ * @return Instance of EELFLoggerDelegate + */ + public static EELFLoggerDelegate getLogger(final String className) { + String classNameNeverNull = className == null || "".equals(className) ? EELFLoggerDelegate.class.getName() + : className; + EELFLoggerDelegate delegate = classMap.get(classNameNeverNull); + if (delegate == null) { + delegate = new EELFLoggerDelegate(className); + classMap.put(className, delegate); + } + return delegate; + } + + /** + * Logs a message at the lowest level: trace. + * + * @param logger + * @param msg + */ + public void trace(EELFLogger logger, String msg) { + if (logger.isTraceEnabled()) { + logger.trace(msg); + } + } + + /** + * Logs a message with parameters at the lowest level: trace. + * + * @param logger + * @param msg + * @param arguments + */ + public void trace(EELFLogger logger, String msg, Object... arguments) { + if (logger.isTraceEnabled()) { + logger.trace(msg, arguments); + } + } + + /** + * Logs a message and throwable at the lowest level: trace. + * + * @param logger + * @param msg + * @param th + */ + public void trace(EELFLogger logger, String msg, Throwable th) { + if (logger.isTraceEnabled()) { + logger.trace(msg, th); + } + } + + /** + * Logs a message at the second-lowest level: debug. + * + * @param logger + * @param msg + */ + public void debug(EELFLogger logger, String msg) { + if (logger.isDebugEnabled()) { + logger.debug(msg); + } + } + + /** + * Logs a message with parameters at the second-lowest level: debug. + * + * @param logger + * @param msg + * @param arguments + */ + public void debug(EELFLogger logger, String msg, Object... arguments) { + if (logger.isDebugEnabled()) { + logger.debug(msg, arguments); + } + } + + /** + * Logs a message and throwable at the second-lowest level: debug. + * + * @param logger + * @param msg + * @param th + */ + public void debug(EELFLogger logger, String msg, Throwable th) { + if (logger.isDebugEnabled()) { + logger.debug(msg, th); + } + } + + /** + * Logs a message at info level. + * + * @param logger + * @param msg + */ + public void info(EELFLogger logger, String msg) { + logger.info(className + " - "+msg); + } + + /** + * Logs a message with parameters at info level. + * + * @param logger + * @param msg + * @param arguments + */ + public void info(EELFLogger logger, String msg, Object... arguments) { + logger.info(msg, arguments); + } + + /** + * Logs a message and throwable at info level. + * + * @param logger + * @param msg + * @param th + */ + public void info(EELFLogger logger, String msg, Throwable th) { + logger.info(msg, th); + } + + /** + * Logs a message at warn level. + * + * @param logger + * @param msg + */ + public void warn(EELFLogger logger, String msg) { + logger.warn(msg); + } + + /** + * Logs a message with parameters at warn level. + * + * @param logger + * @param msg + * @param arguments + */ + public void warn(EELFLogger logger, String msg, Object... arguments) { + logger.warn(msg, arguments); + } + + /** + * Logs a message and throwable at warn level. + * + * @param logger + * @param msg + * @param th + */ + public void warn(EELFLogger logger, String msg, Throwable th) { + logger.warn(msg, th); + } + + /** + * Logs a message at error level. + * + * @param logger + * @param msg + */ + public void error(EELFLogger logger, String msg) { + logger.error(className+ " - " + msg); + } + + /** + * Logs a message with parameters at error level. + * + * @param logger + * @param msg + * @param arguments + */ + public void error(EELFLogger logger, String msg, Object... 
arguments) { + logger.error(msg, arguments); + } + + /** + * Logs a message and throwable at error level. + * + * @param logger + * @param msg + * @param th + */ + public void error(EELFLogger logger, String msg, Throwable th) { + logger.error(msg, th); + } + + /** + * Logs a message with the associated alarm severity at error level. + * + * @param logger + * @param msg + * @param severtiy + */ + public void error(EELFLogger logger, String msg, Object /*AlarmSeverityEnum*/ severtiy) { + logger.error(msg); + } + + /** + * Initializes the logger context. + */ + public void init() { + setGlobalLoggingContext(); + final String msg = "############################ Logging is started. ############################"; + // These loggers emit the current date-time without being told. + info(applicationLogger, msg); + error(errorLogger, msg); + debug(debugLogger, msg); + info(auditLogger, msg); + info(metricsLogger, msg); + } + + + /** + * Builds a message using a template string and the arguments. + * + * @param message + * @param args + * @return + */ + @SuppressWarnings("unused") + private String formatMessage(String message, Object... args) { + StringBuilder sbFormattedMessage = new StringBuilder(); + if (args != null && args.length > 0 && message != null && message != "") { + MessageFormat mf = new MessageFormat(message); + sbFormattedMessage.append(mf.format(args)); + } else { + sbFormattedMessage.append(message); + } + + return sbFormattedMessage.toString(); + } + + /** + * Loads all the default logging fields into the MDC context. + */ + private void setGlobalLoggingContext() { + MDC.put(MDC_SERVICE_INSTANCE_ID, ""); + try { + MDC.put(MDC_SERVER_FQDN, InetAddress.getLocalHost().getHostName()); + MDC.put(MDC_SERVER_IP_ADDRESS, InetAddress.getLocalHost().getHostAddress()); + } catch (Exception e) { + errorLogger.error("setGlobalLoggingContext failed", e); + } + } + + public static void mdcPut(String key, String value) { + MDC.put(key, value); + } + + public static String mdcGet(String key) { + return MDC.get(key); + } + + public static void mdcRemove(String key) { + MDC.remove(key); + } + + /** + * Loads the RequestId/TransactionId into the MDC which it should be receiving + * with an each incoming REST API request. Also, configures few other request + * based logging fields into the MDC context. + * + * @param req + * @param appName + */ + public void setRequestBasedDefaultsIntoGlobalLoggingContext(HttpServletRequest req, String appName) { + // Load the default fields + setGlobalLoggingContext(); + + // Load the request based fields + if (req != null) { + + + // Rest Path + MDC.put(MDC_SERVICE_NAME, req.getServletPath()); + + // Client IPAddress i.e. IPAddress of the remote host who is making + // this request. 
+ String clientIPAddress = req.getHeader("X-FORWARDED-FOR"); + if (clientIPAddress == null) { + clientIPAddress = req.getRemoteAddr(); + } + } + } +} diff --git a/src/main/java/org/onap/music/logging/format/AppMessages.java b/src/main/java/org/onap/music/logging/format/AppMessages.java new file mode 100644 index 0000000..304719c --- /dev/null +++ b/src/main/java/org/onap/music/logging/format/AppMessages.java @@ -0,0 +1,156 @@ +/* + * ============LICENSE_START========================================== + * org.onap.music + * =================================================================== + * Copyright (c) 2017 AT&T Intellectual Property + * =================================================================== + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + * ============LICENSE_END============================================= + * ==================================================================== + */ + +package org.onap.music.logging.format; + +/** + * @author inam + * + */ +public enum AppMessages { + + + + /* + * 100-199 Security/Permission Related - Authentication problems + * [ERR100E] Missing Information + * [ERR101E] Authentication error occured + * + * 200-299 Availability/Timeout Related/IO - connectivity error - connection timeout + * [ERR200E] Connectivity + * [ERR201E] Host not available + * [ERR202E] Error while connecting + * [ERR203E] IO Error has occured + * [ERR204E] Execution Interrupted + * [ERR205E] Session Expired + * + * + * + * 300-399 Data Access/Integrity Related + * [ERR300E] Incorrect data + * + * 400-499 - Cassandra Query Related + * + * + * 500-599 - Zookeepr/Locking Related + + * + * + * 600 - 699 - MDBC Service Errors + * [ERR600E] Error initializing the MDBC + * + * 700-799 Schema Interface Type/Validation - received Pay-load checksum is + * invalid - received JSON is not valid + * + * 800-899 Business/Flow Processing Related - check out to service is not + * allowed - Roll-back is done - failed to generate heat file + * + * + * 900-999 Unknown Errors - Unexpected exception + * [ERR900E] Unexpected error occured + * [ERR901E] Number format exception + * + * + * 1000-1099 Reserved - do not use + * + */ + + + + + MISSINGINFO("[ERR100E]", "Missing Information ","Details: NA", "Please check application credentials and/or headers"), + AUTHENTICATIONERROR("[ERR101E]", "Authentication error occured ","Details: NA", "Please verify application credentials"), + + CONNCECTIVITYERROR("[ERR200E]"," Connectivity error","Details: NA ","Please check connectivity to external resources"), + HOSTUNAVAILABLE("[ERR201E]","Host not available","Details: NA","Please verify the host details"), + IOERROR("[ERR203E]","IO Error has occured","","Please check IO"), + EXECUTIONINTERRUPTED("[ERR204E]"," Execution Interrupted","",""), + + + INCORRECTDATA("[ERR300E]"," Incorrect data",""," Please verify the request payload and try again"), + MULTIPLERECORDS("[ERR301E]"," Multiple records found",""," Please verify the request payload and try again"), + ALREADYEXIST("[ERR302E]"," 
Record already exist",""," Please verify the request payload and try again"), + MISSINGDATA("[ERR300E]"," Incorrect data",""," Please verify the request payload and try again"), + + QUERYERROR("[ERR400E]","Error while processing query",""," Please verify the query"), + + + UNKNOWNERROR("[ERR900E]"," Unexpected error occured",""," Please check logs for details"); + + + + ErrorTypes eType; + ErrorSeverity alarmSeverity; + ErrorSeverity errorSeverity; + String errorCode; + String errorDescription; + String details; + String resolution; + + + AppMessages(String errorCode, String errorDescription, String details,String resolution) { + + this.errorCode = errorCode; + this.errorDescription = errorDescription; + this.details = details; + this.resolution = resolution; + } + + + + + AppMessages(ErrorTypes eType, ErrorSeverity alarmSeverity, + ErrorSeverity errorSeverity, String errorCode, String errorDescription, String details, + String resolution) { + + this.eType = eType; + this.alarmSeverity = alarmSeverity; + this.errorSeverity = errorSeverity; + this.errorCode = errorCode; + this.errorDescription = errorDescription; + this.details = details; + this.resolution = resolution; + } + + public String getDetails() { + return this.details; + } + + public String getResolution() { + return this.resolution; + } + + public String getErrorCode() { + return this.errorCode; + } + + public String getErrorDescription() { + return this.errorDescription; + } + + + + + + + +} diff --git a/src/main/java/org/onap/music/logging/format/ErrorSeverity.java b/src/main/java/org/onap/music/logging/format/ErrorSeverity.java new file mode 100644 index 0000000..49cc2f4 --- /dev/null +++ b/src/main/java/org/onap/music/logging/format/ErrorSeverity.java @@ -0,0 +1,37 @@ +/* + * ============LICENSE_START========================================== + * org.onap.music + * =================================================================== + * Copyright (c) 2017 AT&T Intellectual Property + * =================================================================== + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + * ============LICENSE_END============================================= + * ==================================================================== + */ +package org.onap.music.logging.format; + +/** + * @author inam + * + */ +public enum ErrorSeverity { + INFO, + WARN, + ERROR, + FATAL, + CRITICAL, + MAJOR, + MINOR, + NONE, +} diff --git a/src/main/java/org/onap/music/logging/format/ErrorTypes.java b/src/main/java/org/onap/music/logging/format/ErrorTypes.java new file mode 100644 index 0000000..89dd5f8 --- /dev/null +++ b/src/main/java/org/onap/music/logging/format/ErrorTypes.java @@ -0,0 +1,44 @@ +/* + * ============LICENSE_START========================================== + * org.onap.music + * =================================================================== + * Copyright (c) 2017 AT&T Intellectual Property + * =================================================================== + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + * ============LICENSE_END============================================= + * ==================================================================== + */ +package org.onap.music.logging.format; + +import com.att.eelf.i18n.EELFResolvableErrorEnum; + +/** + * @author inam + * + */ +public enum ErrorTypes implements EELFResolvableErrorEnum { + + + CONNECTIONERROR, + SESSIONEXPIRED, + AUTHENTICATIONERROR, + SERVICEUNAVAILABLE, + QUERYERROR, + DATAERROR, + GENERALSERVICEERROR, + MUSICSERVICEERROR, + LOCKINGERROR, + UNKNOWN, + +} diff --git a/src/main/java/org/onap/music/mdbc/ArchiveProcess.java b/src/main/java/org/onap/music/mdbc/ArchiveProcess.java new file mode 100644 index 0000000..26a1ef3 --- /dev/null +++ b/src/main/java/org/onap/music/mdbc/ArchiveProcess.java @@ -0,0 +1,42 @@ +package org.onap.music.mdbc; + +import org.json.JSONObject; + +import org.onap.music.mdbc.mixins.DBInterface; +import org.onap.music.mdbc.mixins.MusicInterface; + +public class ArchiveProcess { + protected MusicInterface mi; + protected DBInterface dbi; + + //TODO: This is a place holder for taking snapshots and moving data from redo record into actual tables + + /** + * This method is called whenever there is a DELETE on the transaction digest and should be called when ownership changes, if required + * It updates the MUSIC/Cassandra tables (both dirty bits and actual data) corresponding to the SQL DELETE. + * Music propagates it to the other replicas. + * @param tableName This is the table on which the select is being performed + * @param oldRow This is information about the row that is being deleted + */ + @SuppressWarnings("unused") + private void deleteFromEntityTableInMusic(String tableName, JSONObject oldRow) { + TableInfo ti = dbi.getTableInfo(tableName); + mi.deleteFromEntityTableInMusic(ti,tableName, oldRow); + } + + /** + * This method is called whenever there is an INSERT or UPDATE to a the transaction digest, and should be called by an + * ownership chance. It updates the MUSIC/Cassandra tables (both dirty bits and actual data) corresponding to the SQL write. 
+ * Music propagates it to the other replicas. If the local database is in the middle of a transaction, the updates to MUSIC are + * delayed until the transaction is either committed or rolled back. + * + * @param tableName This is the table that has changed. + * @param changedRow This is information about the row that has changed, an array of objects representing the data being inserted/updated + */ + @SuppressWarnings("unused") + private void updateDirtyRowAndEntityTableInMusic(String tableName, JSONObject changedRow) { + //TODO: is this right? should we be saving updates at the client? we should leverage JDBC to handle this + TableInfo ti = dbi.getTableInfo(tableName); + mi.updateDirtyRowAndEntityTableInMusic(ti,tableName, changedRow); + } +} diff --git a/src/main/java/org/onap/music/mdbc/Configuration.java b/src/main/java/org/onap/music/mdbc/Configuration.java new file mode 100644 index 0000000..a4516dd --- /dev/null +++ b/src/main/java/org/onap/music/mdbc/Configuration.java @@ -0,0 +1,18 @@ +package org.onap.music.mdbc; + +public class Configuration { + /** The property name to use to connect to cassandra*/ + public static final String KEY_CASSANDRA_URL = "CASSANDRA_URL"; + /** The property name to use to enable/disable the MusicSqlManager entirely. */ + public static final String KEY_DISABLED = "disabled"; + /** The property name to use to select the DB 'mixin'. */ + public static final String KEY_DB_MIXIN_NAME = "MDBC_DB_MIXIN"; + /** The property name to use to select the MUSIC 'mixin'. */ + public static final String KEY_MUSIC_MIXIN_NAME = "MDBC_MUSIC_MIXIN"; + /** The name of the default mixin to use for the DBInterface. */ + public static final String DB_MIXIN_DEFAULT = "mysql";//"h2"; + /** The name of the default mixin to use for the MusicInterface. 
*/ + public static final String MUSIC_MIXIN_DEFAULT = "cassandra2";//"cassandra2"; + /** Default cassandra ulr*/ + public static final String CASSANDRA_URL_DEFAULT = "localhost";//"cassandra2"; +} diff --git a/src/main/java/org/onap/music/mdbc/DatabaseOperations.java b/src/main/java/org/onap/music/mdbc/DatabaseOperations.java new file mode 100644 index 0000000..b9412b7 --- /dev/null +++ b/src/main/java/org/onap/music/mdbc/DatabaseOperations.java @@ -0,0 +1,465 @@ +package org.onap.music.mdbc; + +import org.onap.music.exceptions.MDBCServiceException; +import org.onap.music.logging.EELFLoggerDelegate; +import org.onap.music.datastore.PreparedQueryObject; +import org.onap.music.exceptions.MusicLockingException; +import org.onap.music.exceptions.MusicQueryException; +import org.onap.music.exceptions.MusicServiceException; +import org.onap.music.main.MusicCore; +import org.onap.music.main.ResultType; +import org.onap.music.main.ReturnType; + +import java.util.*; + +public class DatabaseOperations { + private static EELFLoggerDelegate logger = EELFLoggerDelegate.getLogger(DatabaseOperations.class); + /** + * This functions is used to generate cassandra uuid + * @return a random UUID that can be used for fields of type uuid + */ + public static String generateUniqueKey() { + return UUID.randomUUID().toString(); + } + + /** + * This functions returns the primary key used to managed a specific row in the TableToPartition tables in Music + * @param namespace namespace where the TableToPartition resides + * @param tableToPartitionTableName name of the tableToPartition table + * @param tableName name of the application table that is being added to the system + * @return primary key to be used with MUSIC + */ + public static String getTableToPartitionPrimaryKey(String namespace, String tableToPartitionTableName, String tableName){ + return namespace+"."+tableToPartitionTableName+"."+tableName; + } + + /** + * Create a new row for a table, with not assigned partition + * @param namespace namespace where the TableToPartition resides + * @param tableToPartitionTableName name of the tableToPartition table + * @param tableName name of the application table that is being added to the system + * @param lockId if the lock for this key is already hold, this is the id of that lock. 
+ * May be null if lock is not hold for the corresponding key + */ + public static void createNewTableToPartitionRow(String namespace, String tableToPartitionTableName, + String tableName,String lockId) throws MDBCServiceException { + final String primaryKey = getTableToPartitionPrimaryKey(namespace,tableToPartitionTableName,tableName); + StringBuilder insert = new StringBuilder("INSERT INTO ") + .append(namespace) + .append('.') + .append(tableToPartitionTableName) + .append(" (tablename) VALUES ") + .append("('") + .append(tableName) + .append("');"); + PreparedQueryObject query = new PreparedQueryObject(); + query.appendQueryString(insert.toString()); + try { + executedLockedPut(namespace,tableToPartitionTableName,tableName,query,lockId,null); + } catch (MDBCServiceException e) { + logger.error("Initialization error: Failure to create new row table to partition table "); + throw new MDBCServiceException("Initialization error: Failure to create new row table to partition table"); + } + } + + /** + * Update the partition to which a table belongs + * @param namespace namespace where the TableToPartition resides + * @param tableToPartitionTableName name of the tableToPartition table + * @param table name of the application table that is being added to the system + * @param newPartition partition to which the application table is assigned + * @param lockId if the lock for this key is already hold, this is the id of that lock. + * May be null if lock is not hold for the corresponding key + */ + public static void updateTableToPartition(String namespace, String tableToPartitionTableName, + String table, String newPartition, String lockId) throws MDBCServiceException { + final String primaryKey = getTableToPartitionPrimaryKey(namespace,tableToPartitionTableName,table); + PreparedQueryObject query = new PreparedQueryObject(); + StringBuilder update = new StringBuilder("UPDATE ") + .append(namespace) + .append('.') + .append(tableToPartitionTableName) + .append(" SET previouspartitions = previouspartitions + {") + .append(newPartition) + .append("}, partition = " ) + .append(newPartition) + .append(" WHERE tablename = '") + .append(table) + .append("';"); + query.appendQueryString(update.toString()); + try { + executedLockedPut(namespace,tableToPartitionTableName,table,query,lockId,null); + } catch (MDBCServiceException e) { + logger.error("Initialization error: Failure to update a row in table to partition table "); + throw new MDBCServiceException("Initialization error: Failure to update a row in table to partition table"); + } + } + + + public static String getPartitionInformationPrimaryKey(String namespace, String partitionInformationTable, String partition){ + return namespace+"."+partitionInformationTable+"."+partition; + } + + /** + * Create a new row, when a new partition is initialized + * @param namespace namespace to which the partition info table resides in Cassandra + * @param partitionInfoTableName name of the partition information table + * @param replicationFactor associated replicated factor for the partition (max of all the tables) + * @param tables list of tables that are within this partitoin + * @param lockId if the lock for this key is already hold, this is the id of that lock. 
May be null if lock is not hold for the corresponding key + * @return the partition uuid associated to the new row + */ + public static String createPartitionInfoRow(String namespace, String partitionInfoTableName, + int replicationFactor, List tables, String lockId) throws MDBCServiceException { + String id = generateUniqueKey(); + final String primaryKey = getPartitionInformationPrimaryKey(namespace,partitionInfoTableName,id); + StringBuilder insert = new StringBuilder("INSERT INTO ") + .append(namespace) + .append('.') + .append(partitionInfoTableName) + .append(" (partition,replicationfactor,tables) VALUES ") + .append("(") + .append(id) + .append(",") + .append(replicationFactor) + .append(",{"); + boolean first = true; + for(String table: tables){ + if(!first){ + insert.append(","); + } + first = false; + insert.append("'") + .append(table) + .append("'"); + } + insert.append("});"); + PreparedQueryObject query = new PreparedQueryObject(); + query.appendQueryString(insert.toString()); + try { + executedLockedPut(namespace,partitionInfoTableName,id,query,lockId,null); + } catch (MDBCServiceException e) { + logger.error("Initialization error: Failure to create new row in partition information table "); + throw new MDBCServiceException("Initialization error: Failure to create new row in partition information table"); + } + return id; + } + + /** + * Update the TIT row and table that currently handles the partition + * @param namespace namespace to which the partition info table resides in Cassandra + * @param partitionInfoTableName name of the partition information table + * @param partitionId row identifier for the partition being modiefd + * @param newTitRow new TIT row and table that are handling this partition + * @param owner owner that is handling the new tit row (url to the corresponding etdb nodej + * @param lockId if the lock for this key is already hold, this is the id of that lock. 
May be null if lock is not hold for the corresponding key + */ + public static void updateRedoRow(String namespace, String partitionInfoTableName, String partitionId, + RedoRow newTitRow, String owner, String lockId) throws MDBCServiceException { + final String primaryKey = getTableToPartitionPrimaryKey(namespace,partitionInfoTableName,partitionId); + PreparedQueryObject query = new PreparedQueryObject(); + String newOwner = (owner==null)?"":owner; + StringBuilder update = new StringBuilder("UPDATE ") + .append(namespace) + .append('.') + .append(partitionInfoTableName) + .append(" SET currentowner='") + .append(newOwner) + .append("', latesttitindex=") + .append(newTitRow.getRedoRowIndex()) + .append(", latesttittable='") + .append(newTitRow.getRedoTableName()) + .append("' WHERE partition = ") + .append(partitionId) + .append(";"); + query.appendQueryString(update.toString()); + try { + executedLockedPut(namespace,partitionInfoTableName,partitionId,query,lockId,null); + } catch (MDBCServiceException e) { + logger.error("Initialization error: Failure to add new owner to partition in music table "); + throw new MDBCServiceException("Initialization error:Failure to add new owner to partition in music table "); + } + } + + /** + * Create the first row in the history of the redo history table for a given partition + * @param namespace namespace to which the redo history table resides in Cassandra + * @param redoHistoryTableName name of the table where the row is being created + * @param firstTitRow first tit associated to the partition + * @param partitionId partition for which a history is created + */ + public static void createRedoHistoryBeginRow(String namespace, String redoHistoryTableName, + RedoRow firstTitRow, String partitionId, String lockId) throws MDBCServiceException { + createRedoHistoryRow(namespace,redoHistoryTableName,firstTitRow,partitionId, new ArrayList<>(),lockId); + } + + /** + * Create a new row on the history for a given partition + * @param namespace namespace to which the redo history table resides in Cassandra + * @param redoHistoryTableName name of the table where the row is being created + * @param currentRow new tit row associated to the partition + * @param partitionId partition for which a history is created + * @param parentsRows parent tit rows associated to this partition + */ + public static void createRedoHistoryRow(String namespace, String redoHistoryTableName, + RedoRow currentRow, String partitionId, List parentsRows, String lockId) throws MDBCServiceException { + final String primaryKey = partitionId+"-"+currentRow.getRedoTableName()+"-"+currentRow.getRedoRowIndex(); + StringBuilder insert = new StringBuilder("INSERT INTO ") + .append(namespace) + .append('.') + .append(redoHistoryTableName) + .append(" (partition,redotable,redoindex,previousredo) VALUES ") + .append("(") + .append(partitionId) + .append(",'") + .append(currentRow.getRedoTableName()) + .append("',") + .append(currentRow.getRedoRowIndex()) + .append(",{"); + boolean first = true; + for(RedoRow parent: parentsRows){ + if(!first){ + insert.append(","); + } + else{ + first = false; + } + insert.append("('") + .append(parent.getRedoTableName()) + .append("',") + .append(parent.getRedoRowIndex()) + .append("),"); + } + insert.append("});"); + PreparedQueryObject query = new PreparedQueryObject(); + query.appendQueryString(insert.toString()); + try { + executedLockedPut(namespace,redoHistoryTableName,primaryKey,query,lockId,null); + } catch (MDBCServiceException e) { + 
logger.error("Initialization error: Failure to add new row to redo history"); + throw new MDBCServiceException("Initialization error:Failure to add new row to redo history"); + } + } + + /** + * Creates a new empty tit row + * @param namespace namespace where the tit table is located + * @param titTableName name of the corresponding tit table where the new row is added + * @param partitionId partition to which the redo log is hold + * @return uuid associated to the new row + */ + public static String CreateEmptyTitRow(String namespace, String titTableName, + String partitionId, String lockId) throws MDBCServiceException { + String id = generateUniqueKey(); + StringBuilder insert = new StringBuilder("INSERT INTO ") + .append(namespace) + .append('.') + .append(titTableName) + .append(" (id,applied,latestapplied,partition,redo) VALUES ") + .append("(") + .append(id) + .append(",false,-1,") + .append(partitionId) + .append(",[]);"); + PreparedQueryObject query = new PreparedQueryObject(); + query.appendQueryString(insert.toString()); + try { + executedLockedPut(namespace,titTableName,id,query,lockId,null); + } catch (MDBCServiceException e) { + logger.error("Initialization error: Failure to add new row to transaction information"); + throw new MDBCServiceException("Initialization error:Failure to add new row to transaction information"); + } + return id; + } + + /** + * This function creates the Table To Partition table. It contain information related to + */ + public static void CreateTableToPartitionTable(String musicNamespace, String tableToPartitionTableName) + throws MDBCServiceException { + String tableName = tableToPartitionTableName; + String priKey = "tablename"; + StringBuilder fields = new StringBuilder(); + fields.append("tablename text, "); + fields.append("partition uuid, "); + fields.append("previouspartitions set "); + String cql = String.format("CREATE TABLE IF NOT EXISTS %s.%s (%s, PRIMARY KEY (%s));", + musicNamespace, tableName, fields, priKey); + try { + executeMusicWriteQuery(musicNamespace,tableName,cql); + } catch (MDBCServiceException e) { + logger.error("Initialization error: Failure to create table to partition table"); + throw(e); + } + } + + public static void CreatePartitionInfoTable(String musicNamespace, String partitionInformationTableName) + throws MDBCServiceException { + String tableName = partitionInformationTableName; + String priKey = "partition"; + StringBuilder fields = new StringBuilder(); + fields.append("partition uuid, "); + fields.append("latesttittable text, "); + fields.append("latesttitindex uuid, "); + fields.append("tables set, "); + fields.append("replicationfactor int, "); + fields.append("currentowner text"); + String cql = String.format("CREATE TABLE IF NOT EXISTS %s.%s (%s, PRIMARY KEY (%s));", + musicNamespace, tableName, fields, priKey); + try { + executeMusicWriteQuery(musicNamespace,tableName,cql); + } catch (MDBCServiceException e) { + logger.error("Initialization error: Failure to create partition information table"); + throw(e); + } + } + + public static void CreateRedoHistoryTable(String musicNamespace, String redoHistoryTableName) + throws MDBCServiceException { + String tableName = redoHistoryTableName; + String priKey = "partition,redotable,redoindex"; + StringBuilder fields = new StringBuilder(); + fields.append("partition uuid, "); + fields.append("redotable text, "); + fields.append("redoindex uuid, "); + //TODO: Frozen is only needed for old versions of cassandra, please update correspondingly + fields.append("previousredo 
set>>"); + String cql = String.format("CREATE TABLE IF NOT EXISTS %s.%s (%s, PRIMARY KEY (%s));", + musicNamespace, tableName, fields, priKey); + try { + executeMusicWriteQuery(musicNamespace,tableName,cql); + } catch (MDBCServiceException e) { + logger.error("Initialization error: Failure to create redo history table"); + throw(e); + } + } + + /** + * This method executes a write query in Music + * @param cql the CQL to be sent to Cassandra + */ + protected static void executeMusicWriteQuery(String keyspace, String table, String cql) + throws MDBCServiceException { + PreparedQueryObject pQueryObject = new PreparedQueryObject(); + pQueryObject.appendQueryString(cql); + ResultType rt = null; + try { + rt = MusicCore.createTable(keyspace,table,pQueryObject,"critical"); + } catch (MusicServiceException e) { + e.printStackTrace(); + } + if (rt.getResult().toLowerCase().equals("failure")) { + throw new MDBCServiceException("Music eventual put failed"); + } + } + + protected static void executedLockedPut(String namespace, String tableName, + String primaryKeyWithoutDomain, PreparedQueryObject queryObject, String lockId, + MusicCore.Condition conditionInfo) throws MDBCServiceException { + ReturnType rt ; + if(lockId==null) { + try { + rt = MusicCore.atomicPut(namespace, tableName, primaryKeyWithoutDomain, queryObject, conditionInfo); + } catch (MusicLockingException e) { + logger.error("Music locked put failed"); + throw new MDBCServiceException("Music locked put failed"); + } catch (MusicServiceException e) { + logger.error("Music service fail: Music locked put failed"); + throw new MDBCServiceException("Music service fail: Music locked put failed"); + } catch (MusicQueryException e) { + logger.error("Music query fail: locked put failed"); + throw new MDBCServiceException("Music query fail: Music locked put failed"); + } + } + else { + rt = MusicCore.criticalPut(namespace, tableName, primaryKeyWithoutDomain, queryObject, lockId, conditionInfo); + } + if (rt.getResult().getResult().toLowerCase().equals("failure")) { + throw new MDBCServiceException("Music locked put failed"); + } + } + + public static void createNamespace(String namespace, int replicationFactor) throws MDBCServiceException { + Map replicationInfo = new HashMap(); + replicationInfo.put("'class'", "'SimpleStrategy'"); + replicationInfo.put("'replication_factor'", replicationFactor); + + PreparedQueryObject queryObject = new PreparedQueryObject(); + queryObject.appendQueryString( + "CREATE KEYSPACE " + namespace + " WITH REPLICATION = " + replicationInfo.toString().replaceAll("=", ":")); + + try { + MusicCore.nonKeyRelatedPut(queryObject, "critical"); + } catch (MusicServiceException e) { + if (e.getMessage().equals("Keyspace "+namespace+" already exists")) { + // ignore + } else { + logger.error("Error creating namespace: "+namespace); + throw new MDBCServiceException("Error creating namespace: "+namespace+". Internal error:"+e.getErrorMessage()); + } + } + } + + + /** + * This function creates the MusicTxDigest table. 
It contain information related to each transaction committed + * * LeaseId: id associated with the lease, text + * * LeaseCounter: transaction number under this lease, bigint \TODO this may need to be a varint later + * * TransactionDigest: text that contains all the changes in the transaction + */ + public static void CreateMusicTxDigest(int musicTxDigestTableNumber, String musicNamespace, String musicTxDigestTableName) throws MDBCServiceException { + String tableName = musicTxDigestTableName; + if(musicTxDigestTableNumber >= 0) { + StringBuilder table = new StringBuilder(); + table.append(tableName); + table.append("-"); + table.append(Integer.toString(musicTxDigestTableNumber)); + tableName=table.toString(); + } + String priKey = "leaseid,leasecounter"; + StringBuilder fields = new StringBuilder(); + fields.append("leaseid text, "); + fields.append("leasecounter varint, "); + fields.append("transactiondigest text ");//notice lack of ',' + String cql = String.format("CREATE TABLE IF NOT EXISTS %s.%s (%s, PRIMARY KEY (%s));", musicNamespace, tableName, fields, priKey); + try { + executeMusicWriteQuery(musicNamespace,tableName,cql); + } catch (MDBCServiceException e) { + logger.error("Initialization error: Failure to create redo records table"); + throw(e); + } + } + + /** + * This function creates the TransactionInformation table. It contain information related + * to the transactions happening in a given partition. + * * The schema of the table is + * * Id, uiid. + * * Partition, uuid id of the partition + * * LatestApplied, int indicates which values from the redologtable wast the last to be applied to the data tables + * * Applied: boolean, indicates if all the values in this redo log table where already applied to data tables + * * Redo: list of uiids associated to the Redo Records Table + * + */ + public static void CreateMusicRangeInformationTable(String musicNamespace, String musicRangeInformationTableName) throws MDBCServiceException { + String tableName = musicRangeInformationTableName; + String priKey = "id"; + StringBuilder fields = new StringBuilder(); + fields.append("id uuid, "); + fields.append("partition uuid, "); + fields.append("latestapplied int, "); + fields.append("applied boolean, "); + //TODO: Frozen is only needed for old versions of cassandra, please update correspondingly + fields.append("redo list>>> "); + String cql = String.format("CREATE TABLE IF NOT EXISTS %s.%s (%s, PRIMARY KEY (%s));", musicNamespace, tableName, fields, priKey); + try { + executeMusicWriteQuery(musicNamespace,tableName,cql); + } catch (MDBCServiceException e) { + logger.error("Initialization error: Failure to create transaction information table"); + throw(e); + } + } + + + +} diff --git a/src/main/java/org/onap/music/mdbc/DatabasePartition.java b/src/main/java/org/onap/music/mdbc/DatabasePartition.java new file mode 100644 index 0000000..79abd3b --- /dev/null +++ b/src/main/java/org/onap/music/mdbc/DatabasePartition.java @@ -0,0 +1,189 @@ +package org.onap.music.mdbc; + +import java.io.BufferedReader; +import java.io.FileNotFoundException; +import java.io.FileReader; +import java.util.HashSet; +import java.util.Set; + +import org.onap.music.logging.EELFLoggerDelegate; +import com.google.gson.Gson; +import com.google.gson.GsonBuilder; + +/** + * A database range contain information about what ranges should be hosted in the current MDBC instance + * A database range with an empty map, is supposed to contain all the tables in Music. 
+ * @author Enrique Saurez + */ +public class DatabasePartition { + private transient static EELFLoggerDelegate logger = EELFLoggerDelegate.getLogger(DatabasePartition.class); + + private String musicRangeInformationTable;//Table that currently contains the REDO log for this partition + private String musicRangeInformationIndex;//Index that can be obtained either from + private String musicTxDigestTable; + private String partitionId; + private String lockId; + protected Set ranges; + + /** + * Each range represents a partition of the database, a database partition is a union of this partitions. + * The only requirement is that the ranges are not overlapping. + */ + + public DatabasePartition() { + ranges = new HashSet<>(); + } + + public DatabasePartition(Set knownRanges, String mriIndex, String mriTable, String partitionId, String lockId, String musicTxDigestTable) { + if(knownRanges != null) { + ranges = knownRanges; + } + else { + ranges = new HashSet<>(); + } + + if(musicTxDigestTable != null) { + this.setMusicTxDigestTable(musicTxDigestTable); + } + else{ + this.setMusicTxDigestTable(""); + } + + if(mriIndex != null) { + this.setMusicRangeInformationIndex(mriIndex); + } + else { + this.setMusicRangeInformationIndex(""); + } + + if(mriTable != null) { + this.setMusicRangeInformationTable(mriTable); + } + else { + this.setMusicRangeInformationTable(""); + } + + if(partitionId != null) { + this.setPartitionId(partitionId); + } + else { + this.setPartitionId(""); + } + + if(lockId != null) { + this.setLockId(lockId); + } + else { + this.setLockId(""); + } + } + + public String getMusicRangeInformationTable() { + return musicRangeInformationTable; + } + + public void setMusicRangeInformationTable(String musicRangeInformationTable) { + this.musicRangeInformationTable = musicRangeInformationTable; + } + + public String getMusicRangeInformationIndex() { + return musicRangeInformationIndex; + } + + public void setMusicRangeInformationIndex(String musicRangeInformationIndex) { + this.musicRangeInformationIndex = musicRangeInformationIndex; + } + + /** + * Add a new range to the ones own by the local MDBC + * @param newRange range that is being added + * @throws IllegalArgumentException + */ + public synchronized void addNewRange(Range newRange) { + //Check overlap + for(Range r : ranges) { + if(r.overlaps(newRange)) { + throw new IllegalArgumentException("Range is already contain by a previous range"); + } + } + if(!ranges.contains(newRange)) { + ranges.add(newRange); + } + } + + /** + * Delete a range that is being modified + * @param rangeToDel limits of the range + */ + public synchronized void deleteRange(Range rangeToDel) { + if(!ranges.contains(rangeToDel)) { + logger.error(EELFLoggerDelegate.errorLogger,"Range doesn't exist"); + throw new IllegalArgumentException("Invalid table"); + } + ranges.remove(rangeToDel); + } + + /** + * Get all the ranges that are currently owned + * @return ranges + */ + public synchronized Range[] getSnapshot() { + return (Range[]) ranges.toArray(); + } + + /** + * Serialize the ranges + * @return serialized ranges + */ + public String toJson() { + GsonBuilder builder = new GsonBuilder(); + builder.setPrettyPrinting().serializeNulls();; + Gson gson = builder.create(); + return gson.toJson(this); + } + + /** + * Function to obtain the configuration + * @param filepath path to the database range + * @return a new object of type DatabaseRange + * @throws FileNotFoundException + */ + + public static DatabasePartition readJsonFromFile( String filepath) throws 
FileNotFoundException { + BufferedReader br; + try { + br = new BufferedReader( + new FileReader(filepath)); + } catch (FileNotFoundException e) { + logger.error(EELFLoggerDelegate.errorLogger,"File was not found when reading json"+e); + throw e; + } + Gson gson = new Gson(); + DatabasePartition range = gson.fromJson(br, DatabasePartition.class); + return range; + } + + public String getPartitionId() { + return partitionId; + } + + public void setPartitionId(String partitionId) { + this.partitionId = partitionId; + } + + public String getLockId() { + return lockId; + } + + public void setLockId(String lockId) { + this.lockId = lockId; + } + + public String getMusicTxDigestTable() { + return musicTxDigestTable; + } + + public void setMusicTxDigestTable(String musicTxDigestTable) { + this.musicTxDigestTable = musicTxDigestTable; + } +} diff --git a/src/main/java/org/onap/music/mdbc/LockId.java b/src/main/java/org/onap/music/mdbc/LockId.java new file mode 100644 index 0000000..9401f26 --- /dev/null +++ b/src/main/java/org/onap/music/mdbc/LockId.java @@ -0,0 +1,46 @@ +package org.onap.music.mdbc; + +public class LockId { + private String primaryKey; + private String domain; + private String lockReference; + + public LockId(String primaryKey, String domain, String lockReference){ + this.primaryKey = primaryKey; + this.domain = domain; + if(lockReference == null) { + this.lockReference = ""; + } + else{ + this.lockReference = lockReference; + } + } + + public String getFullyQualifiedLockKey(){ + return this.domain+"."+this.primaryKey; + } + + public String getPrimaryKey() { + return primaryKey; + } + + public void setPrimaryKey(String primaryKey) { + this.primaryKey = primaryKey; + } + + public String getDomain() { + return domain; + } + + public void setDomain(String domain) { + this.domain = domain; + } + + public String getLockReference() { + return lockReference; + } + + public void setLockReference(String lockReference) { + this.lockReference = lockReference; + } +} diff --git a/src/main/java/org/onap/music/mdbc/MDBCUtils.java b/src/main/java/org/onap/music/mdbc/MDBCUtils.java new file mode 100644 index 0000000..2e150bd --- /dev/null +++ b/src/main/java/org/onap/music/mdbc/MDBCUtils.java @@ -0,0 +1,70 @@ +package org.onap.music.mdbc; + +import java.io.*; +import java.util.Base64; +import java.util.Deque; +import java.util.HashMap; + +import org.onap.music.logging.EELFLoggerDelegate; +import org.onap.music.logging.format.AppMessages; +import org.onap.music.logging.format.ErrorSeverity; +import org.onap.music.logging.format.ErrorTypes; +import org.onap.music.mdbc.tables.Operation; +import org.onap.music.mdbc.tables.StagingTable; + +import javassist.bytecode.Descriptor.Iterator; + +import org.apache.commons.lang3.tuple.Pair; +import org.json.JSONObject; + +public class MDBCUtils { + /** Write the object to a Base64 string. 
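+     * A minimal round-trip sketch (any java.io.Serializable works; the HashMap here is only an example):
+     * <pre>
+     *     String encoded = MDBCUtils.toString(new java.util.HashMap<String, String>());
+     *     Object decoded = MDBCUtils.fromString(encoded);   // decoded is the HashMap again
+     * </pre>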
*/ + public static String toString( Serializable o ) throws IOException { + //TODO We may want to also compress beside serialize + ByteArrayOutputStream baos = new ByteArrayOutputStream(); + try { + ObjectOutputStream oos = new ObjectOutputStream(baos); + oos.writeObject(o); + oos.close(); + return Base64.getEncoder().encodeToString(baos.toByteArray()); + } + finally{ + baos.close(); + } + } + + public static String toString( JSONObject o) throws IOException { + //TODO We may want to also compress beside serialize + ByteArrayOutputStream baos = new ByteArrayOutputStream(); + ObjectOutputStream oos = new ObjectOutputStream( baos ); + oos.writeObject( o ); + oos.close(); + return Base64.getEncoder().encodeToString(baos.toByteArray()); + } + + /** Read the object from Base64 string. */ + public static Object fromString( String s ) throws IOException , + ClassNotFoundException { + byte [] data = Base64.getDecoder().decode( s ); + ObjectInputStream ois = new ObjectInputStream( + new ByteArrayInputStream( data ) ); + Object o = ois.readObject(); + ois.close(); + return o; + } + + public static void saveToFile(String serializedContent, String filename, EELFLoggerDelegate logger) throws IOException { + try (PrintWriter fout = new PrintWriter(filename)) { + fout.println(serializedContent); + } catch (FileNotFoundException e) { + if(logger!=null){ + logger.error(EELFLoggerDelegate.errorLogger, e.getMessage(), AppMessages.IOERROR, ErrorTypes.UNKNOWN, ErrorSeverity.CRITICAL); + } + else { + e.printStackTrace(); + } + throw e; + } + } + +} diff --git a/src/main/java/org/onap/music/mdbc/MdbcCallableStatement.java b/src/main/java/org/onap/music/mdbc/MdbcCallableStatement.java new file mode 100644 index 0000000..95a49a8 --- /dev/null +++ b/src/main/java/org/onap/music/mdbc/MdbcCallableStatement.java @@ -0,0 +1,738 @@ +package org.onap.music.mdbc; + +import java.io.InputStream; +import java.io.Reader; +import java.math.BigDecimal; +import java.net.URL; +import java.sql.Array; +import java.sql.Blob; +import java.sql.CallableStatement; +import java.sql.Clob; +import java.sql.Date; +import java.sql.NClob; +import java.sql.ParameterMetaData; +import java.sql.Ref; +import java.sql.RowId; +import java.sql.SQLException; +import java.sql.SQLXML; +import java.sql.Statement; +import java.sql.Time; +import java.sql.Timestamp; +import java.util.Calendar; +import java.util.Map; + +import org.onap.music.logging.EELFLoggerDelegate; + +/** + * ProxyStatement is a proxy Statement that front ends Statements from the underlying JDBC driver. It passes all operations through, + * and invokes the MusicSqlManager when there is the possibility that database tables have been created or dropped. 
+ * + * @author Robert Eby + */ +public class MdbcCallableStatement extends MdbcPreparedStatement implements CallableStatement { + private EELFLoggerDelegate logger = EELFLoggerDelegate.getLogger(MdbcCallableStatement.class); + @SuppressWarnings("unused") + private static final String DATASTAX_PREFIX = "com.datastax.driver"; + + public MdbcCallableStatement(Statement stmt, MusicSqlManager m) { + super(stmt, m); + } + + public MdbcCallableStatement(Statement stmt, String sql, MusicSqlManager mgr) { + super(stmt, sql, mgr); + } + + @Override + public T unwrap(Class iface) throws SQLException { + logger.error(EELFLoggerDelegate.errorLogger, "proxystatement unwrap: " + iface.getName()); + return stmt.unwrap(iface); + } + + @Override + public boolean isWrapperFor(Class iface) throws SQLException { + logger.error(EELFLoggerDelegate.errorLogger, "proxystatement isWrapperFor: " + iface.getName()); + return stmt.isWrapperFor(iface); + } + + @Override + public void setTimestamp(int parameterIndex, Timestamp x, Calendar cal) throws SQLException { + ((CallableStatement)stmt).setTimestamp(parameterIndex, x, cal); + } + + @Override + public void setNull(int parameterIndex, int sqlType, String typeName) throws SQLException { + ((CallableStatement)stmt).setNull(parameterIndex, sqlType, typeName); + } + + @Override + public void setURL(int parameterIndex, URL x) throws SQLException { + ((CallableStatement)stmt).setURL(parameterIndex, x); + } + + @Override + public ParameterMetaData getParameterMetaData() throws SQLException { + return ((CallableStatement)stmt).getParameterMetaData(); + } + + @Override + public void setRowId(int parameterIndex, RowId x) throws SQLException { + ((CallableStatement)stmt).setRowId(parameterIndex, x); + } + + @Override + public void setNString(int parameterIndex, String value) throws SQLException { + ((CallableStatement)stmt).setNString(parameterIndex, value); + } + + @Override + public void setNCharacterStream(int parameterIndex, Reader value, long length) throws SQLException { + ((CallableStatement)stmt).setNCharacterStream(parameterIndex, value, length); + } + + @Override + public void setNClob(int parameterIndex, NClob value) throws SQLException { + ((CallableStatement)stmt).setNClob(parameterIndex, value); + } + + @Override + public void setClob(int parameterIndex, Reader reader, long length) throws SQLException { + ((CallableStatement)stmt).setClob(parameterIndex, reader, length); + } + + @Override + public void setBlob(int parameterIndex, InputStream inputStream, long length) throws SQLException { + ((CallableStatement)stmt).setBlob(parameterIndex, inputStream, length); + } + + @Override + public void setNClob(int parameterIndex, Reader reader, long length) throws SQLException { + ((CallableStatement)stmt).setNClob(parameterIndex, reader, length); + } + + @Override + public void setSQLXML(int parameterIndex, SQLXML xmlObject) throws SQLException { + ((CallableStatement)stmt).setSQLXML(parameterIndex, xmlObject); + } + + @Override + public void setObject(int parameterIndex, Object x, int targetSqlType, int scaleOrLength) throws SQLException { + ((CallableStatement)stmt).setObject(parameterIndex, x, targetSqlType, scaleOrLength); + } + + @Override + public void setAsciiStream(int parameterIndex, InputStream x, long length) throws SQLException { + ((CallableStatement)stmt).setAsciiStream(parameterIndex, x, length); + } + + @Override + public void setBinaryStream(int parameterIndex, InputStream x, long length) throws SQLException { + 
((CallableStatement)stmt).setBinaryStream(parameterIndex, x, length); + } + + @Override + public void setCharacterStream(int parameterIndex, Reader reader, long length) throws SQLException { + ((CallableStatement)stmt).setCharacterStream(parameterIndex, reader, length); + } + + @Override + public void setAsciiStream(int parameterIndex, InputStream x) throws SQLException { + ((CallableStatement)stmt).setAsciiStream(parameterIndex, x); + } + + @Override + public void setBinaryStream(int parameterIndex, InputStream x) throws SQLException { + ((CallableStatement)stmt).setBinaryStream(parameterIndex, x); + } + + @Override + public void setCharacterStream(int parameterIndex, Reader reader) throws SQLException { + ((CallableStatement)stmt).setCharacterStream(parameterIndex, reader); + } + + @Override + public void setNCharacterStream(int parameterIndex, Reader value) throws SQLException { + ((CallableStatement)stmt).setNCharacterStream(parameterIndex, value); + } + + @Override + public void setClob(int parameterIndex, Reader reader) throws SQLException { + ((CallableStatement)stmt).setClob(parameterIndex, reader); + } + + @Override + public void setBlob(int parameterIndex, InputStream inputStream) throws SQLException { + ((CallableStatement)stmt).setBlob(parameterIndex, inputStream); + } + + @Override + public void setNClob(int parameterIndex, Reader reader) throws SQLException { + ((CallableStatement)stmt).setNClob(parameterIndex, reader); + } + + @Override + public void registerOutParameter(int parameterIndex, int sqlType) throws SQLException { + ((CallableStatement)stmt).registerOutParameter(parameterIndex, sqlType); + } + + @Override + public void registerOutParameter(int parameterIndex, int sqlType, int scale) throws SQLException { + ((CallableStatement)stmt).registerOutParameter(parameterIndex, sqlType, scale); + } + + @Override + public boolean wasNull() throws SQLException { + return ((CallableStatement)stmt).wasNull(); + } + + @Override + public String getString(int parameterIndex) throws SQLException { + return ((CallableStatement)stmt).getString(parameterIndex); + } + + @Override + public boolean getBoolean(int parameterIndex) throws SQLException { + return ((CallableStatement)stmt).getBoolean(parameterIndex); + } + + @Override + public byte getByte(int parameterIndex) throws SQLException { + return ((CallableStatement)stmt).getByte(parameterIndex); + } + + @Override + public short getShort(int parameterIndex) throws SQLException { + return ((CallableStatement)stmt).getShort(parameterIndex); + } + + @Override + public int getInt(int parameterIndex) throws SQLException { + return ((CallableStatement)stmt).getInt(parameterIndex); + } + + @Override + public long getLong(int parameterIndex) throws SQLException { + return ((CallableStatement)stmt).getLong(parameterIndex); + } + + @Override + public float getFloat(int parameterIndex) throws SQLException { + return ((CallableStatement)stmt).getFloat(parameterIndex); + } + + @Override + public double getDouble(int parameterIndex) throws SQLException { + return ((CallableStatement)stmt).getDouble(parameterIndex); + } + + @SuppressWarnings("deprecation") + @Override + public BigDecimal getBigDecimal(int parameterIndex, int scale) throws SQLException { + return ((CallableStatement)stmt).getBigDecimal(parameterIndex, scale); + } + + @Override + public byte[] getBytes(int parameterIndex) throws SQLException { + return ((CallableStatement)stmt).getBytes(parameterIndex); + } + + @Override + public Date getDate(int parameterIndex) throws SQLException 
{ + return ((CallableStatement)stmt).getDate(parameterIndex); + } + + @Override + public Time getTime(int parameterIndex) throws SQLException { + return ((CallableStatement)stmt).getTime(parameterIndex); + } + + @Override + public Timestamp getTimestamp(int parameterIndex) throws SQLException { + return ((CallableStatement)stmt).getTimestamp(parameterIndex); + } + + @Override + public Object getObject(int parameterIndex) throws SQLException { + return ((CallableStatement)stmt).getObject(parameterIndex); + } + + @Override + public BigDecimal getBigDecimal(int parameterIndex) throws SQLException { + return ((CallableStatement)stmt).getBigDecimal(parameterIndex); + } + + @Override + public Object getObject(int parameterIndex, Map> map) throws SQLException { + return ((CallableStatement)stmt).getObject(parameterIndex, map); + } + + @Override + public Ref getRef(int parameterIndex) throws SQLException { + return ((CallableStatement)stmt).getRef(parameterIndex); + } + + @Override + public Blob getBlob(int parameterIndex) throws SQLException { + return ((CallableStatement)stmt).getBlob(parameterIndex); + } + + @Override + public Clob getClob(int parameterIndex) throws SQLException { + return ((CallableStatement)stmt).getClob(parameterIndex); + } + + @Override + public Array getArray(int parameterIndex) throws SQLException { + return ((CallableStatement)stmt).getArray(parameterIndex); + } + + @Override + public Date getDate(int parameterIndex, Calendar cal) throws SQLException { + return ((CallableStatement)stmt).getDate(parameterIndex, cal); + } + + @Override + public Time getTime(int parameterIndex, Calendar cal) throws SQLException { + return ((CallableStatement)stmt).getTime(parameterIndex, cal); + } + + @Override + public Timestamp getTimestamp(int parameterIndex, Calendar cal) throws SQLException { + return ((CallableStatement)stmt).getTimestamp(parameterIndex, cal); + } + + @Override + public void registerOutParameter(int parameterIndex, int sqlType, String typeName) throws SQLException { + ((CallableStatement)stmt).registerOutParameter(parameterIndex, sqlType, typeName); + } + + @Override + public void registerOutParameter(String parameterName, int sqlType) throws SQLException { + ((CallableStatement)stmt).registerOutParameter(parameterName, sqlType); + } + + @Override + public void registerOutParameter(String parameterName, int sqlType, int scale) throws SQLException { + ((CallableStatement)stmt).registerOutParameter(parameterName, sqlType, scale); + } + + @Override + public void registerOutParameter(String parameterName, int sqlType, String typeName) throws SQLException { + ((CallableStatement)stmt).registerOutParameter(parameterName, sqlType, typeName); + } + + @Override + public URL getURL(int parameterIndex) throws SQLException { + return ((CallableStatement)stmt).getURL(parameterIndex); + } + + @Override + public void setURL(String parameterName, URL val) throws SQLException { + ((CallableStatement)stmt).setURL(parameterName, val); + } + + @Override + public void setNull(String parameterName, int sqlType) throws SQLException { + ((CallableStatement)stmt).setNull(parameterName, sqlType); + } + + @Override + public void setBoolean(String parameterName, boolean x) throws SQLException { + ((CallableStatement)stmt).setBoolean(parameterName, x); + } + + @Override + public void setByte(String parameterName, byte x) throws SQLException { + ((CallableStatement)stmt).setByte(parameterName, x); + } + + @Override + public void setShort(String parameterName, short x) throws SQLException { + 
((CallableStatement)stmt).setShort(parameterName, x); + } + + @Override + public void setInt(String parameterName, int x) throws SQLException { + ((CallableStatement)stmt).setInt(parameterName, x); + } + + @Override + public void setLong(String parameterName, long x) throws SQLException { + ((CallableStatement)stmt).setLong(parameterName, x); + } + + @Override + public void setFloat(String parameterName, float x) throws SQLException { + ((CallableStatement)stmt).setFloat(parameterName, x); + } + + @Override + public void setDouble(String parameterName, double x) throws SQLException { + ((CallableStatement)stmt).setDouble(parameterName, x); + } + + @Override + public void setBigDecimal(String parameterName, BigDecimal x) throws SQLException { + ((CallableStatement)stmt).setBigDecimal(parameterName, x); + } + + @Override + public void setString(String parameterName, String x) throws SQLException { + ((CallableStatement)stmt).setString(parameterName, x); + } + + @Override + public void setBytes(String parameterName, byte[] x) throws SQLException { + ((CallableStatement)stmt).setBytes(parameterName, x); + } + + @Override + public void setDate(String parameterName, Date x) throws SQLException { + ((CallableStatement)stmt).setDate(parameterName, x); + } + + @Override + public void setTime(String parameterName, Time x) throws SQLException { + ((CallableStatement)stmt).setTime(parameterName, x); + } + + @Override + public void setTimestamp(String parameterName, Timestamp x) throws SQLException { + ((CallableStatement)stmt).setTimestamp(parameterName, x); + } + + @Override + public void setAsciiStream(String parameterName, InputStream x, int length) throws SQLException { + ((CallableStatement)stmt).setAsciiStream(parameterName, x, length); + } + + @Override + public void setBinaryStream(String parameterName, InputStream x, int length) throws SQLException { + ((CallableStatement)stmt).setBinaryStream(parameterName, x, length); + } + + @Override + public void setObject(String parameterName, Object x, int targetSqlType, int scale) throws SQLException { + ((CallableStatement)stmt).setObject(parameterName, x, targetSqlType, scale); + } + + @Override + public void setObject(String parameterName, Object x, int targetSqlType) throws SQLException { + ((CallableStatement)stmt).setObject(parameterName, x, targetSqlType); + } + + @Override + public void setObject(String parameterName, Object x) throws SQLException { + ((CallableStatement)stmt).setObject(parameterName, x); + } + + @Override + public void setCharacterStream(String parameterName, Reader reader, int length) throws SQLException { + ((CallableStatement)stmt).setCharacterStream(parameterName, reader, length); + } + + @Override + public void setDate(String parameterName, Date x, Calendar cal) throws SQLException { + ((CallableStatement)stmt).setDate(parameterName, x, cal); + } + + @Override + public void setTime(String parameterName, Time x, Calendar cal) throws SQLException { + ((CallableStatement)stmt).setTime(parameterName, x, cal); + } + + @Override + public void setTimestamp(String parameterName, Timestamp x, Calendar cal) throws SQLException { + ((CallableStatement)stmt).setTimestamp(parameterName, x, cal); + } + + @Override + public void setNull(String parameterName, int sqlType, String typeName) throws SQLException { + ((CallableStatement)stmt).setNull(parameterName, sqlType, typeName); + } + + @Override + public String getString(String parameterName) throws SQLException { + return ((CallableStatement)stmt).getString(parameterName); + } + + 
@Override + public boolean getBoolean(String parameterName) throws SQLException { + return ((CallableStatement)stmt).getBoolean(parameterName); + } + + @Override + public byte getByte(String parameterName) throws SQLException { + return ((CallableStatement)stmt).getByte(parameterName); + } + + @Override + public short getShort(String parameterName) throws SQLException { + return ((CallableStatement)stmt).getShort(parameterName); + } + + @Override + public int getInt(String parameterName) throws SQLException { + return ((CallableStatement)stmt).getInt(parameterName); + } + + @Override + public long getLong(String parameterName) throws SQLException { + return ((CallableStatement)stmt).getLong(parameterName); + } + + @Override + public float getFloat(String parameterName) throws SQLException { + return ((CallableStatement)stmt).getFloat(parameterName); + } + + @Override + public double getDouble(String parameterName) throws SQLException { + return ((CallableStatement)stmt).getDouble(parameterName); + } + + @Override + public byte[] getBytes(String parameterName) throws SQLException { + return ((CallableStatement)stmt).getBytes(parameterName); + } + + @Override + public Date getDate(String parameterName) throws SQLException { + return ((CallableStatement)stmt).getDate(parameterName); + } + + @Override + public Time getTime(String parameterName) throws SQLException { + return ((CallableStatement)stmt).getTime(parameterName); + } + + @Override + public Timestamp getTimestamp(String parameterName) throws SQLException { + return ((CallableStatement)stmt).getTimestamp(parameterName); + } + + @Override + public Object getObject(String parameterName) throws SQLException { + return ((CallableStatement)stmt).getObject(parameterName); + } + + @Override + public BigDecimal getBigDecimal(String parameterName) throws SQLException { + return ((CallableStatement)stmt).getBigDecimal(parameterName); + } + + @Override + public Object getObject(String parameterName, Map> map) throws SQLException { + return ((CallableStatement)stmt).getObject(parameterName, map); + } + + @Override + public Ref getRef(String parameterName) throws SQLException { + return ((CallableStatement)stmt).getRef(parameterName); + } + + @Override + public Blob getBlob(String parameterName) throws SQLException { + return ((CallableStatement)stmt).getBlob(parameterName); + } + + @Override + public Clob getClob(String parameterName) throws SQLException { + return ((CallableStatement)stmt).getClob(parameterName); + } + + @Override + public Array getArray(String parameterName) throws SQLException { + return ((CallableStatement)stmt).getArray(parameterName); + } + + @Override + public Date getDate(String parameterName, Calendar cal) throws SQLException { + return ((CallableStatement)stmt).getDate(parameterName, cal); + } + + @Override + public Time getTime(String parameterName, Calendar cal) throws SQLException { + return ((CallableStatement)stmt).getTime(parameterName, cal); + } + + @Override + public Timestamp getTimestamp(String parameterName, Calendar cal) throws SQLException { + return ((CallableStatement)stmt).getTimestamp(parameterName, cal); + } + + @Override + public URL getURL(String parameterName) throws SQLException { + return ((CallableStatement)stmt).getURL(parameterName); + } + + @Override + public RowId getRowId(int parameterIndex) throws SQLException { + return ((CallableStatement)stmt).getRowId(parameterIndex); + } + + @Override + public RowId getRowId(String parameterName) throws SQLException { + return 
((CallableStatement)stmt).getRowId(parameterName); + } + + @Override + public void setRowId(String parameterName, RowId x) throws SQLException { + ((CallableStatement)stmt).setRowId(parameterName, x); + } + + @Override + public void setNString(String parameterName, String value) throws SQLException { + ((CallableStatement)stmt).setNString(parameterName, value); + } + + @Override + public void setNCharacterStream(String parameterName, Reader value, long length) throws SQLException { + ((CallableStatement)stmt).setNCharacterStream(parameterName, value, length); + } + + @Override + public void setNClob(String parameterName, NClob value) throws SQLException { + ((CallableStatement)stmt).setNClob(parameterName, value); + } + + @Override + public void setClob(String parameterName, Reader reader, long length) throws SQLException { + ((CallableStatement)stmt).setClob(parameterName, reader, length); + } + + @Override + public void setBlob(String parameterName, InputStream inputStream, long length) throws SQLException { + ((CallableStatement)stmt).setBlob(parameterName, inputStream, length); + } + + @Override + public void setNClob(String parameterName, Reader reader, long length) throws SQLException { + ((CallableStatement)stmt).setNClob(parameterName, reader, length); + } + + @Override + public NClob getNClob(int parameterIndex) throws SQLException { + return ((CallableStatement)stmt).getNClob(parameterIndex); + } + + @Override + public NClob getNClob(String parameterName) throws SQLException { + return ((CallableStatement)stmt).getNClob(parameterName); + } + + @Override + public void setSQLXML(String parameterName, SQLXML xmlObject) throws SQLException { + ((CallableStatement)stmt).setSQLXML(parameterName, xmlObject); + } + + @Override + public SQLXML getSQLXML(int parameterIndex) throws SQLException { + return ((CallableStatement)stmt).getSQLXML(parameterIndex); + } + + @Override + public SQLXML getSQLXML(String parameterName) throws SQLException { + return ((CallableStatement)stmt).getSQLXML(parameterName); + } + + @Override + public String getNString(int parameterIndex) throws SQLException { + return ((CallableStatement)stmt).getNString(parameterIndex); + } + + @Override + public String getNString(String parameterName) throws SQLException { + return ((CallableStatement)stmt).getNString(parameterName); + } + + @Override + public Reader getNCharacterStream(int parameterIndex) throws SQLException { + return ((CallableStatement)stmt).getNCharacterStream(parameterIndex); + } + + @Override + public Reader getNCharacterStream(String parameterName) throws SQLException { + return ((CallableStatement)stmt).getNCharacterStream(parameterName); + } + + @Override + public Reader getCharacterStream(int parameterIndex) throws SQLException { + return ((CallableStatement)stmt).getCharacterStream(parameterIndex); + } + + @Override + public Reader getCharacterStream(String parameterName) throws SQLException { + return ((CallableStatement)stmt).getCharacterStream(parameterName); + } + + @Override + public void setBlob(String parameterName, Blob x) throws SQLException { + ((CallableStatement)stmt).setBlob(parameterName, x); + } + + @Override + public void setClob(String parameterName, Clob x) throws SQLException { + ((CallableStatement)stmt).setClob(parameterName, x); + } + + @Override + public void setAsciiStream(String parameterName, InputStream x, long length) throws SQLException { + ((CallableStatement)stmt).setAsciiStream(parameterName, x, length); + } + + @Override + public void setBinaryStream(String 
parameterName, InputStream x, long length) throws SQLException { + ((CallableStatement)stmt).setBinaryStream(parameterName, x, length); + } + + @Override + public void setCharacterStream(String parameterName, Reader reader, long length) throws SQLException { + ((CallableStatement)stmt).setCharacterStream(parameterName, reader, length); + } + + @Override + public void setAsciiStream(String parameterName, InputStream x) throws SQLException { + ((CallableStatement)stmt).setAsciiStream(parameterName, x); + } + + @Override + public void setBinaryStream(String parameterName, InputStream x) throws SQLException { + ((CallableStatement)stmt).setBinaryStream(parameterName, x); + } + + @Override + public void setCharacterStream(String parameterName, Reader reader) throws SQLException { + ((CallableStatement)stmt).setCharacterStream(parameterName, reader); + } + + @Override + public void setNCharacterStream(String parameterName, Reader value) throws SQLException { + ((CallableStatement)stmt).setNCharacterStream(parameterName, value); + } + + @Override + public void setClob(String parameterName, Reader reader) throws SQLException { + ((CallableStatement)stmt).setClob(parameterName, reader); + } + + @Override + public void setBlob(String parameterName, InputStream inputStream) throws SQLException { + ((CallableStatement)stmt).setBlob(parameterName, inputStream); + } + + @Override + public void setNClob(String parameterName, Reader reader) throws SQLException { + ((CallableStatement)stmt).setNClob(parameterName, reader); + } + + @Override + public T getObject(int parameterIndex, Class type) throws SQLException { + return ((CallableStatement)stmt).getObject(parameterIndex, type); + } + + @Override + public T getObject(String parameterName, Class type) throws SQLException { + return ((CallableStatement)stmt).getObject(parameterName, type); + } + +} diff --git a/src/main/java/org/onap/music/mdbc/MdbcConnection.java b/src/main/java/org/onap/music/mdbc/MdbcConnection.java new file mode 100644 index 0000000..b553fb5 --- /dev/null +++ b/src/main/java/org/onap/music/mdbc/MdbcConnection.java @@ -0,0 +1,419 @@ +package org.onap.music.mdbc; + +import java.sql.Array; +import java.sql.Blob; +import java.sql.CallableStatement; +import java.sql.Clob; +import java.sql.Connection; +import java.sql.DatabaseMetaData; +import java.sql.NClob; +import java.sql.PreparedStatement; +import java.sql.SQLClientInfoException; +import java.sql.SQLException; +import java.sql.SQLWarning; +import java.sql.SQLXML; +import java.sql.Savepoint; +import java.sql.Statement; +import java.sql.Struct; +import java.util.Map; +import java.util.Properties; +import java.util.concurrent.Executor; + +import org.onap.music.exceptions.MDBCServiceException; +import org.onap.music.exceptions.QueryException; +import org.onap.music.logging.EELFLoggerDelegate; +import org.onap.music.logging.format.AppMessages; +import org.onap.music.logging.format.ErrorSeverity; +import org.onap.music.logging.format.ErrorTypes; +import org.onap.music.mdbc.mixins.MusicInterface; +import org.onap.music.mdbc.tables.TxCommitProgress; + + +/** + * ProxyConnection is a proxy to a JDBC driver Connection. It uses the MusicSqlManager to copy + * data to and from Cassandra and the underlying JDBC database as needed. It will notify the underlying + * MusicSqlManager of any calls to commit(), rollback() or setAutoCommit(). + * Otherwise it just forwards all requests to the underlying Connection of the 'real' database. 
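+ * <p>
+ * Sketch of a transaction as seen by client code (how the MdbcConnection is obtained and the table name
+ * are placeholders, not defined by this class):
+ * <pre>
+ *     Connection conn = ...;                   // an MdbcConnection handed out by the MDBC layer
+ *     conn.setAutoCommit(false);
+ *     try (Statement st = conn.createStatement()) {
+ *         st.executeUpdate("UPDATE persons SET age = age + 1");
+ *     }
+ *     conn.commit();                           // pushes the transaction to MUSIC before committing the SQL database
+ * </pre>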
+ * + * @author Robert Eby + */ +public class MdbcConnection implements Connection { + private static EELFLoggerDelegate logger = EELFLoggerDelegate.getLogger(MdbcConnection.class); + + private final String id; // This is the transaction id, assigned to this connection. There is no need to change the id, if connection is reused + private final Connection conn; // the JDBC Connection to the actual underlying database + private final MusicSqlManager mgr; // there should be one MusicSqlManager in use per Connection + private final TxCommitProgress progressKeeper; + private final DatabasePartition partition; + + public MdbcConnection(String id, String url, Connection c, Properties info, MusicInterface mi, TxCommitProgress progressKeeper, DatabasePartition partition) throws MDBCServiceException { + this.id = id; + if (c == null) { + throw new MDBCServiceException("Connection is null"); + } + this.conn = c; + try { + this.mgr = new MusicSqlManager(url, c, info, mi); + } catch (MDBCServiceException e) { + logger.error("Failure in creating Music SQL Manager"); + logger.error(EELFLoggerDelegate.errorLogger, e.getMessage(), AppMessages.QUERYERROR, ErrorTypes.QUERYERROR, ErrorSeverity.CRITICAL); + throw e; + } + try { + this.mgr.setAutoCommit(c.getAutoCommit(),null,null,null); + } catch (SQLException e) { + logger.error("Failure in autocommit"); + logger.error(EELFLoggerDelegate.errorLogger, e.getMessage(), AppMessages.QUERYERROR, ErrorTypes.QUERYERROR, ErrorSeverity.CRITICAL); + } + + // Verify the tables in MUSIC match the tables in the database + // and create triggers on any tables that need them + //mgr.synchronizeTableData(); + if ( mgr != null ) try { + mgr.synchronizeTables(); + } catch (QueryException e) { + logger.error("Error syncrhonizing tables"); + logger.error(EELFLoggerDelegate.errorLogger, e.getMessage(), AppMessages.QUERYERROR, ErrorTypes.QUERYERROR, ErrorSeverity.CRITICAL); + } + else { + logger.error(EELFLoggerDelegate.errorLogger, "MusicSqlManager was not correctly created", AppMessages.UNKNOWNERROR, ErrorTypes.UNKNOWN, ErrorSeverity.FATAL); + throw new MDBCServiceException("Music SQL Manager object is null or invalid"); + } + this.progressKeeper = progressKeeper; + this.partition = partition; + logger.debug("Mdbc connection created with id: "+id); + } + + @Override + public T unwrap(Class iface) throws SQLException { + logger.error(EELFLoggerDelegate.errorLogger, "proxyconn unwrap: " + iface.getName()); + return conn.unwrap(iface); + } + + @Override + public boolean isWrapperFor(Class iface) throws SQLException { + logger.error(EELFLoggerDelegate.errorLogger, "proxystatement iswrapperfor: " + iface.getName()); + return conn.isWrapperFor(iface); + } + + @Override + public Statement createStatement() throws SQLException { + return new MdbcCallableStatement(conn.createStatement(), mgr); + } + + @Override + public PreparedStatement prepareStatement(String sql) throws SQLException { + //TODO: grab the sql call from here and all the other preparestatement calls + return new MdbcPreparedStatement(conn.prepareStatement(sql), sql, mgr); + } + + @Override + public CallableStatement prepareCall(String sql) throws SQLException { + return new MdbcCallableStatement(conn.prepareCall(sql), mgr); + } + + @Override + public String nativeSQL(String sql) throws SQLException { + return conn.nativeSQL(sql); + } + + @Override + public void setAutoCommit(boolean autoCommit) throws SQLException { + boolean b = conn.getAutoCommit(); + if (b != autoCommit) { + if(progressKeeper!=null) 
progressKeeper.commitRequested(id); + try { + mgr.setAutoCommit(autoCommit,id,progressKeeper,partition); + if(progressKeeper!=null) + progressKeeper.setMusicDone(id); + } catch (MDBCServiceException e) { + logger.error(EELFLoggerDelegate.errorLogger, "Commit to music failed", AppMessages.UNKNOWNERROR, ErrorTypes.UNKNOWN, ErrorSeverity.FATAL); + throw new SQLException("Failure commiting to MUSIC"); + } + conn.setAutoCommit(autoCommit); + if(progressKeeper!=null) { + progressKeeper.setSQLDone(id); + } + if(progressKeeper!=null&&progressKeeper.isComplete(id)){ + progressKeeper.reinitializeTxProgress(id); + } + } + } + + @Override + public boolean getAutoCommit() throws SQLException { + return conn.getAutoCommit(); + } + + @Override + public void commit() throws SQLException { + if(progressKeeper.isComplete(id)) { + return; + } + if(progressKeeper != null) { + progressKeeper.commitRequested(id); + } + + try { + mgr.commit(id,progressKeeper,partition); + } catch (MDBCServiceException e) { + //If the commit fail, then a new commitId should be used + logger.error(EELFLoggerDelegate.errorLogger, "Commit to music failed", AppMessages.UNKNOWNERROR, ErrorTypes.UNKNOWN, ErrorSeverity.FATAL); + throw new SQLException("Failure commiting to MUSIC"); + } + + if(progressKeeper != null) { + progressKeeper.setMusicDone(id); + } + + conn.commit(); + + if(progressKeeper != null) { + progressKeeper.setSQLDone(id); + } + //MusicMixin.releaseZKLocks(MusicMixin.currentLockMap.get(getConnID())); + if(progressKeeper.isComplete(id)){ + progressKeeper.reinitializeTxProgress(id); + } + } + + @Override + public void rollback() throws SQLException { + mgr.rollback(); + conn.rollback(); + progressKeeper.reinitializeTxProgress(id); + } + + @Override + public void close() throws SQLException { + logger.debug("Closing mdbc connection with id:"+id); + if (mgr != null) { + logger.debug("Closing mdbc manager with id:"+id); + mgr.close(); + } + if (conn != null && !conn.isClosed()) { + logger.debug("Closing jdbc from mdbc with id:"+id); + conn.close(); + logger.debug("Connection was closed for id:" + id); + } + } + + @Override + public boolean isClosed() throws SQLException { + return conn.isClosed(); + } + + @Override + public DatabaseMetaData getMetaData() throws SQLException { + return conn.getMetaData(); + } + + @Override + public void setReadOnly(boolean readOnly) throws SQLException { + conn.setReadOnly(readOnly); + } + + @Override + public boolean isReadOnly() throws SQLException { + return conn.isReadOnly(); + } + + @Override + public void setCatalog(String catalog) throws SQLException { + conn.setCatalog(catalog); + } + + @Override + public String getCatalog() throws SQLException { + return conn.getCatalog(); + } + + @Override + public void setTransactionIsolation(int level) throws SQLException { + conn.setTransactionIsolation(level); + } + + @Override + public int getTransactionIsolation() throws SQLException { + return conn.getTransactionIsolation(); + } + + @Override + public SQLWarning getWarnings() throws SQLException { + return conn.getWarnings(); + } + + @Override + public void clearWarnings() throws SQLException { + conn.clearWarnings(); + } + + @Override + public Statement createStatement(int resultSetType, int resultSetConcurrency) throws SQLException { + return new MdbcCallableStatement(conn.createStatement(resultSetType, resultSetConcurrency), mgr); + } + + @Override + public PreparedStatement prepareStatement(String sql, int resultSetType, int resultSetConcurrency) + throws SQLException { + return new 
MdbcCallableStatement(conn.prepareStatement(sql, resultSetType, resultSetConcurrency), sql, mgr); + } + + @Override + public CallableStatement prepareCall(String sql, int resultSetType, int resultSetConcurrency) throws SQLException { + return new MdbcCallableStatement(conn.prepareCall(sql, resultSetType, resultSetConcurrency), mgr); + } + + @Override + public Map> getTypeMap() throws SQLException { + return conn.getTypeMap(); + } + + @Override + public void setTypeMap(Map> map) throws SQLException { + conn.setTypeMap(map); + } + + @Override + public void setHoldability(int holdability) throws SQLException { + conn.setHoldability(holdability); + } + + @Override + public int getHoldability() throws SQLException { + return conn.getHoldability(); + } + + @Override + public Savepoint setSavepoint() throws SQLException { + return conn.setSavepoint(); + } + + @Override + public Savepoint setSavepoint(String name) throws SQLException { + return conn.setSavepoint(name); + } + + @Override + public void rollback(Savepoint savepoint) throws SQLException { + conn.rollback(savepoint); + } + + @Override + public void releaseSavepoint(Savepoint savepoint) throws SQLException { + conn.releaseSavepoint(savepoint); + } + + @Override + public Statement createStatement(int resultSetType, int resultSetConcurrency, int resultSetHoldability) + throws SQLException { + return new MdbcCallableStatement(conn.createStatement(resultSetType, resultSetConcurrency, resultSetHoldability), mgr); + } + + @Override + public PreparedStatement prepareStatement(String sql, int resultSetType, int resultSetConcurrency, + int resultSetHoldability) throws SQLException { + return new MdbcCallableStatement(conn.prepareStatement(sql, resultSetType, resultSetConcurrency, resultSetHoldability), sql, mgr); + } + + @Override + public CallableStatement prepareCall(String sql, int resultSetType, int resultSetConcurrency, + int resultSetHoldability) throws SQLException { + return new MdbcCallableStatement(conn.prepareCall(sql, resultSetType, resultSetConcurrency, resultSetHoldability), mgr); + } + + @Override + public PreparedStatement prepareStatement(String sql, int autoGeneratedKeys) throws SQLException { + return new MdbcPreparedStatement(conn.prepareStatement(sql, autoGeneratedKeys), sql, mgr); + } + + @Override + public PreparedStatement prepareStatement(String sql, int[] columnIndexes) throws SQLException { + return new MdbcPreparedStatement(conn.prepareStatement(sql, columnIndexes), sql, mgr); + } + + @Override + public PreparedStatement prepareStatement(String sql, String[] columnNames) throws SQLException { + return new MdbcPreparedStatement(conn.prepareStatement(sql, columnNames), sql, mgr); + } + + @Override + public Clob createClob() throws SQLException { + return conn.createClob(); + } + + @Override + public Blob createBlob() throws SQLException { + return conn.createBlob(); + } + + @Override + public NClob createNClob() throws SQLException { + return conn.createNClob(); + } + + @Override + public SQLXML createSQLXML() throws SQLException { + return conn.createSQLXML(); + } + + @Override + public boolean isValid(int timeout) throws SQLException { + return conn.isValid(timeout); + } + + @Override + public void setClientInfo(String name, String value) throws SQLClientInfoException { + conn.setClientInfo(name, value); + } + + @Override + public void setClientInfo(Properties properties) throws SQLClientInfoException { + conn.setClientInfo(properties); + } + + @Override + public String getClientInfo(String name) throws SQLException { 
+ return conn.getClientInfo(name); + } + + @Override + public Properties getClientInfo() throws SQLException { + return conn.getClientInfo(); + } + + @Override + public Array createArrayOf(String typeName, Object[] elements) throws SQLException { + return conn.createArrayOf(typeName, elements); + } + + @Override + public Struct createStruct(String typeName, Object[] attributes) throws SQLException { + return conn.createStruct(typeName, attributes); + } + + @Override + public void setSchema(String schema) throws SQLException { + conn.setSchema(schema); + } + + @Override + public String getSchema() throws SQLException { + return conn.getSchema(); + } + + @Override + public void abort(Executor executor) throws SQLException { + conn.abort(executor); + } + + @Override + public void setNetworkTimeout(Executor executor, int milliseconds) throws SQLException { + conn.setNetworkTimeout(executor, milliseconds); + } + + @Override + public int getNetworkTimeout() throws SQLException { + return conn.getNetworkTimeout(); + } +} diff --git a/src/main/java/org/onap/music/mdbc/MdbcPreparedStatement.java b/src/main/java/org/onap/music/mdbc/MdbcPreparedStatement.java new file mode 100644 index 0000000..e781b4b --- /dev/null +++ b/src/main/java/org/onap/music/mdbc/MdbcPreparedStatement.java @@ -0,0 +1,743 @@ +package org.onap.music.mdbc; + +import java.io.InputStream; +import java.io.Reader; +import java.math.BigDecimal; +import java.net.URL; +import java.sql.Array; +import java.sql.Blob; +import java.sql.CallableStatement; +import java.sql.Clob; +import java.sql.Connection; +import java.sql.Date; +import java.sql.NClob; +import java.sql.ParameterMetaData; +import java.sql.PreparedStatement; +import java.sql.Ref; +import java.sql.ResultSet; +import java.sql.ResultSetMetaData; +import java.sql.RowId; +import java.sql.SQLException; +import java.sql.SQLWarning; +import java.sql.SQLXML; +import java.sql.Statement; +import java.sql.Time; +import java.sql.Timestamp; +import java.util.Calendar; + +import org.apache.commons.lang3.StringUtils; + +import org.onap.music.logging.EELFLoggerDelegate; + +/** + * ProxyStatement is a proxy Statement that front ends Statements from the underlying JDBC driver. It passes all operations through, + * and invokes the MusicSqlManager when there is the possibility that database tables have been created or dropped. 
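+ * <p>
+ * Sketched usage (the SQL and table name are hypothetical; {@code conn} is assumed to be an MdbcConnection):
+ * <pre>
+ *     PreparedStatement ps = conn.prepareStatement("INSERT INTO persons (name) VALUES (?)");
+ *     ps.setString(1, "Alice");
+ *     ps.executeUpdate();                      // wrapped by preStatementHook/postStatementHook so MUSIC stays in sync
+ * </pre>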
+ * + * @author Robert Eby + */ +public class MdbcPreparedStatement extends MdbcStatement implements PreparedStatement { + private EELFLoggerDelegate logger = EELFLoggerDelegate.getLogger(MdbcPreparedStatement.class); + private static final String DATASTAX_PREFIX = "com.datastax.driver"; + + final String sql; // holds the sql statement if prepared statement + String[] params; // holds the parameters if prepared statement, indexing starts at 1 + + + public MdbcPreparedStatement(Statement stmt, MusicSqlManager m) { + super(stmt, m); + this.sql = null; + } + + public MdbcPreparedStatement(Statement stmt, String sql, MusicSqlManager mgr) { + super(stmt, sql, mgr); + this.sql = sql; + //indexing starts at 1 + params = new String[StringUtils.countMatches(sql, "?")+1]; + } + + @Override + public T unwrap(Class iface) throws SQLException { + return stmt.unwrap(iface); + } + + @Override + public boolean isWrapperFor(Class iface) throws SQLException { + return stmt.isWrapperFor(iface); + } + + @Override + public ResultSet executeQuery(String sql) throws SQLException { + logger.debug(EELFLoggerDelegate.applicationLogger,"executeQuery: "+sql); + ResultSet r = null; + try { + mgr.preStatementHook(sql); + r = stmt.executeQuery(sql); + mgr.postStatementHook(sql); + synchronizeTables(sql); + } catch (Exception e) { + String nm = e.getClass().getName(); + logger.error(EELFLoggerDelegate.errorLogger, "executeQuery: exception "+nm); + if (!nm.startsWith(DATASTAX_PREFIX)) + throw e; + } + return r; + } + + @Override + public int executeUpdate(String sql) throws SQLException { + logger.debug(EELFLoggerDelegate.applicationLogger,"executeUpdate: "+sql); + + int n = 0; + try { + mgr.preStatementHook(sql); + n = stmt.executeUpdate(sql); + mgr.postStatementHook(sql); + synchronizeTables(sql); + } catch (Exception e) { + String nm = e.getClass().getName(); + logger.error(EELFLoggerDelegate.errorLogger, "executeUpdate: exception "+nm+" "+e); + if (!nm.startsWith(DATASTAX_PREFIX)) + throw e; + } + return n; + } + + @Override + public void close() throws SQLException { + logger.debug(EELFLoggerDelegate.applicationLogger,"Statement close: "); + stmt.close(); + } + + @Override + public int getMaxFieldSize() throws SQLException { + logger.debug(EELFLoggerDelegate.applicationLogger,"getMaxFieldSize"); + return stmt.getMaxFieldSize(); + } + + @Override + public void setMaxFieldSize(int max) throws SQLException { + stmt.setMaxFieldSize(max); + } + + @Override + public int getMaxRows() throws SQLException { + return stmt.getMaxRows(); + } + + @Override + public void setMaxRows(int max) throws SQLException { + stmt.setMaxRows(max); + } + + @Override + public void setEscapeProcessing(boolean enable) throws SQLException { + stmt.setEscapeProcessing(enable); + } + + @Override + public int getQueryTimeout() throws SQLException { + return stmt.getQueryTimeout(); + } + + @Override + public void setQueryTimeout(int seconds) throws SQLException { + logger.debug(EELFLoggerDelegate.applicationLogger,"setQueryTimeout seconds "+ seconds); + stmt.setQueryTimeout(seconds); + } + + @Override + public void cancel() throws SQLException { + stmt.cancel(); + } + + @Override + public SQLWarning getWarnings() throws SQLException { + return stmt.getWarnings(); + } + + @Override + public void clearWarnings() throws SQLException { + stmt.clearWarnings(); + } + + @Override + public void setCursorName(String name) throws SQLException { + stmt.setCursorName(name); + } + + @Override + public boolean execute(String sql) throws SQLException { + 
logger.debug(EELFLoggerDelegate.applicationLogger,"execute: "+sql); + boolean b = false; + try { + mgr.preStatementHook(sql); + b = stmt.execute(sql); + mgr.postStatementHook(sql); + synchronizeTables(sql); + } catch (Exception e) { + String nm = e.getClass().getName(); + logger.error(EELFLoggerDelegate.errorLogger, "execute: exception "+nm+" "+e); + // Note: this seems to be the only call Camunda uses, so it is the only one I am fixing for now. + boolean ignore = nm.startsWith(DATASTAX_PREFIX); +// ignore |= (nm.startsWith("org.h2.jdbc.JdbcSQLException") && e.getMessage().contains("already exists")); + if (ignore) { + logger.warn("execute: exception (IGNORED) "+nm); + } else { + logger.error(EELFLoggerDelegate.errorLogger, " Exception "+nm+" "+e); + throw e; + } + } + return b; + } + + @Override + public ResultSet getResultSet() throws SQLException { + return stmt.getResultSet(); + } + + @Override + public int getUpdateCount() throws SQLException { + return stmt.getUpdateCount(); + } + + @Override + public boolean getMoreResults() throws SQLException { + return stmt.getMoreResults(); + } + + @Override + public void setFetchDirection(int direction) throws SQLException { + stmt.setFetchDirection(direction); + } + + @Override + public int getFetchDirection() throws SQLException { + return stmt.getFetchDirection(); + } + + @Override + public void setFetchSize(int rows) throws SQLException { + stmt.setFetchSize(rows); + } + + @Override + public int getFetchSize() throws SQLException { + return stmt.getFetchSize(); + } + + @Override + public int getResultSetConcurrency() throws SQLException { + return stmt.getResultSetConcurrency(); + } + + @Override + public int getResultSetType() throws SQLException { + return stmt.getResultSetType(); + } + + @Override + public void addBatch(String sql) throws SQLException { + stmt.addBatch(sql); + } + + @Override + public void clearBatch() throws SQLException { + stmt.clearBatch(); + } + + @Override + public int[] executeBatch() throws SQLException { + logger.debug(EELFLoggerDelegate.applicationLogger,"executeBatch: "); + int[] n = null; + try { + logger.debug(EELFLoggerDelegate.applicationLogger,"executeBatch() is not supported by MDBC; your results may be incorrect as a result."); + n = stmt.executeBatch(); + synchronizeTables(null); + } catch (Exception e) { + String nm = e.getClass().getName(); + logger.error(EELFLoggerDelegate.errorLogger,"executeBatch: exception "+nm); + if (!nm.startsWith(DATASTAX_PREFIX)) + throw e; + } + return n; + } + + @Override + public Connection getConnection() throws SQLException { + return stmt.getConnection(); + } + + @Override + public boolean getMoreResults(int current) throws SQLException { + return stmt.getMoreResults(current); + } + + @Override + public ResultSet getGeneratedKeys() throws SQLException { + return stmt.getGeneratedKeys(); + } + + @Override + public int executeUpdate(String sql, int autoGeneratedKeys) throws SQLException { + logger.debug(EELFLoggerDelegate.applicationLogger,"executeUpdate: "+sql); + int n = 0; + try { + mgr.preStatementHook(sql); + n = stmt.executeUpdate(sql, autoGeneratedKeys); + mgr.postStatementHook(sql); + synchronizeTables(sql); + } catch (Exception e) { + String nm = e.getClass().getName(); + logger.error(EELFLoggerDelegate.errorLogger,"executeUpdate: exception "+nm); + if (!nm.startsWith(DATASTAX_PREFIX)) + throw e; + } + return n; + } + + @Override + public int executeUpdate(String sql, int[] columnIndexes) throws SQLException { + 
logger.debug(EELFLoggerDelegate.applicationLogger,"executeUpdate: "+sql); + int n = 0; + try { + mgr.preStatementHook(sql); + n = stmt.executeUpdate(sql, columnIndexes); + mgr.postStatementHook(sql); + synchronizeTables(sql); + } catch (Exception e) { + String nm = e.getClass().getName(); + logger.error(EELFLoggerDelegate.errorLogger,"executeUpdate: exception "+nm); + if (!nm.startsWith(DATASTAX_PREFIX)) + throw e; + } + return n; + } + + @Override + public int executeUpdate(String sql, String[] columnNames) throws SQLException { + logger.debug(EELFLoggerDelegate.applicationLogger,"executeUpdate: "+sql); + int n = 0; + try { + mgr.preStatementHook(sql); + n = stmt.executeUpdate(sql, columnNames); + mgr.postStatementHook(sql); + synchronizeTables(sql); + } catch (Exception e) { + String nm = e.getClass().getName(); + logger.error(EELFLoggerDelegate.errorLogger,"executeUpdate: exception "+nm); + if (!nm.startsWith(DATASTAX_PREFIX)) + throw e; + } + return n; + } + + @Override + public boolean execute(String sql, int autoGeneratedKeys) throws SQLException { + logger.debug(EELFLoggerDelegate.applicationLogger,"execute: "+sql); + boolean b = false; + try { + mgr.preStatementHook(sql); + b = stmt.execute(sql, autoGeneratedKeys); + mgr.postStatementHook(sql); + synchronizeTables(sql); + } catch (Exception e) { + String nm = e.getClass().getName(); + logger.error(EELFLoggerDelegate.errorLogger,"execute: exception "+nm); + if (!nm.startsWith(DATASTAX_PREFIX)) + throw e; + } + return b; + } + + @Override + public boolean execute(String sql, int[] columnIndexes) throws SQLException { + logger.debug(EELFLoggerDelegate.applicationLogger,"execute: "+sql); + boolean b = false; + try { + mgr.preStatementHook(sql); + b = stmt.execute(sql, columnIndexes); + mgr.postStatementHook(sql); + synchronizeTables(sql); + } catch (Exception e) { + String nm = e.getClass().getName(); + logger.error(EELFLoggerDelegate.errorLogger,"execute: exception "+nm); + if (!nm.startsWith(DATASTAX_PREFIX)) + throw e; + } + return b; + } + + @Override + public boolean execute(String sql, String[] columnNames) throws SQLException { + logger.debug(EELFLoggerDelegate.applicationLogger,"execute: "+sql); + boolean b = false; + try { + mgr.preStatementHook(sql); + b = stmt.execute(sql, columnNames); + mgr.postStatementHook(sql); + synchronizeTables(sql); + } catch (Exception e) { + String nm = e.getClass().getName(); + logger.error(EELFLoggerDelegate.errorLogger,"execute: exception "+nm); + if (!nm.startsWith(DATASTAX_PREFIX)) + throw e; + } + return b; + } + + @Override + public int getResultSetHoldability() throws SQLException { + return stmt.getResultSetHoldability(); + } + + @Override + public boolean isClosed() throws SQLException { + return stmt.isClosed(); + } + + @Override + public void setPoolable(boolean poolable) throws SQLException { + stmt.setPoolable(poolable); + } + + @Override + public boolean isPoolable() throws SQLException { + return stmt.isPoolable(); + } + + @Override + public void closeOnCompletion() throws SQLException { + stmt.closeOnCompletion(); + } + + @Override + public boolean isCloseOnCompletion() throws SQLException { + return stmt.isCloseOnCompletion(); + } + + @Override + public ResultSet executeQuery() throws SQLException { + logger.debug(EELFLoggerDelegate.applicationLogger,"executeQuery: "+sql); + ResultSet r = null; + try { + mgr.preStatementHook(sql); + r = ((PreparedStatement)stmt).executeQuery();; + mgr.postStatementHook(sql); + synchronizeTables(sql); + } catch (Exception e) { + 
e.printStackTrace(); + String nm = e.getClass().getName(); + logger.error(EELFLoggerDelegate.errorLogger,"executeQuery: exception "+nm); + if (!nm.startsWith(DATASTAX_PREFIX)) + throw e; + } + return r; + } + + @Override + public int executeUpdate() throws SQLException { + logger.debug(EELFLoggerDelegate.applicationLogger,"executeUpdate: "+sql); + int n = 0; + try { + mgr.preStatementHook(sql); + n = ((PreparedStatement)stmt).executeUpdate(); + mgr.postStatementHook(sql); + synchronizeTables(sql); + } catch (Exception e) { + e.printStackTrace(); + String nm = e.getClass().getName(); + logger.error(EELFLoggerDelegate.errorLogger,"executeUpdate: exception "+nm); + if (!nm.startsWith(DATASTAX_PREFIX)) + throw e; + } + return n; + } + + @Override + public void setNull(int parameterIndex, int sqlType) throws SQLException { + ((PreparedStatement)stmt).setNull(parameterIndex, sqlType); + } + + @Override + public void setBoolean(int parameterIndex, boolean x) throws SQLException { + ((PreparedStatement)stmt).setBoolean(parameterIndex, x); + } + + @Override + public void setByte(int parameterIndex, byte x) throws SQLException { + ((PreparedStatement)stmt).setByte(parameterIndex, x); + } + + @Override + public void setShort(int parameterIndex, short x) throws SQLException { + ((PreparedStatement)stmt).setShort(parameterIndex, x); + } + + @Override + public void setInt(int parameterIndex, int x) throws SQLException { + ((PreparedStatement)stmt).setInt(parameterIndex, x); + } + + @Override + public void setLong(int parameterIndex, long x) throws SQLException { + ((PreparedStatement)stmt).setLong(parameterIndex, x); + } + + @Override + public void setFloat(int parameterIndex, float x) throws SQLException { + ((PreparedStatement)stmt).setFloat(parameterIndex, x); + } + + @Override + public void setDouble(int parameterIndex, double x) throws SQLException { + ((PreparedStatement)stmt).setDouble(parameterIndex, x); + } + + @Override + public void setBigDecimal(int parameterIndex, BigDecimal x) throws SQLException { + ((PreparedStatement)stmt).setBigDecimal(parameterIndex, x); + } + + @Override + public void setString(int parameterIndex, String x) throws SQLException { + ((PreparedStatement)stmt).setString(parameterIndex, x); + params[parameterIndex] = x; + } + + @Override + public void setBytes(int parameterIndex, byte[] x) throws SQLException { + ((PreparedStatement)stmt).setBytes(parameterIndex, x); + } + + @Override + public void setDate(int parameterIndex, Date x) throws SQLException { + ((PreparedStatement)stmt).setDate(parameterIndex, x); + } + + @Override + public void setTime(int parameterIndex, Time x) throws SQLException { + ((PreparedStatement)stmt).setTime(parameterIndex, x); + } + + @Override + public void setTimestamp(int parameterIndex, Timestamp x) throws SQLException { + ((PreparedStatement)stmt).setTimestamp(parameterIndex, x); + } + + @Override + public void setAsciiStream(int parameterIndex, InputStream x, int length) throws SQLException { + ((PreparedStatement)stmt).setAsciiStream(parameterIndex, x, length); + } + + @SuppressWarnings("deprecation") + @Override + public void setUnicodeStream(int parameterIndex, InputStream x, int length) throws SQLException { + ((PreparedStatement)stmt).setUnicodeStream(parameterIndex, x, length); + } + + @Override + public void setBinaryStream(int parameterIndex, InputStream x, int length) throws SQLException { + ((PreparedStatement)stmt).setBinaryStream(parameterIndex, x, length); + } + + @Override + public void clearParameters() throws SQLException { 
+ ((PreparedStatement)stmt).clearParameters(); + } + + @Override + public void setObject(int parameterIndex, Object x, int targetSqlType) throws SQLException { + ((PreparedStatement)stmt).setObject(parameterIndex, x, targetSqlType); + } + + @Override + public void setObject(int parameterIndex, Object x) throws SQLException { + ((PreparedStatement)stmt).setObject(parameterIndex, x); + } + + @Override + public boolean execute() throws SQLException { + logger.debug(EELFLoggerDelegate.applicationLogger,"execute: "+sql); + boolean b = false; + try { + mgr.preStatementHook(sql); + b = ((PreparedStatement)stmt).execute(); + mgr.postStatementHook(sql); + synchronizeTables(sql); + } catch (Exception e) { + e.printStackTrace(); + String nm = e.getClass().getName(); + // Note: this seems to be the only call Camunda uses, so it is the only one I am fixing for now. + boolean ignore = nm.startsWith(DATASTAX_PREFIX); +// ignore |= (nm.startsWith("org.h2.jdbc.JdbcSQLException") && e.getMessage().contains("already exists")); + if (ignore) { + logger.warn("execute: exception (IGNORED) "+nm); + } else { + logger.error(EELFLoggerDelegate.errorLogger,"execute: exception "+nm); + throw e; + } + } + return b; + } + + @Override + public void addBatch() throws SQLException { + ((PreparedStatement)stmt).addBatch(); + } + + @Override + public void setCharacterStream(int parameterIndex, Reader reader, int length) throws SQLException { + ((PreparedStatement)stmt).setCharacterStream(parameterIndex, reader, length); + } + + @Override + public void setRef(int parameterIndex, Ref x) throws SQLException { + ((PreparedStatement)stmt).setRef(parameterIndex, x); + } + + @Override + public void setBlob(int parameterIndex, Blob x) throws SQLException { + ((PreparedStatement)stmt).setBlob(parameterIndex, x); + } + + @Override + public void setClob(int parameterIndex, Clob x) throws SQLException { + ((PreparedStatement)stmt).setClob(parameterIndex, x); + } + + @Override + public void setArray(int parameterIndex, Array x) throws SQLException { + ((PreparedStatement)stmt).setArray(parameterIndex, x); + } + + @Override + public ResultSetMetaData getMetaData() throws SQLException { + return ((PreparedStatement)stmt).getMetaData(); + } + + @Override + public void setDate(int parameterIndex, Date x, Calendar cal) throws SQLException { + ((PreparedStatement)stmt).setDate(parameterIndex, x, cal); + } + + @Override + public void setTime(int parameterIndex, Time x, Calendar cal) throws SQLException { + ((PreparedStatement)stmt).setTime(parameterIndex, x, cal); + } + + @Override + public void setTimestamp(int parameterIndex, Timestamp x, Calendar cal) throws SQLException { + ((CallableStatement)stmt).setTimestamp(parameterIndex, x, cal); + } + + @Override + public void setNull(int parameterIndex, int sqlType, String typeName) throws SQLException { + ((CallableStatement)stmt).setNull(parameterIndex, sqlType, typeName); + } + + @Override + public void setURL(int parameterIndex, URL x) throws SQLException { + ((CallableStatement)stmt).setURL(parameterIndex, x); + } + + @Override + public ParameterMetaData getParameterMetaData() throws SQLException { + return ((CallableStatement)stmt).getParameterMetaData(); + } + + @Override + public void setRowId(int parameterIndex, RowId x) throws SQLException { + ((CallableStatement)stmt).setRowId(parameterIndex, x); + } + + @Override + public void setNString(int parameterIndex, String value) throws SQLException { + ((CallableStatement)stmt).setNString(parameterIndex, value); + } + + @Override + public void 
setNCharacterStream(int parameterIndex, Reader value, long length) throws SQLException { + ((CallableStatement)stmt).setNCharacterStream(parameterIndex, value, length); + } + + @Override + public void setNClob(int parameterIndex, NClob value) throws SQLException { + ((CallableStatement)stmt).setNClob(parameterIndex, value); + } + + @Override + public void setClob(int parameterIndex, Reader reader, long length) throws SQLException { + ((CallableStatement)stmt).setClob(parameterIndex, reader, length); + } + + @Override + public void setBlob(int parameterIndex, InputStream inputStream, long length) throws SQLException { + ((CallableStatement)stmt).setBlob(parameterIndex, inputStream, length); + } + + @Override + public void setNClob(int parameterIndex, Reader reader, long length) throws SQLException { + ((CallableStatement)stmt).setNClob(parameterIndex, reader, length); + } + + @Override + public void setSQLXML(int parameterIndex, SQLXML xmlObject) throws SQLException { + ((CallableStatement)stmt).setSQLXML(parameterIndex, xmlObject); + } + + @Override + public void setObject(int parameterIndex, Object x, int targetSqlType, int scaleOrLength) throws SQLException { + ((CallableStatement)stmt).setObject(parameterIndex, x, targetSqlType, scaleOrLength); + } + + @Override + public void setAsciiStream(int parameterIndex, InputStream x, long length) throws SQLException { + ((CallableStatement)stmt).setAsciiStream(parameterIndex, x, length); + } + + @Override + public void setBinaryStream(int parameterIndex, InputStream x, long length) throws SQLException { + ((CallableStatement)stmt).setBinaryStream(parameterIndex, x, length); + } + + @Override + public void setCharacterStream(int parameterIndex, Reader reader, long length) throws SQLException { + ((CallableStatement)stmt).setCharacterStream(parameterIndex, reader, length); + } + + @Override + public void setAsciiStream(int parameterIndex, InputStream x) throws SQLException { + ((CallableStatement)stmt).setAsciiStream(parameterIndex, x); + } + + @Override + public void setBinaryStream(int parameterIndex, InputStream x) throws SQLException { + ((CallableStatement)stmt).setBinaryStream(parameterIndex, x); + } + + @Override + public void setCharacterStream(int parameterIndex, Reader reader) throws SQLException { + ((CallableStatement)stmt).setCharacterStream(parameterIndex, reader); + } + + @Override + public void setNCharacterStream(int parameterIndex, Reader value) throws SQLException { + ((CallableStatement)stmt).setNCharacterStream(parameterIndex, value); + } + + @Override + public void setClob(int parameterIndex, Reader reader) throws SQLException { + ((CallableStatement)stmt).setClob(parameterIndex, reader); + } + + @Override + public void setBlob(int parameterIndex, InputStream inputStream) throws SQLException { + ((CallableStatement)stmt).setBlob(parameterIndex, inputStream); + } + + @Override + public void setNClob(int parameterIndex, Reader reader) throws SQLException { + ((CallableStatement)stmt).setNClob(parameterIndex, reader); + } + +} diff --git a/src/main/java/org/onap/music/mdbc/MdbcServer.java b/src/main/java/org/onap/music/mdbc/MdbcServer.java new file mode 100644 index 0000000..4f83a54 --- /dev/null +++ b/src/main/java/org/onap/music/mdbc/MdbcServer.java @@ -0,0 +1,162 @@ +package org.onap.music.mdbc; + +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. 
+ * The ASF licenses this file to you under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +import org.onap.music.mdbc.configurations.NodeConfiguration; +import org.apache.calcite.avatica.remote.Driver.Serialization; +import org.apache.calcite.avatica.remote.LocalService; +import org.apache.calcite.avatica.server.HttpServer; +import org.apache.calcite.avatica.util.Unsafe; + +import org.onap.music.logging.EELFLoggerDelegate; +import com.beust.jcommander.IStringConverter; +import com.beust.jcommander.JCommander; +import com.beust.jcommander.Parameter; + +import java.util.Locale; +import java.util.Properties; + +public class MdbcServer { + public static final EELFLoggerDelegate LOG = EELFLoggerDelegate.getLogger(MdbcStatement.class); + + @Parameter(names = { "-c", "--configuration" }, required = true, + description = "This is the file that contains the ranges that are assigned to this MDBC server") + private String configurationFile; + + @Parameter(names = { "-u", "--url" }, required = true, + description = "JDBC driver url for the server") + private String url; + + @Parameter(names = { "-p", "--port" }, required = true, + description = "Port the server should bind") + private int port; + + @Parameter(names = { "-s", "--user" }, required = true, + description = "Mysql usr") + private String user; + + @Parameter(names = { "-a", "--pass" }, required = true, + description = "Mysql password") + private String password; + + final private Serialization serialization = Serialization.PROTOBUF; + + @Parameter(names = { "-h", "-help", "--help" }, help = true, + description = "Print the help message") + private boolean help = false; + + private NodeConfiguration config; + private HttpServer server; + + public void start() { + if (null != server) { + LOG.error("The server was already started"); + Unsafe.systemExit(ExitCodes.ALREADY_STARTED.ordinal()); + return; + } + + try { + config = NodeConfiguration.readJsonFromFile(configurationFile); + //\TODO Add configuration file with Server Info + Properties connectionProps = new Properties(); + connectionProps.put("user", user); + connectionProps.put("password", password); + MdbcServerLogic meta = new MdbcServerLogic(url,connectionProps,config); + LocalService service = new LocalService(meta); + + // Construct the server + this.server = new HttpServer.Builder<>() + .withHandler(service, serialization) + .withPort(port) + .build(); + + // Then start it + server.start(); + + LOG.info("Started Avatica server on port {} with serialization {}", server.getPort(), + serialization); + } catch (Exception e) { + LOG.error("Failed to start Avatica server", e); + Unsafe.systemExit(ExitCodes.START_FAILED.ordinal()); + } + } + + public void stop() { + if (null != server) { + server.stop(); + server = null; + } + } + + public void join() throws InterruptedException { + server.join(); + } + + public static void main(String[] args) { + final MdbcServer server = new MdbcServer(); + @SuppressWarnings("deprecation") + JCommander jc = new JCommander(server, args); + if (server.help) { + 
jc.usage(); + Unsafe.systemExit(ExitCodes.USAGE.ordinal()); + return; + } + + server.start(); + + // Try to clean up when the server is stopped. + Runtime.getRuntime().addShutdownHook( + new Thread(new Runnable() { + @Override public void run() { + LOG.info("Stopping server"); + server.stop(); + LOG.info("Server stopped"); + } + })); + + try { + server.join(); + } catch (InterruptedException e) { + // Reset interruption + Thread.currentThread().interrupt(); + // And exit now. + return; + } + } + + /** + * Converter from String to Serialization. Must be public for JCommander. + */ + public static class SerializationConverter implements IStringConverter { + @Override public Serialization convert(String value) { + return Serialization.valueOf(value.toUpperCase(Locale.ROOT)); + } + } + + /** + * Codes for exit conditions + */ + private enum ExitCodes { + NORMAL, + ALREADY_STARTED, // 1 + START_FAILED, // 2 + USAGE; // 3 + } +} + +// End StandaloneServer.java diff --git a/src/main/java/org/onap/music/mdbc/MdbcServerLogic.java b/src/main/java/org/onap/music/mdbc/MdbcServerLogic.java new file mode 100644 index 0000000..a1984c2 --- /dev/null +++ b/src/main/java/org/onap/music/mdbc/MdbcServerLogic.java @@ -0,0 +1,312 @@ +package org.onap.music.mdbc; + +import java.sql.Connection; +import java.sql.SQLException; +import java.util.List; +import java.util.Map; +import java.util.Properties; +import java.util.concurrent.ConcurrentMap; +import java.util.concurrent.TimeUnit; + +import org.onap.music.exceptions.MDBCServiceException; +import org.onap.music.mdbc.configurations.NodeConfiguration; +import com.google.common.cache.Cache; +import com.google.common.cache.CacheBuilder; +import com.google.common.cache.RemovalListener; +import com.google.common.cache.RemovalNotification; +import org.apache.calcite.avatica.MissingResultsException; +import org.apache.calcite.avatica.NoSuchStatementException; +import org.apache.calcite.avatica.jdbc.JdbcMeta; +import org.apache.calcite.avatica.remote.TypedValue; + +import org.onap.music.logging.EELFLoggerDelegate; +import org.onap.music.logging.format.AppMessages; +import org.onap.music.logging.format.ErrorSeverity; +import org.onap.music.logging.format.ErrorTypes; + +public class MdbcServerLogic extends JdbcMeta{ + + private static EELFLoggerDelegate logger = EELFLoggerDelegate.getLogger(MdbcServerLogic.class); + + StateManager manager; + DatabasePartition ranges; + String name; + String sqlDatabase; + + //TODO: Delete this properties after debugging + private final Properties info; + private final Cache connectionCache; + + public MdbcServerLogic(String Url, Properties info,NodeConfiguration config) throws SQLException, MDBCServiceException { + super(Url,info); + this.ranges = config.partition; + this.name = config.nodeName; + this.sqlDatabase = config.sqlDatabaseName; + this.manager = new StateManager(Url,info,this.ranges,this.sqlDatabase); + this.info = info; + int concurrencyLevel = Integer.parseInt( + info.getProperty(ConnectionCacheSettings.CONCURRENCY_LEVEL.key(), + ConnectionCacheSettings.CONCURRENCY_LEVEL.defaultValue())); + int initialCapacity = Integer.parseInt( + info.getProperty(ConnectionCacheSettings.INITIAL_CAPACITY.key(), + ConnectionCacheSettings.INITIAL_CAPACITY.defaultValue())); + long maxCapacity = Long.parseLong( + info.getProperty(ConnectionCacheSettings.MAX_CAPACITY.key(), + ConnectionCacheSettings.MAX_CAPACITY.defaultValue())); + long connectionExpiryDuration = Long.parseLong( + info.getProperty(ConnectionCacheSettings.EXPIRY_DURATION.key(), + 
ConnectionCacheSettings.EXPIRY_DURATION.defaultValue())); + TimeUnit connectionExpiryUnit = TimeUnit.valueOf( + info.getProperty(ConnectionCacheSettings.EXPIRY_UNIT.key(), + ConnectionCacheSettings.EXPIRY_UNIT.defaultValue())); + this.connectionCache = CacheBuilder.newBuilder() + .concurrencyLevel(concurrencyLevel) + .initialCapacity(initialCapacity) + .maximumSize(maxCapacity) + .expireAfterAccess(connectionExpiryDuration, connectionExpiryUnit) + .removalListener(new ConnectionExpiryHandler()) + .build(); + } + + @Override + protected Connection getConnection(String id) throws SQLException { + if (id == null) { + throw new NullPointerException("Connection id is null"); + } + //\TODO: don't use connectionCache, use this.manager internal state + Connection conn = connectionCache.getIfPresent(id); + if (conn == null) { + this.manager.CloseConnection(id); + logger.error(EELFLoggerDelegate.errorLogger,"Connection not found: invalid id, closed, or expired: " + + id); + throw new RuntimeException(" Connection not found: invalid id, closed, or expired: " + id); + } + return conn; + } + + @Override + public void openConnection(ConnectionHandle ch, Map information) { + Properties fullInfo = new Properties(); + fullInfo.putAll(this.info); + if (information != null) { + fullInfo.putAll(information); + } + + final ConcurrentMap cacheAsMap = this.connectionCache.asMap(); + if (cacheAsMap.containsKey(ch.id)) { + throw new RuntimeException("Connection already exists: " + ch.id); + } + // Avoid global synchronization of connection opening + try { + this.manager.OpenConnection(ch.id, info); + Connection conn = this.manager.GetConnection(ch.id); + if(conn == null) { + logger.error(EELFLoggerDelegate.errorLogger, "Connection created was null"); + throw new RuntimeException("Connection created was null for connection: " + ch.id); + } + Connection loadedConn = cacheAsMap.putIfAbsent(ch.id, conn); + logger.info("connection created with id {}", ch.id); + // Race condition: someone beat us to storing the connection in the cache. 
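+ // (Clarifying note: a non-null value from putIfAbsent means another thread already registered a
+ // connection for this id, so the connection created here is closed and the duplicate open is rejected.)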
+ if (loadedConn != null) { + //\TODO check if we added an additional race condition for this + this.manager.CloseConnection(ch.id); + conn.close(); + throw new RuntimeException("Connection already exists: " + ch.id); + } + } catch (SQLException e) { + logger.error(EELFLoggerDelegate.errorLogger, e.getMessage(), AppMessages.QUERYERROR, ErrorTypes.QUERYERROR, ErrorSeverity.CRITICAL); + throw new RuntimeException(e); + } + } + + @Override + public void closeConnection(ConnectionHandle ch) { + //\TODO use state connection instead + Connection conn = connectionCache.getIfPresent(ch.id); + if (conn == null) { + logger.debug("client requested close unknown connection {}", ch); + return; + } + logger.trace("closing connection {}", ch); + try { + conn.close(); + } catch (SQLException e) { + logger.error(EELFLoggerDelegate.errorLogger, e.getMessage(), AppMessages.QUERYERROR, ErrorTypes.QUERYERROR, ErrorSeverity.CRITICAL); + throw new RuntimeException(e.getMessage()); + } finally { + connectionCache.invalidate(ch.id); + this.manager.CloseConnection(ch.id); + logger.info("connection closed with id {}", ch.id); + } + } + + @Override + public void commit(ConnectionHandle ch) { + try { + super.commit(ch); + logger.debug("connection commited with id {}", ch.id); + } catch (Exception err ) { + logger.error(EELFLoggerDelegate.errorLogger, err.getMessage(), AppMessages.QUERYERROR, ErrorTypes.QUERYERROR, ErrorSeverity.CRITICAL); + throw(err); + } + } + + //\TODO All the following functions can be deleted + // Added for two reasons: debugging and logging + @Override + public StatementHandle prepare(ConnectionHandle ch, String sql, long maxRowCount) { + StatementHandle h; + try { + h = super.prepare(ch, sql, maxRowCount); + logger.debug("prepared statement {}", h); + } catch (Exception e ) { + logger.error(EELFLoggerDelegate.errorLogger, e.getMessage(), AppMessages.QUERYERROR, ErrorTypes.QUERYERROR, ErrorSeverity.CRITICAL); + throw(e); + } + return h; + } + + @Override + public ExecuteResult prepareAndExecute(StatementHandle h, String sql, long maxRowCount, int maxRowsInFirstFrame, + PrepareCallback callback) throws NoSuchStatementException { + ExecuteResult e; + try { + e = super.prepareAndExecute(h, sql, maxRowCount,maxRowsInFirstFrame,callback); + logger.debug("prepare and execute statement {}", h); + } catch (Exception err ) { + logger.error(EELFLoggerDelegate.errorLogger, err.getMessage(), AppMessages.QUERYERROR, ErrorTypes.QUERYERROR, ErrorSeverity.CRITICAL); + throw(err); + } + return e; + } + + @Override + public ExecuteBatchResult prepareAndExecuteBatch(StatementHandle h, List sqlCommands) + throws NoSuchStatementException { + ExecuteBatchResult e; + try { + e = super.prepareAndExecuteBatch(h, sqlCommands); + logger.debug("prepare and execute batch statement {}", h); + } catch (Exception err ) { + logger.error(EELFLoggerDelegate.errorLogger, err.getMessage(), AppMessages.QUERYERROR, ErrorTypes.QUERYERROR, ErrorSeverity.CRITICAL); + throw(err); + } + return e; + } + + @Override + public ExecuteBatchResult executeBatch(StatementHandle h, List> parameterValues) + throws NoSuchStatementException { + ExecuteBatchResult e; + try { + e = super.executeBatch(h, parameterValues); + logger.debug("execute batch statement {}", h); + } catch (Exception err ) { + logger.error(EELFLoggerDelegate.errorLogger, err.getMessage(), AppMessages.QUERYERROR, ErrorTypes.QUERYERROR, ErrorSeverity.CRITICAL); + throw(err); + } + return e; + } + + @Override + public Frame fetch(StatementHandle h, long offset, int fetchMaxRowCount) 
+ throws NoSuchStatementException, MissingResultsException { + Frame f; + try { + f = super.fetch(h, offset, fetchMaxRowCount); + logger.debug("fetch statement {}", h); + } catch (Exception err ) { + logger.error(EELFLoggerDelegate.errorLogger, err.getMessage(), AppMessages.QUERYERROR, ErrorTypes.QUERYERROR, ErrorSeverity.CRITICAL); + throw(err); + } + return f; + } + + @Override + public ExecuteResult execute(StatementHandle h, List parameterValues, long maxRowCount) + throws NoSuchStatementException { + ExecuteResult e; + try { + e = super.execute(h, parameterValues, maxRowCount); + logger.debug("fetch statement {}", h); + } catch (Exception err ) { + logger.error(EELFLoggerDelegate.errorLogger, err.getMessage(), AppMessages.QUERYERROR, ErrorTypes.QUERYERROR, ErrorSeverity.CRITICAL); + throw(err); + } + return e; + } + + @Override + public ExecuteResult execute(StatementHandle h, List parameterValues, int maxRowsInFirstFrame) + throws NoSuchStatementException { + ExecuteResult e; + try { + e = super.execute(h, parameterValues, maxRowsInFirstFrame); + logger.debug("fetch statement {}", h); + } catch (Exception err ) { + logger.error(EELFLoggerDelegate.errorLogger, err.getMessage(), AppMessages.QUERYERROR, ErrorTypes.QUERYERROR, ErrorSeverity.CRITICAL); + throw(err); + } + return e; + } + + @Override + public StatementHandle createStatement(ConnectionHandle ch) { + StatementHandle h; + try { + h = super.createStatement(ch); + logger.debug("create statement {}", h); + } catch (Exception err ) { + logger.error(EELFLoggerDelegate.errorLogger, err.getMessage(), AppMessages.QUERYERROR, ErrorTypes.QUERYERROR, ErrorSeverity.CRITICAL); + throw(err); + } + return h; + } + + @Override + public void closeStatement(StatementHandle h) { + try { + super.closeStatement(h); + logger.debug("statement closed {}", h); + } catch (Exception err ) { + logger.error(EELFLoggerDelegate.errorLogger, err.getMessage(), AppMessages.QUERYERROR, ErrorTypes.QUERYERROR, ErrorSeverity.CRITICAL); + throw(err); + } + } + + + + + + + + @Override + public void rollback(ConnectionHandle ch) { + try { + super.rollback(ch); + logger.debug("connection rollback with id {}", ch.id); + } catch (Exception err ) { + logger.error(EELFLoggerDelegate.errorLogger, err.getMessage(), AppMessages.QUERYERROR, ErrorTypes.QUERYERROR, ErrorSeverity.CRITICAL); + throw(err); + } + } + + private class ConnectionExpiryHandler + implements RemovalListener { + + public void onRemoval(RemovalNotification notification) { + String connectionId = notification.getKey(); + Connection doomed = notification.getValue(); + logger.debug("Expiring connection {} because {}", connectionId, notification.getCause()); + try { + if (doomed != null) { + doomed.close(); + } + } catch (Throwable t) { + logger.warn("Exception thrown while expiring connection {}", connectionId, t); + } + } + } +} + + diff --git a/src/main/java/org/onap/music/mdbc/MdbcStatement.java b/src/main/java/org/onap/music/mdbc/MdbcStatement.java new file mode 100644 index 0000000..93fe80a --- /dev/null +++ b/src/main/java/org/onap/music/mdbc/MdbcStatement.java @@ -0,0 +1,416 @@ +package org.onap.music.mdbc; + +import java.sql.Connection; +import java.sql.ResultSet; +import java.sql.SQLException; +import java.sql.SQLWarning; +import java.sql.Statement; + +import org.onap.music.exceptions.QueryException; +import org.onap.music.logging.EELFLoggerDelegate; +import org.onap.music.logging.format.AppMessages; +import org.onap.music.logging.format.ErrorSeverity; +import 
org.onap.music.logging.format.ErrorTypes; + +/** + * ProxyStatement is a proxy Statement that front ends Statements from the underlying JDBC driver. It passes all operations through, + * and invokes the MusicSqlManager when there is the possibility that database tables have been created or dropped. + * + * @author Robert Eby + */ +public class MdbcStatement implements Statement { + private EELFLoggerDelegate logger = EELFLoggerDelegate.getLogger(MdbcStatement.class); + private static final String DATASTAX_PREFIX = "com.datastax.driver"; + + final Statement stmt; // the Statement that we are proxying + final MusicSqlManager mgr; + //\TODO We may need to all pass the connection object to support autocommit + + public MdbcStatement(Statement s, MusicSqlManager m) { + this.stmt = s; + this.mgr = m; + } + + public MdbcStatement(Statement stmt, String sql, MusicSqlManager mgr) { + //\TODO why there is a constructor with a sql parameter in a not PreparedStatement + this.stmt = stmt; + this.mgr = mgr; + } + + @Override + public T unwrap(Class iface) throws SQLException { + logger.error(EELFLoggerDelegate.errorLogger, "proxystatement unwrap: " + iface.getName()); + return stmt.unwrap(iface); + } + + @Override + public boolean isWrapperFor(Class iface) throws SQLException { + logger.error(EELFLoggerDelegate.errorLogger, "proxystatement isWrapperFor: " + iface.getName()); + return stmt.isWrapperFor(iface); + } + + @Override + public ResultSet executeQuery(String sql) throws SQLException { + logger.debug(EELFLoggerDelegate.applicationLogger,"executeQuery: "+sql); + ResultSet r = null; + try { + mgr.preStatementHook(sql); + r = stmt.executeQuery(sql); + mgr.postStatementHook(sql); + synchronizeTables(sql); + } catch (Exception e) { + String nm = e.getClass().getName(); + logger.error(EELFLoggerDelegate.errorLogger, "executeQuery: exception "+nm); + if (!nm.startsWith(DATASTAX_PREFIX)) + throw e; + } + return r; + } + + @Override + public int executeUpdate(String sql) throws SQLException { + logger.debug(EELFLoggerDelegate.applicationLogger,"executeUpdate: "+sql); + + int n = 0; + try { + mgr.preStatementHook(sql); + n = stmt.executeUpdate(sql); + mgr.postStatementHook(sql); + synchronizeTables(sql); + } catch (Exception e) { + String nm = e.getClass().getName(); + logger.error(EELFLoggerDelegate.errorLogger, "executeUpdate: exception "+nm+" "+e); + if (!nm.startsWith(DATASTAX_PREFIX)) + throw e; + } + return n; + } + + @Override + public void close() throws SQLException { + logger.debug(EELFLoggerDelegate.applicationLogger,"Statement close: "); + stmt.close(); + } + + @Override + public int getMaxFieldSize() throws SQLException { + logger.debug(EELFLoggerDelegate.applicationLogger,"getMaxFieldSize"); + return stmt.getMaxFieldSize(); + } + + @Override + public void setMaxFieldSize(int max) throws SQLException { + stmt.setMaxFieldSize(max); + } + + @Override + public int getMaxRows() throws SQLException { + return stmt.getMaxRows(); + } + + @Override + public void setMaxRows(int max) throws SQLException { + stmt.setMaxRows(max); + } + + @Override + public void setEscapeProcessing(boolean enable) throws SQLException { + stmt.setEscapeProcessing(enable); + } + + @Override + public int getQueryTimeout() throws SQLException { + return stmt.getQueryTimeout(); + } + + @Override + public void setQueryTimeout(int seconds) throws SQLException { + //\TODO: we also need to implement a higher level timeout in MDBC + logger.debug(EELFLoggerDelegate.applicationLogger,"setQueryTimeout seconds "+ seconds); + 
stmt.setQueryTimeout(seconds); + } + + @Override + public void cancel() throws SQLException { + stmt.cancel(); + } + + @Override + public SQLWarning getWarnings() throws SQLException { + return stmt.getWarnings(); + } + + @Override + public void clearWarnings() throws SQLException { + stmt.clearWarnings(); + } + + @Override + public void setCursorName(String name) throws SQLException { + stmt.setCursorName(name); + } + + @Override + public boolean execute(String sql) throws SQLException { + logger.debug(EELFLoggerDelegate.applicationLogger,"execute: "+sql); + boolean b = false; + //\TODO Add the result of the postStatementHook to b + try { + mgr.preStatementHook(sql); + b = stmt.execute(sql); + mgr.postStatementHook(sql); + synchronizeTables(sql); + } catch (Exception e) { + String nm = e.getClass().getName(); + logger.error(EELFLoggerDelegate.errorLogger, "execute: exception "+nm+" "+e); + // Note: this seems to be the only call Camunda uses, so it is the only one I am fixing for now. + boolean ignore = nm.startsWith(DATASTAX_PREFIX); +// ignore |= (nm.startsWith("org.h2.jdbc.JdbcSQLException") && e.getMessage().contains("already exists")); + if (ignore) { + logger.warn("execute: exception (IGNORED) "+nm); + } else { + logger.error(EELFLoggerDelegate.errorLogger, " Exception "+nm+" "+e); + throw e; + } + } + return b; + } + + @Override + public ResultSet getResultSet() throws SQLException { + return stmt.getResultSet(); + } + + @Override + public int getUpdateCount() throws SQLException { + return stmt.getUpdateCount(); + } + + @Override + public boolean getMoreResults() throws SQLException { + return stmt.getMoreResults(); + } + + @Override + public void setFetchDirection(int direction) throws SQLException { + stmt.setFetchDirection(direction); + } + + @Override + public int getFetchDirection() throws SQLException { + return stmt.getFetchDirection(); + } + + @Override + public void setFetchSize(int rows) throws SQLException { + stmt.setFetchSize(rows); + } + + @Override + public int getFetchSize() throws SQLException { + return stmt.getFetchSize(); + } + + @Override + public int getResultSetConcurrency() throws SQLException { + return stmt.getResultSetConcurrency(); + } + + @Override + public int getResultSetType() throws SQLException { + return stmt.getResultSetType(); + } + + @Override + public void addBatch(String sql) throws SQLException { + stmt.addBatch(sql); + } + + @Override + public void clearBatch() throws SQLException { + stmt.clearBatch(); + } + + @Override + public int[] executeBatch() throws SQLException { + logger.debug(EELFLoggerDelegate.applicationLogger,"executeBatch: "); + int[] n = null; + try { + logger.debug(EELFLoggerDelegate.applicationLogger,"executeBatch() is not supported by MDBC; your results may be incorrect as a result."); + n = stmt.executeBatch(); + synchronizeTables(null); + } catch (Exception e) { + String nm = e.getClass().getName(); + logger.error(EELFLoggerDelegate.errorLogger,"executeBatch: exception "+nm); + if (!nm.startsWith(DATASTAX_PREFIX)) + throw e; + } + return n; + } + + @Override + public Connection getConnection() throws SQLException { + return stmt.getConnection(); + } + + @Override + public boolean getMoreResults(int current) throws SQLException { + return stmt.getMoreResults(current); + } + + @Override + public ResultSet getGeneratedKeys() throws SQLException { + return stmt.getGeneratedKeys(); + } + + @Override + public int executeUpdate(String sql, int autoGeneratedKeys) throws SQLException { + 
logger.debug(EELFLoggerDelegate.applicationLogger,"executeUpdate: "+sql); + int n = 0; + try { + mgr.preStatementHook(sql); + n = stmt.executeUpdate(sql, autoGeneratedKeys); + mgr.postStatementHook(sql); + synchronizeTables(sql); + } catch (Exception e) { + String nm = e.getClass().getName(); + logger.error(EELFLoggerDelegate.errorLogger,"executeUpdate: exception "+nm); + if (!nm.startsWith(DATASTAX_PREFIX)) + throw e; + } + return n; + } + + @Override + public int executeUpdate(String sql, int[] columnIndexes) throws SQLException { + logger.debug(EELFLoggerDelegate.applicationLogger,"executeUpdate: "+sql); + int n = 0; + try { + mgr.preStatementHook(sql); + n = stmt.executeUpdate(sql, columnIndexes); + mgr.postStatementHook(sql); + synchronizeTables(sql); + } catch (Exception e) { + String nm = e.getClass().getName(); + logger.error(EELFLoggerDelegate.errorLogger,"executeUpdate: exception "+nm); + if (!nm.startsWith(DATASTAX_PREFIX)) + throw e; + } + return n; + } + + @Override + public int executeUpdate(String sql, String[] columnNames) throws SQLException { + logger.debug(EELFLoggerDelegate.applicationLogger,"executeUpdate: "+sql); + int n = 0; + try { + mgr.preStatementHook(sql); + n = stmt.executeUpdate(sql, columnNames); + mgr.postStatementHook(sql); + synchronizeTables(sql); + } catch (Exception e) { + String nm = e.getClass().getName(); + logger.error(EELFLoggerDelegate.errorLogger,"executeUpdate: exception "+nm); + if (!nm.startsWith(DATASTAX_PREFIX)) + throw e; + } + return n; + } + + @Override + public boolean execute(String sql, int autoGeneratedKeys) throws SQLException { + logger.debug(EELFLoggerDelegate.applicationLogger,"execute: "+sql); + boolean b = false; + try { + mgr.preStatementHook(sql); + b = stmt.execute(sql, autoGeneratedKeys); + mgr.postStatementHook(sql); + synchronizeTables(sql); + } catch (Exception e) { + String nm = e.getClass().getName(); + logger.error(EELFLoggerDelegate.errorLogger,"execute: exception "+nm); + if (!nm.startsWith(DATASTAX_PREFIX)) + throw e; + } + return b; + } + + @Override + public boolean execute(String sql, int[] columnIndexes) throws SQLException { + logger.debug(EELFLoggerDelegate.applicationLogger,"execute: "+sql); + boolean b = false; + try { + mgr.preStatementHook(sql); + b = stmt.execute(sql, columnIndexes); + mgr.postStatementHook(sql); + synchronizeTables(sql); + } catch (Exception e) { + String nm = e.getClass().getName(); + logger.error(EELFLoggerDelegate.errorLogger,"execute: exception "+nm); + if (!nm.startsWith(DATASTAX_PREFIX)) + throw e; + } + return b; + } + + @Override + public boolean execute(String sql, String[] columnNames) throws SQLException { + logger.debug(EELFLoggerDelegate.applicationLogger,"execute: "+sql); + //\TODO Idem to the other execute without columnNames + boolean b = false; + try { + mgr.preStatementHook(sql); + b = stmt.execute(sql, columnNames); + mgr.postStatementHook(sql); + synchronizeTables(sql); + } catch (Exception e) { + String nm = e.getClass().getName(); + logger.error(EELFLoggerDelegate.errorLogger,"execute: exception "+nm); + if (!nm.startsWith(DATASTAX_PREFIX)) + throw e; + } + return b; + } + + @Override + public int getResultSetHoldability() throws SQLException { + return stmt.getResultSetHoldability(); + } + + @Override + public boolean isClosed() throws SQLException { + return stmt.isClosed(); + } + + @Override + public void setPoolable(boolean poolable) throws SQLException { + stmt.setPoolable(poolable); + } + + @Override + public boolean isPoolable() throws SQLException { + return 
stmt.isPoolable(); + } + + @Override + public void closeOnCompletion() throws SQLException { + stmt.closeOnCompletion(); + } + + @Override + public boolean isCloseOnCompletion() throws SQLException { + return stmt.isCloseOnCompletion(); + } + + protected void synchronizeTables(String sql) { + if (sql == null || sql.trim().toLowerCase().startsWith("create")) { + if (mgr != null) { + try { + mgr.synchronizeTables(); + } catch (QueryException e) { + logger.error(EELFLoggerDelegate.errorLogger, e.getMessage(),AppMessages.UNKNOWNERROR, ErrorSeverity.CRITICAL, ErrorTypes.QUERYERROR); + } + } + } + } +} diff --git a/src/main/java/org/onap/music/mdbc/MusicSqlManager.java b/src/main/java/org/onap/music/mdbc/MusicSqlManager.java new file mode 100755 index 0000000..741ee9e --- /dev/null +++ b/src/main/java/org/onap/music/mdbc/MusicSqlManager.java @@ -0,0 +1,308 @@ +package org.onap.music.mdbc; + +import java.sql.Connection; +import java.util.*; + +import org.json.JSONObject; + +import org.onap.music.mdbc.mixins.DBInterface; +import org.onap.music.mdbc.mixins.MixinFactory; +import org.onap.music.mdbc.mixins.MusicInterface; +import org.onap.music.mdbc.mixins.Utils; +import org.onap.music.mdbc.tables.StagingTable; +import org.onap.music.mdbc.tables.TxCommitProgress; +import org.onap.music.exceptions.MDBCServiceException; +import org.onap.music.exceptions.QueryException; +import org.onap.music.logging.*; +import org.onap.music.logging.format.AppMessages; +import org.onap.music.logging.format.ErrorSeverity; +import org.onap.music.logging.format.ErrorTypes; + +/** +*

+* MUSIC SQL Manager - code that helps take data written to a SQL database and seamlessly integrates it
+* with MUSIC that maintains data in a No-SQL data-store
+* (Cassandra) and protects access to it with a distributed
+* locking service (based on Zookeeper).
+*
+* This code will support transactions by taking note of the value of the autoCommit flag, and of calls
+* to commit() and rollback(). These calls should be made by the user's JDBC
+* client.
+*
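+* A rough sketch of the interaction (inferred from the statement classes in this patch, not a
+* guaranteed contract): MdbcStatement and MdbcPreparedStatement invoke preStatementHook() before and
+* postStatementHook() after each SQL statement they run, and synchronizeTables() after statements
+* that may have created or dropped tables; commit() and rollback() are driven by the JDBC client
+* ending its transaction.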
+* +* @author Bharath Balasubramanian, Robert Eby +*/ +public class MusicSqlManager { + + private static EELFLoggerDelegate logger = EELFLoggerDelegate.getLogger(MusicSqlManager.class); + + private final DBInterface dbi; + private final MusicInterface mi; + private final Set table_set; + private final HashMap transactionDigest; + private boolean autocommit; // a copy of the autocommit flag from the JDBC Connection + + /** + * Build a MusicSqlManager for a DB connection. This construct may only be called by getMusicSqlManager(), + * which will ensure that only one MusicSqlManager is created per URL. + * This is the location where the appropriate mixins to use for the MusicSqlManager should be determined. + * They should be picked based upon the URL and the properties passed to this constructor. + *

+ * At the present time, we only support the use of the H2Mixin (for access to a local H2 database),
+ * with the CassandraMixin (for direct access to a Cassandra noSQL DB as the persistence layer).
+ *
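+ * (For reference, the constructor below actually selects the SQL-side mixin from the
+ * Configuration.KEY_DB_MIXIN_NAME property, falling back to Configuration.DB_MIXIN_DEFAULT.)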
+ * + * @param url the JDBC URL which was used to connection to the database + * @param conn the actual connection to the database + * @param info properties passed from the initial JDBC connect() call + * @throws MDBCServiceException + */ + public MusicSqlManager(String url, Connection conn, Properties info, MusicInterface mi) throws MDBCServiceException { + try { + info.putAll(Utils.getMdbcProperties()); + String mixinDb = info.getProperty(Configuration.KEY_DB_MIXIN_NAME, Configuration.DB_MIXIN_DEFAULT); + this.dbi = MixinFactory.createDBInterface(mixinDb, this, url, conn, info); + this.mi = mi; + this.table_set = Collections.synchronizedSet(new HashSet()); + this.autocommit = true; + this.transactionDigest = new HashMap(); + + }catch(Exception e) { + throw new MDBCServiceException(e.getMessage()); + } + } + + public void setAutoCommit(boolean b,String txId, TxCommitProgress progressKeeper, DatabasePartition partition) throws MDBCServiceException { + if (b != autocommit) { + autocommit = b; + logger.debug(EELFLoggerDelegate.applicationLogger,"autocommit changed to "+b); + if (b) { + // My reading is that turning autoCOmmit ON should automatically commit any outstanding transaction + if(txId == null || txId.isEmpty()) { + logger.error(EELFLoggerDelegate.errorLogger, "Connection ID is null",AppMessages.UNKNOWNERROR, ErrorSeverity.CRITICAL, ErrorTypes.QUERYERROR); + throw new MDBCServiceException("tx id is null"); + } + commit(txId,progressKeeper,partition); + } + } + } + + /** + * Close this MusicSqlManager. + */ + public void close() { + if (dbi != null) { + dbi.close(); + } + } + + /** + * Code to be run within the DB driver before a SQL statement is executed. This is where tables + * can be synchronized before a SELECT, for those databases that do not support SELECT triggers. + * @param sql the SQL statement that is about to be executed + */ + public void preStatementHook(final String sql) { + dbi.preStatementHook(sql); + } + /** + * Code to be run within the DB driver after a SQL statement has been executed. This is where remote + * statement actions can be copied back to Cassandra/MUSIC. + * @param sql the SQL statement that was executed + */ + public void postStatementHook(final String sql) { + dbi.postStatementHook(sql,transactionDigest); + } + /** + * Synchronize the list of tables in SQL with the list in MUSIC. This function should be called when the + * proxy first starts, and whenever there is the possibility that tables were created or dropped. It is synchronized + * in order to prevent multiple threads from running this code in parallel. 
+ */ + public synchronized void synchronizeTables() throws QueryException { + Set set1 = dbi.getSQLTableSet(); // set of tables in the database + logger.debug(EELFLoggerDelegate.applicationLogger, "synchronizing tables:" + set1); + for (String tableName : set1) { + // This map will be filled in if this table was previously discovered + if (!table_set.contains(tableName) && !dbi.getReservedTblNames().contains(tableName)) { + logger.info(EELFLoggerDelegate.applicationLogger, "New table discovered: "+tableName); + try { + TableInfo ti = dbi.getTableInfo(tableName); + mi.initializeMusicForTable(ti,tableName); + //\TODO Verify if table info can be modify in the previous step, if not this step can be deleted + ti = dbi.getTableInfo(tableName); + mi.createDirtyRowTable(ti,tableName); + dbi.createSQLTriggers(tableName); + table_set.add(tableName); + synchronizeTableData(tableName); + logger.debug(EELFLoggerDelegate.applicationLogger, "synchronized tables:" + + table_set.size() + "/" + set1.size() + "tables uploaded"); + } catch (Exception e) { + logger.error(EELFLoggerDelegate.errorLogger, e.getMessage(),AppMessages.UNKNOWNERROR, ErrorSeverity.CRITICAL, ErrorTypes.QUERYERROR); + //logger.error(EELFLoggerDelegate.errorLogger, "Exception synchronizeTables: "+e); + throw new QueryException(); + } + } + } + +// Set set2 = getMusicTableSet(music_ns); + // not working - fix later +// for (String tbl : set2) { +// if (!set1.contains(tbl)) { +// logger.debug("Old table dropped: "+tbl); +// dropSQLTriggers(tbl, conn); +// // ZZTODO drop camunda table ? +// } +// } + } + + /** + * On startup, copy dirty data from Cassandra to H2. May not be needed. + * @param tableName + */ + public void synchronizeTableData(String tableName) { + // TODO - copy MUSIC -> H2 + dbi.synchronizeData(tableName); + } + /** + * This method is called whenever there is a SELECT on a local SQL table, and should be called by the underlying databases + * triggering mechanism. It first checks the local dirty bits table to see if there are any keys in Cassandra whose value + * has not yet been sent to SQL. If there are, the appropriate values are copied from Cassandra to the local database. + * Under normal execution, this function behaves as a NOP operation. + * @param tableName This is the table on which the SELECT is being performed + */ + public void readDirtyRowsAndUpdateDb(String tableName) { + mi.readDirtyRowsAndUpdateDb(dbi,tableName); + } + + + + + /** + * This method gets the primary key that the music interfaces uses by default. + * If the front end uses a primary key, this will not match what is used in the MUSIC interface + * @return + */ + public String getMusicDefaultPrimaryKeyName() { + return mi.getMusicDefaultPrimaryKeyName(); + } + + /** + * Asks music interface to provide the function to create a primary key + * e.g. uuid(), 1, "unique_aksd419fjc" + * @return + */ + public String generateUniqueKey() { + // + return mi.generateUniqueKey(); + } + + + /** + * Perform a commit, as requested by the JDBC driver. If any row updates have been delayed, + * they are performed now and copied into MUSIC. 
+ * @throws MDBCServiceException + */ + public synchronized void commit(String txId, TxCommitProgress progressKeeper, DatabasePartition partition) throws MDBCServiceException { + logger.debug(EELFLoggerDelegate.applicationLogger, " commit "); + // transaction was committed -- add all the updates into the REDO-Log in MUSIC + try { + mi.commitLog(dbi, partition, transactionDigest, txId, progressKeeper); + }catch(MDBCServiceException e) { + logger.error(EELFLoggerDelegate.errorLogger, e.getMessage(), AppMessages.QUERYERROR, ErrorTypes.QUERYERROR, ErrorSeverity.CRITICAL); + throw e; + } + } + + /** + * Perform a rollback, as requested by the JDBC driver. If any row updates have been delayed, + * they are discarded. + */ + public synchronized void rollback() { + // transaction was rolled back - discard the updates + logger.debug(EELFLoggerDelegate.applicationLogger, "Rollback");; + transactionDigest.clear(); + } + + /** + * Get all + * @param table + * @param dbRow + * @return + */ + public String getMusicKeyFromRowWithoutPrimaryIndexes(String table, JSONObject dbRow) { + TableInfo ti = dbi.getTableInfo(table); + return mi.getMusicKeyFromRowWithoutPrimaryIndexes(ti,table, dbRow); + } + + public String getMusicKeyFromRow(String table, JSONObject dbRow) { + TableInfo ti = dbi.getTableInfo(table); + return mi.getMusicKeyFromRow(ti,table, dbRow); + } + + /** + * Returns all keys that matches the current sql statement, and not in already updated keys. + * + * @param sql the query that we are getting keys for + * @deprecated + */ + public ArrayList getMusicKeys(String sql) { + ArrayList musicKeys = new ArrayList(); + //\TODO See if this is required + /* + try { + net.sf.jsqlparser.statement.Statement stmt = CCJSqlParserUtil.parse(sql); + if (stmt instanceof Insert) { + Insert s = (Insert) stmt; + String tbl = s.getTable().getName(); + musicKeys.add(generatePrimaryKey()); + } else { + String tbl; + String where = ""; + if (stmt instanceof Update){ + Update u = (Update) stmt; + tbl = u.getTables().get(0).getName(); + where = u.getWhere().toString(); + } else if (stmt instanceof Delete) { + Delete d = (Delete) stmt; + tbl = d.getTable().getName(); + if (d.getWhere()!=null) { + where = d.getWhere().toString(); + } + } else { + System.err.println("Not recognized sql type"); + tbl = ""; + } + String dbiSelect = "SELECT * FROM " + tbl; + if (!where.equals("")) { + dbiSelect += "WHERE" + where; + } + ResultSet rs = dbi.executeSQLRead(dbiSelect); + musicKeys.addAll(getMusicKeysWhere(tbl, Utils.parseResults(dbi.getTableInfo(tbl), rs))); + rs.getStatement().close(); + } + } catch (JSQLParserException | SQLException e) { + + e.printStackTrace(); + } + System.err.print("MusicKeys:"); + for(String musicKey:musicKeys) { + System.out.print(musicKey + ","); + } + */ + return musicKeys; + } + + public void own(List ranges) { + throw new java.lang.UnsupportedOperationException("function not implemented yet"); + } + + public void appendRange(String rangeId, List ranges) { + throw new java.lang.UnsupportedOperationException("function not implemented yet"); + } + + public void relinquish(String ownerId, String rangeId) { + throw new java.lang.UnsupportedOperationException("function not implemented yet"); + } + + +} diff --git a/src/main/java/org/onap/music/mdbc/ProxyStatement.java b/src/main/java/org/onap/music/mdbc/ProxyStatement.java new file mode 100755 index 0000000..e84dc7b --- /dev/null +++ b/src/main/java/org/onap/music/mdbc/ProxyStatement.java @@ -0,0 +1,1262 @@ +package org.onap.music.mdbc; + +import 
java.io.InputStream; +import java.io.Reader; +import java.math.BigDecimal; +import java.net.URL; +import java.sql.Array; +import java.sql.Blob; +import java.sql.CallableStatement; +import java.sql.Clob; +import java.sql.Connection; +import java.sql.Date; +import java.sql.NClob; +import java.sql.ParameterMetaData; +import java.sql.PreparedStatement; +import java.sql.Ref; +import java.sql.ResultSet; +import java.sql.ResultSetMetaData; +import java.sql.RowId; +import java.sql.SQLException; +import java.sql.SQLWarning; +import java.sql.SQLXML; +import java.sql.Statement; +import java.sql.Time; +import java.sql.Timestamp; +import java.util.Calendar; +import java.util.Map; + +import org.apache.log4j.Logger; + +import org.onap.music.exceptions.QueryException; + +/** + * ProxyStatement is a proxy Statement that front ends Statements from the underlying JDBC driver. It passes all operations through, + * and invokes the MusicSqlManager when there is the possibility that database tables have been created or dropped. + * + * @author Robert Eby + */ +public class ProxyStatement implements CallableStatement { + private static final Logger logger = Logger.getLogger(ProxyStatement.class); + private static final String DATASTAX_PREFIX = "com.datastax.driver"; + + private final Statement stmt; // the Statement that we are proxying + private final MusicSqlManager mgr; + + public ProxyStatement(Statement s, MusicSqlManager m) { + this.stmt = s; + this.mgr = m; + } + + @Override + public T unwrap(Class iface) throws SQLException { + return stmt.unwrap(iface); + } + + @Override + public boolean isWrapperFor(Class iface) throws SQLException { + return stmt.isWrapperFor(iface); + } + + @Override + public ResultSet executeQuery(String sql) throws SQLException { + logger.debug("executeQuery: "+sql); + ResultSet r = null; + try { + mgr.preStatementHook(sql); + r = stmt.executeQuery(sql); + mgr.postStatementHook(sql); + synchronizeTables(sql); + } catch (Exception e) { + String nm = e.getClass().getName(); + logger.warn("executeQuery: exception "+nm); + if (!nm.startsWith(DATASTAX_PREFIX)) + throw e; + } + return r; + } + + @Override + public int executeUpdate(String sql) throws SQLException { + logger.debug("executeUpdate: "+sql); + int n = 0; + try { + mgr.preStatementHook(sql); + n = stmt.executeUpdate(sql); + mgr.postStatementHook(sql); + synchronizeTables(sql); + } catch (Exception e) { + String nm = e.getClass().getName(); + logger.warn("executeUpdate: exception "+nm); + if (!nm.startsWith(DATASTAX_PREFIX)) + throw e; + } + return n; + } + + @Override + public void close() throws SQLException { + stmt.close(); + } + + @Override + public int getMaxFieldSize() throws SQLException { + return stmt.getMaxFieldSize(); + } + + @Override + public void setMaxFieldSize(int max) throws SQLException { + stmt.setMaxFieldSize(max); + } + + @Override + public int getMaxRows() throws SQLException { + return stmt.getMaxRows(); + } + + @Override + public void setMaxRows(int max) throws SQLException { + stmt.setMaxRows(max); + } + + @Override + public void setEscapeProcessing(boolean enable) throws SQLException { + stmt.setEscapeProcessing(enable); + } + + @Override + public int getQueryTimeout() throws SQLException { + return stmt.getQueryTimeout(); + } + + @Override + public void setQueryTimeout(int seconds) throws SQLException { + stmt.setQueryTimeout(seconds); + } + + @Override + public void cancel() throws SQLException { + stmt.cancel(); + } + + @Override + public SQLWarning getWarnings() throws SQLException { + return 
stmt.getWarnings(); + } + + @Override + public void clearWarnings() throws SQLException { + stmt.clearWarnings(); + } + + @Override + public void setCursorName(String name) throws SQLException { + stmt.setCursorName(name); + } + + @Override + public boolean execute(String sql) throws SQLException { + logger.debug("execute: "+sql); + boolean b = false; + try { + mgr.preStatementHook(sql); + b = stmt.execute(sql); + mgr.postStatementHook(sql); + synchronizeTables(sql); + } catch (Exception e) { + String nm = e.getClass().getName(); + // Note: this seems to be the only call Camunda uses, so it is the only one I am fixing for now. + boolean ignore = nm.startsWith(DATASTAX_PREFIX); +// ignore |= (nm.startsWith("org.h2.jdbc.JdbcSQLException") && e.getMessage().contains("already exists")); + if (ignore) { + logger.warn("execute: exception (IGNORED) "+nm); + } else { + logger.warn("execute: exception "+nm); + throw e; + } + } + return b; + } + + @Override + public ResultSet getResultSet() throws SQLException { + return stmt.getResultSet(); + } + + @Override + public int getUpdateCount() throws SQLException { + return stmt.getUpdateCount(); + } + + @Override + public boolean getMoreResults() throws SQLException { + return stmt.getMoreResults(); + } + + @Override + public void setFetchDirection(int direction) throws SQLException { + stmt.setFetchDirection(direction); + } + + @Override + public int getFetchDirection() throws SQLException { + return stmt.getFetchDirection(); + } + + @Override + public void setFetchSize(int rows) throws SQLException { + stmt.setFetchSize(rows); + } + + @Override + public int getFetchSize() throws SQLException { + return stmt.getFetchSize(); + } + + @Override + public int getResultSetConcurrency() throws SQLException { + return stmt.getResultSetConcurrency(); + } + + @Override + public int getResultSetType() throws SQLException { + return stmt.getResultSetType(); + } + + @Override + public void addBatch(String sql) throws SQLException { + stmt.addBatch(sql); + } + + @Override + public void clearBatch() throws SQLException { + stmt.clearBatch(); + } + + @Override + public int[] executeBatch() throws SQLException { + logger.debug("executeBatch"); + int[] n = null; + try { + logger.warn("executeBatch() is not supported by MDBC; your results may be incorrect as a result."); + n = stmt.executeBatch(); + synchronizeTables(null); + } catch (Exception e) { + String nm = e.getClass().getName(); + logger.warn("executeBatch: exception "+nm); + if (!nm.startsWith(DATASTAX_PREFIX)) + throw e; + } + return n; + } + + @Override + public Connection getConnection() throws SQLException { + return stmt.getConnection(); + } + + @Override + public boolean getMoreResults(int current) throws SQLException { + return stmt.getMoreResults(current); + } + + @Override + public ResultSet getGeneratedKeys() throws SQLException { + return stmt.getGeneratedKeys(); + } + + @Override + public int executeUpdate(String sql, int autoGeneratedKeys) throws SQLException { + logger.debug("executeUpdate: "+sql); + int n = 0; + try { + mgr.preStatementHook(sql); + n = stmt.executeUpdate(sql, autoGeneratedKeys); + mgr.postStatementHook(sql); + synchronizeTables(sql); + } catch (Exception e) { + String nm = e.getClass().getName(); + logger.warn("executeUpdate: exception "+nm); + if (!nm.startsWith(DATASTAX_PREFIX)) + throw e; + } + return n; + } + + @Override + public int executeUpdate(String sql, int[] columnIndexes) throws SQLException { + logger.debug("executeUpdate: "+sql); + int n = 0; + try { + 
mgr.preStatementHook(sql); + n = stmt.executeUpdate(sql, columnIndexes); + mgr.postStatementHook(sql); + synchronizeTables(sql); + } catch (Exception e) { + String nm = e.getClass().getName(); + logger.warn("executeUpdate: exception "+nm); + if (!nm.startsWith(DATASTAX_PREFIX)) + throw e; + } + return n; + } + + @Override + public int executeUpdate(String sql, String[] columnNames) throws SQLException { + logger.debug("executeUpdate: "+sql); + int n = 0; + try { + mgr.preStatementHook(sql); + n = stmt.executeUpdate(sql, columnNames); + mgr.postStatementHook(sql); + synchronizeTables(sql); + } catch (Exception e) { + String nm = e.getClass().getName(); + logger.warn("executeUpdate: exception "+nm); + if (!nm.startsWith(DATASTAX_PREFIX)) + throw e; + } + return n; + } + + @Override + public boolean execute(String sql, int autoGeneratedKeys) throws SQLException { + logger.debug("execute: "+sql); + boolean b = false; + try { + mgr.preStatementHook(sql); + b = stmt.execute(sql, autoGeneratedKeys); + mgr.postStatementHook(sql); + synchronizeTables(sql); + } catch (Exception e) { + String nm = e.getClass().getName(); + logger.warn("execute: exception "+nm); + if (!nm.startsWith(DATASTAX_PREFIX)) + throw e; + } + return b; + } + + @Override + public boolean execute(String sql, int[] columnIndexes) throws SQLException { + logger.debug("execute: "+sql); + boolean b = false; + try { + mgr.preStatementHook(sql); + b = stmt.execute(sql, columnIndexes); + mgr.postStatementHook(sql); + synchronizeTables(sql); + } catch (Exception e) { + String nm = e.getClass().getName(); + logger.warn("execute: exception "+nm); + if (!nm.startsWith(DATASTAX_PREFIX)) + throw e; + } + return b; + } + + @Override + public boolean execute(String sql, String[] columnNames) throws SQLException { + logger.debug("execute: "+sql); + boolean b = false; + try { + mgr.preStatementHook(sql); + b = stmt.execute(sql, columnNames); + mgr.postStatementHook(sql); + synchronizeTables(sql); + } catch (Exception e) { + String nm = e.getClass().getName(); + logger.warn("execute: exception "+nm); + if (!nm.startsWith(DATASTAX_PREFIX)) + throw e; + } + return b; + } + + @Override + public int getResultSetHoldability() throws SQLException { + return stmt.getResultSetHoldability(); + } + + @Override + public boolean isClosed() throws SQLException { + return stmt.isClosed(); + } + + @Override + public void setPoolable(boolean poolable) throws SQLException { + stmt.setPoolable(poolable); + } + + @Override + public boolean isPoolable() throws SQLException { + return stmt.isPoolable(); + } + + @Override + public void closeOnCompletion() throws SQLException { + stmt.closeOnCompletion(); + } + + @Override + public boolean isCloseOnCompletion() throws SQLException { + return stmt.isCloseOnCompletion(); + } + + @Override + public ResultSet executeQuery() throws SQLException { + logger.debug("executeQuery"); + return ((PreparedStatement)stmt).executeQuery(); + } + + @Override + public int executeUpdate() throws SQLException { + logger.debug("executeUpdate"); + return ((PreparedStatement)stmt).executeUpdate(); + } + + @Override + public void setNull(int parameterIndex, int sqlType) throws SQLException { + ((PreparedStatement)stmt).setNull(parameterIndex, sqlType); + } + + @Override + public void setBoolean(int parameterIndex, boolean x) throws SQLException { + ((PreparedStatement)stmt).setBoolean(parameterIndex, x); + } + + @Override + public void setByte(int parameterIndex, byte x) throws SQLException { + ((PreparedStatement)stmt).setByte(parameterIndex, 
x); + } + + @Override + public void setShort(int parameterIndex, short x) throws SQLException { + ((PreparedStatement)stmt).setShort(parameterIndex, x); + } + + @Override + public void setInt(int parameterIndex, int x) throws SQLException { + ((PreparedStatement)stmt).setInt(parameterIndex, x); + } + + @Override + public void setLong(int parameterIndex, long x) throws SQLException { + ((PreparedStatement)stmt).setLong(parameterIndex, x); + } + + @Override + public void setFloat(int parameterIndex, float x) throws SQLException { + ((PreparedStatement)stmt).setFloat(parameterIndex, x); + } + + @Override + public void setDouble(int parameterIndex, double x) throws SQLException { + ((PreparedStatement)stmt).setDouble(parameterIndex, x); + } + + @Override + public void setBigDecimal(int parameterIndex, BigDecimal x) throws SQLException { + ((PreparedStatement)stmt).setBigDecimal(parameterIndex, x); + } + + @Override + public void setString(int parameterIndex, String x) throws SQLException { + ((PreparedStatement)stmt).setString(parameterIndex, x); + } + + @Override + public void setBytes(int parameterIndex, byte[] x) throws SQLException { + ((PreparedStatement)stmt).setBytes(parameterIndex, x); + } + + @Override + public void setDate(int parameterIndex, Date x) throws SQLException { + ((PreparedStatement)stmt).setDate(parameterIndex, x); + } + + @Override + public void setTime(int parameterIndex, Time x) throws SQLException { + ((PreparedStatement)stmt).setTime(parameterIndex, x); + } + + @Override + public void setTimestamp(int parameterIndex, Timestamp x) throws SQLException { + ((PreparedStatement)stmt).setTimestamp(parameterIndex, x); + } + + @Override + public void setAsciiStream(int parameterIndex, InputStream x, int length) throws SQLException { + ((PreparedStatement)stmt).setAsciiStream(parameterIndex, x, length); + } + + @SuppressWarnings("deprecation") + @Override + public void setUnicodeStream(int parameterIndex, InputStream x, int length) throws SQLException { + ((PreparedStatement)stmt).setUnicodeStream(parameterIndex, x, length); + } + + @Override + public void setBinaryStream(int parameterIndex, InputStream x, int length) throws SQLException { + ((PreparedStatement)stmt).setBinaryStream(parameterIndex, x, length); + } + + @Override + public void clearParameters() throws SQLException { + ((PreparedStatement)stmt).clearParameters(); + } + + @Override + public void setObject(int parameterIndex, Object x, int targetSqlType) throws SQLException { + ((PreparedStatement)stmt).setObject(parameterIndex, x, targetSqlType); + } + + @Override + public void setObject(int parameterIndex, Object x) throws SQLException { + ((PreparedStatement)stmt).setObject(parameterIndex, x); + } + + @Override + public boolean execute() throws SQLException { + return ((PreparedStatement)stmt).execute(); + } + + @Override + public void addBatch() throws SQLException { + ((PreparedStatement)stmt).addBatch(); + } + + @Override + public void setCharacterStream(int parameterIndex, Reader reader, int length) throws SQLException { + ((PreparedStatement)stmt).setCharacterStream(parameterIndex, reader, length); + } + + @Override + public void setRef(int parameterIndex, Ref x) throws SQLException { + ((PreparedStatement)stmt).setRef(parameterIndex, x); + } + + @Override + public void setBlob(int parameterIndex, Blob x) throws SQLException { + ((PreparedStatement)stmt).setBlob(parameterIndex, x); + } + + @Override + public void setClob(int parameterIndex, Clob x) throws SQLException { + 
((PreparedStatement)stmt).setClob(parameterIndex, x); + } + + @Override + public void setArray(int parameterIndex, Array x) throws SQLException { + ((PreparedStatement)stmt).setArray(parameterIndex, x); + } + + @Override + public ResultSetMetaData getMetaData() throws SQLException { + return ((PreparedStatement)stmt).getMetaData(); + } + + @Override + public void setDate(int parameterIndex, Date x, Calendar cal) throws SQLException { + ((PreparedStatement)stmt).setDate(parameterIndex, x, cal); + } + + @Override + public void setTime(int parameterIndex, Time x, Calendar cal) throws SQLException { + ((PreparedStatement)stmt).setTime(parameterIndex, x, cal); + } + + @Override + public void setTimestamp(int parameterIndex, Timestamp x, Calendar cal) throws SQLException { + ((CallableStatement)stmt).setTimestamp(parameterIndex, x, cal); + } + + @Override + public void setNull(int parameterIndex, int sqlType, String typeName) throws SQLException { + ((CallableStatement)stmt).setNull(parameterIndex, sqlType, typeName); + } + + @Override + public void setURL(int parameterIndex, URL x) throws SQLException { + ((CallableStatement)stmt).setURL(parameterIndex, x); + } + + @Override + public ParameterMetaData getParameterMetaData() throws SQLException { + return ((CallableStatement)stmt).getParameterMetaData(); + } + + @Override + public void setRowId(int parameterIndex, RowId x) throws SQLException { + ((CallableStatement)stmt).setRowId(parameterIndex, x); + } + + @Override + public void setNString(int parameterIndex, String value) throws SQLException { + ((CallableStatement)stmt).setNString(parameterIndex, value); + } + + @Override + public void setNCharacterStream(int parameterIndex, Reader value, long length) throws SQLException { + ((CallableStatement)stmt).setNCharacterStream(parameterIndex, value, length); + } + + @Override + public void setNClob(int parameterIndex, NClob value) throws SQLException { + ((CallableStatement)stmt).setNClob(parameterIndex, value); + } + + @Override + public void setClob(int parameterIndex, Reader reader, long length) throws SQLException { + ((CallableStatement)stmt).setClob(parameterIndex, reader, length); + } + + @Override + public void setBlob(int parameterIndex, InputStream inputStream, long length) throws SQLException { + ((CallableStatement)stmt).setBlob(parameterIndex, inputStream, length); + } + + @Override + public void setNClob(int parameterIndex, Reader reader, long length) throws SQLException { + ((CallableStatement)stmt).setNClob(parameterIndex, reader, length); + } + + @Override + public void setSQLXML(int parameterIndex, SQLXML xmlObject) throws SQLException { + ((CallableStatement)stmt).setSQLXML(parameterIndex, xmlObject); + } + + @Override + public void setObject(int parameterIndex, Object x, int targetSqlType, int scaleOrLength) throws SQLException { + ((CallableStatement)stmt).setObject(parameterIndex, x, targetSqlType, scaleOrLength); + } + + @Override + public void setAsciiStream(int parameterIndex, InputStream x, long length) throws SQLException { + ((CallableStatement)stmt).setAsciiStream(parameterIndex, x, length); + } + + @Override + public void setBinaryStream(int parameterIndex, InputStream x, long length) throws SQLException { + ((CallableStatement)stmt).setBinaryStream(parameterIndex, x, length); + } + + @Override + public void setCharacterStream(int parameterIndex, Reader reader, long length) throws SQLException { + ((CallableStatement)stmt).setCharacterStream(parameterIndex, reader, length); + } + + @Override + public void 
setAsciiStream(int parameterIndex, InputStream x) throws SQLException { + ((CallableStatement)stmt).setAsciiStream(parameterIndex, x); + } + + @Override + public void setBinaryStream(int parameterIndex, InputStream x) throws SQLException { + ((CallableStatement)stmt).setBinaryStream(parameterIndex, x); + } + + @Override + public void setCharacterStream(int parameterIndex, Reader reader) throws SQLException { + ((CallableStatement)stmt).setCharacterStream(parameterIndex, reader); + } + + @Override + public void setNCharacterStream(int parameterIndex, Reader value) throws SQLException { + ((CallableStatement)stmt).setNCharacterStream(parameterIndex, value); + } + + @Override + public void setClob(int parameterIndex, Reader reader) throws SQLException { + ((CallableStatement)stmt).setClob(parameterIndex, reader); + } + + @Override + public void setBlob(int parameterIndex, InputStream inputStream) throws SQLException { + ((CallableStatement)stmt).setBlob(parameterIndex, inputStream); + } + + @Override + public void setNClob(int parameterIndex, Reader reader) throws SQLException { + ((CallableStatement)stmt).setNClob(parameterIndex, reader); + } + + @Override + public void registerOutParameter(int parameterIndex, int sqlType) throws SQLException { + ((CallableStatement)stmt).registerOutParameter(parameterIndex, sqlType); + } + + @Override + public void registerOutParameter(int parameterIndex, int sqlType, int scale) throws SQLException { + ((CallableStatement)stmt).registerOutParameter(parameterIndex, sqlType, scale); + } + + @Override + public boolean wasNull() throws SQLException { + return ((CallableStatement)stmt).wasNull(); + } + + @Override + public String getString(int parameterIndex) throws SQLException { + return ((CallableStatement)stmt).getString(parameterIndex); + } + + @Override + public boolean getBoolean(int parameterIndex) throws SQLException { + return ((CallableStatement)stmt).getBoolean(parameterIndex); + } + + @Override + public byte getByte(int parameterIndex) throws SQLException { + return ((CallableStatement)stmt).getByte(parameterIndex); + } + + @Override + public short getShort(int parameterIndex) throws SQLException { + return ((CallableStatement)stmt).getShort(parameterIndex); + } + + @Override + public int getInt(int parameterIndex) throws SQLException { + return ((CallableStatement)stmt).getInt(parameterIndex); + } + + @Override + public long getLong(int parameterIndex) throws SQLException { + return ((CallableStatement)stmt).getLong(parameterIndex); + } + + @Override + public float getFloat(int parameterIndex) throws SQLException { + return ((CallableStatement)stmt).getFloat(parameterIndex); + } + + @Override + public double getDouble(int parameterIndex) throws SQLException { + return ((CallableStatement)stmt).getDouble(parameterIndex); + } + + @SuppressWarnings("deprecation") + @Override + public BigDecimal getBigDecimal(int parameterIndex, int scale) throws SQLException { + return ((CallableStatement)stmt).getBigDecimal(parameterIndex, scale); + } + + @Override + public byte[] getBytes(int parameterIndex) throws SQLException { + return ((CallableStatement)stmt).getBytes(parameterIndex); + } + + @Override + public Date getDate(int parameterIndex) throws SQLException { + return ((CallableStatement)stmt).getDate(parameterIndex); + } + + @Override + public Time getTime(int parameterIndex) throws SQLException { + return ((CallableStatement)stmt).getTime(parameterIndex); + } + + @Override + public Timestamp getTimestamp(int parameterIndex) throws SQLException { + 
return ((CallableStatement)stmt).getTimestamp(parameterIndex); + } + + @Override + public Object getObject(int parameterIndex) throws SQLException { + return ((CallableStatement)stmt).getObject(parameterIndex); + } + + @Override + public BigDecimal getBigDecimal(int parameterIndex) throws SQLException { + return ((CallableStatement)stmt).getBigDecimal(parameterIndex); + } + + @Override + public Object getObject(int parameterIndex, Map> map) throws SQLException { + return ((CallableStatement)stmt).getObject(parameterIndex, map); + } + + @Override + public Ref getRef(int parameterIndex) throws SQLException { + return ((CallableStatement)stmt).getRef(parameterIndex); + } + + @Override + public Blob getBlob(int parameterIndex) throws SQLException { + return ((CallableStatement)stmt).getBlob(parameterIndex); + } + + @Override + public Clob getClob(int parameterIndex) throws SQLException { + return ((CallableStatement)stmt).getClob(parameterIndex); + } + + @Override + public Array getArray(int parameterIndex) throws SQLException { + return ((CallableStatement)stmt).getArray(parameterIndex); + } + + @Override + public Date getDate(int parameterIndex, Calendar cal) throws SQLException { + return ((CallableStatement)stmt).getDate(parameterIndex, cal); + } + + @Override + public Time getTime(int parameterIndex, Calendar cal) throws SQLException { + return ((CallableStatement)stmt).getTime(parameterIndex, cal); + } + + @Override + public Timestamp getTimestamp(int parameterIndex, Calendar cal) throws SQLException { + return ((CallableStatement)stmt).getTimestamp(parameterIndex, cal); + } + + @Override + public void registerOutParameter(int parameterIndex, int sqlType, String typeName) throws SQLException { + ((CallableStatement)stmt).registerOutParameter(parameterIndex, sqlType, typeName); + } + + @Override + public void registerOutParameter(String parameterName, int sqlType) throws SQLException { + ((CallableStatement)stmt).registerOutParameter(parameterName, sqlType); + } + + @Override + public void registerOutParameter(String parameterName, int sqlType, int scale) throws SQLException { + ((CallableStatement)stmt).registerOutParameter(parameterName, sqlType, scale); + } + + @Override + public void registerOutParameter(String parameterName, int sqlType, String typeName) throws SQLException { + ((CallableStatement)stmt).registerOutParameter(parameterName, sqlType, typeName); + } + + @Override + public URL getURL(int parameterIndex) throws SQLException { + return ((CallableStatement)stmt).getURL(parameterIndex); + } + + @Override + public void setURL(String parameterName, URL val) throws SQLException { + ((CallableStatement)stmt).setURL(parameterName, val); + } + + @Override + public void setNull(String parameterName, int sqlType) throws SQLException { + ((CallableStatement)stmt).setNull(parameterName, sqlType); + } + + @Override + public void setBoolean(String parameterName, boolean x) throws SQLException { + ((CallableStatement)stmt).setBoolean(parameterName, x); + } + + @Override + public void setByte(String parameterName, byte x) throws SQLException { + ((CallableStatement)stmt).setByte(parameterName, x); + } + + @Override + public void setShort(String parameterName, short x) throws SQLException { + ((CallableStatement)stmt).setShort(parameterName, x); + } + + @Override + public void setInt(String parameterName, int x) throws SQLException { + ((CallableStatement)stmt).setInt(parameterName, x); + } + + @Override + public void setLong(String parameterName, long x) throws SQLException { + 
((CallableStatement)stmt).setLong(parameterName, x); + } + + @Override + public void setFloat(String parameterName, float x) throws SQLException { + ((CallableStatement)stmt).setFloat(parameterName, x); + } + + @Override + public void setDouble(String parameterName, double x) throws SQLException { + ((CallableStatement)stmt).setDouble(parameterName, x); + } + + @Override + public void setBigDecimal(String parameterName, BigDecimal x) throws SQLException { + ((CallableStatement)stmt).setBigDecimal(parameterName, x); + } + + @Override + public void setString(String parameterName, String x) throws SQLException { + ((CallableStatement)stmt).setString(parameterName, x); + } + + @Override + public void setBytes(String parameterName, byte[] x) throws SQLException { + ((CallableStatement)stmt).setBytes(parameterName, x); + } + + @Override + public void setDate(String parameterName, Date x) throws SQLException { + ((CallableStatement)stmt).setDate(parameterName, x); + } + + @Override + public void setTime(String parameterName, Time x) throws SQLException { + ((CallableStatement)stmt).setTime(parameterName, x); + } + + @Override + public void setTimestamp(String parameterName, Timestamp x) throws SQLException { + ((CallableStatement)stmt).setTimestamp(parameterName, x); + } + + @Override + public void setAsciiStream(String parameterName, InputStream x, int length) throws SQLException { + ((CallableStatement)stmt).setAsciiStream(parameterName, x, length); + } + + @Override + public void setBinaryStream(String parameterName, InputStream x, int length) throws SQLException { + ((CallableStatement)stmt).setBinaryStream(parameterName, x, length); + } + + @Override + public void setObject(String parameterName, Object x, int targetSqlType, int scale) throws SQLException { + ((CallableStatement)stmt).setObject(parameterName, x, targetSqlType, scale); + } + + @Override + public void setObject(String parameterName, Object x, int targetSqlType) throws SQLException { + ((CallableStatement)stmt).setObject(parameterName, x, targetSqlType); + } + + @Override + public void setObject(String parameterName, Object x) throws SQLException { + ((CallableStatement)stmt).setObject(parameterName, x); + } + + @Override + public void setCharacterStream(String parameterName, Reader reader, int length) throws SQLException { + ((CallableStatement)stmt).setCharacterStream(parameterName, reader, length); + } + + @Override + public void setDate(String parameterName, Date x, Calendar cal) throws SQLException { + ((CallableStatement)stmt).setDate(parameterName, x, cal); + } + + @Override + public void setTime(String parameterName, Time x, Calendar cal) throws SQLException { + ((CallableStatement)stmt).setTime(parameterName, x, cal); + } + + @Override + public void setTimestamp(String parameterName, Timestamp x, Calendar cal) throws SQLException { + ((CallableStatement)stmt).setTimestamp(parameterName, x, cal); + } + + @Override + public void setNull(String parameterName, int sqlType, String typeName) throws SQLException { + ((CallableStatement)stmt).setNull(parameterName, sqlType, typeName); + } + + @Override + public String getString(String parameterName) throws SQLException { + return ((CallableStatement)stmt).getString(parameterName); + } + + @Override + public boolean getBoolean(String parameterName) throws SQLException { + return ((CallableStatement)stmt).getBoolean(parameterName); + } + + @Override + public byte getByte(String parameterName) throws SQLException { + return ((CallableStatement)stmt).getByte(parameterName); + } + + 
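The execute and executeUpdate overrides above all follow the same delegation pattern: call the MusicSqlManager hook before and after running the SQL on the wrapped JDBC statement, resynchronize table metadata afterwards, and rethrow every exception except those raised inside the Cassandra driver (class names starting with DATASTAX_PREFIX). A minimal standalone sketch of that pattern follows; the hook names mirror the surrounding code, but the StatementHooks interface, the class name, and the DATASTAX_PREFIX value are illustrative assumptions rather than the actual MDBC types.

    import java.sql.SQLException;
    import java.sql.Statement;

    // Simplified sketch of the pre/post-hook wrapper used by the statement classes in this patch.
    public class HookedStatementSketch {
        // Assumption: prefix of the Cassandra driver's exception classes.
        private static final String DATASTAX_PREFIX = "com.datastax.";

        private final Statement stmt;     // the real JDBC statement being wrapped
        private final StatementHooks mgr; // stands in for MusicSqlManager

        public HookedStatementSketch(Statement stmt, StatementHooks mgr) {
            this.stmt = stmt;
            this.mgr = mgr;
        }

        public int executeUpdate(String sql) throws SQLException {
            int n = 0;
            try {
                mgr.preStatementHook(sql);   // e.g. replay dirty rows from MUSIC into the local DB
                n = stmt.executeUpdate(sql); // run the statement against the local database
                mgr.postStatementHook(sql);  // e.g. push the captured changes back to MUSIC
                synchronizeTables(sql);      // refresh table metadata after a CREATE
            } catch (Exception e) {
                // Only exceptions originating in the Cassandra driver are swallowed;
                // everything else is rethrown to the JDBC caller.
                if (!e.getClass().getName().startsWith(DATASTAX_PREFIX)) {
                    throw e;
                }
            }
            return n;
        }

        private void synchronizeTables(String sql) {
            // Mirrors the private helper in the surrounding class: only CREATE statements trigger a resync.
            if (sql == null || sql.trim().toLowerCase().startsWith("create")) {
                mgr.synchronizeTables();
            }
        }

        // Minimal stand-in for the MusicSqlManager calls used above.
        public interface StatementHooks {
            void preStatementHook(String sql);
            void postStatementHook(String sql);
            void synchronizeTables();
        }
    }

In the original methods such driver-originated failures are only logged as warnings, while errors coming from the local SQL database still surface to the caller as a SQLException.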
@Override + public short getShort(String parameterName) throws SQLException { + return ((CallableStatement)stmt).getShort(parameterName); + } + + @Override + public int getInt(String parameterName) throws SQLException { + return ((CallableStatement)stmt).getInt(parameterName); + } + + @Override + public long getLong(String parameterName) throws SQLException { + return ((CallableStatement)stmt).getLong(parameterName); + } + + @Override + public float getFloat(String parameterName) throws SQLException { + return ((CallableStatement)stmt).getFloat(parameterName); + } + + @Override + public double getDouble(String parameterName) throws SQLException { + return ((CallableStatement)stmt).getDouble(parameterName); + } + + @Override + public byte[] getBytes(String parameterName) throws SQLException { + return ((CallableStatement)stmt).getBytes(parameterName); + } + + @Override + public Date getDate(String parameterName) throws SQLException { + return ((CallableStatement)stmt).getDate(parameterName); + } + + @Override + public Time getTime(String parameterName) throws SQLException { + return ((CallableStatement)stmt).getTime(parameterName); + } + + @Override + public Timestamp getTimestamp(String parameterName) throws SQLException { + return ((CallableStatement)stmt).getTimestamp(parameterName); + } + + @Override + public Object getObject(String parameterName) throws SQLException { + return ((CallableStatement)stmt).getObject(parameterName); + } + + @Override + public BigDecimal getBigDecimal(String parameterName) throws SQLException { + return ((CallableStatement)stmt).getBigDecimal(parameterName); + } + + @Override + public Object getObject(String parameterName, Map> map) throws SQLException { + return ((CallableStatement)stmt).getObject(parameterName, map); + } + + @Override + public Ref getRef(String parameterName) throws SQLException { + return ((CallableStatement)stmt).getRef(parameterName); + } + + @Override + public Blob getBlob(String parameterName) throws SQLException { + return ((CallableStatement)stmt).getBlob(parameterName); + } + + @Override + public Clob getClob(String parameterName) throws SQLException { + return ((CallableStatement)stmt).getClob(parameterName); + } + + @Override + public Array getArray(String parameterName) throws SQLException { + return ((CallableStatement)stmt).getArray(parameterName); + } + + @Override + public Date getDate(String parameterName, Calendar cal) throws SQLException { + return ((CallableStatement)stmt).getDate(parameterName, cal); + } + + @Override + public Time getTime(String parameterName, Calendar cal) throws SQLException { + return ((CallableStatement)stmt).getTime(parameterName, cal); + } + + @Override + public Timestamp getTimestamp(String parameterName, Calendar cal) throws SQLException { + return ((CallableStatement)stmt).getTimestamp(parameterName, cal); + } + + @Override + public URL getURL(String parameterName) throws SQLException { + return ((CallableStatement)stmt).getURL(parameterName); + } + + @Override + public RowId getRowId(int parameterIndex) throws SQLException { + return ((CallableStatement)stmt).getRowId(parameterIndex); + } + + @Override + public RowId getRowId(String parameterName) throws SQLException { + return ((CallableStatement)stmt).getRowId(parameterName); + } + + @Override + public void setRowId(String parameterName, RowId x) throws SQLException { + ((CallableStatement)stmt).setRowId(parameterName, x); + } + + @Override + public void setNString(String parameterName, String value) throws SQLException { + 
((CallableStatement)stmt).setNString(parameterName, value); + } + + @Override + public void setNCharacterStream(String parameterName, Reader value, long length) throws SQLException { + ((CallableStatement)stmt).setNCharacterStream(parameterName, value, length); + } + + @Override + public void setNClob(String parameterName, NClob value) throws SQLException { + ((CallableStatement)stmt).setNClob(parameterName, value); + } + + @Override + public void setClob(String parameterName, Reader reader, long length) throws SQLException { + ((CallableStatement)stmt).setClob(parameterName, reader, length); + } + + @Override + public void setBlob(String parameterName, InputStream inputStream, long length) throws SQLException { + ((CallableStatement)stmt).setBlob(parameterName, inputStream, length); + } + + @Override + public void setNClob(String parameterName, Reader reader, long length) throws SQLException { + ((CallableStatement)stmt).setNClob(parameterName, reader, length); + } + + @Override + public NClob getNClob(int parameterIndex) throws SQLException { + return ((CallableStatement)stmt).getNClob(parameterIndex); + } + + @Override + public NClob getNClob(String parameterName) throws SQLException { + return ((CallableStatement)stmt).getNClob(parameterName); + } + + @Override + public void setSQLXML(String parameterName, SQLXML xmlObject) throws SQLException { + ((CallableStatement)stmt).setSQLXML(parameterName, xmlObject); + } + + @Override + public SQLXML getSQLXML(int parameterIndex) throws SQLException { + return ((CallableStatement)stmt).getSQLXML(parameterIndex); + } + + @Override + public SQLXML getSQLXML(String parameterName) throws SQLException { + return ((CallableStatement)stmt).getSQLXML(parameterName); + } + + @Override + public String getNString(int parameterIndex) throws SQLException { + return ((CallableStatement)stmt).getNString(parameterIndex); + } + + @Override + public String getNString(String parameterName) throws SQLException { + return ((CallableStatement)stmt).getNString(parameterName); + } + + @Override + public Reader getNCharacterStream(int parameterIndex) throws SQLException { + return ((CallableStatement)stmt).getNCharacterStream(parameterIndex); + } + + @Override + public Reader getNCharacterStream(String parameterName) throws SQLException { + return ((CallableStatement)stmt).getNCharacterStream(parameterName); + } + + @Override + public Reader getCharacterStream(int parameterIndex) throws SQLException { + return ((CallableStatement)stmt).getCharacterStream(parameterIndex); + } + + @Override + public Reader getCharacterStream(String parameterName) throws SQLException { + return ((CallableStatement)stmt).getCharacterStream(parameterName); + } + + @Override + public void setBlob(String parameterName, Blob x) throws SQLException { + ((CallableStatement)stmt).setBlob(parameterName, x); + } + + @Override + public void setClob(String parameterName, Clob x) throws SQLException { + ((CallableStatement)stmt).setClob(parameterName, x); + } + + @Override + public void setAsciiStream(String parameterName, InputStream x, long length) throws SQLException { + ((CallableStatement)stmt).setAsciiStream(parameterName, x, length); + } + + @Override + public void setBinaryStream(String parameterName, InputStream x, long length) throws SQLException { + ((CallableStatement)stmt).setBinaryStream(parameterName, x, length); + } + + @Override + public void setCharacterStream(String parameterName, Reader reader, long length) throws SQLException { + 
((CallableStatement)stmt).setCharacterStream(parameterName, reader, length); + } + + @Override + public void setAsciiStream(String parameterName, InputStream x) throws SQLException { + ((CallableStatement)stmt).setAsciiStream(parameterName, x); + } + + @Override + public void setBinaryStream(String parameterName, InputStream x) throws SQLException { + ((CallableStatement)stmt).setBinaryStream(parameterName, x); + } + + @Override + public void setCharacterStream(String parameterName, Reader reader) throws SQLException { + ((CallableStatement)stmt).setCharacterStream(parameterName, reader); + } + + @Override + public void setNCharacterStream(String parameterName, Reader value) throws SQLException { + ((CallableStatement)stmt).setNCharacterStream(parameterName, value); + } + + @Override + public void setClob(String parameterName, Reader reader) throws SQLException { + ((CallableStatement)stmt).setClob(parameterName, reader); + } + + @Override + public void setBlob(String parameterName, InputStream inputStream) throws SQLException { + ((CallableStatement)stmt).setBlob(parameterName, inputStream); + } + + @Override + public void setNClob(String parameterName, Reader reader) throws SQLException { + ((CallableStatement)stmt).setNClob(parameterName, reader); + } + + @Override + public <T> T getObject(int parameterIndex, Class<T> type) throws SQLException { + return ((CallableStatement)stmt).getObject(parameterIndex, type); + } + + @Override + public <T> T getObject(String parameterName, Class<T> type) throws SQLException { + return ((CallableStatement)stmt).getObject(parameterName, type); + } + + private void synchronizeTables(String sql) { + if (sql == null || sql.trim().toLowerCase().startsWith("create")) { + if (mgr != null) { + try { + mgr.synchronizeTables(); + } catch (QueryException e) { + + e.printStackTrace(); + } + } + } + } +} diff --git a/src/main/java/org/onap/music/mdbc/Range.java b/src/main/java/org/onap/music/mdbc/Range.java new file mode 100644 index 0000000..b33fb1c --- /dev/null +++ b/src/main/java/org/onap/music/mdbc/Range.java @@ -0,0 +1,34 @@ +package org.onap.music.mdbc; + +import java.io.Serializable; + + +/** + * This class represents a range of the whole database. + * For now a range directly represents a table in Cassandra; + * in the future we may decide to partition ranges differently. + * @author Enrique Saurez + */ +public class Range implements Serializable { + + private static final long serialVersionUID = 1610744496930800088L; + + final public String table; + + public Range(String table) { + this.table = table; + } + + /** + * Compares two Range objects. + * @param other the other range against which this is compared + * @return the equality result + */ + public boolean equal(Range other) { + return table.equals(other.table); + } + + public boolean overlaps(Range other) { + return table.equals(other.table); + } +} \ No newline at end of file diff --git a/src/main/java/org/onap/music/mdbc/RedoRow.java b/src/main/java/org/onap/music/mdbc/RedoRow.java new file mode 100644 index 0000000..db17e60 --- /dev/null +++ b/src/main/java/org/onap/music/mdbc/RedoRow.java @@ -0,0 +1,29 @@ +package org.onap.music.mdbc; + +public class RedoRow { + private String redoTableName; + private String redoRowIndex; + + public RedoRow(){} + + public RedoRow(String redoTableName, String redoRowIndex){ + this.redoRowIndex = redoRowIndex; + this.redoTableName = redoTableName; + } + + public String getRedoTableName() { + return redoTableName; + } + + public void setRedoTableName(String redoTableName) { + this.redoTableName
= redoTableName; + } + + public String getRedoRowIndex() { + return redoRowIndex; + } + + public void setRedoRowIndex(String redoRowIndex) { + this.redoRowIndex = redoRowIndex; + } +} diff --git a/src/main/java/org/onap/music/mdbc/StateManager.java b/src/main/java/org/onap/music/mdbc/StateManager.java new file mode 100644 index 0000000..b2c2adb --- /dev/null +++ b/src/main/java/org/onap/music/mdbc/StateManager.java @@ -0,0 +1,209 @@ +package org.onap.music.mdbc; + +import org.onap.music.exceptions.MDBCServiceException; +import org.onap.music.logging.EELFLoggerDelegate; +import org.onap.music.logging.format.AppMessages; +import org.onap.music.logging.format.ErrorSeverity; +import org.onap.music.logging.format.ErrorTypes; +import org.onap.music.mdbc.mixins.MixinFactory; +import org.onap.music.mdbc.mixins.MusicInterface; +import org.onap.music.mdbc.mixins.MusicMixin; +import org.onap.music.mdbc.tables.TxCommitProgress; + +import java.sql.Connection; +import java.sql.DriverManager; +import java.sql.SQLException; +import java.sql.Statement; +import java.util.HashMap; +import java.util.Map; +import java.util.Properties; + +/** + * \TODO Implement an interface for the server logic and a factory + * @author Enrique Saurez + */ +public class StateManager { + + //\TODO We need to fix the auto-commit mode and multiple transactions with the same connection + + private static EELFLoggerDelegate logger = EELFLoggerDelegate.getLogger(StateManager.class); + + /** + * This is the interface used by all the MusicSqlManagers, + * that are created by the MDBC Server + * @see MusicInterface + */ + private MusicInterface musicManager; + /** + * This is the Running Queries information table. + * It mainly contains information about the entities + * that have being committed so far. + */ + private TxCommitProgress transactionInfo; + + private Map mdbcConnections; + + private String sqlDatabase; + + private String url; + + private Properties info; + + @SuppressWarnings("unused") + private DatabasePartition ranges; + + public StateManager(String url, Properties info, DatabasePartition ranges, String sqlDatabase) throws MDBCServiceException { + this.sqlDatabase = sqlDatabase; + this.ranges = ranges; + this.url = url; + this.info = info; + this.transactionInfo = new TxCommitProgress(); + //\fixme this is not really used, delete! 
+ String cassandraUrl = info.getProperty(Configuration.KEY_CASSANDRA_URL, Configuration.CASSANDRA_URL_DEFAULT); + String mixin = info.getProperty(Configuration.KEY_MUSIC_MIXIN_NAME, Configuration.MUSIC_MIXIN_DEFAULT); + init(mixin, cassandraUrl); + } + + protected void init(String mixin, String cassandraUrl) throws MDBCServiceException { + this.musicManager = MixinFactory.createMusicInterface(mixin, cassandraUrl, info,ranges); + this.musicManager.createKeyspace(); + try { + this.musicManager.initializeMetricDataStructures(); + } catch (MDBCServiceException e) { + logger.error(EELFLoggerDelegate.errorLogger, e.getMessage(),AppMessages.UNKNOWNERROR, ErrorSeverity.CRITICAL, ErrorTypes.GENERALSERVICEERROR); + throw(e); + } + MusicMixin.loadProperties(); + this.mdbcConnections = new HashMap<>(); + initSqlDatabase(); + } + + protected void initSqlDatabase() throws MDBCServiceException { + try { + //\TODO: pass the driver as a variable + Class.forName("org.mariadb.jdbc.Driver"); + } + catch (ClassNotFoundException e) { + logger.error(EELFLoggerDelegate.errorLogger, e.getMessage(),AppMessages.UNKNOWNERROR, ErrorSeverity.CRITICAL, ErrorTypes.GENERALSERVICEERROR); + return; + } + try { + Connection sqlConnection = DriverManager.getConnection(this.url, this.info); + StringBuilder sql = new StringBuilder("CREATE DATABASE IF NOT EXISTS ") + .append(sqlDatabase) + .append(";"); + Statement stmt = sqlConnection.createStatement(); + stmt.execute(sql.toString()); + } catch (SQLException e) { + logger.error(EELFLoggerDelegate.errorLogger, e.getMessage(),AppMessages.UNKNOWNERROR, ErrorSeverity.CRITICAL, ErrorTypes.GENERALSERVICEERROR); + throw new MDBCServiceException(e.getMessage()); + } + } + + public void CloseConnection(String connectionId){ + //\TODO check if there is a race condition + if(mdbcConnections.containsKey(connectionId)) { + transactionInfo.deleteTxProgress(connectionId); + try { + Connection conn = mdbcConnections.get(connectionId); + if(conn!=null) + conn.close(); + } catch (SQLException e) { + logger.error(EELFLoggerDelegate.errorLogger, e.getMessage(),AppMessages.UNKNOWNERROR, ErrorSeverity.CRITICAL, ErrorTypes.GENERALSERVICEERROR); + } + mdbcConnections.remove(connectionId); + } + } + + public void OpenConnection(String id, Properties information){ + if(!mdbcConnections.containsKey(id)){ + Connection sqlConnection; + MdbcConnection newConnection; + //Create connection to local SQL DB + //\TODO: create function to generate connection outside of open connection and get connection + try { + //\TODO: pass the driver as a variable + Class.forName("org.mariadb.jdbc.Driver"); + } + catch (ClassNotFoundException e) { + // TODO Auto-generated catch block + logger.error(EELFLoggerDelegate.errorLogger, e.getMessage(),AppMessages.UNKNOWNERROR, ErrorSeverity.CRITICAL, ErrorTypes.GENERALSERVICEERROR); + return; + } + try { + sqlConnection = DriverManager.getConnection(this.url+"/"+this.sqlDatabase, this.info); + } catch (SQLException e) { + logger.error(EELFLoggerDelegate.errorLogger, e.getMessage(),AppMessages.QUERYERROR, ErrorSeverity.CRITICAL, ErrorTypes.QUERYERROR); + sqlConnection = null; + } + //Create MDBC connection + try { + newConnection = new MdbcConnection(id, this.url+"/"+this.sqlDatabase, sqlConnection, info, this.musicManager, transactionInfo,ranges); + } catch (MDBCServiceException e) { + logger.error(EELFLoggerDelegate.errorLogger, e.getMessage(),AppMessages.UNKNOWNERROR, ErrorSeverity.CRITICAL, ErrorTypes.QUERYERROR); + newConnection = null; + return; + } + 
logger.info(EELFLoggerDelegate.applicationLogger,"Connection created for connection: "+id); + transactionInfo.createNewTransactionTracker(id, sqlConnection); + if(newConnection != null) { + mdbcConnections.put(id,newConnection); + } + } + } + + /** + * This function returns the connection to the corresponding transaction + * @param id of the transaction, created using + * @return + */ + public Connection GetConnection(String id) { + if(mdbcConnections.containsKey(id)) { + //\TODO: Verify if this make sense + // Intent: reinitialize transaction progress, when it already completed the previous tx for the same connection + if(transactionInfo.isComplete(id)) { + transactionInfo.reinitializeTxProgress(id); + } + return mdbcConnections.get(id); + } + + Connection sqlConnection; + MdbcConnection newConnection; + try { + //TODO: pass the driver as a variable + Class.forName("org.mariadb.jdbc.Driver"); + } + catch (ClassNotFoundException e) { + // TODO Auto-generated catch block + logger.error(EELFLoggerDelegate.errorLogger, e.getMessage(),AppMessages.QUERYERROR, ErrorSeverity.CRITICAL, ErrorTypes.QUERYERROR); + } + + //Create connection to local SQL DB + try { + sqlConnection = DriverManager.getConnection(this.url+"/"+this.sqlDatabase, this.info); + } catch (SQLException e) { + logger.error("sql connection was not created correctly"); + logger.error(EELFLoggerDelegate.errorLogger, e.getMessage(),AppMessages.QUERYERROR, ErrorSeverity.CRITICAL, ErrorTypes.QUERYERROR); + sqlConnection = null; + } + //Create MDBC connection + try { + newConnection = new MdbcConnection(id,this.url+"/"+this.sqlDatabase, sqlConnection, info, this.musicManager, transactionInfo,ranges); + } catch (MDBCServiceException e) { + logger.error(EELFLoggerDelegate.errorLogger, e.getMessage(),AppMessages.UNKNOWNERROR, ErrorSeverity.CRITICAL, ErrorTypes.QUERYERROR); + newConnection = null; + } + logger.info(EELFLoggerDelegate.applicationLogger,"Connection created for connection: "+id); + + transactionInfo.createNewTransactionTracker(id, sqlConnection); + if(newConnection != null) { + mdbcConnections.put(id,newConnection); + } + return newConnection; + } + + public void InitializeSystem() { + //\TODO Prefetch data to system using the data ranges as guide + throw new UnsupportedOperationException("Function initialize system needs to be implemented id MdbcStateManager"); + } +} diff --git a/src/main/java/org/onap/music/mdbc/TableInfo.java b/src/main/java/org/onap/music/mdbc/TableInfo.java new file mode 100755 index 0000000..ee272d8 --- /dev/null +++ b/src/main/java/org/onap/music/mdbc/TableInfo.java @@ -0,0 +1,75 @@ +package org.onap.music.mdbc; + +import java.sql.Types; +import java.util.ArrayList; +import java.util.List; + +/** + * Information about a table in the local database. It consists of three ordered list, which should all have the + * same length. A list of column names, a list of DB column types, and a list of booleans specifying which columns are keys. + * @author Robert P. Eby + */ +public class TableInfo { + /** An ordered list of the column names in this table */ + public List columns; + /** An ordered list of the column types in this table; the types are integers taken from {@link java.sql.Types}. */ + public List coltype; + /** An ordered list of booleans indicating if a column is a primary key column or not. */ + public List iskey; + + /** Construct an (initially) empty TableInfo. 
*/ + public TableInfo() { + columns = new ArrayList(); + coltype = new ArrayList(); + iskey = new ArrayList(); + } + /** + * Check whether the column whose name is name is a primary key column. + * @param name the column name + * @return true if it is, false otherwise + */ + public boolean iskey(String name) { + for (int i = 0; i < columns.size(); i++) { + if (this.columns.get(i).equalsIgnoreCase(name)) + return this.iskey.get(i); + } + return false; + } + /** + * Get the type of the column whose name is name. + * @param name the column name + * @return the column type or Types.NULL + */ + public int getColType(String name) { + for (int i = 0; i < columns.size(); i++) { + if (this.columns.get(i).equalsIgnoreCase(name)) + return this.coltype.get(i); + } + return Types.NULL; + } + + /** + * Checks if this table has a primary key + * @return + */ + public boolean hasKey() { + for (Boolean b: iskey) { + if (b) { + return true; + } + } + return false; + } + + public List getKeyColumns(){ + List keys = new ArrayList(); + int idx = 0; + for (Boolean b: iskey) { + if (b) { + keys.add(this.columns.get(idx)); + } + idx++; + } + return keys; + } +} diff --git a/src/main/java/org/onap/music/mdbc/configurations/NodeConfiguration.java b/src/main/java/org/onap/music/mdbc/configurations/NodeConfiguration.java new file mode 100644 index 0000000..02de1b8 --- /dev/null +++ b/src/main/java/org/onap/music/mdbc/configurations/NodeConfiguration.java @@ -0,0 +1,71 @@ +package org.onap.music.mdbc.configurations; + +import org.onap.music.logging.EELFLoggerDelegate; +import org.onap.music.mdbc.DatabasePartition; +import org.onap.music.mdbc.MDBCUtils; +import org.onap.music.mdbc.Range; +import com.google.gson.Gson; +import com.google.gson.GsonBuilder; + +import java.io.BufferedReader; +import java.io.FileNotFoundException; +import java.io.FileReader; +import java.io.IOException; +import java.util.HashSet; +import java.util.Set; + +public class NodeConfiguration { + + private static transient final EELFLoggerDelegate LOG = EELFLoggerDelegate.getLogger(NodeConfiguration.class); + + public String sqlDatabaseName; + public DatabasePartition partition; + public String nodeName; + + public NodeConfiguration(String tables, String mriIndex, String mriTableName, String partitionId, String sqlDatabaseName, String node, String redoRecordsTable){ + partition = new DatabasePartition(toRanges(tables), mriIndex, mriTableName, partitionId, null, redoRecordsTable) ; + this.sqlDatabaseName = sqlDatabaseName; + this.nodeName = node; + } + + protected Set toRanges(String tables){ + Set newRange = new HashSet<>(); + String[] tablesArray=tables.split(","); + for(String table: tablesArray) { + newRange.add(new Range(table)); + } + return newRange; + } + + public String toJson() { + GsonBuilder builder = new GsonBuilder(); + builder.setPrettyPrinting().serializeNulls();; + Gson gson = builder.create(); + return gson.toJson(this); + } + + public void saveToFile(String file){ + try { + String serialized = this.toJson(); + MDBCUtils.saveToFile(serialized,file,LOG); + } catch (IOException e) { + e.printStackTrace(); + // Exit with error + System.exit(1); + } + } + + public static NodeConfiguration readJsonFromFile( String filepath) throws FileNotFoundException { + BufferedReader br; + try { + br = new BufferedReader( + new FileReader(filepath)); + } catch (FileNotFoundException e) { + LOG.error(EELFLoggerDelegate.errorLogger,"File was not found when reading json"+e); + throw e; + } + Gson gson = new Gson(); + NodeConfiguration config = 
gson.fromJson(br, NodeConfiguration.class); + return config; + } +} diff --git a/src/main/java/org/onap/music/mdbc/configurations/TablesConfiguration.java b/src/main/java/org/onap/music/mdbc/configurations/TablesConfiguration.java new file mode 100644 index 0000000..07f87cf --- /dev/null +++ b/src/main/java/org/onap/music/mdbc/configurations/TablesConfiguration.java @@ -0,0 +1,179 @@ +package org.onap.music.mdbc.configurations; + +import org.onap.music.exceptions.MDBCServiceException; +import org.onap.music.logging.EELFLoggerDelegate; +import org.onap.music.mdbc.DatabaseOperations; +import org.onap.music.mdbc.RedoRow; +import org.onap.music.mdbc.mixins.CassandraMixin; +import com.google.gson.Gson; +import org.onap.music.datastore.PreparedQueryObject; +import org.onap.music.exceptions.MusicServiceException; +import org.onap.music.main.MusicCore; + +import java.io.BufferedReader; +import java.io.FileNotFoundException; +import java.io.FileReader; +import java.util.ArrayList; +import java.util.List; + +public class TablesConfiguration { + + private final String TIT_TABLE_NAME = "transactioninformation"; + private final String MUSIC_TX_DIGEST_TABLE_NAME = "musictxdigest"; + + private transient static EELFLoggerDelegate logger = EELFLoggerDelegate.getLogger(TablesConfiguration.class); + private List partitions; + private String internalNamespace; + private int internalReplicationFactor; + private String musicNamespace; + private String tableToPartitionName; + private String partitionInformationTableName; + private String redoHistoryTableName; + private String sqlDatabaseName; + + public TablesConfiguration(){} + + /** + * This functions initalize all the corresponding tables and rows + * @return a list of node configurations to be used when starting each of the servers + * @throws MDBCServiceException + * @apiNote This function assumes that when used, there is not associated redo history in the tables to the tables that are going to be managed by this configuration file + */ + public List initializeAndCreateNodeConfigurations() throws MDBCServiceException { + initInternalNamespace(); + DatabaseOperations.createNamespace(musicNamespace, internalReplicationFactor); + List nodeConfigs = new ArrayList<>(); + String ttpName = (tableToPartitionName==null || tableToPartitionName.isEmpty())?CassandraMixin.TABLE_TO_PARTITION_TABLE_NAME:tableToPartitionName; + DatabaseOperations.CreateTableToPartitionTable(musicNamespace,ttpName); + String pitName = (partitionInformationTableName==null || partitionInformationTableName.isEmpty())?CassandraMixin.PARTITION_INFORMATION_TABLE_NAME:partitionInformationTableName; + DatabaseOperations.CreatePartitionInfoTable(musicNamespace,pitName); + String rhName = (redoHistoryTableName==null || redoHistoryTableName.isEmpty())?CassandraMixin.REDO_HISTORY_TABLE_NAME:redoHistoryTableName; + DatabaseOperations.CreateRedoHistoryTable(musicNamespace,rhName); + if(partitions == null){ + logger.error("Partitions was not correctly initialized"); + throw new MDBCServiceException("Partition was not correctly initialized"); + } + for(PartitionInformation partitionInfo : partitions){ + String mriTableName = partitionInfo.mriTableName; + mriTableName = (mriTableName==null || mriTableName.isEmpty())?TIT_TABLE_NAME:mriTableName; + //0) Create the corresponding Music Range Information table + DatabaseOperations.CreateMusicRangeInformationTable(musicNamespace,mriTableName); + String musicTxDigestTableName = partitionInfo.mtxdTableName; + musicTxDigestTableName = (musicTxDigestTableName==null 
|| musicTxDigestTableName.isEmpty())? MUSIC_TX_DIGEST_TABLE_NAME :musicTxDigestTableName; + DatabaseOperations.CreateMusicTxDigest(-1,musicNamespace,musicTxDigestTableName); + String partitionId; + if(partitionInfo.partitionId==null || partitionInfo.partitionId.isEmpty()){ + if(partitionInfo.replicationFactor==0){ + logger.error("Replication factor and partition id are both empty, and this is an invalid configuration" ); + throw new MDBCServiceException("Replication factor and partition id are both empty, and this is an invalid configuration"); + } + //1) Create a row in the partition info table + partitionId = DatabaseOperations.createPartitionInfoRow(musicNamespace,pitName,partitionInfo.replicationFactor,partitionInfo.tables,null); + + } + else{ + partitionId = partitionInfo.partitionId; + } + //2) Create a row in the transaction information table + String mriTableIndex = DatabaseOperations.CreateEmptyTitRow(musicNamespace,mriTableName,partitionId,null); + //3) Add owner and tit information to partition info table + RedoRow newRedoRow = new RedoRow(mriTableName,mriTableIndex); + DatabaseOperations.updateRedoRow(musicNamespace,pitName,partitionId,newRedoRow,partitionInfo.owner,null); + //4) Update ttp with the new partition + for(String table: partitionInfo.tables) { + DatabaseOperations.updateTableToPartition(musicNamespace, ttpName, table, partitionId, null); + } + //5) Add it to the redo history table + DatabaseOperations.createRedoHistoryBeginRow(musicNamespace,rhName,newRedoRow,partitionId,null); + //6) Create config for this node + nodeConfigs.add(new NodeConfiguration(String.join(",",partitionInfo.tables),mriTableIndex,mriTableName,partitionId,sqlDatabaseName,partitionInfo.owner,musicTxDigestTableName)); + } + return nodeConfigs; + } + + private void initInternalNamespace() throws MDBCServiceException { + DatabaseOperations.createNamespace(internalNamespace,internalReplicationFactor); + StringBuilder createKeysTableCql = new StringBuilder("CREATE TABLE IF NOT EXISTS ") + .append(internalNamespace) + .append(".unsynced_keys (key text PRIMARY KEY);"); + PreparedQueryObject queryObject = new PreparedQueryObject(); + queryObject.appendQueryString(createKeysTableCql.toString()); + try { + MusicCore.createTable(internalNamespace,"unsynced_keys", queryObject,"critical"); + } catch (MusicServiceException e) { + logger.error("Error creating unsynced keys table" ); + throw new MDBCServiceException("Error creating unsynced keys table"); + } + } + + public static TablesConfiguration readJsonFromFile(String filepath) throws FileNotFoundException { + BufferedReader br; + try { + br = new BufferedReader( + new FileReader(filepath)); + } catch (FileNotFoundException e) { + logger.error(EELFLoggerDelegate.errorLogger,"File was not found when reading json"+e); + throw e; + } + Gson gson = new Gson(); + TablesConfiguration config = gson.fromJson(br, TablesConfiguration.class); + return config; + } + + public class PartitionInformation{ + private List tables; + private String owner; + private String mriTableName; + private String mtxdTableName; + private String partitionId; + private int replicationFactor; + + public List getTables() { + return tables; + } + + public void setTables(List tables) { + this.tables = tables; + } + + public String getOwner() { + return owner; + } + + public void setOwner(String owner) { + this.owner = owner; + } + + public String getMriTableName() { + return mriTableName; + } + + public void setMriTableName(String mriTableName) { + this.mriTableName = mriTableName; + } + + 
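NodeConfiguration and TablesConfiguration above are intended as a bootstrap pair: a cluster-wide JSON file is parsed with Gson, initializeAndCreateNodeConfigurations() creates the MUSIC namespace, the table-to-partition, partition-information, redo-history and MRI tables plus one partition row per entry, and each resulting NodeConfiguration is written back out as the per-node JSON used when starting an MDBC server. A hedged sketch of such a driver follows; the class name and the input and output file names are illustrative assumptions, not the project's own tooling.

    import java.io.FileNotFoundException;
    import java.util.List;

    import org.onap.music.exceptions.MDBCServiceException;
    import org.onap.music.mdbc.configurations.NodeConfiguration;
    import org.onap.music.mdbc.configurations.TablesConfiguration;

    // Illustrative bootstrap driver for the configuration classes above.
    public class GenerateNodeConfigsSketch {
        public static void main(String[] args) throws FileNotFoundException, MDBCServiceException {
            // Assumption: the cluster-wide file follows the tableConfiguration.json sample below.
            String input = (args.length > 0) ? args[0] : "tableConfiguration.json";

            // Parse the cluster-wide configuration with Gson (see readJsonFromFile above).
            TablesConfiguration tablesConfig = TablesConfiguration.readJsonFromFile(input);

            // Create the MUSIC tables and partition rows, yielding one NodeConfiguration per partition.
            List<NodeConfiguration> nodeConfigs = tablesConfig.initializeAndCreateNodeConfigurations();

            // Emit one JSON file per node, in the spirit of the config-0.json sample below.
            int index = 0;
            for (NodeConfiguration nodeConfig : nodeConfigs) {
                nodeConfig.saveToFile("config-" + index++ + ".json");
            }
        }
    }

The sample files config-0.json, ranges.json and tableConfiguration.json later in this patch show the JSON shapes produced and consumed by these classes.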
public String getPartitionId() { + return partitionId; + } + + public void setPartitionId(String partitionId) { + this.partitionId = partitionId; + } + + public int getReplicationFactor() { + return replicationFactor; + } + + public void setReplicationFactor(int replicationFactor) { + this.replicationFactor = replicationFactor; + } + + public String getMtxdTableName(){ + return mtxdTableName; + } + + public void setMtxdTableName(String mtxdTableName) { + this.mtxdTableName = mtxdTableName; + } + } +} diff --git a/src/main/java/org/onap/music/mdbc/configurations/config-0.json b/src/main/java/org/onap/music/mdbc/configurations/config-0.json new file mode 100644 index 0000000..2207a52 --- /dev/null +++ b/src/main/java/org/onap/music/mdbc/configurations/config-0.json @@ -0,0 +1,16 @@ +{ + "sqlDatabaseName": "test", + "partition": { + "musicRangeInformationTable": "transactioninformation", + "musicRangeInformationIndex": "259a7a7c-f741-44ae-8d6e-227a02ddc96e", + "musicTxDigestTable": "musictxdigest", + "partitionId": "ad766447-1adf-4800-aade-9f31a356ab4b", + "lockId": "", + "ranges": [ + { + "table": "table11" + } + ] + }, + "nodeName": "" +} diff --git a/src/main/java/org/onap/music/mdbc/configurations/ranges.json b/src/main/java/org/onap/music/mdbc/configurations/ranges.json new file mode 100644 index 0000000..2a792e8 --- /dev/null +++ b/src/main/java/org/onap/music/mdbc/configurations/ranges.json @@ -0,0 +1,14 @@ +{ + "musicRangeInformationTable": "transactioninformation", + "musicRangeInformationIndex": "d0e8ef2e-aeca-4261-8d9d-1679f560b85b", + "partitionId": "798110cf-9c61-4db2-9446-cb2dbab5a143", + "lockId": "", + "ranges": [ + { + "table": "table1" + }, + { + "table": "table2" + } + ] +} diff --git a/src/main/java/org/onap/music/mdbc/configurations/tableConfiguration.json b/src/main/java/org/onap/music/mdbc/configurations/tableConfiguration.json new file mode 100644 index 0000000..e67dd0b --- /dev/null +++ b/src/main/java/org/onap/music/mdbc/configurations/tableConfiguration.json @@ -0,0 +1,19 @@ +{ + "partitions" : [ + { + "tables":["table11"], + "owner":"", + "mriTableName":"musicrangeinformation", + "mtxdTableName":"musictxdigest", + "partitionId":"", + "replicationFactor":1 + } + ], + "musicNamespace":"namespace", + "tableToPartitionName":"tabletopartition", + "partitionInformationTableName":"partitioninfo", + "redoHistoryTableName":"redohistory", + "sqlDatabaseName":"test", + "internalNamespace":"music_internal", + "internalReplicationFactor":1 +} diff --git a/src/main/java/org/onap/music/mdbc/examples/EtdbTestClient.java b/src/main/java/org/onap/music/mdbc/examples/EtdbTestClient.java new file mode 100644 index 0000000..2a25667 --- /dev/null +++ b/src/main/java/org/onap/music/mdbc/examples/EtdbTestClient.java @@ -0,0 +1,125 @@ +package org.onap.music.mdbc.examples; + +import java.sql.*; +import org.apache.calcite.avatica.remote.Driver; + +public class EtdbTestClient { + + public static class Hr { + public final Employee[] emps = { + new Employee(100, "Bill"), + new Employee(200, "Eric"), + new Employee(150, "Sebastian"), + }; + } + + public static class Employee { + public final int empid; + public final String name; + + public Employee(int empid, String name) { + this.empid = empid; + this.name = name; + } + } + + public static void main(String[] args){ + try { + Class.forName("org.apache.calcite.avatica.remote.Driver"); + } catch (ClassNotFoundException e) { + e.printStackTrace(); + System.exit(1); + } + Connection connection; + try { + connection = 
DriverManager.getConnection("jdbc:avatica:remote:url=http://localhost:30000;serialization=protobuf"); + } catch (SQLException e) { + e.printStackTrace(); + return; + } + + try { + connection.setAutoCommit(false); + } catch (SQLException e) { + e.printStackTrace(); + return; + } + + + final String sql = "CREATE TABLE IF NOT EXISTS Persons (\n" + + " PersonID int,\n" + + " LastName varchar(255),\n" + + " FirstName varchar(255),\n" + + " Address varchar(255),\n" + + " City varchar(255)\n" + + ");"; + Statement stmt; + try { + stmt = connection.createStatement(); + } catch (SQLException e) { + e.printStackTrace(); + return; + } + + boolean execute; + try { + execute = stmt.execute(sql); + } catch (SQLException e) { + e.printStackTrace(); + return; + } + + if (execute) { + try { + connection.commit(); + } catch (SQLException e) { + e.printStackTrace(); + } + } + + try { + stmt.close(); + } catch (SQLException e) { + e.printStackTrace(); + } + + final String insertSQL = "INSERT INTO Persons VALUES (1, 'Martinez', 'Juan', 'KACB', 'ATLANTA');"; + Statement insertStmt; + try { + insertStmt = connection.createStatement(); + } catch (SQLException e) { + e.printStackTrace(); + return; + } + + try { + execute = insertStmt.execute(insertSQL); + } catch (SQLException e) { + e.printStackTrace(); + return; + } + + try { + connection.commit(); + } catch (SQLException e) { + e.printStackTrace(); + return; + } + + try { + stmt.close(); + insertStmt.close(); + } catch (SQLException e) { + e.printStackTrace(); + } + + try { + connection.commit(); + connection.close(); + } catch (SQLException e) { + e.printStackTrace(); + } + + + } +} diff --git a/src/main/java/org/onap/music/mdbc/mixins/Cassandra2Mixin.java b/src/main/java/org/onap/music/mdbc/mixins/Cassandra2Mixin.java new file mode 100755 index 0000000..372224d --- /dev/null +++ b/src/main/java/org/onap/music/mdbc/mixins/Cassandra2Mixin.java @@ -0,0 +1,287 @@ +package org.onap.music.mdbc.mixins; + +import java.sql.Types; +import java.util.ArrayList; +import java.util.Date; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.Properties; + +import org.json.JSONObject; +import org.json.JSONTokener; +import org.onap.music.datastore.PreparedQueryObject; +import org.onap.music.exceptions.MusicServiceException; +import org.onap.music.main.MusicCore; +import org.onap.music.main.ReturnType; + +import org.onap.music.logging.EELFLoggerDelegate; +import org.onap.music.mdbc.DatabasePartition; +import org.onap.music.mdbc.TableInfo; +import com.datastax.driver.core.ResultSet; +import com.datastax.driver.core.Row; + +/** + * This class provides the methods that MDBC needs to access Cassandra directly in order to provide persistence + * to calls to the user's DB. It stores dirty row references in one table (called DIRTY____) rather than one dirty + * table per real table (as {@link org.onap.music.mdbc.mixins.CassandraMixin} does). + * + * @author Robert P. Eby + */ +public class Cassandra2Mixin extends CassandraMixin { + private static final String DIRTY_TABLE = "DIRTY____"; // it seems Cassandra won't allow __DIRTY__ + private boolean dirty_table_created = false; + + private EELFLoggerDelegate logger = EELFLoggerDelegate.getLogger(Cassandra2Mixin.class); + + public Cassandra2Mixin() { + super(); + } + + public Cassandra2Mixin(String url, Properties info, DatabasePartition ranges) throws MusicServiceException { + super(url, info,ranges); + } + + /** + * Get the name of this MusicInterface mixin object. 
+ * @return the name + */ + @Override + public String getMixinName() { + return "cassandra2"; + } + /** + * Do what is needed to close down the MUSIC connection. + */ + @Override + public void close() { + super.close(); + } + + /** + * This method creates a keyspace in Music/Cassandra to store the data corresponding to the SQL tables. + * The keyspace name comes from the initialization properties passed to the JDBC driver. + */ + @Override + public void createKeyspace() { + super.createKeyspace(); + } + + /** + * This method performs all necessary initialization in Music/Cassandra to store the table tableName. + * @param tableName the table to initialize MUSIC for + */ + @Override + public void initializeMusicForTable(TableInfo ti, String tableName) { + super.initializeMusicForTable(ti, tableName); + } + + /** + * Create a dirty row table for the real table tableName. The primary keys columns from the real table are recreated in + * the dirty table, along with a "REPLICA__" column that names the replica that should update it's internal state from MUSIC. + * @param tableName the table to create a "dirty" table for + */ + @Override + public void createDirtyRowTable(TableInfo ti, String tableName) { + if (!dirty_table_created) { + String cql = String.format("CREATE TABLE IF NOT EXISTS %s.%s (tablename TEXT, replica TEXT, keyset TEXT, PRIMARY KEY(tablename, replica, keyset));", music_ns, DIRTY_TABLE); + executeMusicWriteQuery(cql); + dirty_table_created = true; + } + } + /** + * Drop the dirty row table for tableName from MUSIC. + * @param tableName the table being dropped + */ + @Override + public void dropDirtyRowTable(String tableName) { + // no-op + } + + private String buildJSON(TableInfo ti, String tableName, Object[] keys) { + // Build JSON string representing this keyset + JSONObject jo = new JSONObject(); + int j = 0; + for (int i = 0; i < ti.columns.size(); i++) { + if (ti.iskey.get(i)) { + jo.put(ti.columns.get(i), keys[j++]); + } + } + return jo.toString(); + } + /** + * Remove the entries from the dirty row (for this replica) that correspond to a set of primary keys + * @param tableName the table we are removing dirty entries from + * @param keys the primary key values to use in the DELETE. Note: this is *only* the primary keys, not a full table row. + */ + @Override + public void cleanDirtyRow(TableInfo ti, String tableName, JSONObject keys) { + String cql = String.format("DELETE FROM %s.%s WHERE tablename = ? AND replica = ? AND keyset = ?;", music_ns, DIRTY_TABLE); + //Session sess = getMusicSession(); + //PreparedStatement ps = getPreparedStatementFromCache(cql); + Object[] values = new Object[] { tableName, myId, keys }; + logger.debug(EELFLoggerDelegate.applicationLogger,"Executing MUSIC write:"+ cql + " with values " + values[0] + " " + values[1] + " " + values[2]); + + PreparedQueryObject pQueryObject = new PreparedQueryObject(); + pQueryObject.appendQueryString(cql); + pQueryObject.addValue(tableName); + pQueryObject.addValue(myId); + pQueryObject.addValue(keys); + ReturnType rt = MusicCore.eventualPut(pQueryObject); + if(rt.getResult().getResult().toLowerCase().equals("failure")) { + logger.error(EELFLoggerDelegate.errorLogger, "Failure while eventualPut...: "+rt.getMessage()); + } + /*BoundStatement bound = ps.bind(values); + bound.setReadTimeoutMillis(60000); + synchronized (sess) { + sess.execute(bound); + }*/ + } + /** + * Get a list of "dirty rows" for a table. 
The dirty rows returned apply only to this replica, + * and consist of a Map of primary key column names and values. + * @param tableName the table we are querying for + * @return a list of maps; each list item is a map of the primary key names and values for that "dirty row". + */ + @SuppressWarnings("deprecation") + @Override + public List> getDirtyRows(TableInfo ti, String tableName) { + String cql = String.format("SELECT keyset FROM %s.%s WHERE tablename = ? AND replica = ?;", music_ns, DIRTY_TABLE); + logger.debug(EELFLoggerDelegate.applicationLogger,"Executing MUSIC write:"+ cql + " with values " + tableName + " " + myId); + + PreparedQueryObject pQueryObject = new PreparedQueryObject(); + pQueryObject.appendQueryString(cql); + pQueryObject.addValue(tableName); + pQueryObject.addValue(myId); + ResultSet results = null; + try { + results = MusicCore.get(pQueryObject); + } catch (MusicServiceException e) { + e.printStackTrace(); + } + /*Session sess = getMusicSession(); + PreparedStatement ps = getPreparedStatementFromCache(cql); + BoundStatement bound = ps.bind(new Object[] { tableName, myId }); + bound.setReadTimeoutMillis(60000); + ResultSet results = null; + synchronized (sess) { + results = sess.execute(bound); + }*/ + List> list = new ArrayList>(); + for (Row row : results) { + String json = row.getString("keyset"); + JSONObject jo = new JSONObject(new JSONTokener(json)); + Map objs = new HashMap(); + for (String colname : jo.keySet()) { + int coltype = ti.getColType(colname); + switch (coltype) { + case Types.BIGINT: + objs.put(colname, jo.getLong(colname)); + break; + case Types.BOOLEAN: + objs.put(colname, jo.getBoolean(colname)); + break; + case Types.BLOB: + logger.error(EELFLoggerDelegate.errorLogger,"WE DO NOT SUPPORT BLOBS AS PRIMARY KEYS!! COLUMN NAME="+colname); + // throw an exception here??? + break; + case Types.DOUBLE: + objs.put(colname, jo.getDouble(colname)); + break; + case Types.INTEGER: + objs.put(colname, jo.getInt(colname)); + break; + case Types.TIMESTAMP: + objs.put(colname, new Date(jo.getString(colname))); + break; + case Types.VARCHAR: + default: + objs.put(colname, jo.getString(colname)); + break; + } + } + list.add(objs); + } + return list; + } + + /** + * Drops the named table and its dirty row table (for all replicas) from MUSIC. The dirty row table is dropped first. + * @param tableName This is the table that has been dropped + */ + @Override + public void clearMusicForTable(String tableName) { + super.clearMusicForTable(tableName); + } + /** + * This function is called whenever there is a DELETE to a row on a local SQL table, wherein it updates the + * MUSIC/Cassandra tables (both dirty bits and actual data) corresponding to the SQL write. MUSIC propagates + * it to the other replicas. + * + * @param tableName This is the table that has changed. 
+ * @param oldRow This is a copy of the old row being deleted + */ + public void deleteFromEntityTableInMusic(TableInfo ti, String tableName, JSONObject oldRow) { + super.deleteFromEntityTableInMusic(ti, tableName, oldRow); + } + /** + * This method is called whenever there is a SELECT on a local SQL table, wherein it first checks the local + * dirty bits table to see if there are any keys in Cassandra whose value has not yet been sent to SQL + * @param tableName This is the table on which the select is being performed + */ + @Override + public void readDirtyRowsAndUpdateDb(DBInterface dbi, String tableName) { + super.readDirtyRowsAndUpdateDb(dbi, tableName); + } + + /** + * This method is called whenever there is an INSERT or UPDATE to a local SQL table, wherein it updates the + * MUSIC/Cassandra tables (both dirty bits and actual data) corresponding to the SQL write. Music propagates + * it to the other replicas. + * + * @param tableName This is the table that has changed. + * @param changedRow This is information about the row that has changed + */ + @Override + public void updateDirtyRowAndEntityTableInMusic(TableInfo ti, String tableName, JSONObject changedRow) { + super.updateDirtyRowAndEntityTableInMusic(ti, tableName, changedRow); + } + + /** + * Mark rows as "dirty" in the dirty rows table for tableName. Rows are marked for all replicas but + * this one (this replica already has the up to date data). + * @param tableName the table we are marking dirty + * @param keys an ordered list of the values being put into the table. The values that correspond to the tables' + * primary key are copied into the dirty row table. + */ + @Deprecated + public void markDirtyRow(TableInfo ti, String tableName, Object[] keys) { + String cql = String.format("INSERT INTO %s.%s (tablename, replica, keyset) VALUES (?, ?, ?);", music_ns, DIRTY_TABLE); + /*Session sess = getMusicSession(); + PreparedStatement ps = getPreparedStatementFromCache(cql);*/ + @SuppressWarnings("unused") + Object[] values = new Object[] { tableName, "", buildJSON(ti, tableName, keys) }; + PreparedQueryObject pQueryObject = null; + for (String repl : allReplicaIds) { + /*if (!repl.equals(myId)) { + values[1] = repl; + logger.info(EELFLoggerDelegate.applicationLogger,"Executing MUSIC write:"+ cql + " with values " + values[0] + " " + values[1] + " " + values[2]); + + BoundStatement bound = ps.bind(values); + bound.setReadTimeoutMillis(60000); + synchronized (sess) { + sess.execute(bound); + } + }*/ + pQueryObject = new PreparedQueryObject(); + pQueryObject.appendQueryString(cql); + pQueryObject.addValue(tableName); + pQueryObject.addValue(repl); + pQueryObject.addValue(buildJSON(ti, tableName, keys)); + ReturnType rt = MusicCore.eventualPut(pQueryObject); + if(rt.getResult().getResult().toLowerCase().equals("failure")) { + System.out.println("Failure while critical put..."+rt.getMessage()); + } + } + } +} diff --git a/src/main/java/org/onap/music/mdbc/mixins/CassandraMixin.java b/src/main/java/org/onap/music/mdbc/mixins/CassandraMixin.java new file mode 100755 index 0000000..cb9c6e2 --- /dev/null +++ b/src/main/java/org/onap/music/mdbc/mixins/CassandraMixin.java @@ -0,0 +1,1261 @@ +package org.onap.music.mdbc.mixins; + +import java.io.IOException; +import java.io.Reader; +import java.nio.ByteBuffer; +import java.sql.Types; +import java.util.ArrayList; +import java.util.Collections; +import java.util.HashMap; +import java.util.HashSet; +import java.util.List; +import java.util.Map; +import java.util.Properties; +import 
java.util.Set; +import java.util.TreeSet; +import java.util.UUID; + +import org.onap.music.mdbc.*; +import org.onap.music.mdbc.tables.PartitionInformation; +import org.onap.music.mdbc.tables.MusixTxDigestId; +import org.onap.music.mdbc.tables.StagingTable; +import org.onap.music.mdbc.tables.MriReference; +import org.onap.music.mdbc.tables.MusicRangeInformationRow; +import org.onap.music.mdbc.tables.TxCommitProgress; + +import org.json.JSONObject; +import org.onap.music.datastore.CassaLockStore; +import org.onap.music.datastore.PreparedQueryObject; +import org.onap.music.exceptions.MusicLockingException; +import org.onap.music.exceptions.MusicQueryException; +import org.onap.music.exceptions.MusicServiceException; +import org.onap.music.main.MusicCore; +import org.onap.music.main.ResultType; +import org.onap.music.main.ReturnType; + +import org.onap.music.exceptions.MDBCServiceException; +import org.onap.music.logging.EELFLoggerDelegate; +import com.datastax.driver.core.BoundStatement; +import com.datastax.driver.core.ColumnDefinitions; +import com.datastax.driver.core.DataType; +import com.datastax.driver.core.PreparedStatement; +import com.datastax.driver.core.ResultSet; +import com.datastax.driver.core.Row; +import com.datastax.driver.core.Session; + +/** + * This class provides the methods that MDBC needs to access Cassandra directly in order to provide persistence + * to calls to the user's DB. It does not do any table or row locking. + * + *

+ * <p>This code only supports the following limited list of H2 and Cassandra data types:</p>
+ * <table border="1">
+ * <tr><th>H2 Data Type</th><th>Mapped to Cassandra Data Type</th></tr>
+ * <tr><td>BIGINT</td><td>BIGINT</td></tr>
+ * <tr><td>BOOLEAN</td><td>BOOLEAN</td></tr>
+ * <tr><td>CLOB</td><td>BLOB</td></tr>
+ * <tr><td>DOUBLE</td><td>DOUBLE</td></tr>
+ * <tr><td>INTEGER</td><td>INT</td></tr>
+ * <tr><td>TIMESTAMP</td><td>TIMESTAMP</td></tr>
+ * <tr><td>VARBINARY</td><td>BLOB</td></tr>
+ * <tr><td>VARCHAR</td><td>VARCHAR</td></tr>
+ * </table>
+ * + * @author Robert P. Eby + */ +public class CassandraMixin implements MusicInterface { + /** The property name to use to identify this replica to MusicSqlManager */ + public static final String KEY_MY_ID = "myid"; + /** The property name to use for the comma-separated list of replica IDs. */ + public static final String KEY_REPLICAS = "replica_ids"; + /** The property name to use to identify the IP address for Cassandra. */ + public static final String KEY_MUSIC_ADDRESS = "music_address"; + /** The property name to use to provide the replication factor for Cassandra. */ + public static final String KEY_MUSIC_RFACTOR = "music_rfactor"; + /** The property name to use to provide the replication factor for Cassandra. */ + public static final String KEY_MUSIC_NAMESPACE = "music_namespace"; + /** The default property value to use for the Cassandra keyspace. */ + public static final String DEFAULT_MUSIC_KEYSPACE = "mdbc"; + /** The default property value to use for the Cassandra IP address. */ + public static final String DEFAULT_MUSIC_ADDRESS = "localhost"; + /** The default property value to use for the Cassandra replication factor. */ + public static final int DEFAULT_MUSIC_RFACTOR = 1; + /** The default primary string column, if none is provided. */ + public static final String MDBC_PRIMARYKEY_NAME = "mdbc_cuid"; + /** Type of the primary key, if none is defined by the user */ + public static final String MDBC_PRIMARYKEY_TYPE = "uuid"; + /** Namespace for the tables in MUSIC (Cassandra) */ + public static final String DEFAULT_MUSIC_NAMESPACE = "namespace"; + + /** Name of the tables required for MDBC */ + public static final String TABLE_TO_PARTITION_TABLE_NAME = "tabletopartition"; + public static final String PARTITION_INFORMATION_TABLE_NAME = "partitioninfo"; + public static final String REDO_HISTORY_TABLE_NAME= "redohistory"; + //\TODO Add logic to change the names when required and create the tables when necessary + private String musicTxDigestTableName = "musictxdigest"; + private String musicRangeInformationTableName = "musicrangeinformation"; + + private EELFLoggerDelegate logger = EELFLoggerDelegate.getLogger(CassandraMixin.class); + + private static final Map typemap = new HashMap<>(); + static { + // We only support the following type mappings currently (from DB -> Cassandra). + // Anything else will likely cause a NullPointerException + typemap.put(Types.BIGINT, "BIGINT"); // aka. 
IDENTITY + typemap.put(Types.BLOB, "VARCHAR"); + typemap.put(Types.BOOLEAN, "BOOLEAN"); + typemap.put(Types.CLOB, "BLOB"); + typemap.put(Types.DATE, "VARCHAR"); + typemap.put(Types.DOUBLE, "DOUBLE"); + typemap.put(Types.DECIMAL, "DECIMAL"); + typemap.put(Types.INTEGER, "INT"); + //typemap.put(Types.TIMESTAMP, "TIMESTAMP"); + typemap.put(Types.SMALLINT, "SMALLINT"); + typemap.put(Types.TIMESTAMP, "VARCHAR"); + typemap.put(Types.VARBINARY, "BLOB"); + typemap.put(Types.VARCHAR, "VARCHAR"); + typemap.put(Types.CHAR, "VARCHAR"); + //The "Hacks", these don't have a direct mapping + //typemap.put(Types.DATE, "VARCHAR"); + //typemap.put(Types.DATE, "TIMESTAMP"); + } + + protected DatabasePartition ranges; + protected final String music_ns; + protected final String myId; + protected final String[] allReplicaIds; + private final String musicAddress; + private final int music_rfactor; + private MusicConnector mCon = null; + private Session musicSession = null; + private boolean keyspace_created = false; + private Map ps_cache = new HashMap<>(); + private Set in_progress = Collections.synchronizedSet(new HashSet()); + + public CassandraMixin() { + //this.logger = null; + this.musicAddress = null; + this.music_ns = null; + this.music_rfactor = 0; + this.myId = null; + this.allReplicaIds = null; + } + + public CassandraMixin(String url, Properties info, DatabasePartition ranges) throws MusicServiceException { + this.ranges = ranges; + // Default values -- should be overridden in the Properties + // Default to using the host_ids of the various peers as the replica IDs (this is probably preferred) + this.musicAddress = info.getProperty(KEY_MUSIC_ADDRESS, DEFAULT_MUSIC_ADDRESS); + logger.info(EELFLoggerDelegate.applicationLogger,"MusicSqlManager: musicAddress="+musicAddress); + + String s = info.getProperty(KEY_MUSIC_RFACTOR); + this.music_rfactor = (s == null) ? DEFAULT_MUSIC_RFACTOR : Integer.parseInt(s); + + this.myId = info.getProperty(KEY_MY_ID, getMyHostId()); + logger.info(EELFLoggerDelegate.applicationLogger,"MusicSqlManager: myId="+myId); + + + this.allReplicaIds = info.getProperty(KEY_REPLICAS, getAllHostIds()).split(","); + logger.info(EELFLoggerDelegate.applicationLogger,"MusicSqlManager: allReplicaIds="+info.getProperty(KEY_REPLICAS, this.myId)); + + this.music_ns = info.getProperty(KEY_MUSIC_NAMESPACE,DEFAULT_MUSIC_NAMESPACE); + logger.info(EELFLoggerDelegate.applicationLogger,"MusicSqlManager: music_ns="+music_ns); + musicRangeInformationTableName = "musicrangeinformation"; + createMusicKeyspace(); + } + + private void createMusicKeyspace() throws MusicServiceException { + + Map replicationInfo = new HashMap<>(); + replicationInfo.put("'class'", "'SimpleStrategy'"); + replicationInfo.put("'replication_factor'", music_rfactor); + + PreparedQueryObject queryObject = new PreparedQueryObject(); + queryObject.appendQueryString( + "CREATE KEYSPACE " + this.music_ns + " WITH REPLICATION = " + replicationInfo.toString().replaceAll("=", ":")); + + try { + MusicCore.nonKeyRelatedPut(queryObject, "eventual"); + } catch (MusicServiceException e) { + if (e.getMessage().equals("Keyspace "+this.music_ns+" already exists")) { + // ignore + } else { + throw(e); + } + } + } + + private String getMyHostId() { + ResultSet rs = executeMusicRead("SELECT HOST_ID FROM SYSTEM.LOCAL"); + Row row = rs.one(); + return (row == null) ? 
"UNKNOWN" : row.getUUID("HOST_ID").toString(); + } + private String getAllHostIds() { + ResultSet results = executeMusicRead("SELECT HOST_ID FROM SYSTEM.PEERS"); + StringBuilder sb = new StringBuilder(myId); + for (Row row : results) { + sb.append(","); + sb.append(row.getUUID("HOST_ID").toString()); + } + return sb.toString(); + } + + /** + * Get the name of this MusicInterface mixin object. + * @return the name + */ + @Override + public String getMixinName() { + return "cassandra"; + } + /** + * Do what is needed to close down the MUSIC connection. + */ + @Override + public void close() { + if (musicSession != null) { + musicSession.close(); + musicSession = null; + } + } + @Override + public void initializeMetricDataStructures() throws MDBCServiceException { + try { + DatabaseOperations.CreateMusicTxDigest(-1, music_ns, musicTxDigestTableName);//\TODO If we start partitioning the data base, we would need to use the redotable number + DatabaseOperations.CreateMusicRangeInformationTable(music_ns, musicRangeInformationTableName); + DatabaseOperations.CreateTableToPartitionTable(music_ns, TABLE_TO_PARTITION_TABLE_NAME); + DatabaseOperations.CreatePartitionInfoTable(music_ns, PARTITION_INFORMATION_TABLE_NAME); + DatabaseOperations.CreateRedoHistoryTable(music_ns, REDO_HISTORY_TABLE_NAME); + } + catch(MDBCServiceException e){ + logger.error(EELFLoggerDelegate.errorLogger,"Error creating tables in MUSIC"); + } + } + + /** + * This method creates a keyspace in Music/Cassandra to store the data corresponding to the SQL tables. + * The keyspace name comes from the initialization properties passed to the JDBC driver. + */ + @Override + public void createKeyspace() { + if (keyspace_created == false) { + String cql = String.format("CREATE KEYSPACE IF NOT EXISTS %s WITH REPLICATION = { 'class' : 'SimpleStrategy', 'replication_factor' : %d };", music_ns, music_rfactor); + executeMusicWriteQuery(cql); + keyspace_created = true; + } + } + + /** + * This method performs all necessary initialization in Music/Cassandra to store the table tableName. + * @param tableName the table to initialize MUSIC for + */ + @Override + public void initializeMusicForTable(TableInfo ti, String tableName) { + /** + * This code creates two tables for every table in SQL: + * (i) a table with the exact same name as the SQL table storing the SQL data. + * (ii) a "dirty bits" table that stores the keys in the Cassandra table that are yet to be + * updated in the SQL table (they were written by some other node). + */ + StringBuilder fields = new StringBuilder(); + StringBuilder prikey = new StringBuilder(); + String pfx = "", pfx2 = ""; + for (int i = 0; i < ti.columns.size(); i++) { + fields.append(pfx) + .append(ti.columns.get(i)) + .append(" ") + .append(typemap.get(ti.coltype.get(i))); + if (ti.iskey.get(i)) { + // Primary key column + prikey.append(pfx2).append(ti.columns.get(i)); + pfx2 = ", "; + } + pfx = ", "; + } + if (prikey.length()==0) { + fields.append(pfx).append(MDBC_PRIMARYKEY_NAME) + .append(" ") + .append(MDBC_PRIMARYKEY_TYPE); + prikey.append("mdbc_cuid"); + } + String cql = String.format("CREATE TABLE IF NOT EXISTS %s.%s (%s, PRIMARY KEY (%s));", music_ns, tableName, fields.toString(), prikey.toString()); + executeMusicWriteQuery(cql); + } + + // ************************************************** + // Dirty Tables (in MUSIC) methods + // ************************************************** + + /** + * Create a dirty row table for the real table tableName. 
The primary keys columns from the real table are recreated in + * the dirty table, along with a "REPLICA__" column that names the replica that should update it's internal state from MUSIC. + * @param tableName the table to create a "dirty" table for + */ + @Override + public void createDirtyRowTable(TableInfo ti, String tableName) { + // create dirtybitsTable at all replicas +// for (String repl : allReplicaIds) { +//// String dirtyRowsTableName = "dirty_"+tableName+"_"+allReplicaIds[i]; +//// String dirtyTableQuery = "CREATE TABLE IF NOT EXISTS "+music_ns+"."+ dirtyRowsTableName+" (dirtyRowKeys text PRIMARY KEY);"; +// cql = String.format("CREATE TABLE IF NOT EXISTS %s.DIRTY_%s_%s (dirtyRowKeys TEXT PRIMARY KEY);", music_ns, tableName, repl); +// executeMusicWriteQuery(cql); +// } + StringBuilder ddl = new StringBuilder("REPLICA__ TEXT"); + StringBuilder cols = new StringBuilder("REPLICA__"); + for (int i = 0; i < ti.columns.size(); i++) { + if (ti.iskey.get(i)) { + // Only use the primary keys columns in the "Dirty" table + ddl.append(", ") + .append(ti.columns.get(i)) + .append(" ") + .append(typemap.get(ti.coltype.get(i))); + cols.append(", ").append(ti.columns.get(i)); + } + } + if(cols.length()==0) { + //fixme + System.err.println("Create dirty row table found no primary key"); + } + ddl.append(", PRIMARY KEY(").append(cols).append(")"); + String cql = String.format("CREATE TABLE IF NOT EXISTS %s.DIRTY_%s (%s);", music_ns, tableName, ddl.toString()); + executeMusicWriteQuery(cql); + } + /** + * Drop the dirty row table for tableName from MUSIC. + * @param tableName the table being dropped + */ + @Override + public void dropDirtyRowTable(String tableName) { + String cql = String.format("DROP TABLE %s.DIRTY_%s;", music_ns, tableName); + executeMusicWriteQuery(cql); + } + /** + * Mark rows as "dirty" in the dirty rows table for tableName. Rows are marked for all replicas but + * this one (this replica already has the up to date data). + * @param tableName the table we are marking dirty + * @param keys an ordered list of the values being put into the table. The values that correspond to the tables' + * primary key are copied into the dirty row table. 
+ */ + @Override + public void markDirtyRow(TableInfo ti, String tableName, JSONObject keys) { + Object[] keyObj = getObjects(ti,tableName, keys); + StringBuilder cols = new StringBuilder("REPLICA__"); + PreparedQueryObject pQueryObject = null; + StringBuilder vals = new StringBuilder("?"); + List vallist = new ArrayList(); + vallist.add(""); // placeholder for replica + for (int i = 0; i < ti.columns.size(); i++) { + if (ti.iskey.get(i)) { + cols.append(", ").append(ti.columns.get(i)); + vals.append(", ").append("?"); + vallist.add(keyObj[i]); + } + } + if(cols.length()==0) { + //FIXME + System.err.println("markDIrtyRow need to fix primary key"); + } + String cql = String.format("INSERT INTO %s.DIRTY_%s (%s) VALUES (%s);", music_ns, tableName, cols.toString(), vals.toString()); + /*Session sess = getMusicSession(); + PreparedStatement ps = getPreparedStatementFromCache(cql);*/ + String primaryKey; + if(ti.hasKey()) { + primaryKey = getMusicKeyFromRow(ti,tableName, keys); + } + else { + primaryKey = getMusicKeyFromRowWithoutPrimaryIndexes(ti,tableName, keys); + } + System.out.println("markDirtyRow: PK value: "+primaryKey); + + Object pkObj = null; + for (int i = 0; i < ti.columns.size(); i++) { + if (ti.iskey.get(i)) { + pkObj = keyObj[i]; + } + } + for (String repl : allReplicaIds) { + pQueryObject = new PreparedQueryObject(); + pQueryObject.appendQueryString(cql); + pQueryObject.addValue(tableName); + pQueryObject.addValue(repl); + pQueryObject.addValue(pkObj); + updateMusicDB(tableName, primaryKey, pQueryObject); + //if (!repl.equals(myId)) { + /*logger.info(EELFLoggerDelegate.applicationLogger,"Executing MUSIC write:"+ cql); + vallist.set(0, repl); + BoundStatement bound = ps.bind(vallist.toArray()); + bound.setReadTimeoutMillis(60000); + synchronized (sess) { + sess.execute(bound); + }*/ + //} + + } + } + /** + * Remove the entries from the dirty row (for this replica) that correspond to a set of primary keys + * @param tableName the table we are removing dirty entries from + * @param keys the primary key values to use in the DELETE. Note: this is *only* the primary keys, not a full table row. + */ + @Override + public void cleanDirtyRow(TableInfo ti, String tableName, JSONObject keys) { + Object[] keysObjects = getObjects(ti,tableName,keys); + PreparedQueryObject pQueryObject = new PreparedQueryObject(); + StringBuilder cols = new StringBuilder("REPLICA__=?"); + List vallist = new ArrayList(); + vallist.add(myId); + int n = 0; + for (int i = 0; i < ti.columns.size(); i++) { + if (ti.iskey.get(i)) { + cols.append(" AND ").append(ti.columns.get(i)).append("=?"); + vallist.add(keysObjects[n++]); + pQueryObject.addValue(keysObjects[n++]); + } + } + String cql = String.format("DELETE FROM %s.DIRTY_%s WHERE %s;", music_ns, tableName, cols.toString()); + logger.debug(EELFLoggerDelegate.applicationLogger,"Executing MUSIC write:"+ cql); + pQueryObject.appendQueryString(cql); + ReturnType rt = MusicCore.eventualPut(pQueryObject); + if(rt.getResult().getResult().toLowerCase().equals("failure")) { + System.out.println("Failure while cleanDirtyRow..."+rt.getMessage()); + } + /*Session sess = getMusicSession(); + PreparedStatement ps = getPreparedStatementFromCache(cql); + BoundStatement bound = ps.bind(vallist.toArray()); + bound.setReadTimeoutMillis(60000); + synchronized (sess) { + sess.execute(bound); + }*/ + } + /** + * Get a list of "dirty rows" for a table. The dirty rows returned apply only to this replica, + * and consist of a Map of primary key column names and values. 
+ * @param tableName the table we are querying for + * @return a list of maps; each list item is a map of the primary key names and values for that "dirty row". + */ + @Override + public List> getDirtyRows(TableInfo ti, String tableName) { + String cql = String.format("SELECT * FROM %s.DIRTY_%s WHERE REPLICA__=?;", music_ns, tableName); + ResultSet results = null; + logger.debug(EELFLoggerDelegate.applicationLogger,"Executing MUSIC write:"+ cql); + + /*Session sess = getMusicSession(); + PreparedStatement ps = getPreparedStatementFromCache(cql); + BoundStatement bound = ps.bind(new Object[] { myId }); + bound.setReadTimeoutMillis(60000); + synchronized (sess) { + results = sess.execute(bound); + }*/ + PreparedQueryObject pQueryObject = new PreparedQueryObject(); + pQueryObject.appendQueryString(cql); + try { + results = MusicCore.get(pQueryObject); + } catch (MusicServiceException e) { + + e.printStackTrace(); + } + + ColumnDefinitions cdef = results.getColumnDefinitions(); + List> list = new ArrayList>(); + for (Row row : results) { + Map objs = new HashMap(); + for (int i = 0; i < cdef.size(); i++) { + String colname = cdef.getName(i).toUpperCase(); + String coltype = cdef.getType(i).getName().toString().toUpperCase(); + if (!colname.equals("REPLICA__")) { + switch (coltype) { + case "BIGINT": + objs.put(colname, row.getLong(colname)); + break; + case "BOOLEAN": + objs.put(colname, row.getBool(colname)); + break; + case "BLOB": + objs.put(colname, row.getString(colname)); + break; + case "DATE": + objs.put(colname, row.getString(colname)); + break; + case "DOUBLE": + objs.put(colname, row.getDouble(colname)); + break; + case "DECIMAL": + objs.put(colname, row.getDecimal(colname)); + break; + case "INT": + objs.put(colname, row.getInt(colname)); + break; + case "TIMESTAMP": + objs.put(colname, row.getTimestamp(colname)); + break; + case "VARCHAR": + default: + objs.put(colname, row.getString(colname)); + break; + } + } + } + list.add(objs); + } + return list; + } + + /** + * Drops the named table and its dirty row table (for all replicas) from MUSIC. The dirty row table is dropped first. + * @param tableName This is the table that has been dropped + */ + @Override + public void clearMusicForTable(String tableName) { + dropDirtyRowTable(tableName); + String cql = String.format("DROP TABLE %s.%s;", music_ns, tableName); + executeMusicWriteQuery(cql); + } + /** + * This function is called whenever there is a DELETE to a row on a local SQL table, wherein it updates the + * MUSIC/Cassandra tables (both dirty bits and actual data) corresponding to the SQL write. MUSIC propagates + * it to the other replicas. + * + * @param tableName This is the table that has changed. 
+ * @param oldRow This is a copy of the old row being deleted + */ + @Override + public void deleteFromEntityTableInMusic(TableInfo ti, String tableName, JSONObject oldRow) { + Object[] objects = getObjects(ti,tableName,oldRow); + PreparedQueryObject pQueryObject = new PreparedQueryObject(); + if (ti.hasKey()) { + assert(ti.columns.size() == objects.length); + } else { + assert(ti.columns.size()+1 == objects.length); + } + + StringBuilder where = new StringBuilder(); + List vallist = new ArrayList(); + String pfx = ""; + for (int i = 0; i < ti.columns.size(); i++) { + if (ti.iskey.get(i)) { + where.append(pfx) + .append(ti.columns.get(i)) + .append("=?"); + vallist.add(objects[i]); + pQueryObject.addValue(objects[i]); + pfx = " AND "; + } + } + if (!ti.hasKey()) { + where.append(MDBC_PRIMARYKEY_NAME + "=?"); + //\FIXME this is wrong, old row is not going to contain the UUID, this needs to be fixed + vallist.add(UUID.fromString((String) objects[0])); + pQueryObject.addValue(UUID.fromString((String) objects[0])); + } + + String cql = String.format("DELETE FROM %s.%s WHERE %s;", music_ns, tableName, where.toString()); + logger.error(EELFLoggerDelegate.errorLogger,"Executing MUSIC write:"+ cql); + pQueryObject.appendQueryString(cql); + + /*PreparedStatement ps = getPreparedStatementFromCache(cql); + BoundStatement bound = ps.bind(vallist.toArray()); + bound.setReadTimeoutMillis(60000); + Session sess = getMusicSession(); + synchronized (sess) { + sess.execute(bound); + }*/ + String primaryKey = getMusicKeyFromRow(ti,tableName, oldRow); + if(MusicMixin.criticalTables.contains(tableName)) { + ReturnType rt = null; + try { + rt = MusicCore.atomicPut(music_ns, tableName, primaryKey, pQueryObject, null); + } catch (MusicLockingException e) { + e.printStackTrace(); + } catch (MusicServiceException e) { + e.printStackTrace(); + } catch (MusicQueryException e) { + e.printStackTrace(); + } + if(rt.getResult().getResult().toLowerCase().equals("failure")) { + System.out.println("Failure while critical put..."+rt.getMessage()); + } + } else { + ReturnType rt = MusicCore.eventualPut(pQueryObject); + if(rt.getResult().getResult().toLowerCase().equals("failure")) { + System.out.println("Failure while critical put..."+rt.getMessage()); + } + } + // Mark the dirty rows in music for all the replicas but us + markDirtyRow(ti,tableName, oldRow); + } + + public Set getMusicTableSet(String ns) { + Set set = new TreeSet(); + String cql = String.format("SELECT TABLE_NAME FROM SYSTEM_SCHEMA.TABLES WHERE KEYSPACE_NAME = '%s'", ns); + ResultSet rs = executeMusicRead(cql); + for (Row row : rs) { + set.add(row.getString("TABLE_NAME").toUpperCase()); + } + return set; + } + /** + * This method is called whenever there is a SELECT on a local SQL table, wherein it first checks the local + * dirty bits table to see if there are any keys in Cassandra whose value has not yet been sent to SQL + * @param tableName This is the table on which the select is being performed + */ + @Override + public void readDirtyRowsAndUpdateDb(DBInterface dbi, String tableName) { + // Read dirty rows of this table from Music + TableInfo ti = dbi.getTableInfo(tableName); + List> objlist = getDirtyRows(ti,tableName); + PreparedQueryObject pQueryObject = null; + String pre_cql = String.format("SELECT * FROM %s.%s WHERE ", music_ns, tableName); + List vallist = new ArrayList(); + StringBuilder sb = new StringBuilder(); + //\TODO Perform a batch operation instead of each row at a time + for (Map map : objlist) { + pQueryObject = new 
PreparedQueryObject(); + sb.setLength(0); + vallist.clear(); + String pfx = ""; + for (String key : map.keySet()) { + sb.append(pfx).append(key).append("=?"); + vallist.add(map.get(key)); + pQueryObject.addValue(map.get(key)); + pfx = " AND "; + } + + String cql = pre_cql + sb.toString(); + System.out.println("readDirtyRowsAndUpdateDb: cql: "+cql); + pQueryObject.appendQueryString(cql); + ResultSet dirtyRows = null; + try { + //\TODO Why is this an eventual put?, this should be an atomic + dirtyRows = MusicCore.get(pQueryObject); + } catch (MusicServiceException e) { + + e.printStackTrace(); + } + /* + Session sess = getMusicSession(); + PreparedStatement ps = getPreparedStatementFromCache(cql); + BoundStatement bound = ps.bind(vallist.toArray()); + bound.setReadTimeoutMillis(60000); + ResultSet dirtyRows = null; + synchronized (sess) { + dirtyRows = sess.execute(bound); + }*/ + List rows = dirtyRows.all(); + if (rows.isEmpty()) { + // No rows, the row must have been deleted + deleteRowFromSqlDb(dbi,tableName, map); + } else { + for (Row row : rows) { + writeMusicRowToSQLDb(dbi,tableName, row); + } + } + } + } + + private void deleteRowFromSqlDb(DBInterface dbi, String tableName, Map map) { + dbi.deleteRowFromSqlDb(tableName, map); + TableInfo ti = dbi.getTableInfo(tableName); + List vallist = new ArrayList(); + for (int i = 0; i < ti.columns.size(); i++) { + if (ti.iskey.get(i)) { + String col = ti.columns.get(i); + Object val = map.get(col); + vallist.add(val); + } + } + cleanDirtyRow(ti, tableName, new JSONObject(vallist)); + } + /** + * This functions copies the contents of a row in Music into the corresponding row in the SQL table + * @param tableName This is the name of the table in both Music and swl + * @param musicRow This is the row in Music that is being copied into SQL + */ + private void writeMusicRowToSQLDb(DBInterface dbi, String tableName, Row musicRow) { + // First construct the map of columns and their values + TableInfo ti = dbi.getTableInfo(tableName); + Map map = new HashMap(); + List vallist = new ArrayList(); + String rowid = tableName; + for (String col : ti.columns) { + Object val = getValue(musicRow, col); + map.put(col, val); + if (ti.iskey(col)) { + vallist.add(val); + rowid += "_" + val.toString(); + } + } + + logger.debug("Blocking rowid: "+rowid); + in_progress.add(rowid); // Block propagation of the following INSERT/UPDATE + + dbi.insertRowIntoSqlDb(tableName, map); + + logger.debug("Unblocking rowid: "+rowid); + in_progress.remove(rowid); // Unblock propagation + +// try { +// String sql = String.format("INSERT INTO %s (%s) VALUES (%s);", tableName, fields.toString(), values.toString()); +// executeSQLWrite(sql); +// } catch (SQLException e) { +// logger.debug("Insert failed because row exists, do an update"); +// // TODO - rewrite this UPDATE command should not update key fields +// String sql = String.format("UPDATE %s SET (%s) = (%s) WHERE %s", tableName, fields.toString(), values.toString(), where.toString()); +// try { +// executeSQLWrite(sql); +// } catch (SQLException e1) { +// e1.printStackTrace(); +// } +// } + + ti = dbi.getTableInfo(tableName); + cleanDirtyRow(ti, tableName, new JSONObject(vallist)); + +// String selectQuery = "select "+ primaryKeyName+" FROM "+tableName+" WHERE "+primaryKeyName+"="+primaryKeyValue+";"; +// java.sql.ResultSet rs = executeSQLRead(selectQuery); +// String dbWriteQuery=null; +// try { +// if(rs.next()){//this entry is there, do an update +// dbWriteQuery = "UPDATE "+tableName+" SET "+columnNameString+" = "+ 
valueString +"WHERE "+primaryKeyName+"="+primaryKeyValue+";"; +// }else +// dbWriteQuery = "INSERT INTO "+tableName+" VALUES"+valueString+";"; +// executeSQLWrite(dbWriteQuery); +// } catch (SQLException e) { +// // ZZTODO Auto-generated catch block +// e.printStackTrace(); +// } + + //clean the music dirty bits table +// String dirtyRowIdsTableName = music_ns+".DIRTY_"+tableName+"_"+myId; +// String deleteQuery = "DELETE FROM "+dirtyRowIdsTableName+" WHERE dirtyRowKeys=$$"+primaryKeyValue+"$$;"; +// executeMusicWriteQuery(deleteQuery); + } + private Object getValue(Row musicRow, String colname) { + ColumnDefinitions cdef = musicRow.getColumnDefinitions(); + DataType colType; + try { + colType= cdef.getType(colname); + } + catch(IllegalArgumentException e) { + logger.warn("Colname is not part of table metadata: "+e); + throw e; + } + String typeStr = colType.getName().toString().toUpperCase(); + switch (typeStr) { + case "BIGINT": + return musicRow.getLong(colname); + case "BOOLEAN": + return musicRow.getBool(colname); + case "BLOB": + return musicRow.getString(colname); + case "DATE": + return musicRow.getString(colname); + case "DECIMAL": + return musicRow.getDecimal(colname); + case "DOUBLE": + return musicRow.getDouble(colname); + case "SMALLINT": + case "INT": + return musicRow.getInt(colname); + case "TIMESTAMP": + return musicRow.getTimestamp(colname); + case "UUID": + return musicRow.getUUID(colname); + default: + logger.error(EELFLoggerDelegate.errorLogger, "UNEXPECTED COLUMN TYPE: columname="+colname+", columntype="+typeStr); + // fall thru + case "VARCHAR": + return musicRow.getString(colname); + } + } + + /** + * This method is called whenever there is an INSERT or UPDATE to a local SQL table, wherein it updates the + * MUSIC/Cassandra tables (both dirty bits and actual data) corresponding to the SQL write. Music propagates + * it to the other replicas. + * + * @param tableName This is the table that has changed. + * @param changedRow This is information about the row that has changed + */ + @Override + public void updateDirtyRowAndEntityTableInMusic(TableInfo ti, String tableName, JSONObject changedRow) { + // Build the CQL command + Object[] objects = getObjects(ti,tableName,changedRow); + StringBuilder fields = new StringBuilder(); + StringBuilder values = new StringBuilder(); + String rowid = tableName; + Object[] newrow = new Object[objects.length]; + PreparedQueryObject pQueryObject = new PreparedQueryObject(); + String pfx = ""; + int keyoffset=0; + for (int i = 0; i < objects.length; i++) { + if (!ti.hasKey() && i==0) { + //We need to tack on cassandra's uid in place of a primary key + fields.append(MDBC_PRIMARYKEY_NAME); + values.append("?"); + newrow[i] = UUID.fromString((String) objects[i]); + pQueryObject.addValue(newrow[i]); + keyoffset=-1; + pfx = ", "; + continue; + } + fields.append(pfx).append(ti.columns.get(i+keyoffset)); + values.append(pfx).append("?"); + pfx = ", "; + if (objects[i] instanceof byte[]) { + // Cassandra doesn't seem to have a Codec to translate a byte[] to a ByteBuffer + newrow[i] = ByteBuffer.wrap((byte[]) objects[i]); + pQueryObject.addValue(newrow[i]); + } else if (objects[i] instanceof Reader) { + // Cassandra doesn't seem to have a Codec to translate a Reader to a ByteBuffer either... 
+ newrow[i] = ByteBuffer.wrap(readBytesFromReader((Reader) objects[i])); + pQueryObject.addValue(newrow[i]); + } else { + newrow[i] = objects[i]; + pQueryObject.addValue(newrow[i]); + } + if (i+keyoffset>=0 && ti.iskey.get(i+keyoffset)) { + rowid += "_" + newrow[i].toString(); + } + } + + if (in_progress.contains(rowid)) { + // This call to updateDirtyRowAndEntityTableInMusic() was called as a result of a Cassandra -> H2 update; ignore + logger.debug(EELFLoggerDelegate.applicationLogger, "updateDirtyRowAndEntityTableInMusic: bypassing MUSIC update on "+rowid); + + } else { + // Update local MUSIC node. Note: in Cassandra you can insert again on an existing key..it becomes an update + String cql = String.format("INSERT INTO %s.%s (%s) VALUES (%s);", music_ns, tableName, fields.toString(), values.toString()); + + pQueryObject.appendQueryString(cql); + String primaryKey = getMusicKeyFromRow(ti,tableName, changedRow); + updateMusicDB(tableName, primaryKey, pQueryObject); + + /*PreparedStatement ps = getPreparedStatementFromCache(cql); + BoundStatement bound = ps.bind(newrow); + bound.setReadTimeoutMillis(60000); + Session sess = getMusicSession(); + synchronized (sess) { + sess.execute(bound); + }*/ + // Mark the dirty rows in music for all the replicas but us + markDirtyRow(ti,tableName, changedRow); + } + } + + + + private byte[] readBytesFromReader(Reader rdr) { + StringBuilder sb = new StringBuilder(); + try { + int ch; + while ((ch = rdr.read()) >= 0) { + sb.append((char)ch); + } + } catch (IOException e) { + logger.warn("readBytesFromReader: "+e); + } + return sb.toString().getBytes(); + } + + protected PreparedStatement getPreparedStatementFromCache(String cql) { + // Note: have to hope that the Session never changes! + if (!ps_cache.containsKey(cql)) { + Session sess = getMusicSession(); + PreparedStatement ps = sess.prepare(cql); + ps_cache.put(cql, ps); + } + return ps_cache.get(cql); + } + + /** + * This method gets a connection to Music + * @return the Cassandra Session to use + */ + protected Session getMusicSession() { + // create cassandra session + if (musicSession == null) { + logger.info(EELFLoggerDelegate.applicationLogger, "Creating New Music Session"); + mCon = new MusicConnector(musicAddress); + musicSession = mCon.getSession(); + } + return musicSession; + } + + /** + * This method executes a write query in Music + * @param cql the CQL to be sent to Cassandra + */ + protected void executeMusicWriteQuery(String cql) { + logger.debug(EELFLoggerDelegate.applicationLogger, "Executing MUSIC write:"+ cql); + PreparedQueryObject pQueryObject = new PreparedQueryObject(); + pQueryObject.appendQueryString(cql); + ReturnType rt = MusicCore.eventualPut(pQueryObject); + if(rt.getResult().getResult().toLowerCase().equals("failure")) { + logger.error(EELFLoggerDelegate.errorLogger, "Failure while eventualPut...: "+rt.getMessage()); + } + /*Session sess = getMusicSession(); + SimpleStatement s = new SimpleStatement(cql); + s.setReadTimeoutMillis(60000); + synchronized (sess) { + sess.execute(s); + }*/ + } + + /** + * This method executes a read query in Music + * @param cql the CQL to be sent to Cassandra + * @return a ResultSet containing the rows returned from the query + */ + protected ResultSet executeMusicRead(String cql) { + logger.debug(EELFLoggerDelegate.applicationLogger, "Executing MUSIC write:"+ cql); + PreparedQueryObject pQueryObject = new PreparedQueryObject(); + pQueryObject.appendQueryString(cql); + ResultSet results = null; + try { + results = 
MusicCore.get(pQueryObject); + } catch (MusicServiceException e) { + + e.printStackTrace(); + } + return results; + /*Session sess = getMusicSession(); + synchronized (sess) { + return sess.execute(cql); + }*/ + } + + /** + * Returns the default primary key name that this mixin uses + */ + public String getMusicDefaultPrimaryKeyName() { + return MDBC_PRIMARYKEY_NAME; + } + + /** + * Return the function for cassandra's primary key generation + */ + public String generateUniqueKey() { + return UUID.randomUUID().toString(); + } + + @Override + public String getMusicKeyFromRowWithoutPrimaryIndexes(TableInfo ti, String table, JSONObject dbRow) { + //\TODO this operation is super expensive to perform, both latency and BW + // it is better to add additional where clauses, and have the primary key + // to be composed of known columns of the table + // Adding this primary indexes would be an additional burden to the developers, which spanner + // also does, but otherwise performance is really bad + // At least it should have a set of columns that are guaranteed to be unique + StringBuilder cqlOperation = new StringBuilder(); + cqlOperation.append("SELECT * FROM ") + .append(music_ns) + .append(".") + .append(table); + ResultSet musicResults = executeMusicRead(cqlOperation.toString()); + Object[] dbRowObjects = getObjects(ti,table,dbRow); + while (!musicResults.isExhausted()) { + Row musicRow = musicResults.one(); + if (rowIs(ti, musicRow, dbRowObjects)) { + return ((UUID)getValue(musicRow, MDBC_PRIMARYKEY_NAME)).toString(); + } + } + //should never reach here + return null; + } + + /** + * Checks to see if this row is in list of database entries + * @param ti + * @param musicRow + * @param dbRow + * @return + */ + private boolean rowIs(TableInfo ti, Row musicRow, Object[] dbRow) { + //System.out.println("Comparing " + musicRow.toString()); + boolean sameRow=true; + for (int i=0; i keyCols = ti.getKeyColumns(); + if(keyCols.isEmpty()){ + throw new IllegalArgumentException("Table doesn't have defined primary indexes "); + } + StringBuilder key = new StringBuilder(); + String pfx = ""; + for(String keyCol: keyCols) { + key.append(pfx); + key.append(row.getString(keyCol)); + pfx = ","; + } + String keyStr = key.toString(); + return keyStr; + } + + public void updateMusicDB(String tableName, String primaryKey, PreparedQueryObject pQObject) { + if(MusicMixin.criticalTables.contains(tableName)) { + ReturnType rt = null; + try { + rt = MusicCore.atomicPut(music_ns, tableName, primaryKey, pQObject, null); + } catch (MusicLockingException e) { + e.printStackTrace(); + } catch (MusicServiceException e) { + e.printStackTrace(); + } catch (MusicQueryException e) { + e.printStackTrace(); + } + if(rt.getResult().getResult().toLowerCase().equals("failure")) { + System.out.println("Failure while critical put..."+rt.getMessage()); + } + } else { + ReturnType rt = MusicCore.eventualPut(pQObject); + if(rt.getResult().getResult().toLowerCase().equals("failure")) { + System.out.println("Failure while critical put..."+rt.getMessage()); + } + } + } + + + private PreparedQueryObject createAppendMtxdIndexToMriQuery(String mriTable, String uuid, String table, UUID redoUuid){ + PreparedQueryObject query = new PreparedQueryObject(); + StringBuilder appendBuilder = new StringBuilder(); + appendBuilder.append("UPDATE ") + .append(music_ns) + .append(".") + .append(mriTable) + .append(" SET redo = redo +[('") + .append(table) + .append("',") + .append(redoUuid) + .append(")] WHERE id = ") + .append(uuid) + .append(";"); + 
query.appendQueryString(appendBuilder.toString()); + return query; + } + + protected String createAndAssignLock(String fullyQualifiedKey, DatabasePartition partition, String keyspace, String table, String key) throws MDBCServiceException { + String lockId; + lockId = MusicCore.createLockReference(fullyQualifiedKey); + ReturnType lockReturn; + try { + lockReturn = MusicCore.acquireLock(fullyQualifiedKey,lockId); + } catch (MusicLockingException e) { + logger.error(EELFLoggerDelegate.errorLogger, "Lock was not acquire correctly for key "+fullyQualifiedKey); + throw new MDBCServiceException("Lock was not acquire correctly for key "+fullyQualifiedKey); + } catch (MusicServiceException e) { + logger.error(EELFLoggerDelegate.errorLogger, "Error in music, when locking key: "+fullyQualifiedKey); + throw new MDBCServiceException("Error in music, when locking: "+fullyQualifiedKey); + } catch (MusicQueryException e) { + logger.error(EELFLoggerDelegate.errorLogger, "Error in executing query music, when locking key: "+fullyQualifiedKey); + throw new MDBCServiceException("Error in executing query music, when locking: "+fullyQualifiedKey); + } + //\TODO this is wrong, we should have a better way to obtain a lock forcefully, clean the queue and obtain the lock + if(lockReturn.getResult().compareTo(ResultType.SUCCESS) != 0 ) { + try { + MusicCore.forciblyReleaseLock(fullyQualifiedKey,lockId); + CassaLockStore lockingServiceHandle = MusicCore.getLockingServiceHandle(); + CassaLockStore.LockObject lockOwner = lockingServiceHandle.peekLockQueue(keyspace, table, key); + while(lockOwner.lockRef != lockId) { + MusicCore.forciblyReleaseLock(fullyQualifiedKey, lockOwner.lockRef); + try { + lockOwner = lockingServiceHandle.peekLockQueue(keyspace, table, key); + } catch(NullPointerException e){ + //Ignore null pointer exception + lockId = MusicCore.createLockReference(fullyQualifiedKey); + break; + } + } + lockReturn = MusicCore.acquireLock(fullyQualifiedKey,lockId); + + } catch (MusicLockingException e) { + throw new MDBCServiceException("Could not lock the corresponding lock"); + } catch (MusicServiceException e) { + logger.error(EELFLoggerDelegate.errorLogger, "Error in music, when locking key: "+fullyQualifiedKey); + throw new MDBCServiceException("Error in music, when locking: "+fullyQualifiedKey); + } catch (MusicQueryException e) { + logger.error(EELFLoggerDelegate.errorLogger, "Error in executing query music, when locking key: "+fullyQualifiedKey); + throw new MDBCServiceException("Error in executing query music, when locking: "+fullyQualifiedKey); + } + } + if(lockReturn.getResult().compareTo(ResultType.SUCCESS) != 0 ) { + throw new MDBCServiceException("Could not lock the corresponding lock"); + } + //TODO: Java newbie here, verify that this lockId is actually assigned to the global DatabasePartition in the StateManager instance + partition.setLockId(lockId); + return lockId; + } + + protected void pushRowToMtxd(UUID commitId, HashMap transactionDigest) throws MDBCServiceException{ + PreparedQueryObject query = new PreparedQueryObject(); + StringBuilder cqlQuery = new StringBuilder("INSERT INTO ") + .append(music_ns) + .append('.') + .append(musicTxDigestTableName) + .append(" (txid,transactiondigest) ") + .append("VALUES ('") + .append( commitId ).append(",'"); + try { + cqlQuery.append( MDBCUtils.toString(transactionDigest) ); + } catch (IOException e) { + logger.error(EELFLoggerDelegate.errorLogger, "Transaction Digest serialization was invalid for commit "+commitId); + throw new 
MDBCServiceException("Transaction Digest serialization was invalid for commit "+commitId); + } + cqlQuery.append("');"); + query.appendQueryString(cqlQuery.toString()); + //\TODO check if I am not shooting on my own foot + try { + MusicCore.nonKeyRelatedPut(query,"critical"); + } catch (MusicServiceException e) { + logger.error(EELFLoggerDelegate.errorLogger, "Transaction Digest serialization was invalid for commit "+commitId); + throw new MDBCServiceException("Transaction Digest serialization for commit "+commitId); + } + } + + protected void appendIndexToMri(String lockId, UUID commitId, String MriIndex) throws MDBCServiceException{ + PreparedQueryObject appendQuery = createAppendMtxdIndexToMriQuery(musicRangeInformationTableName, MriIndex, musicTxDigestTableName, commitId); + ReturnType returnType = MusicCore.criticalPut(music_ns, musicRangeInformationTableName, MriIndex, appendQuery, lockId, null); + if(returnType.getResult().compareTo(ResultType.SUCCESS) != 0 ){ + logger.error(EELFLoggerDelegate.errorLogger, "Error when executing append operation with return type: "+returnType.getMessage()); + throw new MDBCServiceException("Error when executing append operation with return type: "+returnType.getMessage()); + } + } + + @Override + public void commitLog(DBInterface dbi, DatabasePartition partition, HashMap transactionDigest, String txId ,TxCommitProgress progressKeeper) throws MDBCServiceException{ + String MriIndex = partition.getMusicRangeInformationIndex(); + if(MriIndex.isEmpty()) { + //\TODO Fetch MriIndex from the Range Information Table + throw new MDBCServiceException("TIT Index retrieval not yet implemented"); + } + String fullyQualifiedTitKey = music_ns+"."+ musicRangeInformationTableName +"."+MriIndex; + //0. See if reference to lock was already created + String lockId = partition.getLockId(); + if(lockId == null || lockId.isEmpty()) { + lockId = createAndAssignLock(fullyQualifiedTitKey,partition,music_ns, musicRangeInformationTableName,MriIndex); + } + + UUID commitId; + //Generate a local commit id + if(progressKeeper.containsTx(txId)) { + commitId = progressKeeper.getCommitId(txId); + } + else{ + logger.error(EELFLoggerDelegate.errorLogger, "Tx with id "+txId+" was not created in the TxCommitProgress "); + throw new MDBCServiceException("Tx with id "+txId+" was not created in the TxCommitProgress "); + } + //Add creation type of transaction digest + + //1. Push new row to RRT and obtain its index + pushRowToMtxd(commitId, transactionDigest); + + //2. Save RRT index to RQ + if(progressKeeper!= null) { + progressKeeper.setRecordId(txId,new MusixTxDigestId(commitId)); + } + //3. Append RRT index into the corresponding TIT row array + appendIndexToMri(lockId,commitId,MriIndex); + } + + /** + * @param tableName + * @param string + * @param rowValues + * @return + */ + @SuppressWarnings("unused") + private String getUid(String tableName, String string, Object[] rowValues) { + // + // Update local MUSIC node. 
Note: in Cassandra you can insert again on an existing key..it becomes an update + String cql = String.format("SELECT * FROM %s.%s;", music_ns, tableName); + PreparedStatement ps = getPreparedStatementFromCache(cql); + BoundStatement bound = ps.bind(); + bound.setReadTimeoutMillis(60000); + Session sess = getMusicSession(); + ResultSet rs; + synchronized (sess) { + rs = sess.execute(bound); + } + + // + //should never reach here + logger.error(EELFLoggerDelegate.errorLogger, "Could not find the row in the primary key"); + + return null; + } + + @Override + public Object[] getObjects(TableInfo ti, String tableName, JSONObject row) { + // \FIXME: we may need to add the primary key of the row if it was autogenerated by MUSIC + List cols = ti.columns; + int size = cols.size(); + boolean hasDefault = false; + if(row.has(getMusicDefaultPrimaryKeyName())) { + size++; + hasDefault = true; + } + + Object[] objects = new Object[size]; + int idx = 0; + if(hasDefault) { + objects[idx++] = row.getString(getMusicDefaultPrimaryKeyName()); + } + for(String col : ti.columns) { + objects[idx]=row.get(col); + } + return objects; + } + + @Override + + public MusicRangeInformationRow getMusicRangeInformation(UUID id){ + throw new UnsupportedOperationException(); + } + + @Override + public MriReference createMusicRangeInformation(MusicRangeInformationRow info){ + throw new UnsupportedOperationException(); + } + + @Override + public void appendToRedoLog(MriReference mriRowId, DatabasePartition partition, MusixTxDigestId newRecord){ + throw new UnsupportedOperationException(); + } + + @Override + public void addTxDigest(String musicTxDigestTable, MusixTxDigestId newId, String transactionDigest){ + throw new UnsupportedOperationException(); + } + + @Override + public List getPartitionInformation(DatabasePartition partition){ + throw new UnsupportedOperationException(); + } + + @Override + public HashMap getTransactionDigest(MusixTxDigestId id){ + throw new UnsupportedOperationException(); + } + + @Override + public void own(List ranges){ + throw new UnsupportedOperationException(); + } + + @Override + public void appendRange(String rangeId, List ranges){ + throw new UnsupportedOperationException(); + } + + @Override + public void relinquish(String ownerId, String rangeId){ + throw new UnsupportedOperationException(); + } +} diff --git a/src/main/java/org/onap/music/mdbc/mixins/DBInterface.java b/src/main/java/org/onap/music/mdbc/mixins/DBInterface.java new file mode 100755 index 0000000..2ff88a2 --- /dev/null +++ b/src/main/java/org/onap/music/mdbc/mixins/DBInterface.java @@ -0,0 +1,92 @@ +package org.onap.music.mdbc.mixins; + +import java.sql.ResultSet; +import java.util.List; +import java.util.Map; +import java.util.Set; + +import org.onap.music.mdbc.Range; +import org.onap.music.mdbc.TableInfo; +import org.onap.music.mdbc.tables.StagingTable; + +/** + * This Interface defines the methods that MDBC needs in order to mirror data to/from a Database instance. + * + * @author Robert P. Eby + */ +public interface DBInterface { + /** + * Get the name of this DBnterface mixin object. + * @return the name + */ + String getMixinName(); + /** + * Do what is needed to close down the database connection. + */ + void close(); + /** + * Get a set of the table names in the database. The table names should be returned in UPPER CASE. 
+ * @return the set + */ + Set getSQLTableSet(); + /** + * Return the name of the database that the driver is connected to + * @return + */ + String getDatabaseName(); + /** + * Return a TableInfo object for the specified table. + * @param tableName the table to look up + * @return a TableInfo object containing the info we need, or null if the table does not exist + */ + TableInfo getTableInfo(String tableName); + /** + * This method should create triggers in the database to be called for each row after every INSERT, + * UPDATE and DELETE, and before every SELECT. + * @param tableName this is the table on which triggers are being created. + */ + void createSQLTriggers(String tableName); + /** + * This method should drop all triggers previously created in the database for the table. + * @param tableName this is the table on which triggers are being dropped. + */ + void dropSQLTriggers(String tableName); + /** + * This method inserts a row into the SQL database, defined via a map of column names and values. + * @param tableName the table to insert the row into + * @param map map of column names → values to use for the keys when inserting the row + */ + void insertRowIntoSqlDb(String tableName, Map map); + /** + * This method deletes a row from the SQL database, defined via a map of column names and values. + * @param tableName the table to delete the row from + * @param map map of column names → values to use for the keys when deleting the row + */ + void deleteRowFromSqlDb(String tableName, Map map); + /** + * Code to be run within the DB driver before a SQL statement is executed. This is where tables + * can be synchronized before a SELECT, for those databases that do not support SELECT triggers. + * @param sql the SQL statement that is about to be executed + */ + void preStatementHook(final String sql); + /** + * Code to be run within the DB driver after a SQL statement has been executed. This is where remote + * statement actions can be copied back to Cassandra/MUSIC. + * @param sql the SQL statement that was executed + * @param transactionDigest + */ + void postStatementHook(final String sql,Map transactionDigest); + /** + * This method executes a read query in the SQL database. Methods that call this method should be sure + * to call resultset.getStatement().close() when done in order to free up resources. + * @param sql the query to run + * @return a ResultSet containing the rows returned from the query + */ + ResultSet executeSQLRead(String sql); + + void synchronizeData(String tableName); + + List getReservedTblNames(); + + String getPrimaryKey(String sql, String tableName); +} diff --git a/src/main/java/org/onap/music/mdbc/mixins/MixinFactory.java b/src/main/java/org/onap/music/mdbc/mixins/MixinFactory.java new file mode 100755 index 0000000..de46187 --- /dev/null +++ b/src/main/java/org/onap/music/mdbc/mixins/MixinFactory.java @@ -0,0 +1,125 @@ +package org.onap.music.mdbc.mixins; + +import java.lang.reflect.Constructor; +import java.lang.reflect.InvocationTargetException; +import java.sql.Connection; +import java.util.Properties; + +import org.onap.music.logging.EELFLoggerDelegate; +import org.onap.music.mdbc.DatabasePartition; +import org.onap.music.mdbc.MusicSqlManager; + +/** + * This class is used to construct instances of Mixins that implement either the {@link org.onap.music.mdbc.mixins.DBInterface} + * interface, or the {@link org.onap.music.mdbc.mixins.MusicInterface} interface. The Mixins are searched for in the CLASSPATH. + * + * @author Robert P. 
Eby + */ +public class MixinFactory { + private static EELFLoggerDelegate logger = EELFLoggerDelegate.getLogger(MixinFactory.class); + + // Only static methods... + private MixinFactory(){} + + /** + * Look for a class in CLASSPATH that implements the {@link DBInterface} interface, and has the mixin name name. + * If one is found, construct and return it, using the other arguments for the constructor. + * @param name the name of the Mixin + * @param msm the MusicSqlManager to use as an argument to the constructor + * @param url the URL to use as an argument to the constructor + * @param conn the underlying JDBC Connection + * @param info the Properties to use as an argument to the constructor + * @return the newly constructed DBInterface, or null if one cannot be found. + */ + public static DBInterface createDBInterface(String name, MusicSqlManager msm, String url, Connection conn, Properties info) { + for (Class cl : Utils.getClassesImplementing(DBInterface.class)) { + try { + Constructor con = cl.getConstructor(); + if (con != null) { + DBInterface dbi = (DBInterface) con.newInstance(); + String miname = dbi.getMixinName(); + logger.info(EELFLoggerDelegate.applicationLogger,"Checking "+miname); + if (miname.equalsIgnoreCase(name)) { + con = cl.getConstructor(MusicSqlManager.class, String.class, Connection.class, Properties.class); + if (con != null) { + logger.info(EELFLoggerDelegate.applicationLogger,"Found match: "+miname); + return (DBInterface) con.newInstance(msm, url, conn, info); + } + } + } + } catch (Exception e) { + logger.error(EELFLoggerDelegate.errorLogger,"createDBInterface: "+e); + } + } + return null; + } + /** + * Look for a class in CLASSPATH that implements the {@link MusicInterface} interface, and has the mixin name name. + * If one is found, construct and return it, using the other arguments for the constructor. + * @param name the name of the Mixin + * @param msm the MusicSqlManager to use as an argument to the constructor + * @param dbi the DBInterface to use as an argument to the constructor + * @param url the URL to use as an argument to the constructor + * @param info the Properties to use as an argument to the constructor + * @return the newly constructed MusicInterface, or null if one cannot be found. + */ + public static MusicInterface createMusicInterface(String name, String url, Properties info, DatabasePartition ranges) { + for (Class cl : Utils.getClassesImplementing(MusicInterface.class)) { + try { + Constructor con = cl.getConstructor(); + if (con != null) { //TODO: is this necessary? Don't think it could ever be null? 
+ MusicInterface mi = (MusicInterface) con.newInstance(); + String miname = mi.getMixinName(); + logger.info(EELFLoggerDelegate.applicationLogger, "Checking "+miname); + if (miname.equalsIgnoreCase(name)) { + con = cl.getConstructor(String.class, Properties.class, DatabasePartition.class); + if (con != null) { + logger.info(EELFLoggerDelegate.applicationLogger,"Found match: "+miname); + return (MusicInterface) con.newInstance(url, info, ranges); + } + } + } + } catch (InvocationTargetException e) { + logger.error(EELFLoggerDelegate.errorLogger,"createMusicInterface: "+e.getCause().toString()); + } + catch (Exception e) { + logger.error(EELFLoggerDelegate.errorLogger,"createMusicInterface: "+e); + } + } + return null; + } + + // Unfortunately, this version does not work when MDBC is built as a JBoss module, + // where something funny is happening with the classloaders +// @SuppressWarnings("unused") +// private static List> getClassesImplementingOld(Class implx) { +// List> list = new ArrayList>(); +// try { +// ClassLoader cldr = MixinFactory.class.getClassLoader(); +// while (cldr != null) { +// ClassPath cp = ClassPath.from(cldr); +// for (ClassPath.ClassInfo x : cp.getAllClasses()) { +// if (x.toString().startsWith("com.att.")) { // mixins must have a package starting with com.att. +// Class cl = x.load(); +// if (impl(cl, implx)) { +// list.add(cl); +// } +// } +// } +// cldr = cldr.getParent(); +// } +// } catch (IOException e) { +// // ignore +// } +// return list; +// } + static boolean impl(Class cl, Class imp) { + for (Class c2 : cl.getInterfaces()) { + if (c2 == imp) { + return true; + } + } + Class c2 = cl.getSuperclass(); + return (c2 != null) ? impl(c2, imp) : false; + } +} diff --git a/src/main/java/org/onap/music/mdbc/mixins/MusicConnector.java b/src/main/java/org/onap/music/mdbc/mixins/MusicConnector.java new file mode 100755 index 0000000..11322fe --- /dev/null +++ b/src/main/java/org/onap/music/mdbc/mixins/MusicConnector.java @@ -0,0 +1,124 @@ +package org.onap.music.mdbc.mixins; + +import java.net.InetAddress; +import java.net.NetworkInterface; +import java.net.SocketException; +import java.util.ArrayList; +import java.util.Enumeration; +import java.util.Iterator; +import java.util.List; + +import org.onap.music.logging.EELFLoggerDelegate; +import com.datastax.driver.core.Cluster; +import com.datastax.driver.core.HostDistance; +import com.datastax.driver.core.Metadata; +import com.datastax.driver.core.PoolingOptions; +import com.datastax.driver.core.Session; +import com.datastax.driver.core.exceptions.NoHostAvailableException; +import org.onap.music.main.MusicCore; + +/** + * This class allows for management of the Cassandra Cluster and Session objects. + * + * @author Robert P. 
Eby + */ +public class MusicConnector { + + private EELFLoggerDelegate logger = EELFLoggerDelegate.getLogger(MusicConnector.class); + + private Session session; + private Cluster cluster; + + protected MusicConnector() { + //to defeat instantiation since this is a singleton + } + + public MusicConnector(String address) { +// connectToCassaCluster(address); + connectToMultipleAddresses(address); + } + + public Session getSession() { + return session; + } + + public void close() { + if (session != null) + session.close(); + session = null; + if (cluster != null) + cluster.close(); + cluster = null; + } + + private List getAllPossibleLocalIps(){ + ArrayList allPossibleIps = new ArrayList(); + try { + Enumeration en = NetworkInterface.getNetworkInterfaces(); + while(en.hasMoreElements()){ + NetworkInterface ni=(NetworkInterface) en.nextElement(); + Enumeration ee = ni.getInetAddresses(); + while(ee.hasMoreElements()) { + InetAddress ia= (InetAddress) ee.nextElement(); + allPossibleIps.add(ia.getHostAddress()); + } + } + } catch (SocketException e) { + e.printStackTrace(); + } + return allPossibleIps; + } + + private void connectToMultipleAddresses(String address) { + MusicCore.getDSHandle(address); + /* + PoolingOptions poolingOptions = + new PoolingOptions() + .setConnectionsPerHost(HostDistance.LOCAL, 4, 10) + .setConnectionsPerHost(HostDistance.REMOTE, 2, 4); + String[] music_hosts = address.split(","); + if (cluster == null) { + logger.info(EELFLoggerDelegate.applicationLogger,"Initializing MUSIC Client with endpoints "+address); + cluster = Cluster.builder() + .withPort(9042) + .withPoolingOptions(poolingOptions) + .withoutMetrics() + .addContactPoints(music_hosts) + .build(); + Metadata metadata = cluster.getMetadata(); + logger.info(EELFLoggerDelegate.applicationLogger,"Connected to cluster:"+metadata.getClusterName()+" at address:"+address); + + } + session = cluster.connect(); + */ + } + + @SuppressWarnings("unused") + private void connectToCassaCluster(String address) { + PoolingOptions poolingOptions = + new PoolingOptions() + .setConnectionsPerHost(HostDistance.LOCAL, 4, 10) + .setConnectionsPerHost(HostDistance.REMOTE, 2, 4); + Iterator it = getAllPossibleLocalIps().iterator(); + logger.info(EELFLoggerDelegate.applicationLogger,"Iterating through possible ips:"+getAllPossibleLocalIps()); + + while (it.hasNext()) { + try { + cluster = Cluster.builder() + .withPort(9042) + .withPoolingOptions(poolingOptions) + .withoutMetrics() + .addContactPoint(address) + .build(); + //cluster.getConfiguration().getSocketOptions().setReadTimeoutMillis(Integer.MAX_VALUE); + Metadata metadata = cluster.getMetadata(); + logger.info(EELFLoggerDelegate.applicationLogger,"Connected to cluster:"+metadata.getClusterName()+" at address:"+address); + + session = cluster.connect(); + break; + } catch (NoHostAvailableException e) { + address = it.next(); + } + } + } +} diff --git a/src/main/java/org/onap/music/mdbc/mixins/MusicInterface.java b/src/main/java/org/onap/music/mdbc/mixins/MusicInterface.java new file mode 100755 index 0000000..abf8f36 --- /dev/null +++ b/src/main/java/org/onap/music/mdbc/mixins/MusicInterface.java @@ -0,0 +1,173 @@ +package org.onap.music.mdbc.mixins; + +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.UUID; + +import org.json.JSONObject; + +import org.onap.music.exceptions.MDBCServiceException; +import org.onap.music.mdbc.DatabasePartition; +import org.onap.music.mdbc.Range; +import org.onap.music.mdbc.TableInfo; +import 
org.onap.music.mdbc.tables.PartitionInformation; +import org.onap.music.mdbc.tables.MusixTxDigestId; +import org.onap.music.mdbc.tables.StagingTable; +import org.onap.music.mdbc.tables.MriReference; +import org.onap.music.mdbc.tables.MusicRangeInformationRow; +import org.onap.music.mdbc.tables.TxCommitProgress; + +/** + * This Interface defines the methods that MDBC needs for a class to provide access to the persistence layer of MUSIC. + * + * @author Robert P. Eby + */ +public interface MusicInterface { + /** + * This function is used to created all the required data structures, both local + * \TODO Check if this function is required in the MUSIC interface or could be just created on the constructor + */ + void initializeMetricDataStructures() throws MDBCServiceException; + /** + * Get the name of this MusicInterface mixin object. + * @return the name + */ + String getMixinName(); + /** + * Gets the name of this MusicInterface mixin's default primary key name + * @return default primary key name + */ + String getMusicDefaultPrimaryKeyName(); + /** + * generates a key or placeholder for what is required for a primary key + * @return a primary key + */ + String generateUniqueKey(); + + /** + * Find the key used with Music for a table that was created without a primary index + * Name is long to avoid developers using it. For cassandra performance in this operation + * is going to be really bad + * @param ti information of the table in the SQL layer + * @param table name of the table + * @param dbRow row obtained from the SQL layer + * @return key associated with the row + */ + String getMusicKeyFromRowWithoutPrimaryIndexes(TableInfo ti, String table, JSONObject dbRow); + /** + * Do what is needed to close down the MUSIC connection. + */ + void close(); + /** + * This method creates a keyspace in Music/Cassandra to store the data corresponding to the SQL tables. + * The keyspace name comes from the initialization properties passed to the JDBC driver. + */ + void createKeyspace(); + /** + * This method performs all necessary initialization in Music/Cassandra to store the table tableName. + * @param tableName the table to initialize MUSIC for + */ + void initializeMusicForTable(TableInfo ti, String tableName); + /** + * Create a dirty row table for the real table tableName. The primary keys columns from the real table are recreated in + * the dirty table, along with a "REPLICA__" column that names the replica that should update it's internal state from MUSIC. + * @param tableName the table to create a "dirty" table for + */ + void createDirtyRowTable(TableInfo ti, String tableName); + /** + * Drop the dirty row table for tableName from MUSIC. + * @param tableName the table being dropped + */ + void dropDirtyRowTable(String tableName); + /** + * Drops the named table and its dirty row table (for all replicas) from MUSIC. The dirty row table is dropped first. + * @param tableName This is the table that has been dropped + */ + void clearMusicForTable(String tableName); + /** + * Mark rows as "dirty" in the dirty rows table for tableName. Rows are marked for all replicas but + * this one (this replica already has the up to date data). + * @param tableName the table we are marking dirty + * @param keys an ordered list of the values being put into the table. The values that correspond to the tables' + * primary key are copied into the dirty row table. 
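+ * <p>An illustrative (not normative) sketch of the dirty-row lifecycle these methods support; the
+ * variable names below (music, ti, tableName, changedRow) are assumptions, not part of this interface:</p>
+ * <pre>
+ * // writer side: after pushing changedRow to MUSIC, flag the other replicas
+ * music.markDirtyRow(ti, tableName, changedRow);
+ *
+ * // reader side: before a SELECT, pull anything other replicas marked for this replica
+ * for (Map keys : music.getDirtyRows(ti, tableName)) {
+ *     // copy the row from MUSIC into the local SQL table, then clear the dirty flag
+ *     music.cleanDirtyRow(ti, tableName, new JSONObject(keys));
+ * }
+ * </pre>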
+ */ + void markDirtyRow(TableInfo ti, String tableName, JSONObject keys); + /** + * Remove the entries from the dirty row (for this replica) that correspond to a set of primary keys + * @param tableName the table we are removing dirty entries from + * @param keys the primary key values to use in the DELETE. Note: this is *only* the primary keys, not a full table row. + */ + void cleanDirtyRow(TableInfo ti, String tableName, JSONObject keys); + /** + * Get a list of "dirty rows" for a table. The dirty rows returned apply only to this replica, + * and consist of a Map of primary key column names and values. + * @param tableName the table we are querying for + * @return a list of maps; each list item is a map of the primary key names and values for that "dirty row". + */ + List> getDirtyRows(TableInfo ti, String tableName); + /** + * This method is called whenever there is a DELETE to a row on a local SQL table, wherein it updates the + * MUSIC/Cassandra tables (both dirty bits and actual data) corresponding to the SQL write. MUSIC propagates + * it to the other replicas. + * @param tableName This is the table that has changed. + * @param oldRow This is a copy of the old row being deleted + */ + void deleteFromEntityTableInMusic(TableInfo ti,String tableName, JSONObject oldRow); + /** + * This method is called whenever there is a SELECT on a local SQL table, wherein it first checks the local + * dirty bits table to see if there are any rows in Cassandra whose value needs to be copied to the local SQL DB. + * @param tableName This is the table on which the select is being performed + */ + void readDirtyRowsAndUpdateDb(DBInterface dbi, String tableName); + /** + * This method is called whenever there is an INSERT or UPDATE to a local SQL table, wherein it updates the + * MUSIC/Cassandra tables (both dirty bits and actual data) corresponding to the SQL write. Music propagates + * it to the other replicas. + * @param tableName This is the table that has changed. + * @param changedRow This is information about the row that has changed + */ + void updateDirtyRowAndEntityTableInMusic(TableInfo ti, String tableName, JSONObject changedRow); + + Object[] getObjects(TableInfo ti, String tableName, JSONObject row); + /** + * Returns the primary key associated with the given row + * @param ti info of the table that is associated with the row + * @param tableName name of the table that contains the row + * @param changedRow row that is going to contain the information associated with the primary key + * @return primary key of the row + */ + String getMusicKeyFromRow(TableInfo ti, String tableName, JSONObject changedRow); + + /** + * Commits the corresponding REDO-log into MUSIC + * + * @param dbi, the database interface use in the local SQL cache, where the music interface is being used + * @param partition + * @param transactionDigest digest of the transaction that is being committed into the Redo log in music. 
It has to be a HashMap, because it is required to be serializable + * @param txId id associated with the log being send + * @param progressKeeper data structure that is used to handle to detect failures, and know what to do + * @throws MDBCServiceException + */ + void commitLog(DBInterface dbi, DatabasePartition partition, HashMap transactionDigest, String txId,TxCommitProgress progressKeeper) throws MDBCServiceException; + + MusicRangeInformationRow getMusicRangeInformation(UUID id); + + MriReference createMusicRangeInformation(MusicRangeInformationRow info); + + void appendToRedoLog(MriReference mriRowId, DatabasePartition partition, MusixTxDigestId newRecord); + + void addTxDigest(String musicTxDigestTable, MusixTxDigestId newId, String transactionDigest); + + List getPartitionInformation(DatabasePartition partition); + + HashMap getTransactionDigest(MusixTxDigestId id); + + void own(List ranges); + + void appendRange(String rangeId, List ranges); + + void relinquish(String ownerId, String rangeId); + +} + diff --git a/src/main/java/org/onap/music/mdbc/mixins/MusicMixin.java b/src/main/java/org/onap/music/mdbc/mixins/MusicMixin.java new file mode 100644 index 0000000..f7b667d --- /dev/null +++ b/src/main/java/org/onap/music/mdbc/mixins/MusicMixin.java @@ -0,0 +1,233 @@ +package org.onap.music.mdbc.mixins; + +import java.io.IOException; +import java.io.InputStream; +import java.util.*; + +import org.onap.music.mdbc.LockId; +import org.json.JSONObject; +import org.onap.music.exceptions.MusicLockingException; + +import org.onap.music.exceptions.MDBCServiceException; +import org.onap.music.mdbc.DatabasePartition; +import org.onap.music.mdbc.Range; +import org.onap.music.mdbc.TableInfo; +import org.onap.music.mdbc.tables.PartitionInformation; +import org.onap.music.mdbc.tables.MusixTxDigestId; +import org.onap.music.mdbc.tables.StagingTable; +import org.onap.music.mdbc.tables.MriReference; +import org.onap.music.mdbc.tables.MusicRangeInformationRow; +import org.onap.music.mdbc.tables.TxCommitProgress; + +import org.onap.music.main.MusicCore; + +/** + + * + */ +public class MusicMixin implements MusicInterface { + + public static Map> currentLockMap = new HashMap<>(); + public static List criticalTables = new ArrayList<>(); + + @Override + public String getMixinName() { + // + return null; + } + + @Override + public String getMusicDefaultPrimaryKeyName() { + // + return null; + } + + @Override + public String generateUniqueKey() { + // + return null; + } + + @Override + public String getMusicKeyFromRow(TableInfo ti, String table, JSONObject dbRow) { + // + return null; + } + + @Override + public void close() { + // + + } + + @Override + public void createKeyspace() { + // + + } + + @Override + public void initializeMusicForTable(TableInfo ti, String tableName) { + // + + } + + @Override + public void createDirtyRowTable(TableInfo ti, String tableName) { + // + + } + + @Override + public void dropDirtyRowTable(String tableName) { + // + + } + + @Override + public void clearMusicForTable(String tableName) { + // + + } + + @Override + public void markDirtyRow(TableInfo ti, String tableName, JSONObject keys) { + // + + } + + @Override + public void cleanDirtyRow(TableInfo ti, String tableName, JSONObject keys) { + // + + } + + @Override + public List> getDirtyRows(TableInfo ti, String tableName) { + // + return null; + } + + @Override + public void deleteFromEntityTableInMusic(TableInfo ti, String tableName, JSONObject oldRow) { + // + + } + + @Override + public void 
readDirtyRowsAndUpdateDb(DBInterface dbi, String tableName) { + // + + } + + @Override + public void updateDirtyRowAndEntityTableInMusic(TableInfo ti, String tableName, JSONObject changedRow) { + updateDirtyRowAndEntityTableInMusic(tableName, changedRow, false); + + } + + public void updateDirtyRowAndEntityTableInMusic(String tableName, JSONObject changedRow, boolean isCritical) { + } + + + public static void loadProperties() { + Properties prop = new Properties(); + InputStream input = null; + try { + input = MusicMixin.class.getClassLoader().getResourceAsStream("mdbc.properties"); + prop.load(input); + String crTable = prop.getProperty("critical.tables"); + String[] tableArr = crTable.split(","); + criticalTables = Arrays.asList(tableArr); + + } catch (Exception ex) { + ex.printStackTrace(); + } finally { + if (input != null) { + try { + input.close(); + } catch (IOException e) { + e.printStackTrace(); + } + } + } + } + + public static void releaseZKLocks(Set lockIds) { + for (LockId lockId : lockIds) { + System.out.println("Releasing lock: " + lockId); + try { + MusicCore.voluntaryReleaseLock(lockId.getFullyQualifiedLockKey(), lockId.getLockReference()); + MusicCore.destroyLockRef(lockId.getFullyQualifiedLockKey(), lockId.getLockReference()); + } catch (MusicLockingException e) { + e.printStackTrace(); + } + } + } + + @Override + public String getMusicKeyFromRowWithoutPrimaryIndexes(TableInfo ti, String tableName, JSONObject changedRow) { + // + return null; + } + + @Override + public void initializeMetricDataStructures() { + // + + } + + @Override + public Object[] getObjects(TableInfo ti, String tableName, JSONObject row) { + return null; + } + + @Override + public void commitLog(DBInterface dbi, DatabasePartition partition, HashMap transactionDigest, String txId, TxCommitProgress progressKeeper) + throws MDBCServiceException { + // TODO Auto-generated method stub + } + + @Override + public HashMap getTransactionDigest(MusixTxDigestId id) { + return null; + } + + @Override + public List getPartitionInformation(DatabasePartition partition) { + return null; + } + + @Override + public MriReference createMusicRangeInformation(MusicRangeInformationRow info) { + return null; + } + + @Override + public void appendToRedoLog(MriReference mriRowId, DatabasePartition partition, MusixTxDigestId newRecord) { + } + + @Override + public void addTxDigest(String musicTxDigestTable, MusixTxDigestId newId, String transactionDigest) { + } + + @Override + public void own(List ranges) { + throw new java.lang.UnsupportedOperationException("function not implemented yet"); + } + + @Override + public void appendRange(String rangeId, List ranges) { + throw new java.lang.UnsupportedOperationException("function not implemented yet"); + } + + @Override + public void relinquish(String ownerId, String rangeId) { + throw new java.lang.UnsupportedOperationException("function not implemented yet"); + } + + @Override + public MusicRangeInformationRow getMusicRangeInformation(UUID id){ + return null; + } +} diff --git a/src/main/java/org/onap/music/mdbc/mixins/MySQLMixin.java b/src/main/java/org/onap/music/mdbc/mixins/MySQLMixin.java new file mode 100755 index 0000000..d78bc9b --- /dev/null +++ b/src/main/java/org/onap/music/mdbc/mixins/MySQLMixin.java @@ -0,0 +1,786 @@ +package org.onap.music.mdbc.mixins; + +import java.sql.Connection; +import java.sql.PreparedStatement; +import java.sql.ResultSet; +import java.sql.SQLException; +import java.sql.Statement; +import java.sql.Types; +import java.util.ArrayList; +import 
java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.Properties; +import java.util.Set; +import java.util.TreeSet; + +import org.json.JSONObject; +import org.json.JSONTokener; + +import org.onap.music.logging.EELFLoggerDelegate; +import org.onap.music.mdbc.MusicSqlManager; +import org.onap.music.mdbc.Range; +import org.onap.music.mdbc.TableInfo; +import org.onap.music.mdbc.tables.OperationType; +import org.onap.music.mdbc.tables.StagingTable; + +import net.sf.jsqlparser.JSQLParserException; +import net.sf.jsqlparser.parser.CCJSqlParserUtil; +import net.sf.jsqlparser.statement.delete.Delete; +import net.sf.jsqlparser.statement.insert.Insert; +import net.sf.jsqlparser.statement.update.Update; + +/** + * This class provides the methods that MDBC needs in order to mirror data to/from a + * MySQL or MariaDB database instance. + * This class uses the JSON_OBJECT() database function, which means it requires the following + * minimum versions of either database: + * + * + * + * + *
+ * <table>
+ * <tr><th>DATABASE</th><th>VERSION</th></tr>
+ * <tr><td>MySQL</td><td>5.7.8</td></tr>
+ * <tr><td>MariaDB</td><td>10.2.3 (Note: 10.2.3 is currently (July 2017) a beta release)</td></tr>
+ * </table>
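+ * <p>For illustration only (not part of this class): the kind of JSON_OBJECT() query the generated
+ * triggers depend on, shown against a hypothetical table PERSON(ID, NAME) over an open JDBC
+ * Connection <code>conn</code>:</p>
+ * <pre>
+ * try (Statement s = conn.createStatement();
+ *      ResultSet r = s.executeQuery("SELECT JSON_OBJECT('ID', ID, 'NAME', NAME) AS J FROM PERSON")) {
+ *     while (r.next()) {
+ *         JSONObject row = new JSONObject(r.getString("J"));  // one JSON document per row
+ *     }
+ * }
+ * </pre>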
+ * + * @author Robert P. Eby + */ +public class MySQLMixin implements DBInterface { + private EELFLoggerDelegate logger = EELFLoggerDelegate.getLogger(MySQLMixin.class); + + public static final String MIXIN_NAME = "mysql"; + public static final String TRANS_TBL = "MDBC_TRANSLOG"; + private static final String CREATE_TBL_SQL = + "CREATE TABLE IF NOT EXISTS "+TRANS_TBL+ + " (IX INT AUTO_INCREMENT, OP CHAR(1), TABLENAME VARCHAR(255), NEWROWDATA VARCHAR(1024), KEYDATA VARCHAR(1024), CONNECTION_ID INT,PRIMARY KEY (IX))"; + + private final MusicSqlManager msm; + private final int connId; + private final String dbName; + private final Connection dbConnection; + private final Map tables; + private boolean server_tbl_created = false; + + public MySQLMixin() { + this.msm = null; + this.connId = 0; + this.dbName = null; + this.dbConnection = null; + this.tables = null; + } + public MySQLMixin(MusicSqlManager msm, String url, Connection conn, Properties info) { + this.msm = msm; + this.connId = generateConnID(conn); + this.dbName = getDBName(conn); + this.dbConnection = conn; + this.tables = new HashMap(); + } + // This is used to generate a unique connId for this connection to the DB. + private int generateConnID(Connection conn) { + int rv = (int) System.currentTimeMillis(); // random-ish + try { + Statement stmt = conn.createStatement(); + ResultSet rs = stmt.executeQuery("SELECT CONNECTION_ID() AS IX"); + if (rs.next()) { + rv = rs.getInt("IX"); + } + stmt.close(); + } catch (SQLException e) { + logger.error(EELFLoggerDelegate.errorLogger,"generateConnID: problem generating a connection ID!"); + } + return rv; + } + + /** + * Get the name of this DBnterface mixin object. + * @return the name + */ + @Override + public String getMixinName() { + return MIXIN_NAME; + } + + @Override + public void close() { + // nothing yet + } + + /** + * Determines the db name associated with the connection + * This is the private/internal method that actually determines the name + * @param conn + * @return + */ + private String getDBName(Connection conn) { + String dbname = "mdbc"; //default name + try { + Statement stmt = conn.createStatement(); + ResultSet rs = stmt.executeQuery("SELECT DATABASE() AS DB"); + if (rs.next()) { + dbname = rs.getString("DB"); + } + stmt.close(); + } catch (SQLException e) { + logger.error(EELFLoggerDelegate.errorLogger, "getDBName: problem getting database name from mysql"); + } + return dbname; + } + + public String getDatabaseName() { + return this.dbName; + } + /** + * Get a set of the table names in the database. 
+ * @return the set + */ + @Override + public Set getSQLTableSet() { + Set set = new TreeSet(); + String sql = "SELECT TABLE_NAME FROM INFORMATION_SCHEMA.TABLES WHERE TABLE_SCHEMA=DATABASE() AND TABLE_TYPE='BASE TABLE'"; + try { + Statement stmt = dbConnection.createStatement(); + ResultSet rs = stmt.executeQuery(sql); + while (rs.next()) { + String s = rs.getString("TABLE_NAME"); + set.add(s); + } + stmt.close(); + } catch (SQLException e) { + logger.error(EELFLoggerDelegate.errorLogger,"getSQLTableSet: "+e); + } + logger.debug(EELFLoggerDelegate.applicationLogger,"getSQLTableSet returning: "+ set); + return set; + } +/* +mysql> describe tables; ++-----------------+---------------------+------+-----+---------+-------+ +| Field | Type | Null | Key | Default | Extra | ++-----------------+---------------------+------+-----+---------+-------+ +| TABLE_CATALOG | varchar(512) | NO | | | | +| TABLE_SCHEMA | varchar(64) | NO | | | | +| TABLE_NAME | varchar(64) | NO | | | | +| TABLE_TYPE | varchar(64) | NO | | | | +| ENGINE | varchar(64) | YES | | NULL | | +| VERSION | bigint(21) unsigned | YES | | NULL | | +| ROW_FORMAT | varchar(10) | YES | | NULL | | +| TABLE_ROWS | bigint(21) unsigned | YES | | NULL | | +| AVG_ROW_LENGTH | bigint(21) unsigned | YES | | NULL | | +| DATA_LENGTH | bigint(21) unsigned | YES | | NULL | | +| MAX_DATA_LENGTH | bigint(21) unsigned | YES | | NULL | | +| INDEX_LENGTH | bigint(21) unsigned | YES | | NULL | | +| DATA_FREE | bigint(21) unsigned | YES | | NULL | | +| AUTO_INCREMENT | bigint(21) unsigned | YES | | NULL | | +| CREATE_TIME | datetime | YES | | NULL | | +| UPDATE_TIME | datetime | YES | | NULL | | +| CHECK_TIME | datetime | YES | | NULL | | +| TABLE_COLLATION | varchar(32) | YES | | NULL | | +| CHECKSUM | bigint(21) unsigned | YES | | NULL | | +| CREATE_OPTIONS | varchar(255) | YES | | NULL | | +| TABLE_COMMENT | varchar(2048) | NO | | | | ++-----------------+---------------------+------+-----+---------+-------+ + */ + /** + * Return a TableInfo object for the specified table. + * This method first looks in a cache of previously constructed TableInfo objects for the table. + * If not found, it queries the INFORMATION_SCHEMA.COLUMNS table to obtain the column names, types, and indexes of the table. + * It creates a new TableInfo object with the results. 
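+ * <pre>
+ * // Informal example, not taken from this code: for a hypothetical table
+ * // PERSON(ID INT PRIMARY KEY, NAME VARCHAR(64)) the returned object would hold
+ * TableInfo ti = getTableInfo("PERSON");
+ * // ti.columns -> [ID, NAME]
+ * // ti.coltype -> [Types.INTEGER, Types.VARCHAR]
+ * // ti.iskey   -> [true, false]
+ * </pre>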
+ * @param tableName the table to look up + * @return a TableInfo object containing the info we need, or null if the table does not exist + */ + @Override + public TableInfo getTableInfo(String tableName) { + TableInfo ti = tables.get(tableName); + if (ti == null) { + try { + String tbl = tableName;//.toUpperCase(); + String sql = "SELECT COLUMN_NAME, DATA_TYPE, COLUMN_KEY FROM INFORMATION_SCHEMA.COLUMNS WHERE TABLE_SCHEMA=DATABASE() AND TABLE_NAME='"+tbl+"'"; + ResultSet rs = executeSQLRead(sql); + if (rs != null) { + ti = new TableInfo(); + while (rs.next()) { + String name = rs.getString("COLUMN_NAME"); + String type = rs.getString("DATA_TYPE"); + String ckey = rs.getString("COLUMN_KEY"); + ti.columns.add(name); + ti.coltype.add(mapDatatypeNameToType(type)); + ti.iskey.add(ckey != null && !ckey.equals("")); + } + rs.getStatement().close(); + } else { + logger.error(EELFLoggerDelegate.errorLogger,"Cannot retrieve table info for table "+tableName+" from MySQL."); + } + } catch (SQLException e) { + logger.error(EELFLoggerDelegate.errorLogger,"Cannot retrieve table info for table "+tableName+" from MySQL: "+e); + return null; + } + tables.put(tableName, ti); + } + return ti; + } + // Map MySQL data type names to the java.sql.Types equivalent + private int mapDatatypeNameToType(String nm) { + switch (nm) { + case "tinyint": return Types.TINYINT; + case "smallint": return Types.SMALLINT; + case "mediumint": + case "int": return Types.INTEGER; + case "bigint": return Types.BIGINT; + case "decimal": + case "numeric": return Types.DECIMAL; + case "float": return Types.FLOAT; + case "double": return Types.DOUBLE; + case "date": + case "datetime": return Types.DATE; + case "time": return Types.TIME; + case "timestamp": return Types.TIMESTAMP; + case "char": return Types.CHAR; + case "text": + case "varchar": return Types.VARCHAR; + case "mediumblob": + case "blob": return Types.VARCHAR; + default: + logger.error(EELFLoggerDelegate.errorLogger,"unrecognized and/or unsupported data type "+nm); + return Types.VARCHAR; + } + } + @Override + public void createSQLTriggers(String tableName) { + // Don't create triggers for the table the triggers write into!!! + if (tableName.equals(TRANS_TBL)) + return; + try { + if (!server_tbl_created) { + try { + Statement stmt = dbConnection.createStatement(); + stmt.execute(CREATE_TBL_SQL); + stmt.close(); + logger.info(EELFLoggerDelegate.applicationLogger,"createSQLTriggers: Server side dirty table created."); + server_tbl_created = true; + } catch (SQLException e) { + logger.error(EELFLoggerDelegate.errorLogger,"createSQLTriggers: problem creating the "+TRANS_TBL+" table!"); + } + } + + // Give the triggers a way to find this MSM + for (String name : getTriggerNames(tableName)) { + logger.info(EELFLoggerDelegate.applicationLogger,"ADD trigger "+name+" to msm_map"); + //\TODO fix this is an error + //msm.register(name); + } + // No SELECT trigger + executeSQLWrite(generateTrigger(tableName, "INSERT")); + executeSQLWrite(generateTrigger(tableName, "UPDATE")); + executeSQLWrite(generateTrigger(tableName, "DELETE")); + } catch (SQLException e) { + if (e.getMessage().equals("Trigger already exists")) { + //only warn if trigger already exists + logger.warn(EELFLoggerDelegate.applicationLogger, "createSQLTriggers" + e); + } else { + logger.error(EELFLoggerDelegate.errorLogger,"createSQLTriggers: "+e); + } + } + } +/* +CREATE TRIGGER `triggername` BEFORE UPDATE ON `table` +FOR EACH ROW BEGIN +INSERT INTO `log_table` ( `field1` `field2`, ...) 
VALUES ( NEW.`field1`, NEW.`field2`, ...) ; +END; + +OLD.field refers to the old value +NEW.field refers to the new value +*/ + private String generateTrigger(String tableName, String op) { + boolean isdelete = op.equals("DELETE"); + boolean isinsert = op.equals("INSERT"); + TableInfo ti = getTableInfo(tableName); + StringBuilder newJson = new StringBuilder("JSON_OBJECT("); // JSON_OBJECT(key, val, key, val) page 1766 + StringBuilder keyJson = new StringBuilder("JSON_OBJECT("); + String pfx = ""; + String keypfx = ""; + for (String col : ti.columns) { + newJson.append(pfx) + .append("'").append(col).append("', ") + .append(isdelete ? "OLD." : "NEW.") + .append(col); + if (ti.iskey(col) || !ti.hasKey()) { + keyJson.append(keypfx) + .append("'").append(col).append("', ") + .append(isinsert ? "NEW." : "OLD.") + .append(col); + keypfx = ", "; + } + pfx = ", "; + } + newJson.append(")"); + keyJson.append(")"); + //\TODO check if using mysql driver, so instead check the exception + StringBuilder sb = new StringBuilder() + .append("CREATE TRIGGER ") // IF NOT EXISTS not supported by MySQL! + .append(String.format("%s_%s", op.substring(0, 1), tableName)) + .append(" AFTER ") + .append(op) + .append(" ON ") + .append(tableName) + .append(" FOR EACH ROW INSERT INTO ") + .append(TRANS_TBL) + .append(" (TABLENAME, OP, NEWROWDATA, KEYDATA, CONNECTION_ID) VALUES('") + .append(tableName) + .append("', ") + .append(isdelete ? "'D'" : (op.equals("INSERT") ? "'I'" : "'U'")) + .append(", ") + .append(newJson.toString()) + .append(", ") + .append(keyJson.toString()) + .append(", ") + .append("CONNECTION_ID()") + .append(")"); + return sb.toString(); + } + private String[] getTriggerNames(String tableName) { + return new String[] { + "I_" + tableName, // INSERT trigger + "U_" + tableName, // UPDATE trigger + "D_" + tableName // DELETE trigger + }; + } + + @Override + public void dropSQLTriggers(String tableName) { + try { + for (String name : getTriggerNames(tableName)) { + logger.info(EELFLoggerDelegate.applicationLogger,"REMOVE trigger "+name+" from msmmap"); + executeSQLWrite("DROP TRIGGER IF EXISTS " +name); + //\TODO Fix this is an error + //msm.unregister(name); + } + } catch (SQLException e) { + logger.error(EELFLoggerDelegate.errorLogger,"dropSQLTriggers: "+e); + } + } + + @Override + public void insertRowIntoSqlDb(String tableName, Map map) { + TableInfo ti = getTableInfo(tableName); + String sql = ""; + if (rowExists(tableName, ti, map)) { + // Update - Construct the what and where strings for the DB write + StringBuilder what = new StringBuilder(); + StringBuilder where = new StringBuilder(); + String pfx = ""; + String pfx2 = ""; + for (int i = 0; i < ti.columns.size(); i++) { + String col = ti.columns.get(i); + String val = Utils.getStringValue(map.get(col)); + if (ti.iskey.get(i)) { + where.append(pfx).append(col).append("=").append(val); + pfx = " AND "; + } else { + what.append(pfx2).append(col).append("=").append(val); + pfx2 = ", "; + } + } + sql = String.format("UPDATE %s SET %s WHERE %s", tableName, what.toString(), where.toString()); + } else { + // Construct the value string and column name string for the DB write + StringBuilder fields = new StringBuilder(); + StringBuilder values = new StringBuilder(); + String pfx = ""; + for (String col : ti.columns) { + fields.append(pfx).append(col); + values.append(pfx).append(Utils.getStringValue(map.get(col))); + pfx = ", "; + } + sql = String.format("INSERT INTO %s (%s) VALUES (%s);", tableName, fields.toString(), values.toString()); + } + try { 
+ executeSQLWrite(sql); + } catch (SQLException e1) { + logger.error(EELFLoggerDelegate.errorLogger,"executeSQLWrite: "+e1); + } + // TODO - remove any entries from MDBC_TRANSLOG corresponding to this update + // SELECT IX, OP, KEYDATA FROM MDBC_TRANS_TBL WHERE CONNID = "+connId AND TABLENAME = tblname + } + + private boolean rowExists(String tableName, TableInfo ti, Map map) { + StringBuilder where = new StringBuilder(); + String pfx = ""; + for (int i = 0; i < ti.columns.size(); i++) { + if (ti.iskey.get(i)) { + String col = ti.columns.get(i); + String val = Utils.getStringValue(map.get(col)); + where.append(pfx).append(col).append("=").append(val); + pfx = " AND "; + } + } + String sql = String.format("SELECT * FROM %s WHERE %s", tableName, where.toString()); + ResultSet rs = executeSQLRead(sql); + try { + boolean rv = rs.next(); + rs.close(); + return rv; + } catch (SQLException e) { + return false; + } + } + + + @Override + public void deleteRowFromSqlDb(String tableName, Map map) { + TableInfo ti = getTableInfo(tableName); + StringBuilder where = new StringBuilder(); + String pfx = ""; + for (int i = 0; i < ti.columns.size(); i++) { + if (ti.iskey.get(i)) { + String col = ti.columns.get(i); + Object val = map.get(col); + where.append(pfx).append(col).append("=").append(Utils.getStringValue(val)); + pfx = " AND "; + } + } + try { + String sql = String.format("DELETE FROM %s WHERE %s", tableName, where.toString()); + executeSQLWrite(sql); + } catch (SQLException e) { + e.printStackTrace(); + } + } + + /** + * This method executes a read query in the SQL database. Methods that call this method should be sure + * to call resultset.getStatement().close() when done in order to free up resources. + * @param sql the query to run + * @return a ResultSet containing the rows returned from the query + */ + @Override + public ResultSet executeSQLRead(String sql) { + logger.debug(EELFLoggerDelegate.applicationLogger,"executeSQLRead"); + logger.debug("Executing SQL read:"+ sql); + ResultSet rs = null; + try { + Statement stmt = dbConnection.createStatement(); + rs = stmt.executeQuery(sql); + } catch (SQLException e) { + logger.error(EELFLoggerDelegate.errorLogger,"executeSQLRead"+e); + } + return rs; + } + + /** + * This method executes a write query in the sql database. + * @param sql the SQL to be sent to MySQL + * @throws SQLException if an underlying JDBC method throws an exception + */ + protected void executeSQLWrite(String sql) throws SQLException { + logger.debug(EELFLoggerDelegate.applicationLogger, "Executing SQL write:"+ sql); + + Statement stmt = dbConnection.createStatement(); + stmt.execute(sql); + stmt.close(); + } + + /** + * Code to be run within the DB driver before a SQL statement is executed. This is where tables + * can be synchronized before a SELECT, for those databases that do not support SELECT triggers. + * @param sql the SQL statement that is about to be executed + * @return list of keys that will be updated, if they can't be determined afterwards (i.e. 
sql table doesn't have primary key) + */ + @Override + public void preStatementHook(final String sql) { + if (sql == null) { + return; + } + String cmd = sql.trim().toLowerCase(); + if (cmd.startsWith("select")) { + String[] parts = sql.trim().split(" "); + Set set = getSQLTableSet(); + for (String part : parts) { + if (set.contains(part.toUpperCase())) { + // Found a candidate table name in the SELECT SQL -- update this table + //msm.readDirtyRowsAndUpdateDb(part); + } + } + } + } + + /** + * Code to be run within the DB driver after a SQL statement has been executed. This is where remote + * statement actions can be copied back to Cassandra/MUSIC. + * @param sql the SQL statement that was executed + */ + @Override + public void postStatementHook(final String sql,Map transactionDigest) { + if (sql != null) { + String[] parts = sql.trim().split(" "); + String cmd = parts[0].toLowerCase(); + if ("delete".equals(cmd) || "insert".equals(cmd) || "update".equals(cmd)) { + try { + this.updateStagingTable(transactionDigest); + } catch (NoSuchFieldException e) { + // TODO Auto-generated catch block + e.printStackTrace(); + } + } + } + } + + private OperationType toOpEnum(String operation) throws NoSuchFieldException { + switch (operation.toLowerCase()) { + case "i": + return OperationType.INSERT; + case "d": + return OperationType.DELETE; + case "u": + return OperationType.UPDATE; + case "s": + return OperationType.SELECT; + default: + logger.error(EELFLoggerDelegate.errorLogger,"Invalid operation selected: ["+operation+"]"); + throw new NoSuchFieldException("Invalid operation enum"); + } + + } + /** + * Copy data that is in transaction table into music interface + * @param transactionDigests + * @throws NoSuchFieldException + */ + private void updateStagingTable(Map transactionDigests) throws NoSuchFieldException { + // copy from DB.MDBC_TRANSLOG where connid == myconnid + // then delete from MDBC_TRANSLOG + String sql2 = "SELECT IX, TABLENAME, OP, KEYDATA, NEWROWDATA FROM "+TRANS_TBL +" WHERE CONNECTION_ID = " + this.connId; + try { + ResultSet rs = executeSQLRead(sql2); + Set rows = new TreeSet(); + while (rs.next()) { + int ix = rs.getInt("IX"); + String op = rs.getString("OP"); + OperationType opType = toOpEnum(op); + String tbl = rs.getString("TABLENAME"); + String keydataStr = rs.getString("KEYDATA"); + String newRowStr = rs.getString("NEWROWDATA"); + JSONObject newRow = new JSONObject(new JSONTokener(newRowStr)); + String musicKey; + TableInfo ti = getTableInfo(tbl); + if (!ti.hasKey()) { + //create music key + //\TODO fix, this is completely broken + //if (op.startsWith("I")) { + //\TODO Improve the generation of primary key, it should be generated using + // the actual columns, otherwise performance when doing range queries are going + // to be even worse (see the else bracket down) + // + musicKey = msm.generateUniqueKey(); + /*} else { + //get key from data + musicKey = msm.getMusicKeyFromRowWithoutPrimaryIndexes(tbl,newRow); + }*/ + newRow.put(msm.getMusicDefaultPrimaryKeyName(), musicKey); + } + else { + //Use the keys + musicKey = msm.getMusicKeyFromRow(tbl, newRow); + if(musicKey.isEmpty()) { + logger.error(EELFLoggerDelegate.errorLogger,"Primary key is invalid: ["+tbl+","+op+"]"); + throw new NoSuchFieldException("Invalid operation enum"); + } + } + Range range = new Range(tbl); + if(!transactionDigests.containsKey(range)) { + transactionDigests.put(range, new StagingTable()); + } + transactionDigests.get(range).addOperation(musicKey, opType, newRow.toString()); + rows.add(ix); + 
} + rs.getStatement().close(); + if (rows.size() > 0) { + sql2 = "DELETE FROM "+TRANS_TBL+" WHERE IX = ?"; + PreparedStatement ps = dbConnection.prepareStatement(sql2); + logger.debug("Executing: "+sql2); + logger.debug(" For ix = "+rows); + for (int ix : rows) { + ps.setInt(1, ix); + ps.execute(); + } + ps.close(); + } + } catch (SQLException e) { + logger.warn("Exception in postStatementHook: "+e); + e.printStackTrace(); + } + } + + + + /** + * Update music with data from MySQL table + * + * @param tableName - name of table to update in music + */ + @Override + public void synchronizeData(String tableName) { + ResultSet rs = null; + TableInfo ti = getTableInfo(tableName); + String query = "SELECT * FROM "+tableName; + + try { + rs = executeSQLRead(query); + if(rs==null) return; + while(rs.next()) { + + JSONObject jo = new JSONObject(); + if (!getTableInfo(tableName).hasKey()) { + String musicKey = msm.generateUniqueKey(); + jo.put(msm.getMusicDefaultPrimaryKeyName(), musicKey); + } + + for (String col : ti.columns) { + jo.put(col, rs.getString(col)); + } + + @SuppressWarnings("unused") + Object[] row = Utils.jsonToRow(ti,tableName, jo,msm.getMusicDefaultPrimaryKeyName()); + //\FIXME this is wrong now, update of the dirty row and entity is now handled by the archival process + //msm.updateDirtyRowAndEntityTableInMusic(ti,tableName, jo); + } + } catch (Exception e) { + logger.error(EELFLoggerDelegate.errorLogger, "synchronizing data " + tableName + + " -> " + e.getMessage()); + } + finally { + try { + rs.close(); + } catch (SQLException e) { + //continue + } + } + + } + + /** + * Return a list of "reserved" names, that should not be used by MySQL client/MUSIC + * These are reserved for mdbc + */ + @Override + public List getReservedTblNames() { + ArrayList rsvdTables = new ArrayList(); + rsvdTables.add(TRANS_TBL); + //Add others here as necessary + return rsvdTables; + } + @Override + public String getPrimaryKey(String sql, String tableName) { + // + return null; + } + + @SuppressWarnings("unused") + @Deprecated + private ArrayList getMusicKey(String sql) { + try { + net.sf.jsqlparser.statement.Statement stmt = CCJSqlParserUtil.parse(sql); + if (stmt instanceof Insert) { + Insert s = (Insert) stmt; + String tbl = s.getTable().getName(); + return getMusicKey(tbl, "INSERT", sql); + } else if (stmt instanceof Update){ + Update u = (Update) stmt; + String tbl = u.getTables().get(0).getName(); + return getMusicKey(tbl, "UPDATE", sql); + } else if (stmt instanceof Delete) { + Delete d = (Delete) stmt; + //TODO: IMPLEMENT + String tbl = d.getTable().getName(); + return getMusicKey(tbl, "DELETE", sql); + } else { + System.err.println("Not recognized sql type"); + } + + } catch (JSQLParserException e) { + + e.printStackTrace(); + } + //Something went wrong here + return new ArrayList(); + } + + /** + * Returns all keys that matches the current sql statement, and not in already updated keys. 
+ * + * @param tbl + * @param cmd + * @param sql + */ + @Deprecated + private ArrayList getMusicKey(String tbl, String cmd, String sql) { + ArrayList musicKeys = new ArrayList(); + /* + if (cmd.equalsIgnoreCase("insert")) { + //create key, return key + musicKeys.add(msm.generatePrimaryKey()); + } else if (cmd.equalsIgnoreCase("update") || cmd.equalsIgnoreCase("delete")) { + try { + net.sf.jsqlparser.statement.Statement stmt = CCJSqlParserUtil.parse(sql); + String where; + if (stmt instanceof Update) { + where = ((Update) stmt).getWhere().toString(); + } else if (stmt instanceof Delete) { + where = ((Delete) stmt).getWhere().toString(); + } else { + System.err.println("Unknown type: " +stmt.getClass()); + where = ""; + } + ResultSet rs = executeSQLRead("SELECT * FROM " + tbl + " WHERE " + where); + musicKeys = msm.getMusicKeysWhere(tbl, Utils.parseResults(getTableInfo(tbl), rs)); + } catch (JSQLParserException e) { + + e.printStackTrace(); + } catch (SQLException e) { + //Not a valid sql query + e.printStackTrace(); + } + } + */ + return musicKeys; + } + + + @Deprecated + public void insertRowIntoSqlDbOLD(String tableName, Map map) { + // First construct the value string and column name string for the db write + TableInfo ti = getTableInfo(tableName); + StringBuilder fields = new StringBuilder(); + StringBuilder values = new StringBuilder(); + String pfx = ""; + for (String col : ti.columns) { + fields.append(pfx).append(col); + values.append(pfx).append(Utils.getStringValue(map.get(col))); + pfx = ", "; + } + + try { + String sql = String.format("INSERT INTO %s (%s) VALUES (%s);", tableName, fields.toString(), values.toString()); + executeSQLWrite(sql); + } catch (SQLException e) { + logger.error(EELFLoggerDelegate.errorLogger,"Insert failed because row exists, do an update"); + StringBuilder where = new StringBuilder(); + pfx = ""; + String pfx2 = ""; + fields.setLength(0); + for (int i = 0; i < ti.columns.size(); i++) { + String col = ti.columns.get(i); + String val = Utils.getStringValue(map.get(col)); + if (ti.iskey.get(i)) { + where.append(pfx).append(col).append("=").append(val); + pfx = " AND "; + } else { + fields.append(pfx2).append(col).append("=").append(val); + pfx2 = ", "; + } + } + String sql = String.format("UPDATE %s SET %s WHERE %s", tableName, fields.toString(), where.toString()); + try { + executeSQLWrite(sql); + } catch (SQLException e1) { + logger.error(EELFLoggerDelegate.errorLogger,"executeSQLWrite"+e1); + } + } + } +} diff --git a/src/main/java/org/onap/music/mdbc/mixins/Utils.java b/src/main/java/org/onap/music/mdbc/mixins/Utils.java new file mode 100755 index 0000000..2fd0f6e --- /dev/null +++ b/src/main/java/org/onap/music/mdbc/mixins/Utils.java @@ -0,0 +1,220 @@ +package org.onap.music.mdbc.mixins; + +import java.io.IOException; +import java.io.InputStream; +import java.math.BigDecimal; +import java.nio.ByteBuffer; +import java.sql.ResultSet; +import java.sql.SQLException; +import java.sql.Timestamp; +import java.sql.Types; +import java.util.ArrayList; +import java.util.Date; +import java.util.List; +import java.util.Properties; + +import org.json.JSONObject; + +import org.onap.music.logging.EELFLoggerDelegate; +import org.onap.music.mdbc.TableInfo; +import com.datastax.driver.core.utils.Bytes; + +/** + * Utility functions used by several of the mixins should go here. + * + * @author Robert P. 
Eby + */ +public class Utils { + private static EELFLoggerDelegate logger = EELFLoggerDelegate.getLogger(Utils.class); + + /** + * Transforms and JsonObject into an array of objects + * @param ti information related to the table + * @param tbl table that jo belong to + * @param jo object that represents a row in the table + * @param musicDefaultPrimaryKeyName contains the name of key associated with the default primary key used by MUSIC, it can be null, if not requird + * @return array with the objects in the row + */ + public static Object[] jsonToRow(TableInfo ti, String tbl, JSONObject jo, String musicDefaultPrimaryKeyName) { + int columnSize = ti.columns.size(); + ArrayList rv = new ArrayList(); + if (musicDefaultPrimaryKeyName!=null && jo.has(musicDefaultPrimaryKeyName)) { + rv.add(jo.getString(musicDefaultPrimaryKeyName)); + } + for (int i = 0; i < columnSize; i++) { + String colname = ti.columns.get(i); + switch (ti.coltype.get(i)) { + case Types.BIGINT: + rv.add(jo.optLong(colname, 0)); + break; + case Types.BOOLEAN: + rv.add(jo.optBoolean(colname, false)); + break; + case Types.BLOB: + rv.add(jo.optString(colname, "")); + break; + case Types.DECIMAL: + rv.add(jo.optBigDecimal(colname, BigDecimal.ZERO)); + break; + case Types.DOUBLE: + rv.add(jo.optDouble(colname, 0)); + break; + case Types.INTEGER: + rv.add(jo.optInt(colname, 0)); + break; + case Types.TIMESTAMP: + //rv[i] = new Date(jo.optString(colname, "")); + rv.add(jo.optString(colname, "")); + break; + case Types.DATE: + case Types.VARCHAR: + //Fall through + default: + rv.add(jo.optString(colname, "")); + break; + } + } + return rv.toArray(); + } + + /** + * Return a String equivalent of an Object. Useful for writing SQL. + * @param val the object to String-ify + * @return the String value + */ + public static String getStringValue(Object val) { + if (val == null) + return "NULL"; + if (val instanceof String) + return "'" + val.toString().replaceAll("'", "''") + "'"; // double any quotes + if (val instanceof Number) + return ""+val; + if (val instanceof ByteBuffer) + return "'" + Bytes.toHexString((ByteBuffer)val).substring(2) + "'"; // substring(2) is to remove the "0x" at front + if (val instanceof Date) + return "'" + (new Timestamp(((Date)val).getTime())).toString() + "'"; + // Boolean, and anything else + return val.toString(); + } + + /** + * Parse result set and put into object array + * @param tbl + * @param rs + * @return + * @throws SQLException + */ + public static ArrayList parseResults(TableInfo ti, ResultSet rs) throws SQLException { + ArrayList results = new ArrayList(); + while (rs.next()) { + Object[] row = new Object[ti.columns.size()]; + for (int i = 0; i < ti.columns.size(); i++) { + String colname = ti.columns.get(i); + switch (ti.coltype.get(i)) { + case Types.BIGINT: + row[i] = rs.getLong(colname); + break; + case Types.BOOLEAN: + row[i] = rs.getBoolean(colname); + break; + case Types.BLOB: + System.err.println("WE DO NOT SUPPORT BLOBS IN H2!! COLUMN NAME="+colname); + //logger.error("WE DO NOT SUPPORT BLOBS IN H2!! COLUMN NAME="+colname); + // throw an exception here??? 
+ break; + case Types.DOUBLE: + row[i] = rs.getDouble(colname); + break; + case Types.INTEGER: + row[i] = rs.getInt(colname); + break; + case Types.TIMESTAMP: + //rv[i] = new Date(jo.optString(colname, "")); + row[i] = rs.getString(colname); + break; + case Types.VARCHAR: + //Fall through + default: + row[i] = rs.getString(colname); + break; + } + } + results.add(row); + } + return results; + } + + @SuppressWarnings("unused") + static List> getClassesImplementing(Class implx) { + Properties pr = null; + try { + pr = new Properties(); + pr.load(Utils.class.getResourceAsStream("/mdbc_driver.properties")); + } + catch (IOException e) { + logger.error(EELFLoggerDelegate.errorLogger, "Could not load property file > " + e.getMessage()); + } + + List> list = new ArrayList>(); + if (pr==null) { + return list; + } + String mixins = pr.getProperty("MIXINS"); + for (String className: mixins.split("[ ,]")) { + try { + Class cl = Class.forName(className.trim()); + if (MixinFactory.impl(cl, implx)) { + list.add(cl); + } + } catch (ClassNotFoundException e) { + logger.error(EELFLoggerDelegate.errorLogger,"Mixin class "+className+" not found."); + } + } + return list; + } + + public static void registerDefaultDrivers() { + Properties pr = null; + try { + pr = new Properties(); + pr.load(Utils.class.getResourceAsStream("/mdbc_driver.properties")); + } + catch (IOException e) { + logger.error("Could not load property file > " + e.getMessage()); + } + + @SuppressWarnings("unused") + List> list = new ArrayList>(); + String drivers = pr.getProperty("DEFAULT_DRIVERS"); + for (String driver: drivers.split("[ ,]")) { + logger.info(EELFLoggerDelegate.applicationLogger, "Registering jdbc driver '" + driver + "'"); + try { + @SuppressWarnings("unused") + Class cl = Class.forName(driver.trim()); + } catch (ClassNotFoundException e) { + logger.error(EELFLoggerDelegate.errorLogger,"Driver class "+driver+" not found."); + } + } + } + + public static Properties getMdbcProperties() { + Properties prop = new Properties(); + InputStream input = null; + try { + input = Utils.class.getClassLoader().getResourceAsStream("/mdbc.properties"); + prop.load(input); + } catch (Exception e) { + logger.warn(EELFLoggerDelegate.applicationLogger, "Could load mdbc.properties." + + "Proceeding with defaults " + e.getMessage()); + } finally { + if (input != null) { + try { + input.close(); + } catch (IOException e) { + logger.error(EELFLoggerDelegate.errorLogger, e.getMessage()); + } + } + } + return prop; + } +} diff --git a/src/main/java/org/onap/music/mdbc/mixins/package-info.java b/src/main/java/org/onap/music/mdbc/mixins/package-info.java new file mode 100755 index 0000000..703a119 --- /dev/null +++ b/src/main/java/org/onap/music/mdbc/mixins/package-info.java @@ -0,0 +1,47 @@ +/** + *

+ * This package provides the "mixins" to use when constructing a MusicSqlManager. The mixins define how MusicSqlManager + * will interface both to the database being mirrored (via the {@link org.onap.music.mdbc.mixins.DBInterface} interface), + * and how it will interface to the persistence layer provided by MUSIC (via the {@link org.onap.music.mdbc.mixins.MusicInterface} + * interface). + *

+ *

+ * The choice of which mixins to use is determined by the MusicSqlManager constructor. + * It will decide based upon the URL and connection properties with which it is presented (from the + * {@link java.sql.DriverManager#getConnection(String, java.util.Properties)} call). + *

+ *

+ * The list of mixins that may be selected from is stored in the properties files mdbc.properties + * under the name MIXINS. This implementation provides the following mixins: + *

+ * <table>
+ * <tr><th>Name</th><th>Class</th><th>Description</th></tr>
+ * <tr><td>cassandra</td><td>c.a.r.m.m.CassandraMixin</td><td>A Cassandra based persistence layer (without any of the table locking that MUSIC normally provides).</td></tr>
+ * <tr><td>cassandra2</td><td>c.a.r.m.m.Cassandra2Mixin</td><td>Similar to the cassandra mixin, but stores all dirty row information in one table, rather than one table per real table.</td></tr>
+ * <tr><td>h2</td><td>c.a.r.m.m.H2Mixin</td><td>This mixin provides access to either an in-memory, or a local (file-based) version of the H2 database.</td></tr>
+ * <tr><td>h2server</td><td>c.a.r.m.m.H2ServerMixin</td><td>This mixin provides access to a copy of the H2 database running as a server. Because the server needs special Java classes in order to handle certain TRIGGER actions, the server must be set up in a special way (see below).</td></tr>
+ * <tr><td>mysql</td><td>c.a.r.m.m.MySQLMixin</td><td>This mixin provides access to MySQL running on a remote server.</td></tr>
+ * </table>
+ *
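+ * <p>Mixin selection normally happens inside the MusicSqlManager constructor, driven by the connection URL
+ * and properties. Purely as an illustration, the underlying factory calls look roughly like this; the
+ * msm, url, conn, info and partition arguments are assumed to already exist:</p>
+ * <pre>
+ * DBInterface    dbi = MixinFactory.createDBInterface("mysql", msm, url, conn, info);
+ * MusicInterface mi  = MixinFactory.createMusicInterface("cassandra2", url, info, partition);
+ * </pre>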

Starting the H2 Server

+ *

+ * The H2 Server, when used with MDBC, must contain the MDBC Trigger class, and supporting libraries. + * This can be done as follows: + *

+ *
+ *	CLASSPATH=$PWD/target/mdbc-h2server-0.0.1-SNAPSHOT.jar
+ *	CLASSPATH=$CLASSPATH:$HOME/.m2/repository/com/h2database/h2/1.3.168/h2-1.3.168.jar
+ *	CLASSPATH=$CLASSPATH:$HOME/.m2/repository/log4j/log4j/1.2.17/log4j-1.2.17.jar
+ *	CLASSPATH=$CLASSPATH:$HOME/.m2/repository/org/json/json/20160810/json-20160810.jar
+ *	export CLASSPATH
+ *	java org.h2.tools.Server
+ * 
+ *

+ * The mdbc-h2server-0.0.1-SNAPSHOT.jar file is built with Maven using the pom-h2server.xml pom file. + *

+ */ +package org.onap.music.mdbc.mixins; diff --git a/src/main/java/org/onap/music/mdbc/package-info.java b/src/main/java/org/onap/music/mdbc/package-info.java new file mode 100755 index 0000000..576ab88 --- /dev/null +++ b/src/main/java/org/onap/music/mdbc/package-info.java @@ -0,0 +1,87 @@ +/** + *

+ * This package provides a JDBC driver that can be used to mirror the contents of a database to and from + * Cassandra. The mirroring occurs as a side effect of + * execute() statements against a JDBC connection, and triggers placed in the database to catch database modifications. + * The initial implementation is written to mirror an H2 database. + *

+ *

+ * This JDBC driver will intercept all table creations, SELECTs, INSERTs, DELETEs, and UPDATEs made to the underlying + * database, and make sure they are copied to Cassandra. In addition, for every table XX that is created, another table + * DIRTY_XX will be created to communicate the existence of dirty rows to other Cassandra replicas (with the + * Cassandra2 Mixin, the table is called DIRTY____ and there is only one table). Dirty rows + * will be copied, as needed back into the database from Cassandra before any SELECT. + *

+ *

To use with JDBC

+ *
    + *
 * <ol>
 * <li>Add this jar, and all dependent jars to your CLASSPATH.</li>
 * <li>Rewrite your JDBC URLs from jdbc:h2:... to jdbc:mdbc:...</li>
 * <li>If you supply properties to the {@link java.sql.DriverManager#getConnection(String, java.util.Properties)} call,
 * use the following optional properties to control behavior of the proxy:
 * <table>
 * <tr><th>Property Name</th><th>Property Value</th><th>Default Value</th></tr>
 * <tr><td>MDBC_DB_MIXIN</td><td>The mixin name to use to select the database mixin to use for this connection.</td><td></td></tr>
 * <tr><td>MDBC_MUSIC_MIXIN</td><td>The mixin name to use to select the MUSIC mixin to use for this connection.</td><td></td></tr>
 * <tr><td>myid</td><td>The ID of this replica in the collection of replicas sharing the same tables.</td><td>0</td></tr>
 * <tr><td>replicas</td><td>A comma-separated list of replica names for the collection of replicas sharing the same tables.</td><td>the value of myid</td></tr>
 * <tr><td>music_keyspace</td><td>The keyspace name to use in Cassandra for all tables created by this instance of MDBC.</td><td>mdbc</td></tr>
 * <tr><td>music_address</td><td>The IP address to use to connect to Cassandra.</td><td>localhost</td></tr>
 * <tr><td>music_rfactor</td><td>The replication factor to use for the new keyspace that is created.</td><td>2</td></tr>
 * <tr><td>disabled</td><td>If set to true the mirroring is completely disabled; this is the equivalent of using the database driver directly.</td><td>false</td></tr>
 * </table>
 * </li>
 * <li>Load the driver using the following call:
 * <pre>
 *	Class.forName("org.onap.music.mdbc.ProxyDriver");
 * </pre>
 * </li>
 * </ol>
+ *
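+ * <p>Putting the steps above together, a minimal illustrative connection against an in-memory database,
+ * using example property values only (the database name "test" is hypothetical), would look like:</p>
+ * <pre>
+ * Class.forName("org.onap.music.mdbc.ProxyDriver");
+ * Properties p = new Properties();
+ * p.setProperty("myid", "0");
+ * p.setProperty("replicas", "0,1,2");
+ * p.setProperty("music_keyspace", "mdbc");
+ * p.setProperty("music_address", "localhost");
+ * Connection conn = DriverManager.getConnection("jdbc:mdbc:mem:test", p);
+ * </pre>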

Because, under the current design, the MDBC driver must be running within the same JVM as the database, MDBC + * will only explicitly support in-memory databases (URL of jdbc:mdbc:mem:...), or local file + * databases (URL of jdbc:mdbc:/path/to/file). Attempts to access a remote H2 server (URL + * jdbc:mdbc:tcp://host/path/to/db) will probably not work, although MDBC will not stop you from trying. + *

+ * + *

To Define a Tomcat DataSource Resource

+ *

The following code snippet can be used as a guide when setting up a Tomcat DataSource Resource. + * This snippet goes in the server.xml file. The items in bold indicate changed or new items:

+ *
+ * <Resource name="jdbc/ProcessEngine"
+ *	auth="Container"
+ *	type="javax.sql.DataSource"
+ *	factory="org.apache.tomcat.jdbc.pool.DataSourceFactory"
+ *	uniqueResourceName="process-engine"
+ *	driverClassName="org.onap.music.mdbc.ProxyDriver"
+ *	url="jdbc:mdbc:./camunda-h2-dbs/process-engine;MVCC=TRUE;TRACE_LEVEL_FILE=0;DB_CLOSE_ON_EXIT=FALSE"
+ *	connectionProperties="myid=0;replicas=0,1,2;music_keyspace=camunda;music_address=localhost"
+ *	username="sa"
+ *	password="sa"
+ *	maxActive="20"
+ *	minIdle="5" />
+ * 
+ * + *

To Define a JBoss DataSource

+ *

The following code snippet can be used as a guide when setting up a JBoss DataSource. + * This snippet goes in the service.xml file. The items in bold indicate changed or new items:

+ *
+ * <datasources>
+ *   <datasource jta="true" jndi-name="java:jboss/datasources/ProcessEngine" pool-name="ProcessEngine" enabled="true" use-java-context="true" use-ccm="true">
+ *      <connection-url>jdbc:mdbc:/opt/jboss-eap-6.2.4/standalone/camunda-h2-dbs/process-engine;DB_CLOSE_DELAY=-1;MVCC=TRUE;DB_CLOSE_ON_EXIT=FALSE</connection-url>
+ *      <connection-property name="music_keyspace">
+ *        camunda
+ *      </connection-property>
+ *      <driver>mdbc</driver>
+ *      <security>
+ *        <user-name>sa</user-name>
+ *        <password>sa</password>
+ *      </security>
+ *    </datasource>
+ *    <drivers>
+ *      <driver name="mdbc" module="org.onap.music.mdbc">
+ *        <driver-class>org.onap.music.mdbc.ProxyDriver</driver-class>
+ *      </driver>
+ *    </drivers>
+ *  </datasources>
+ * 
+ *

Note: This assumes that you have built and installed the org.onap.music.mdbc module within JBoss. + */ +package org.onap.music.mdbc; diff --git a/src/main/java/org/onap/music/mdbc/tables/MriReference.java b/src/main/java/org/onap/music/mdbc/tables/MriReference.java new file mode 100644 index 0000000..29de8d0 --- /dev/null +++ b/src/main/java/org/onap/music/mdbc/tables/MriReference.java @@ -0,0 +1,14 @@ +package org.onap.music.mdbc.tables; + +import java.util.UUID; + +public final class MriReference { + public final String table; + public final UUID index; + + public MriReference(String table, UUID index) { + this.table = table; + this.index= index; + } + +} diff --git a/src/main/java/org/onap/music/mdbc/tables/MusicRangeInformationRow.java b/src/main/java/org/onap/music/mdbc/tables/MusicRangeInformationRow.java new file mode 100644 index 0000000..5d7bc97 --- /dev/null +++ b/src/main/java/org/onap/music/mdbc/tables/MusicRangeInformationRow.java @@ -0,0 +1,16 @@ +package org.onap.music.mdbc.tables; + +import java.util.List; +import java.util.UUID; + +public final class MusicRangeInformationRow { + public final UUID index; + public final PartitionInformation partition; + public final List redoLog; + + public MusicRangeInformationRow(UUID index, List redoLog, PartitionInformation partition) { + this.index = index; + this.redoLog = redoLog; + this.partition = partition; + } +} diff --git a/src/main/java/org/onap/music/mdbc/tables/MusixTxDigestId.java b/src/main/java/org/onap/music/mdbc/tables/MusixTxDigestId.java new file mode 100644 index 0000000..0eccd53 --- /dev/null +++ b/src/main/java/org/onap/music/mdbc/tables/MusixTxDigestId.java @@ -0,0 +1,15 @@ +package org.onap.music.mdbc.tables; + +import java.util.UUID; + +public final class MusixTxDigestId { + public final UUID tablePrimaryKey; + + public MusixTxDigestId(UUID primaryKey) { + this.tablePrimaryKey= primaryKey; + } + + public boolean isEmpty() { + return (this.tablePrimaryKey==null); + } +} diff --git a/src/main/java/org/onap/music/mdbc/tables/Operation.java b/src/main/java/org/onap/music/mdbc/tables/Operation.java new file mode 100644 index 0000000..85428a7 --- /dev/null +++ b/src/main/java/org/onap/music/mdbc/tables/Operation.java @@ -0,0 +1,28 @@ +package org.onap.music.mdbc.tables; + +import java.io.Serializable; + +import org.json.JSONObject; +import org.json.JSONTokener; + +public final class Operation implements Serializable{ + + private static final long serialVersionUID = -1215301985078183104L; + + final OperationType TYPE; + final String NEW_VAL; + + public Operation(OperationType type, String newVal) { + TYPE = type; + NEW_VAL = newVal; + } + + public JSONObject getNewVal(){ + JSONObject newRow = new JSONObject(new JSONTokener(NEW_VAL)); + return newRow; + } + + public OperationType getOperationType() { + return this.TYPE; + } +} diff --git a/src/main/java/org/onap/music/mdbc/tables/OperationType.java b/src/main/java/org/onap/music/mdbc/tables/OperationType.java new file mode 100644 index 0000000..1ccd919 --- /dev/null +++ b/src/main/java/org/onap/music/mdbc/tables/OperationType.java @@ -0,0 +1,5 @@ +package org.onap.music.mdbc.tables; + +public enum OperationType{ + DELETE, UPDATE, INSERT, SELECT +} diff --git a/src/main/java/org/onap/music/mdbc/tables/PartitionInformation.java b/src/main/java/org/onap/music/mdbc/tables/PartitionInformation.java new file mode 100644 index 0000000..3f99098 --- /dev/null +++ b/src/main/java/org/onap/music/mdbc/tables/PartitionInformation.java @@ -0,0 +1,11 @@ +package 
org.onap.music.mdbc.tables; + +import java.util.List; + +public class PartitionInformation { + public final List tables; + + public PartitionInformation(List tables) { + this.tables=tables; + } +} diff --git a/src/main/java/org/onap/music/mdbc/tables/StagingTable.java b/src/main/java/org/onap/music/mdbc/tables/StagingTable.java new file mode 100644 index 0000000..6e93856 --- /dev/null +++ b/src/main/java/org/onap/music/mdbc/tables/StagingTable.java @@ -0,0 +1,51 @@ +package org.onap.music.mdbc.tables; + +import java.io.Serializable; +import java.util.Deque; +import java.util.HashMap; +import java.util.LinkedList; +import java.util.Set; +import org.apache.commons.lang3.tuple.Pair; +import org.json.JSONObject; + +import org.onap.music.logging.EELFLoggerDelegate; + +public class StagingTable implements Serializable{ + /** + * + */ + private static final long serialVersionUID = 7583182634761771943L; + private transient static EELFLoggerDelegate logger = EELFLoggerDelegate.getLogger(StagingTable.class); + //primary key -> Operation + private HashMap> operations; + + public StagingTable() { + operations = new HashMap<>(); + } + + synchronized public void addOperation(String key, OperationType type, String newVal) { + if(!operations.containsKey(key)) { + operations.put(key, new LinkedList<>()); + } + operations.get(key).add(new Operation(type,newVal)); + } + + synchronized public Deque> getIterableSnapshot() throws NoSuchFieldException{ + Deque> response=new LinkedList>(); + //\TODO: check if we can just return the last change to a given key + Set keys = operations.keySet(); + for(String key : keys) { + Deque ops = operations.get(key); + if(ops.isEmpty()) { + logger.error(EELFLoggerDelegate.errorLogger, "Invalid state of the Operation data structure when creating snapshot"); + throw new NoSuchFieldException("Invalid state of the operation data structure"); + } + response.add(Pair.of(key,ops.getLast())); + } + return response; + } + + synchronized public void clean() { + operations.clear(); + } +} diff --git a/src/main/java/org/onap/music/mdbc/tables/TxCommitProgress.java b/src/main/java/org/onap/music/mdbc/tables/TxCommitProgress.java new file mode 100644 index 0000000..73ef4b2 --- /dev/null +++ b/src/main/java/org/onap/music/mdbc/tables/TxCommitProgress.java @@ -0,0 +1,206 @@ +package org.onap.music.mdbc.tables; + +import java.math.BigInteger; +import java.util.Map; +import java.util.UUID; +import java.util.concurrent.ConcurrentHashMap; +import com.datastax.driver.core.utils.UUIDs; + +import org.onap.music.logging.EELFLoggerDelegate; + +import java.sql.Connection; +import java.util.concurrent.atomic.AtomicReference; + + +public class TxCommitProgress{ + private EELFLoggerDelegate logger = EELFLoggerDelegate.getLogger(TxCommitProgress.class); + + private Map transactionInfo; + + public TxCommitProgress(){ + transactionInfo = new ConcurrentHashMap<>(); + } + + public boolean containsTx(String txId) { + return transactionInfo.containsKey(txId); + } + + public UUID getCommitId(String txId) { + CommitProgress prog = transactionInfo.get(txId); + if(prog.isCommitIdAssigned()) { + return prog.getCommitId(); + } + UUID commitId = UUIDs.random(); + prog.setCommitId(commitId); + return commitId; + } + + public void createNewTransactionTracker(String id, Connection conn) { + transactionInfo.put(id, new CommitProgress(id,conn)); + } + + public void commitRequested(String txId) { + CommitProgress prog = transactionInfo.get(txId); + if(prog == null){ + logger.error(EELFLoggerDelegate.errorLogger, "Transaction 
doesn't exist: [%l], failure when storing commit request",txId); + } + prog.setCommitRequested(); + } + + public void setSQLDone(String txId) { + CommitProgress prog = transactionInfo.get(txId); + if(prog == null){ + logger.error(EELFLoggerDelegate.errorLogger, "Transaction doesn't exist: [%l], failure when storing saving completion of SQL",txId); + } + prog.setSQLCompleted(); + } + + public void setMusicDone(String txId) { + CommitProgress prog = transactionInfo.get(txId); + if(prog == null){ + logger.error(EELFLoggerDelegate.errorLogger, "Transaction doesn't exist: [%l], failure when storing saving completion of Music",txId); + } + prog.setMusicCompleted(); + } + + public Connection getConnection(String txId){ + CommitProgress prog = transactionInfo.get(txId); + if(prog == null){ + logger.error(EELFLoggerDelegate.errorLogger, "Transaction doesn't exist: [%l], failure when retrieving statement",txId); + } + return prog.getConnection(); + } + + public void setRecordId(String txId, MusixTxDigestId recordId){ + CommitProgress prog = transactionInfo.get(txId); + if(prog == null){ + logger.error(EELFLoggerDelegate.errorLogger, "Transaction doesn't exist: [%l], failure when setting record Id",txId); + } + prog.setRecordId(recordId); + } + + public MusixTxDigestId getRecordId(String txId) { + CommitProgress prog = transactionInfo.get(txId); + if(prog == null){ + logger.error(EELFLoggerDelegate.errorLogger, "Transaction doesn't exist: [%l], failure when getting record Id",txId); + } + return prog.getRecordId(); + } + + public boolean isRecordIdAssigned(String txId) { + CommitProgress prog = transactionInfo.get(txId); + if(prog == null){ + logger.error(EELFLoggerDelegate.errorLogger, "Transaction doesn't exist: [%l], failure when checking record",txId); + } + return prog.isRedoRecordAssigned(); + } + + public boolean isComplete(String txId) { + CommitProgress prog = transactionInfo.get(txId); + if(prog == null){ + logger.error(EELFLoggerDelegate.errorLogger, "Transaction doesn't exist: [%l], failure when checking completion",txId); + } + return prog.isComplete(); + } + + public void reinitializeTxProgress(String txId) { + CommitProgress prog = transactionInfo.get(txId); + if(prog == null){ + logger.error(EELFLoggerDelegate.errorLogger, "Transaction doesn't exist: [%l], failure when reinitializing tx progress",txId); + } + prog.reinitialize(); + } + + public void deleteTxProgress(String txId){ + transactionInfo.remove(txId); + } +} + +final class CommitProgress{ + private String lTxId; // local transaction id + private UUID commitId; // commit id + private boolean commitRequested; //indicates if the user tried to commit the request already. + private boolean SQLDone; // indicates if SQL was already committed + private boolean MusicDone; // indicates if music commit was already performed, atomic bool + private Connection connection;// reference to a connection object. This is used to complete a commit if it failed in the original thread. 
+ private Long timestamp; // last time this data structure was updated + private MusixTxDigestId musixTxDigestId;// record id for each partition + + public CommitProgress(String id,Connection conn){ + musixTxDigestId =null; + lTxId = id; + commitRequested = false; + SQLDone = false; + MusicDone = false; + connection = conn; + commitId = null; + timestamp = System.currentTimeMillis(); + } + + public synchronized boolean isComplete() { + return commitRequested && SQLDone && MusicDone; + } + + public synchronized void setCommitId(UUID commitId) { + this.commitId = commitId; + timestamp = System.currentTimeMillis(); + } + + public synchronized void reinitialize() { + commitId = null; + musixTxDigestId =null; + commitRequested = false; + SQLDone = false; + MusicDone = false; + timestamp = System.currentTimeMillis(); + } + + public synchronized void setCommitRequested() { + commitRequested = true; + timestamp = System.currentTimeMillis(); + } + + public synchronized void setSQLCompleted() { + SQLDone = true; + timestamp = System.currentTimeMillis(); + } + + public synchronized void setMusicCompleted() { + MusicDone = true; + timestamp = System.currentTimeMillis(); + } + + public Connection getConnection() { + timestamp = System.currentTimeMillis(); + return connection; + } + + public long getTimestamInMillis() { + return timestamp; + } + + public synchronized void setRecordId(MusixTxDigestId id) { + musixTxDigestId = id; + timestamp = System.currentTimeMillis(); + } + + public synchronized boolean isRedoRecordAssigned() { + return this.musixTxDigestId !=null; + } + + public synchronized MusixTxDigestId getRecordId() { + return musixTxDigestId; + } + + public synchronized UUID getCommitId() { + return commitId; + } + + public synchronized String getId() { + return this.lTxId; + } + + public synchronized boolean isCommitIdAssigned() { + return this.commitId!= null; + } +} \ No newline at end of file diff --git a/src/main/java/org/onap/music/mdbc/tests/ConnectionTest.java b/src/main/java/org/onap/music/mdbc/tests/ConnectionTest.java new file mode 100644 index 0000000..5a0d98c --- /dev/null +++ b/src/main/java/org/onap/music/mdbc/tests/ConnectionTest.java @@ -0,0 +1,419 @@ +package org.onap.music.mdbc.tests; + +//import java.sql.Connection; +//import java.sql.DriverManager; +//import java.sql.PreparedStatement; +//import java.sql.ResultSet; +//import java.sql.SQLException; +//import java.sql.Statement; +//import java.util.HashSet; +//import java.util.Properties; +//import java.util.Set; +// +//import org.h2.tools.Server; +//import org.junit.After; +//import org.junit.AfterClass; +//import org.junit.Before; +//import org.junit.BeforeClass; +//import org.junit.Test; +//import org.slf4j.Logger; +//import org.slf4j.LoggerFactory; +// +//import com.mysql.jdbc.jdbc2.optional.MysqlDataSource; + + +//@FixMethodOrder(MethodSorters.NAME_ASCENDING) +//@RunWith(ConcurrentTestRunner.class) +public class ConnectionTest { +// +//// static { +//// System.setProperty(org.slf4j.impl.SimpleLogger.DEFAULT_LOG_LEVEL_KEY, "INFO"); +//// System.setProperty(org.slf4j.impl.SimpleLogger.LOG_FILE_KEY, String.format("ComparativeAnalysisTest-%d.log", System.currentTimeMillis())); +//// } +// private static final Logger LOG = LoggerFactory.getLogger(ConnectionTest.class); +// +// Set runningThreads = new HashSet(); +// +// @BeforeClass +// public static void setUpBeforeClass() throws Exception { +// +// } +// +// @AfterClass +// public static void tearDownAfterClass() throws Exception { +// +// } +// +// @Before +// public void 
setUp() throws Exception { +// +// } +// +// @After +// public void tearDown() throws Exception { +// +// } +// +// //@Test +// public void test01() { +// System.out.println("TEST 1: Getting ready for testing connection to Cassandra"); +// +// final CassandraConnector client = new CassandraConnector(); +// final String ipAddress = "localhost"; +// final int port = 9042; +// LOG.info("Connecting to IP Address " + ipAddress + ":" + port + "..."); +// client.connect(ipAddress, port); +// client.close(); +// System.out.println(); +// } +// +// /** +// * Tests for using jdbc as well as mdbc. In order to use, must have mysql and +// * running locally. Must have a database EMP created in the +// * mysql db. Uses "Driver.getConnection(com.mysql.jdbc.Driver)" for jdbc connection +// * +// */ +// //@Test +// public void test02() { +// System.out.println("TEST 2: Getting ready for testing connection via jdbc"); +// // JDBC driver name and database URL +// final String JDBC_DRIVER = "com.mysql.jdbc.Driver"; +// final String DB_URL = "jdbc:mysql://localhost/EMP"; +// +// // Database credentials +// final String USER = "alice"; +// final String PASS = "bob"; +// Properties connectionProps = new Properties(); +// connectionProps.put("user", USER); +// connectionProps.put("password", PASS); +// +// System.out.println("Connecting directly to database..."); +// connectViaDriverManager(JDBC_DRIVER, DB_URL, connectionProps); +// System.out.println(); +// } +// +// /** +// * Performs same test as @test02() except this test uses mdbc. +// * +// * In order to use, must have mysql and Cassandra services running locally. Must +// * have a database EMP created in the mysql db. Uses +// * "Driver.getConnection(org.onap.music.mdbc.ProxyDriver)" for mdbc +// * connection +// */ +// //@Test +// public void test03() { +// System.out.println("TEST 3: Getting ready for testing connection via mdbc"); +// // Database credentials +// final String USER = "alice"; +// final String PASS = "bob"; +// Properties connectionProps = new Properties(); +// connectionProps.put("user", USER); +// connectionProps.put("password", PASS); +// +// final String MDBC_DRIVER = "org.onap.music.mdbc.ProxyDriver"; +// final String MDBC_DB_URL = "jdbc:mdbc://localhost/TEST"; +// final String MDBC_DB_MIXIN = "mysql"; +// connectionProps.put("MDBC_DB_MIXIN", MDBC_DB_MIXIN); +// +// System.out.println("Connecting to database via mdbc"); +// connectViaDriverManager(MDBC_DRIVER, MDBC_DB_URL, connectionProps); +// System.out.println(); +// } +// +// /** +// * Performs same test as @test02() except this test uses mdbc. +// * +// * In order to use, must have mysql and Cassandra services running locally. Must +// * have a database EMP created in the mysql db. 
Uses +// * "Driver.getConnection(org.onap.music.mdbc.ProxyDriver)" for mdbc +// * connection +// * +// * Uses preparedStatements +// */ +// //@Test +// public void test03point5() { +// System.out.println("TEST 3.5: Getting ready for testing connection via mdbc w/ PreparedStatement"); +// // Database credentials +// final String USER = "alice"; +// final String PASS = "bob"; +// Properties connectionProps = new Properties(); +// connectionProps.put("user", USER); +// connectionProps.put("password", PASS); +// +// final String MDBC_DRIVER = "org.onap.music.mdbc.ProxyDriver"; +// final String MDBC_DB_URL = "jdbc:mdbc://localhost/EMP"; +// //final String MDBC_DRIVER = "org.h2.Driver"; +// //final String MDBC_DB_URL = "jdbc:h2:tcp://localhost:9092/~/test"; +// final String MDBC_DB_MIXIN = "mysql"; +// connectionProps.put("MDBC_DB_MIXIN", MDBC_DB_MIXIN); +// +// System.out.println("Connecting to database via mdbc"); +// Connection conn = null; +// PreparedStatement stmt = null; +// try { +// //STEP 2: Register JDBC driver +// Class.forName(MDBC_DRIVER); +// +// //STEP 3: Open a connection +// conn = DriverManager.getConnection(MDBC_DB_URL, connectionProps); +// conn.setAutoCommit(false); +// +// //STEP 4: Execute a query +// System.out.println("Inserting into DB"); +// stmt = conn.prepareStatement("INSERT INTO EMPLOYEE (id, first, last, age) VALUES (?, ?, ?, ?)"); +// stmt.setString(1, null); +// stmt.setString(2, "John"); +// stmt.setString(3, "Smith"); +// stmt.setInt(4, 20); +// stmt.execute(); +// +// System.out.println("Inserting again into DB"); +// stmt.setString(2, "Jane"); +// stmt.setInt(4, 30); +// stmt.execute(); +// +// stmt.close(); +// +// conn.commit(); +// +// System.out.println("Querying the DB"); +// stmt = conn.prepareStatement("SELECT id, first, last, age FROM EMPLOYEE WHERE age < ?"); +// stmt.setInt(1, 25); +// ResultSet rs = stmt.executeQuery(); +// //STEP 5: Extract data from result set +// while(rs.next()) { +// //Retrieve by column name +// int id = rs.getInt("id"); +// int age = rs.getInt("age"); +// String first = rs.getString("first"); +// String last = rs.getString("last"); +// +// //Display values +// //* +// System.out.print("ID: " + id); +// System.out.print(", Age: " + age); +// System.out.print(", First: " + first); +// System.out.println(", Last: " + last); +// //*/ +// } +// +// System.out.println("Querying again"); +// stmt.setInt(1, 35); +// rs = stmt.executeQuery(); +// //STEP 5: Extract data from result set +// while(rs.next()) { +// //Retrieve by column name +// int id = rs.getInt("id"); +// int age = rs.getInt("age"); +// String first = rs.getString("first"); +// String last = rs.getString("last"); +// +// //Display values +// //* +// System.out.print("ID: " + id); +// System.out.print(", Age: " + age); +// System.out.print(", First: " + first); +// System.out.println(", Last: " + last); +// //*/ +// } +// +// +// //sql = "DELETE FROM EMPLOYEE WHERE first = \"John\" and last = \"Smith\""; +// //stmt.execute(sql); +// +// //sql = "DROP TABLE IF EXISTS EMPLOYEE"; +// //stmt.execute(sql); +// +// //STEP 6: Clean-up environment +// rs.close(); +// stmt.close(); +// conn.close(); +// } catch(SQLException se) { +// //Handle errors for JDBC +// se.printStackTrace(); +// } catch (Exception e) { +// //Handle errors for Class.forName +// e.printStackTrace(); +// } finally { +// //finally block used to close resources +// try { +// if(stmt!=null) +// stmt.close(); +// } catch(SQLException se2) { +// } +// try { +// if(conn!=null) +// conn.close(); +// } 
catch(SQLException se) { +// se.printStackTrace(); +// } +// } +// System.out.println("Done"); +// } +// +// +// /** +// * Connects to a generic database. Can be used for mdbc or jdbc +// * @param DBC_DRIVER the driver for which to register (Class.forName(DBC_DRIVER)) +// * @param DB_URL the URL for the database we are testing +// * @param connectionProps +// */ +// private void connectViaDriverManager(final String DBC_DRIVER, final String DB_URL, Properties connectionProps) { +// Connection conn = null; +// Statement stmt = null; +// try { +// +// //Server server = Server.createTcpServer("-tcpAllowOthers").start(); +// //STEP 2: Register JDBC driver +// Class.forName(DBC_DRIVER); +// +// //STEP 3: Open a connection +// conn = DriverManager.getConnection(DB_URL, connectionProps); +// conn.setAutoCommit(false); +// +// //STEP 4: Execute a query +// stmt = conn.createStatement(); +// String sql; +// +// //sql = "DROP TABLE EMPLOYEE"; +// //stmt.execute(sql); +// +// sql = "CREATE TABLE IF NOT EXISTS EMPLOYEE (id INT primary key, first VARCHAR(20), last VARCHAR(20), age INT);"; +// stmt.execute(sql); +// +// sql = "INSERT INTO EMPLOYEE (id, first, last, age) VALUES (\"34\", \"Jane4\", \"Doe4\", \"40\")"; +// stmt.execute(sql); +// +// sql = "SELECT id, first, last, age FROM EMPLOYEE"; +// ResultSet rs = stmt.executeQuery(sql); +// +// //STEP 5: Extract data from result set +// while(rs.next()) { +// //Retrieve by column name +// int id = rs.getInt("id"); +// int age = rs.getInt("age"); +// String first = rs.getString("first"); +// String last = rs.getString("last"); +// +// //Display values +// //* +// System.out.print("ID: " + id); +// System.out.print(", Age: " + age); +// System.out.print(", First: " + first); +// System.out.println(", Last: " + last); +// //*/ +// +// } +// //sql = "DELETE FROM EMPLOYEE WHERE first = \"John\" and last = \"Smith\""; +// //stmt.execute(sql); +// +// //sql = "DROP TABLE IF EXISTS EMPLOYEE"; +// //stmt.execute(sql); +// +// conn.commit(); +// +// //STEP 6: Clean-up environment +// rs.close(); +// stmt.close(); +// conn.close(); +// } catch(SQLException se) { +// //Handle errors for JDBC +// se.printStackTrace(); +// } catch (Exception e) { +// //Handle errors for Class.forName +// e.printStackTrace(); +// } finally { +// //finally block used to close resources +// try { +// if(stmt!=null) +// stmt.close(); +// } catch(SQLException se2) { +// } +// try { +// if(conn!=null) +// conn.close(); +// } catch(SQLException se) { +// se.printStackTrace(); +// } +// } +// } +// +// +// +// /** +// * Must be mysql datasource +// * @throws Exception +// */ +// //@Test +// public void test04() throws Exception { +// String dbConnectionName = "testing"; +// String dbUserId = "alice"; +// String dbPasswd = "bob"; +// String db_url = "jdbc:mysql://localhost/EMP"; +// MysqlDataSource dataSource = new MysqlDataSource(); +// dataSource.setUser(dbUserId); +// dataSource.setPassword(dbPasswd); +// dataSource.setURL(db_url); +// +// +// Connection con = dataSource.getConnection(); +// Statement st = con.createStatement(); +// ResultSet rs = null; +// +// //FIXME CREATE EMPLOYEE TABLE +// +// if (st.execute("insert into EMPLOYEE values (\"John Doe\");")) { +// rs = st.getResultSet(); +// } +// +// rs = st.executeQuery("select * from EMPLOYEE;"); +// while (rs.next()) { +// System.out.println(rs.getString("name")); +// } +// +// if (st.execute("DELETE FROM EMPLOYEE")) { +// rs = st.getResultSet(); +// } +// rs.close(); +// st.close(); +// con.close(); +// } +// +// /** +// * Test 
connection to mysql datasource class +// * @throws Exception +// */ +// @Test +// public void test05() throws Exception { +// String dbConnectionName = "testing"; +// String dbUserId = "alice"; +// String dbPasswd = "bob"; +// String db_url = "jdbc:mdbc://localhost/EMP"; +// String db_type = "mysql"; +// MdbcDataSource dataSource = new MdbcDataSource(); +// dataSource.setUser(dbUserId); +// dataSource.setPassword(dbPasswd); +// dataSource.setURL(db_url); +// dataSource.setDBType(db_type); +// +// Connection con = dataSource.getConnection(); +// Statement st = con.createStatement(); +// ResultSet rs = null; +// +// if (st.execute("insert into EMPLOYEE values (\"John Doe\");")) { +// rs = st.getResultSet(); +// } +// +// rs = st.executeQuery("select * from EMPLOYEE;"); +// while (rs.next()) { +// System.out.println(rs.getString("name")); +// } +// +// if (st.execute("DELETE FROM EMPLOYEE")) { +// rs = st.getResultSet(); +// } +// rs.close(); +// st.close(); +// con.close(); +// } +} diff --git a/src/main/java/org/onap/music/mdbc/tests/MAIN.java b/src/main/java/org/onap/music/mdbc/tests/MAIN.java new file mode 100755 index 0000000..160868b --- /dev/null +++ b/src/main/java/org/onap/music/mdbc/tests/MAIN.java @@ -0,0 +1,106 @@ +package org.onap.music.mdbc.tests; + +import java.io.FileInputStream; +import java.io.InputStream; +import java.lang.reflect.Constructor; +import java.util.ArrayList; +import java.util.List; + +import org.apache.log4j.Logger; +import org.json.JSONArray; +import org.json.JSONObject; +import org.json.JSONTokener; + +/** + * Run all the tests against all the configurations specified in /tests.json. + * + * @author Robert Eby + */ +public class MAIN { + public static final String CONFIG = "/tests.json"; + + /** + * This class runs all the tests against all the configurations specified in /tests.json. + * It assumes that a copy of Cassandra is running locally on port 9042, that a copy of H2 + * server is is running locally on port 8082, and that a copy of MySQL is running locally + * on port 3306. These can be adjusted by editing the /tests.json file. + * + * @param args command line arguments + * @throws Exception if anything goes wrong + */ + public static void main(String[] args) throws Exception { + new MAIN(args).run(); + System.exit(0); + } + + private JSONArray configs; + private List tests; + private int total_success, total_failure; + + public MAIN(String[] args) throws Exception { + configs = null; + tests = new ArrayList(); + total_success = total_failure = 0; + + InputStream is = null; + if (args.length == 0) { + is = this.getClass().getResourceAsStream(CONFIG); + } else { + is = new FileInputStream(args[0]); + } + if (is != null) { + JSONObject jo = new JSONObject(new JSONTokener(is)); + is.close(); + configs = jo.getJSONArray("configs"); + + JSONArray ja = jo.getJSONArray("tests"); + for (int i = 0; i < ja.length(); i++) { + Class cl = Class.forName(ja.getString(i).trim()); + if (cl != null) { + Constructor con = cl.getConstructor(); + tests.add((Test) con.newInstance()); + } + } + } else { + String conf = (args.length == 0) ? 
CONFIG : args[0]; + throw new Exception("Cannot find configuration resource: "+conf); + } + } + public void run() { + Logger logger = Logger.getLogger(this.getClass()); + for (int ix = 0; ix < configs.length(); ix++) { + JSONObject config = configs.getJSONObject(ix); + int succ = 0, fail = 0; + logger.info("*** Testing with configuration: "+config.getString("description")); + System.out.println("Testing with configuration: "+config.getString("description")); + for (Test t : tests) { + String nm = t.getName() + " ............................................................"; + System.out.print(" Test: "+nm.substring(0, 60)); + try { + List msgs = t.run(config); + if (msgs == null || msgs.size() == 0) { + succ++; + System.out.println(" OK!"); + } else { + fail++; + System.out.println(" Fail!"); + System.out.flush(); + for (String m : msgs) { + System.out.println(" "+m); + } + System.out.flush(); + } + } catch (Exception x) { + fail++; + System.out.println(" Fail!"); + } + } + System.out.println(); + total_success += succ; + total_failure += fail; + } + String m = "Testing completed: "+total_success+" successful tests, "+total_failure+": failures."; + logger.info(m); + System.out.println(m); + } +} diff --git a/src/main/java/org/onap/music/mdbc/tests/Test.java b/src/main/java/org/onap/music/mdbc/tests/Test.java new file mode 100755 index 0000000..67a78c8 --- /dev/null +++ b/src/main/java/org/onap/music/mdbc/tests/Test.java @@ -0,0 +1,105 @@ +package org.onap.music.mdbc.tests; + +import java.sql.Connection; +import java.sql.DriverManager; +import java.sql.ResultSet; +import java.sql.SQLException; +import java.sql.Statement; +import java.util.List; +import java.util.Properties; + +import org.json.JSONArray; +import org.json.JSONObject; + +/** + * Provides the abstract interface for a Test, as well as some common functions. + * + * @author Robert Eby + */ +public abstract class Test { + public static final String MDBC_DRIVER = "org.onap.music.mdbc.ProxyDriver"; + + /** + * Each test derived from this class must implement this method, + * which runs the test and produces a list of error messages. + * + * @param config a JSONObject describing the configuration to use for this run of the test + * @return the list of messages. If the list is empty, the test is considered to have run + * successfully. + */ + abstract public List run(JSONObject config); + + public String getName() { + String s = this.getClass().getName(); + return s.replaceAll("org.onap.music.mdbc.tests.", ""); + } + + public Properties buildProperties(JSONObject config, int i) { + Properties p = new Properties(); + for (String key : config.keySet()) { + if (key.equals("connections")) { + JSONArray ja = config.getJSONArray("connections"); + JSONObject connection = ja.getJSONObject(i); + for (String key2 : connection.keySet()) { + p.setProperty(key2, connection.getString(key2)); + } + } else { + p.setProperty(key, config.getString(key)); + } + } + return p; + } + + public Connection getDBConnection(Properties pr) throws SQLException, ClassNotFoundException { + Class.forName(MDBC_DRIVER); + String url = pr.getProperty("url"); + return DriverManager.getConnection(url, pr); + } + + public void assertNotNull(Object o) throws Exception { + if (o == null) + throw new Exception("Object is null"); + } + + public void assertTableContains(int connid, Connection conn, String tbl, Object... 
kv) throws Exception { + ResultSet rs = getRow(conn, tbl, kv); + boolean throwit = !rs.next(); + rs.close(); + if (throwit) { + throw new Exception("Conn id "+connid+" Table "+tbl+" does not have a row with "+catkeys(kv)); + } + } + public void assertTableDoesNotContain(int connid, Connection conn, String tbl, Object... kv) throws Exception { + boolean throwit = true; + try { + assertTableContains(connid, conn, tbl, kv); + } catch (Exception x) { + throwit = false; + } + if (throwit) { + throw new Exception("Conn id "+connid+" Table "+tbl+" does have a row with "+catkeys(kv)); + } + } + public ResultSet getRow(Connection conn, String tbl, Object... kv) throws SQLException { + Statement stmt = conn.createStatement(); + StringBuilder sql = new StringBuilder("SELECT * FROM ") + .append(tbl) + .append(" WHERE ") + .append(catkeys(kv)); + return stmt.executeQuery(sql.toString()); + } + public String catkeys(Object... kv) { + StringBuilder sql = new StringBuilder(); + String pfx = ""; + for (int i = 0; (i+1) < kv.length; i += 2) { + sql.append(pfx).append(kv[i]).append("="); + if (kv[i+1] instanceof String) { + sql.append("'").append(kv[i+1]).append("'"); + } else { + sql.append(kv[i+1].toString()); + } + pfx = " AND "; + } + return sql.toString(); + } +} diff --git a/src/main/java/org/onap/music/mdbc/tests/Test_Delete.java b/src/main/java/org/onap/music/mdbc/tests/Test_Delete.java new file mode 100755 index 0000000..6417ab7 --- /dev/null +++ b/src/main/java/org/onap/music/mdbc/tests/Test_Delete.java @@ -0,0 +1,70 @@ +package org.onap.music.mdbc.tests; + +import java.sql.Connection; +import java.sql.Statement; +import java.util.ArrayList; +import java.util.List; + +import org.json.JSONArray; +import org.json.JSONObject; + +/** + * Test that DELETEs work on the original DB, and are correctly copied to replica DBs. 
+ * + * @author Robert Eby + */ +public class Test_Delete extends Test { + private final String TBL = "DELTABLE"; + + @Override + public List run(JSONObject config) { + List msgs = new ArrayList(); + JSONArray connections = config.getJSONArray("connections"); + Connection[] conn = new Connection[connections.length()]; + Statement[] stmt = new Statement[conn.length]; + try { + for (int i = 0; i < conn.length; i++) { + conn[i] = getDBConnection(buildProperties(config, i)); + assertNotNull(conn[i]); + stmt[i] = conn[i].createStatement(); + assertNotNull(stmt[i]); + } + + try { + for (int i = 0; i < conn.length; i++) { + conn[i].setAutoCommit(true); + stmt[i].execute("CREATE TABLE IF NOT EXISTS DELTABLE(ID_ varchar(255), RANDOMTXT varchar(255), primary key (ID_))"); + } + stmt[0].execute("INSERT INTO DELTABLE(ID_, RANDOMTXT) VALUES('1', 'Everything''s Negotiable Except Cutting Medicaid')"); + stmt[0].execute("INSERT INTO DELTABLE(ID_, RANDOMTXT) VALUES('2', 'Can a Sideways Elevator Help Designers Build Taller Skyscrapers?')"); + stmt[0].execute("INSERT INTO DELTABLE(ID_, RANDOMTXT) VALUES('3', 'Can a Bernie Sanders Ally Win the Maryland Governor''s Mansion?')"); + for (int i = 0; i < conn.length; i++) { + assertTableContains(i, conn[i], TBL, "ID_", "1"); + assertTableContains(i, conn[i], TBL, "ID_", "2"); + assertTableContains(i, conn[i], TBL, "ID_", "3"); + } + + stmt[0].execute("DELETE FROM DELTABLE WHERE ID_ = '1'"); + for (int i = 0; i < conn.length; i++) { + assertTableDoesNotContain(i, conn[i], TBL, "ID_", "1"); + assertTableContains(i, conn[i], TBL, "ID_", "2"); + assertTableContains(i, conn[i], TBL, "ID_", "3"); + } + } catch (Exception e) { + msgs.add(e.toString()); + } finally { + for (int i = 0; i < stmt.length; i++) { + if (stmt[i] != null) + stmt[i].close(); + } + for (int i = 0; i < conn.length; i++) { + if (conn[i] != null) + conn[i].close(); + } + } + } catch (Exception e) { + msgs.add(e.toString()); + } + return msgs; + } +} diff --git a/src/main/java/org/onap/music/mdbc/tests/Test_Insert.java b/src/main/java/org/onap/music/mdbc/tests/Test_Insert.java new file mode 100755 index 0000000..1ea0908 --- /dev/null +++ b/src/main/java/org/onap/music/mdbc/tests/Test_Insert.java @@ -0,0 +1,94 @@ +package org.onap.music.mdbc.tests; + +import java.sql.Connection; +import java.sql.ResultSet; +import java.sql.Statement; +import java.util.ArrayList; +import java.util.List; + +import org.json.JSONArray; +import org.json.JSONObject; + +/** + * Test that INSERTs work to the original DB, and are correctly copied to replica DBs. 
+ * + * @author Robert Eby + */ +public class Test_Insert extends Test { + private final String PERSON = "PERSON"; + private final String SONG = "SONG"; + + @Override + public List run(JSONObject config) { + List msgs = new ArrayList(); + JSONArray connections = config.getJSONArray("connections"); + Connection[] conn = new Connection[connections.length()]; + Statement[] stmt = new Statement[conn.length]; + try { + for (int i = 0; i < conn.length; i++) { + conn[i] = getDBConnection(buildProperties(config, i)); + assertNotNull(conn[i]); + stmt[i] = conn[i].createStatement(); + assertNotNull(stmt[i]); + } + + try { + for (int i = 0; i < conn.length; i++) { + conn[i].setAutoCommit(true); + stmt[i].execute("CREATE TABLE IF NOT EXISTS PERSON(ID_ varchar(255), NAME varchar(255), SSN varchar(255), primary key (ID_))"); + } + stmt[0].execute("INSERT INTO PERSON(ID_, NAME, SSN) VALUES('1', 'Zaphod', '111-22-3333')"); + stmt[0].execute("INSERT INTO PERSON(ID_, NAME, SSN) VALUES('2', 'Ripley', '444-55-6666')"); + stmt[0].execute("INSERT INTO PERSON(ID_, NAME, SSN) VALUES('3', 'Spock', '777-88-9999')"); + for (int i = 0; i < conn.length; i++) { + assertTableContains(i, conn[i], PERSON, "ID_", "1"); + assertTableContains(i, conn[i], PERSON, "ID_", "2"); + assertTableContains(i, conn[i], PERSON, "ID_", "3"); + } + + stmt[0].execute("UPDATE PERSON SET NAME = 'Jabba' WHERE ID_ = '2'"); + for (int i = 0; i < conn.length; i++) { + ResultSet rs = getRow(conn[i], PERSON, "ID_", "2"); + if (rs.next()) { + String v = rs.getString("NAME"); + if (!v.equals("Jabba")) + throw new Exception("Table PERSON, row with ID_ = '2' was not updated."); + } else { + throw new Exception("Table PERSON does not have a row with ID_ = '2'"); + } + rs.close(); + } + + for (int i = 0; i < conn.length; i++) { + stmt[i].execute("CREATE TABLE IF NOT EXISTS SONG(ID_ varchar(255), PREF int, ARIA varchar(255), primary key (ID_, PREF))"); + } + stmt[0].execute("INSERT INTO SONG(ID_, PREF, ARIA) VALUES('1', 1, 'Nessun Dorma')"); + stmt[0].execute("INSERT INTO SONG(ID_, PREF, ARIA) VALUES('2', 5, 'O mio Bambino Caro')"); + stmt[0].execute("INSERT INTO SONG(ID_, PREF, ARIA) VALUES('2', 2, 'Sweet Georgia Brown')"); + stmt[0].execute("INSERT INTO SONG(ID_, PREF, ARIA) VALUES('3', 77, 'Mud Flats Blues')"); + stmt[0].execute("INSERT INTO SONG(ID_, PREF, ARIA) VALUES('3', 69, 'Me & Mr Jones')"); + for (int i = 0; i < conn.length; i++) { + assertTableContains(i, conn[i], SONG, "ID_", "1", "PREF", 1); + assertTableContains(i, conn[i], SONG, "ID_", "2", "PREF", 5); + assertTableContains(i, conn[i], SONG, "ID_", "2", "PREF", 2); + assertTableContains(i, conn[i], SONG, "ID_", "3", "PREF", 77); + assertTableContains(i, conn[i], SONG, "ID_", "3", "PREF", 69); + } + } catch (Exception e) { + msgs.add(e.toString()); + } finally { + for (int i = 0; i < stmt.length; i++) { + if (stmt[i] != null) + stmt[i].close(); + } + for (int i = 0; i < conn.length; i++) { + if (conn[i] != null) + conn[i].close(); + } + } + } catch (Exception e) { + msgs.add(e.toString()); + } + return msgs; + } +} diff --git a/src/main/java/org/onap/music/mdbc/tests/Test_Transactions.java b/src/main/java/org/onap/music/mdbc/tests/Test_Transactions.java new file mode 100755 index 0000000..787f1f5 --- /dev/null +++ b/src/main/java/org/onap/music/mdbc/tests/Test_Transactions.java @@ -0,0 +1,74 @@ +package org.onap.music.mdbc.tests; + +import java.sql.Connection; +import java.sql.Statement; +import java.util.ArrayList; +import java.util.List; + +import org.json.JSONArray; +import 
org.json.JSONObject; + +/** + * Test that transactions work between the original DB, and replica DBs. + * + * @author Robert Eby + */ +public class Test_Transactions extends Test { + private final String TBL = "TRANSTEST"; + + @Override + public List run(JSONObject config) { + List msgs = new ArrayList(); + JSONArray connections = config.getJSONArray("connections"); + Connection[] conn = new Connection[connections.length()]; + Statement[] stmt = new Statement[conn.length]; + try { + for (int i = 0; i < conn.length; i++) { + conn[i] = getDBConnection(buildProperties(config, i)); + assertNotNull(conn[i]); + stmt[i] = conn[i].createStatement(); + assertNotNull(stmt[i]); + } + + try { + for (int i = 0; i < conn.length; i++) { + conn[i].setAutoCommit(true); + stmt[i].execute("CREATE TABLE IF NOT EXISTS TRANSTEST(ID_ varchar(12), STUFF varchar(255), primary key (ID_))"); + conn[i].setAutoCommit(false); + } + stmt[0].execute("INSERT INTO TRANSTEST(ID_, STUFF) VALUES('1', 'CenturyLink Now Under Fire on All Sides For Fraudulent Billing')"); + stmt[0].execute("INSERT INTO TRANSTEST(ID_, STUFF) VALUES('2', 'Netflix Now in Half of All Broadband Households, Study Says')"); + stmt[0].execute("INSERT INTO TRANSTEST(ID_, STUFF) VALUES('3', 'Private Data Of 6 Million Verizon Customers Exposed')"); + assertTableContains(0, conn[0], TBL, "ID_", "1"); + assertTableContains(0, conn[0], TBL, "ID_", "2"); + assertTableContains(0, conn[0], TBL, "ID_", "3"); + for (int i = 1; i < conn.length; i++) { + assertTableDoesNotContain(i, conn[i], TBL, "ID_", "1"); + assertTableDoesNotContain(i, conn[i], TBL, "ID_", "2"); + assertTableDoesNotContain(i, conn[i], TBL, "ID_", "3"); + } + conn[0].commit(); + for (int i = 0; i < conn.length; i++) { + assertTableContains(i, conn[i], TBL, "ID_", "1"); + assertTableContains(i, conn[i], TBL, "ID_", "2"); + assertTableContains(i, conn[i], TBL, "ID_", "3"); + } + + } catch (Exception e) { + msgs.add(e.toString()); + } finally { + for (int i = 0; i < stmt.length; i++) { + if (stmt[i] != null) + stmt[i].close(); + } + for (int i = 0; i < conn.length; i++) { + if (conn[i] != null) + conn[i].close(); + } + } + } catch (Exception e) { + msgs.add(e.toString()); + } + return msgs; + } +} diff --git a/src/main/java/org/onap/music/mdbc/tests/package-info.java b/src/main/java/org/onap/music/mdbc/tests/package-info.java new file mode 100755 index 0000000..7e0b84d --- /dev/null +++ b/src/main/java/org/onap/music/mdbc/tests/package-info.java @@ -0,0 +1,165 @@ +/** + *

+ * This package provides a testing harness to test the various features of MDBC against
+ * multiple combinations of database and MUSIC mixins. The configurations to test (consisting of
+ * database information and mixin combinations), as well as the specific tests to run, are all
+ * defined in the configuration file tests.json.
+ *

+ *

+ * To run the tests against all the configurations specified in /tests.json, do the following: + *

+ *
+ * 	java org.onap.music.mdbc.tests.MAIN [ configfile ]
+ * 
+ *

+ * It is assumed that a copy of Cassandra is running locally on port 9042,
+ * that a copy of the H2 server is running locally on port 8082,
+ * and that a copy of MySQL (or MariaDB) is running locally on port 3306.
+ * These can be adjusted by editing the /tests.json file.
+ *

+ *

+ * When building a copy of MDBC for production use, this package can be safely removed. + *

+ *

+ * The initial copy of tests.json is as follows: + *

+ *
+ * {
+ *	"tests": [
+ *		"org.onap.music.mdbc.tests.Test_Insert",
+ *		"org.onap.music.mdbc.tests.Test_Delete",
+ *		"org.onap.music.mdbc.tests.Test_Transactions"
+ *	],
+ *	"configs": [
+ *		{
+ *			"description": "H2 with Cassandra with two connections",
+ *			"MDBC_DB_MIXIN": "h2",
+ *			"MDBC_MUSIC_MIXIN": "cassandra",
+ *			"replicas": "0,1",
+ *			"music_keyspace": "mdbctest1",
+ *			"music_address": "localhost",
+ *			"music_rfactor": "1",
+ *			"connections": [
+ *				{
+ *					"name": "Connection 0",
+ *					"url": "jdbc:mdbc:mem:db0",
+ *					"user": "",
+ *					"password": "",
+ *					"myid": "0"
+ *				},
+ *				{
+ *					"name": "Connection 1",
+ *					"url": "jdbc:mdbc:mem:db1",
+ *					"user": "",
+ *					"password": "",
+ *					"myid": "1"
+ *				}
+ *			]
+ *		},
+ *		{
+ *			"description": "H2 with Cassandra2 with three connections",
+ *			"MDBC_DB_MIXIN": "h2",
+ *			"MDBC_MUSIC_MIXIN": "cassandra2",
+ *			"replicas": "0,1,2",
+ *			"music_keyspace": "mdbctest2",
+ *			"music_address": "localhost",
+ *			"music_rfactor": "1",
+ *			"user": "",
+ *			"password": "",
+ *			"connections": [
+ *				{
+ *					"name": "Connection 0",
+ *					"url": "jdbc:mdbc:mem:db0",
+ *					"myid": "0"
+ *				},
+ *				{
+ *					"name": "Connection 1",
+ *					"url": "jdbc:mdbc:mem:db1",
+ *					"myid": "1"
+ *				},
+ *				{
+ *					"name": "Connection 2",
+ *					"url": "jdbc:mdbc:mem:db2",
+ *					"myid": "2"
+ *				}
+ *			]
+ *		},
+ *		{
+ *			"description": "H2 Server with Cassandra2 with two connections",
+ *			"MDBC_DB_MIXIN": "h2server",
+ *			"MDBC_MUSIC_MIXIN": "cassandra2",
+ *			"replicas": "0,1",
+ *			"music_keyspace": "mdbctest3",
+ *			"music_address": "localhost",
+ *			"music_rfactor": "1",
+ *			"connections": [
+ *				{
+ *					"name": "Connection 0",
+ *					"url": "jdbc:mdbc:tcp://localhost/mdbc0",
+ *					"user": "",
+ *					"password": "",
+ *					"myid": "0"
+ *				},
+ *				{
+ *					"name": "Connection 1",
+ *					"url": "jdbc:mdbc:tcp://localhost/mdbc1",
+ *					"user": "",
+ *					"password": "",
+ *					"myid": "1"
+ *				}
+ *			]
+ *		},
+ *		{
+ *			"description": "MySQL with Cassandra2 with two connections",
+ *			"MDBC_DB_MIXIN": "mysql",
+ *			"MDBC_MUSIC_MIXIN": "cassandra2",
+ *			"replicas": "0,1,2",
+ *			"music_keyspace": "mdbctest4",
+ *			"music_address": "localhost",
+ *			"music_rfactor": "1",
+ *			"user": "root",
+ *			"password": "abc123",
+ *			"connections": [
+ *				{
+ *					"name": "Connection 0",
+ *					"url": "jdbc:mdbc://127.0.0.1:3306/mdbc",
+ *					"myid": "0"
+ *				},
+ *				{
+ *					"name": "Connection 1",
+ *					"url": "jdbc:mdbc://127.0.0.1:3306/mdbc2",
+ *					"myid": "1"
+ *				}
+ *			]
+ *		},
+ *		{
+ *			"description": "H2 (DB #1) and MySQL (DB #2) with Cassandra2",
+ *			"MDBC_MUSIC_MIXIN": "cassandra2",
+ *			"replicas": "0,1",
+ *			"music_keyspace": "mdbctest5",
+ *			"music_address": "localhost",
+ *			"music_rfactor": "1",
+ *			"connections": [
+ *				{
+ *					"name": "Connection 0",
+ *					"MDBC_DB_MIXIN": "h2",
+ *					"url": "jdbc:mdbc:mem:db9",
+ *					"user": "",
+ *					"password": "",
+ *					"myid": "0"
+ *				},
+ *				{
+ *					"name": "Connection 1",
+ *					"MDBC_DB_MIXIN": "mysql",
+ *					"url": "jdbc:mdbc://127.0.0.1:3306/mdbc3",
+ *					"user": "root",
+ *					"password": "abc123",
+ *					"myid": "1"
+ *				}
+ *			]
+ *		}
+ *	]
+ * }
+ * 
+ */ +package org.onap.music.mdbc.tests; diff --git a/src/main/java/org/onap/music/mdbc/tools/CreateNodeConfigurations.java b/src/main/java/org/onap/music/mdbc/tools/CreateNodeConfigurations.java new file mode 100644 index 0000000..c4ebf46 --- /dev/null +++ b/src/main/java/org/onap/music/mdbc/tools/CreateNodeConfigurations.java @@ -0,0 +1,70 @@ +package org.onap.music.mdbc.tools; + +import org.onap.music.exceptions.MDBCServiceException; +import org.onap.music.logging.EELFLoggerDelegate; +import org.onap.music.mdbc.configurations.NodeConfiguration; +import org.onap.music.mdbc.configurations.TablesConfiguration; +import com.beust.jcommander.JCommander; +import com.beust.jcommander.Parameter; + +import java.io.FileNotFoundException; +import java.util.List; + +public class CreateNodeConfigurations { + public static final EELFLoggerDelegate LOG = EELFLoggerDelegate.getLogger(CreateNodeConfigurations.class); + + private String tables; + @Parameter(names = { "-t", "--table-configurations" }, required = true, + description = "This is the input file that is going to have the configuration for all the tables and partitions") + private String tableConfigurationsFile; + @Parameter(names = { "-b", "--basename" }, required = true, + description = "This base name for all the outputs files that are going to be created") + private String basename; + @Parameter(names = { "-o", "--output-dir" }, required = true, + description = "This is the output directory that is going to contain all the configuration file to be generated") + private String outputDirectory; + @Parameter(names = { "-h", "-help", "--help" }, help = true, + description = "Print the help message") + private boolean help = false; + + private TablesConfiguration inputConfig; + + public CreateNodeConfigurations(){} + + + public void readInput(){ + try { + inputConfig = TablesConfiguration.readJsonFromFile(tableConfigurationsFile); + } catch (FileNotFoundException e) { + LOG.error("Input file is invalid or not found"); + System.exit(1); + } + } + + public void createAndSaveNodeConfigurations(){ + List nodes = null; + try { + nodes = inputConfig.initializeAndCreateNodeConfigurations(); + } catch (MDBCServiceException e) { + e.printStackTrace(); + } + int counter = 0; + for(NodeConfiguration nodeConfig : nodes){ + String name = (nodeConfig.nodeName==null||nodeConfig.nodeName.isEmpty())?Integer.toString(counter++): nodeConfig.nodeName; + nodeConfig.saveToFile(outputDirectory+"/"+basename+"-"+name+".json"); + } + } + + public static void main(String[] args) { + CreateNodeConfigurations configs = new CreateNodeConfigurations(); + @SuppressWarnings("deprecation") + JCommander jc = new JCommander(configs, args); + if (configs.help) { + jc.usage(); + System.exit(1); + return; + } + configs.readInput(); + configs.createAndSaveNodeConfigurations(); + } +} diff --git a/src/main/java/org/onap/music/mdbc/tools/CreatePartition.java b/src/main/java/org/onap/music/mdbc/tools/CreatePartition.java new file mode 100644 index 0000000..53bbc53 --- /dev/null +++ b/src/main/java/org/onap/music/mdbc/tools/CreatePartition.java @@ -0,0 +1,59 @@ +package org.onap.music.mdbc.tools; + +import org.onap.music.logging.EELFLoggerDelegate; +import org.onap.music.mdbc.configurations.NodeConfiguration; +import com.beust.jcommander.JCommander; +import com.beust.jcommander.Parameter; + +public class CreatePartition { + public static final EELFLoggerDelegate LOG = EELFLoggerDelegate.getLogger(CreatePartition.class); + + @Parameter(names = { "-t", "--tables" }, required = true, + 
description = "This is the tables that are assigned to this ") + private String tables; + @Parameter(names = { "-f", "--file" }, required = true, + description = "This is the output file that is going to have the configuration for the ranges") + private String file; + @Parameter(names = { "-i", "--mri-index" }, required = true, + description = "Index in the Mri Table") + private String mriIndex; + @Parameter(names = { "-m", "--mri-table-name" }, required = true, + description = "Mri Table name") + private String mriTable; + @Parameter(names = { "-r", "--music-tx-digest-table-name" }, required = true, + description = "Music Transaction Digest Table name") + private String mtxdTable; + @Parameter(names = { "-p", "--partition-id" }, required = true, + description = "Partition Id") + private String partitionId; + @Parameter(names = { "-h", "-help", "--help" }, help = true, + description = "Print the help message") + private boolean help = false; + + NodeConfiguration config; + + public CreatePartition(){ + } + + public void convert(){ + config = new NodeConfiguration(tables, mriIndex,mriTable,partitionId,"test","", mtxdTable); + } + + public void saveToFile(){ + config.saveToFile(file); + } + + public static void main(String[] args) { + + CreatePartition newPartition = new CreatePartition(); + @SuppressWarnings("deprecation") + JCommander jc = new JCommander(newPartition, args); + if (newPartition.help) { + jc.usage(); + System.exit(1); + return; + } + newPartition.convert(); + newPartition.saveToFile(); + } +} diff --git a/src/main/javadoc/overview.html b/src/main/javadoc/overview.html index 162e7ec..0be8c38 100755 --- a/src/main/javadoc/overview.html +++ b/src/main/javadoc/overview.html @@ -13,15 +13,15 @@ MDBC can automatically adopt to the database in use, as well as the style of MUS by the use of "mixins". Each JDBC Connection via MDBC specifies two mixins to use:

    -
  • a Mixin that conforms to the {@link com.att.research.mdbc.mixins.MusicInterface} specification +
  • a Mixin that conforms to the {@link org.onap.music.mdbc.mixins.MusicInterface} specification for communicating with MUSIC/Cassandra.
  • -
  • a Mixin that conforms to the {@link com.att.research.mdbc.mixins.DBInterface} specification +
  • a Mixin that conforms to the {@link org.onap.music.mdbc.mixins.DBInterface} specification for communicating with the underlying database in use.

More details are provided on the package pages for -com.att.research.mdbc and -com.att.research.mdbc.mixins. +org.onap.music.mdbc and +org.onap.music.mdbc.mixins.

Limitations

There are several limitations to the use of MDBC: diff --git a/src/main/resources/META-INF/services/java.sql.Driver b/src/main/resources/META-INF/services/java.sql.Driver index 7228fe7..a135284 100755 --- a/src/main/resources/META-INF/services/java.sql.Driver +++ b/src/main/resources/META-INF/services/java.sql.Driver @@ -1 +1 @@ -com.att.research.mdbc.ProxyDriver +org.onap.music.mdbc.ProxyDriver diff --git a/src/main/resources/mdbc.properties b/src/main/resources/mdbc.properties index f6e722c..3e207aa 100755 --- a/src/main/resources/mdbc.properties +++ b/src/main/resources/mdbc.properties @@ -2,11 +2,11 @@ # A list of all Mixins that should be checked by MDBC # MIXINS= \ - com.att.research.mdbc.mixins.H2Mixin \ - com.att.research.mdbc.mixins.H2ServerMixin \ - com.att.research.mdbc.mixins.MySQLMixin \ - com.att.research.mdbc.mixins.CassandraMixin \ - com.att.research.mdbc.mixins.Cassandra2Mixin + org.onap.music.mdbc.mixins.H2Mixin \ + org.onap.music.mdbc.mixins.H2ServerMixin \ + org.onap.music.mdbc.mixins.MySQLMixin \ + org.onap.music.mdbc.mixins.CassandraMixin \ + org.onap.music.mdbc.mixins.Cassandra2Mixin critical.tables= \ TEST \ No newline at end of file diff --git a/src/main/resources/mdbc_driver.properties b/src/main/resources/mdbc_driver.properties index 1549d5f..487feb3 100644 --- a/src/main/resources/mdbc_driver.properties +++ b/src/main/resources/mdbc_driver.properties @@ -2,11 +2,11 @@ # A list of all Mixins that should be checked by MDBC # MIXINS= \ - com.att.research.mdbc.mixins.H2Mixin \ - com.att.research.mdbc.mixins.H2ServerMixin \ - com.att.research.mdbc.mixins.MySQLMixin \ - com.att.research.mdbc.mixins.CassandraMixin \ - com.att.research.mdbc.mixins.Cassandra2Mixin + org.onap.music.mdbc.mixins.H2Mixin \ + org.onap.music.mdbc.mixins.H2ServerMixin \ + org.onap.music.mdbc.mixins.MySQLMixin \ + org.onap.music.mdbc.mixins.CassandraMixin \ + org.onap.music.mdbc.mixins.Cassandra2Mixin DEFAULT_DRIVERS=\ org.h2.Driver \ diff --git a/src/main/resources/tests.json b/src/main/resources/tests.json index ac9221e..8df9f34 100755 --- a/src/main/resources/tests.json +++ b/src/main/resources/tests.json @@ -1,8 +1,8 @@ { "tests": [ - "com.att.research.mdbc.tests.Test_Insert", - "com.att.research.mdbc.tests.Test_Delete", - "com.att.research.mdbc.tests.Test_Transactions" + "org.onap.music.mdbc.tests.Test_Insert", + "org.onap.music.mdbc.tests.Test_Delete", + "org.onap.music.mdbc.tests.Test_Transactions" ], "configs": [ { diff --git a/src/main/shell/mk_jboss_module b/src/main/shell/mk_jboss_module index 28d0540..7bbb8d9 100755 --- a/src/main/shell/mk_jboss_module +++ b/src/main/shell/mk_jboss_module @@ -22,7 +22,7 @@ then fi T=/tmp/mk_jboss_module$$ -T2=$T/com/att/research/mdbc/main +T2=$T/org/onap/music/mdbc/main MODULE=$T2/module.xml TARGET=`pwd`/target/mdbc-jboss-module.tar JARS=$( mvn dependency:build-classpath | grep -v INFO | tr : '\012' ) @@ -38,7 +38,7 @@ cat > $MODULE < - + EOF for i in $JAR2; do echo " "; done >> $MODULE diff --git a/src/test/java/com/att/research/mdbc/MDBCUtilsTest.java b/src/test/java/com/att/research/mdbc/MDBCUtilsTest.java deleted file mode 100644 index 7e25fe0..0000000 --- a/src/test/java/com/att/research/mdbc/MDBCUtilsTest.java +++ /dev/null @@ -1,72 +0,0 @@ -package com.att.research.mdbc; - -import com.att.research.mdbc.tables.OperationType; -import com.att.research.mdbc.tables.StagingTable; - -import org.json.JSONArray; -import org.json.JSONObject; -import org.junit.Test; - -import java.io.IOException; -import java.util.HashMap; - -import static 
org.junit.Assert.*; - -public class MDBCUtilsTest { - - @Test - public void toStringTest1() { - StagingTable table = new StagingTable(); - table.addOperation("test",OperationType.INSERT,(new JSONObject(new String[]{"test3", "Test4"})).toString()); - String output=null; - try { - output = MDBCUtils.toString(table); - } catch (IOException e) { - e.printStackTrace(); - fail(); - } - assertTrue(output!=null); - assertTrue(!output.isEmpty()); - } - - @Test - public void toStringTest2() { - HashMap mapToSerialize = new HashMap<>(); - StagingTable table = new StagingTable(); - table.addOperation("test",OperationType.INSERT,(new JSONObject(new String[]{"test3", "Test4"})).toString()); - mapToSerialize.put("table",table); - String output=null; - try { - output = MDBCUtils.toString(mapToSerialize); - } catch (IOException e) { - e.printStackTrace(); - fail(); - } - assertTrue(output!=null); - assertTrue(!output.isEmpty()); - } - - @Test - public void toStringTest3() { - String testStr = "test"; - OperationType typeTest = OperationType.INSERT; - String output=null; - try { - output = MDBCUtils.toString(testStr); - } catch (IOException e) { - e.printStackTrace(); - fail(); - } - assertTrue(output!=null); - assertTrue(!output.isEmpty()); - output=null; - try { - output = MDBCUtils.toString(typeTest); - } catch (IOException e) { - e.printStackTrace(); - fail(); - } - assertTrue(output!=null); - assertTrue(!output.isEmpty()); - } -} \ No newline at end of file diff --git a/src/test/java/com/att/research/mdbc/test/ALLTESTS.java b/src/test/java/com/att/research/mdbc/test/ALLTESTS.java deleted file mode 100755 index c36e94c..0000000 --- a/src/test/java/com/att/research/mdbc/test/ALLTESTS.java +++ /dev/null @@ -1,14 +0,0 @@ -package com.att.research.mdbc.test; - -import org.junit.runner.RunWith; -import org.junit.runners.Suite; - -@RunWith(Suite.class) -@Suite.SuiteClasses({ - //BasicTest.class, - //CrossSiteTest.class, - //TransactionTest.class -}) - -public class ALLTESTS { -} diff --git a/src/test/java/com/att/research/mdbc/test/BasicTest.java b/src/test/java/com/att/research/mdbc/test/BasicTest.java deleted file mode 100755 index 2b17eba..0000000 --- a/src/test/java/com/att/research/mdbc/test/BasicTest.java +++ /dev/null @@ -1,77 +0,0 @@ -package com.att.research.mdbc.test; - -import static org.junit.Assert.*; - -import java.sql.Connection; -import java.sql.ResultSet; -import java.sql.SQLException; -import java.sql.Statement; - - -/** - * This is a basic test which creates some tables, does a few selects, adn runs some joins. - * It is mainly intended to make sure that no exceptions are thrown in basic operation. 
- */ -public class BasicTest extends TestCommon { - private static final String DB_CONNECTION = "avatica://" + "mem:db1"; - private static final String KEYSPACE = "Basic_Test"; - - //@Test - public void test() { - try { - Connection connection = getDBConnection(DB_CONNECTION, KEYSPACE, "0"); - assertNotNull(connection); - System.out.println("GOT conn"); - Statement stmt = connection.createStatement(); - assertNotNull(stmt); - System.out.println("GOT stmt"); - - try { - connection.setAutoCommit(false); - stmt.execute("CREATE TABLE IF NOT EXISTS PERSON(ID_ varchar(255), NAME varchar(255), SSN varchar(255), primary key (ID_))"); - stmt.execute("INSERT INTO PERSON(ID_, NAME, SSN) VALUES('1', 'Anju', '111-22-3333')"); - stmt.execute("INSERT INTO PERSON(ID_, NAME, SSN) VALUES('2', 'Sonia', '111-22-4444')"); - stmt.execute("INSERT INTO PERSON(ID_, NAME, SSN) VALUES('3', 'Asha', '111-55-6666')"); - dumptable(connection); - - stmt.execute("DELETE FROM PERSON WHERE ID_ = '1'"); - dumptable(connection); - - stmt.execute("UPDATE PERSON SET NAME = 'foobar' WHERE ID_ = '2'"); - dumptable(connection); - - stmt.execute("CREATE TABLE IF NOT EXISTS SONG(ID_ varchar(255), PREF int, ARIA varchar(255), primary key (ID_, PREF))"); - stmt.execute("INSERT INTO SONG(ID_, PREF, ARIA) VALUES('1', 1, 'Nessun Dorma')"); - stmt.execute("INSERT INTO SONG(ID_, PREF, ARIA) VALUES('2', 5, 'O mio Bambino Caro')"); - stmt.execute("INSERT INTO SONG(ID_, PREF, ARIA) VALUES('2', 2, 'Sweet Georgia Brown')"); - stmt.execute("INSERT INTO SONG(ID_, PREF, ARIA) VALUES('3', 77, 'Mud Flats Blues')"); - stmt.execute("INSERT INTO SONG(ID_, PREF, ARIA) VALUES('3', 69, 'Me & Mr Jones')"); - ResultSet rs = stmt.executeQuery("SELECT * FROM PERSON AS P, SONG AS S WHERE P.ID_ = S.ID_"); - while (rs.next()) { - System.out.println("ID_ " + rs.getInt("ID_") + " Name: " + rs.getString("NAME") + " Aria: " + rs.getString("ARIA")); - } - rs.close(); - stmt.close(); - connection.commit(); - } catch (Exception e) { - fail(e.toString()); - } finally { - connection.close(); - } - } catch (Exception e) { - e.printStackTrace(); - fail(e.toString()); - } - System.out.println("BasicTest.test OK"); - } - - private void dumptable(Connection connection) throws SQLException { - Statement stmt = connection.createStatement(); - ResultSet rs = stmt.executeQuery("SELECT * FROM PERSON"); - while (rs.next()) { - System.out.println("ID_ " + rs.getInt("ID_") + " Name " + rs.getString("name")); - } - stmt.close(); - System.out.println("--"); - } -} diff --git a/src/test/java/com/att/research/mdbc/test/CrossSiteTest.java b/src/test/java/com/att/research/mdbc/test/CrossSiteTest.java deleted file mode 100755 index 71ac54e..0000000 --- a/src/test/java/com/att/research/mdbc/test/CrossSiteTest.java +++ /dev/null @@ -1,447 +0,0 @@ -package com.att.research.mdbc.test; - -import static org.junit.Assert.*; - -import java.io.Reader; -import java.io.StringReader; -import java.sql.CallableStatement; -import java.sql.Connection; -import java.sql.PreparedStatement; -import java.sql.ResultSet; -import java.sql.Statement; -import java.sql.Timestamp; -import java.util.Random; - -import org.apache.log4j.Logger; -import org.junit.After; -import org.junit.Before; -import org.junit.BeforeClass; -import org.junit.Test; - - -/** - * This test tests a copy of data from DB1 to DB2. It tests the following H2 data types: - * VARCHAR, VARBINARY, INTEGER, BOOLEAN, DOUBLE, CLOB, TIMESTAMP. 
- */ -public class CrossSiteTest extends TestCommon { - private static final String DB_CONNECTION1 = "avatica://" + "mem:db1"; - private static final String DB_CONNECTION2 = "avatica://" + "mem:db2"; - private static final String KEYSPACE = "CrossSite_Test"; - private final static Logger logger = Logger.getLogger(CrossSiteTest.class); - - private Connection db1, db2; - - //@BeforeClass - public static void setUpBeforeClass() throws Exception { - // drop the keyspace - } - - //@Before - public void setUp() throws Exception { - db1 = getDBConnection(DB_CONNECTION1, KEYSPACE, "0"); - db2 = getDBConnection(DB_CONNECTION2, KEYSPACE, "1"); - } - - //@After - public void tearDown() throws Exception { - db1.close(); - db2.close(); - } - - //@Test - public void testCopyOneToTwo() { - String sql = "CREATE TABLE IF NOT EXISTS DATA(KEY VARCHAR(255), PRIMARY KEY (KEY))"; - createTable(sql); - - // Put data in DB1 - try { - Statement s = db1.createStatement(); - s.execute("INSERT INTO DATA(KEY) VALUES('AAA')"); - s.execute("INSERT INTO DATA(KEY) VALUES('BBB')"); - s.execute("INSERT INTO DATA(KEY) VALUES('CCC')"); - s.execute("INSERT INTO DATA(KEY) VALUES('DDD')"); - db1.commit(); - s.close(); - } catch (Exception e) { - fail("1: " + e.toString()); - } - // Get data in DB2 - logger.info(" Get data in DB2"); - try { - Statement s = db2.createStatement(); - ResultSet rs = s.executeQuery("SELECT COUNT(*) FROM DATA"); - if (rs.next()) { - int n = rs.getInt(1); - assertEquals(4, n); - } else { - fail("SELECT COUNT(*) produced no result"); - } - } catch (Exception e) { - logger.error(e); - e.printStackTrace(); - fail("2: " + e.toString()); - } - // Delete a row - try { - Statement s = db1.createStatement(); - s.execute("DELETE FROM DATA WHERE KEY = 'CCC'"); - db1.commit(); - s.close(); - } catch (Exception e) { - fail("1: " + e.toString()); - } - // Recheck - logger.info(" Get data in DB2"); - try { - Statement s = db2.createStatement(); - ResultSet rs = s.executeQuery("SELECT COUNT(*) FROM DATA"); - if (rs.next()) { - int n = rs.getInt(1); - assertEquals(3, n); - } else { - fail("SELECT COUNT(*) produced no result"); - } - } catch (Exception e) { - logger.error(e); - e.printStackTrace(); - fail("2: " + e.toString()); - } - System.out.println("CrossSiteTest.testCopyOneToTwo OK"); - } - - //@Test - public void testCopyWithPreparedStatement() { - String sql = "CREATE TABLE IF NOT EXISTS DATA2(KEY VARCHAR(255), PRIMARY KEY (KEY))"; - createTable(sql); - - // Put data in DB1 - try { - Statement s = db1.createStatement(); - PreparedStatement ps = db1.prepareStatement("INSERT INTO DATA2(KEY) VALUES(?)"); - for (String v : new String[] { "WWW", "XXX", "YYY", "ZZZ" } ) { - ps.setString(1, v); - ps.execute(); - } - db1.commit(); - s.close(); - } catch (Exception e) { - fail("1: " + e.toString()); - } - // Get data in DB2 - logger.info(" Get data in DB2"); - try { - Statement s = db2.createStatement(); - ResultSet rs = s.executeQuery("SELECT COUNT(*) FROM DATA2"); - if (rs.next()) { - int n = rs.getInt(1); - assertEquals(4, n); - } else { - fail("SELECT COUNT(*) produced no result"); - } - } catch (Exception e) { - logger.error(e); - e.printStackTrace(); - fail("2: " + e.toString()); - } - System.out.println("CrossSiteTest.testCopyWithPreparedStatement OK"); - } - - //@Test - public void testDataTypes() { - String sql = "CREATE TABLE IF NOT EXISTS DATATYPES(KEY VARCHAR(255), I1 INTEGER, B1 BOOLEAN, D1 DOUBLE, S1 VARCHAR, PRIMARY KEY (KEY))"; - createTable(sql); - - String key = "ThIs Is ThE KeY"; - String key2 = "ThIs 
Is another KeY"; - String s1 = "The Rain in Spain"; - int i1 = 696969; - boolean b1 = true; - double pi = Math.PI; - double e = Math.E; - - // Put data in DB1 - try { - PreparedStatement ps = db1.prepareStatement("INSERT INTO DATATYPES(KEY, I1, B1, D1, S1) VALUES(?, ?, ?, ?, ?)"); - ps.setString(1, key); - ps.setInt(2, i1); - ps.setBoolean(3, b1); - ps.setDouble(4, pi); - ps.setString(5, s1); - ps.execute(); - - ps.setString(1, key2); - ps.setInt(2, 123456); - ps.setBoolean(3, false); - ps.setDouble(4, e); - ps.setString(5, "Fee fi fo fum!"); - ps.execute(); - db1.commit(); - ps.close(); - } catch (Exception ex) { - fail("1: " + ex.toString()); - } - // Get data in DB2 - logger.info(" Get data in DB2"); - try { - Statement s = db2.createStatement(); - ResultSet rs = s.executeQuery("SELECT * FROM DATATYPES"); - if (rs.next()) { - assertEquals(key, rs.getString(1)); - assertEquals(i1, rs.getInt(2)); - assertEquals(b1, rs.getBoolean(3)); - assertEquals(pi, rs.getDouble(4), 0.0); - assertEquals(s1, rs.getString(5)); - } else { - fail("SELECT * FROM DATATYPES"); - } - } catch (Exception ex) { - logger.error(ex); - ex.printStackTrace(); - fail("2: " + ex.toString()); - } - System.out.println("CrossSiteTest.testDataTypes OK"); - } - - //@Test - public void testIdentityColumn() { - String sql = "CREATE TABLE IF NOT EXISTS IDENTITYTEST(KEY IDENTITY, S1 VARCHAR, T1 TIMESTAMP, PRIMARY KEY (KEY))"; - createTable(sql); - - String s1 = "ThIs Is ThE IDENTITY test"; - Timestamp ts = new Timestamp(-3535344000L); - - // Put data in DB1 - try { - PreparedStatement ps = db1.prepareStatement("INSERT INTO IDENTITYTEST(S1, T1) VALUES(?, ?)"); - ps.setString(1, s1); - ps.setTimestamp(2, ts); - ps.execute(); - db1.commit(); - ps.close(); - } catch (Exception ex) { - fail("testIdentity 1: " + ex.toString()); - } - // Get data in DB2 - logger.info(" Get data in DB2"); - try { - Statement s = db2.createStatement(); - ResultSet rs = s.executeQuery("SELECT * FROM IDENTITYTEST"); - if (rs.next()) { - assertEquals(s1, rs.getString("s1")); - assertEquals(ts, rs.getTimestamp("t1")); - } else { - fail("SELECT * FROM DATATYPES"); - } - } catch (Exception ex) { - logger.error(ex); - ex.printStackTrace(); - fail("testIdentity 2: " + ex.toString()); - } - System.out.println("CrossSiteTest.testIdentityColumn OK"); - } - - //@Test - public void testBLOBColumn() { - String sql = "CREATE TABLE IF NOT EXISTS BLOBTEST (KEY VARCHAR, V1 VARBINARY, C1 CLOB, PRIMARY KEY (KEY))";// add - createTable(sql); - - String key = "BLOB test"; - byte[] v1 = new byte[4096]; - new Random().nextBytes(v1); - String constitution = - "We the People of the United States, in Order to form a more perfect Union, establish Justice, insure domestic Tranquility, provide for the common defense, promote the "+ - "general Welfare, and secure the Blessings of Liberty to ourselves and our Posterity, do ordain and establish this Constitution for the United States of America."+ - "Section 1"+ - "All legislative Powers herein granted shall be vested in a Congress of the United States, which shall consist of a Senate and House of Representatives."+ - ""+ - "Section 2"+ - "1: The House of Representatives shall be composed of Members chosen every second Year by the People of the several States, and the Electors in each State shall "+ - "have the Qualifications requisite for Electors of the most numerous Branch of the State Legislature."+ - ""+ - "2: No Person shall be a Representative who shall not have attained to the Age of twenty five Years, and been seven Years a 
Citizen of the United States, "+ - "and who shall not, when elected, be an Inhabitant of that State in which he shall be chosen."+ - ""+ - "3: Representatives and direct Taxes shall be apportioned among the several States which may be included within this Union, according to their respective Numbers, which shall be determined "+ - "by adding to the whole Number of free Persons, including those bound to Service for a Term of Years, and excluding Indians not taxed, three fifths of all other Persons. "+ - "2 The actual Enumeration shall be made within three Years after the first Meeting of the Congress of the United States, and within every subsequent Term of ten Years, in such Manner as "+ - "they shall by Law direct. The Number of Representatives shall not exceed one for every thirty Thousand, but each State shall have at Least one Representative; and until such enumeration "+ - "shall be made, the State of New Hampshire shall be entitled to chuse three, Massachusetts eight, Rhode-Island and Providence Plantations one, Connecticut five, New-York six, New Jersey four, "+ - "Pennsylvania eight, Delaware one, Maryland six, Virginia ten, North Carolina five, South Carolina five, and Georgia three."+ - ""+ - "4: When vacancies happen in the Representation from any State, the Executive Authority thereof shall issue Writs of Election to fill such Vacancies."+ - ""+ - "5: The House of Representatives shall chuse their Speaker and other Officers; and shall have the sole Power of Impeachment."+ - "etc., etc. ..."; - Reader c1 = new StringReader(constitution); - - // Put data in DB1 - try { - CallableStatement ps = db1.prepareCall("INSERT INTO BLOBTEST(KEY, V1, C1) VALUES (?, ?, ?)"); - ps.setString(1, key); - ps.setBytes(2, v1); - ps.setClob(3, c1); - ps.execute(); - db1.commit(); - ps.close(); - } catch (Exception ex) { - ex.printStackTrace(); - fail("testBLOBColumn 1: " + ex.toString()); - } - // Get data in DB2 - logger.info(" Get data in DB2"); - try { - Statement s = db2.createStatement(); - ResultSet rs = s.executeQuery("SELECT * FROM BLOBTEST"); - if (rs.next()) { - String v1s = new String(v1); - assertEquals(key, rs.getString("key")); - assertEquals(v1s, new String(rs.getBytes("v1"))); - assertEquals(constitution, new String(rs.getBytes("c1"))); - } else { - fail("SELECT * FROM BLOBTEST"); - } - } catch (Exception ex) { - logger.error(ex); - ex.printStackTrace(); - fail("testBLOBColumn 2: " + ex.toString()); - } - System.out.println("CrossSiteTest.testBLOBColumn OK"); - } - - //@Test - public void testSecondaryIndex() { - String sql = "CREATE TABLE IF NOT EXISTS ARTISTS (ARTIST VARCHAR, GENRE VARCHAR, AGE INT, PRIMARY KEY (ARTIST))"; - createTable(sql); - - // Put data in DB1 - try { - Statement s = db1.createStatement(); - s.execute("INSERT INTO ARTISTS(ARTIST, GENRE, AGE) VALUES('Anne-Sophie', 'classical', 53)"); - s.execute("INSERT INTO ARTISTS(ARTIST, GENRE, AGE) VALUES('Dizz', 'jazz', 99)"); - s.execute("INSERT INTO ARTISTS(ARTIST, GENRE, AGE) VALUES('Esperanza', 'jazz', 32)"); - s.execute("INSERT INTO ARTISTS(ARTIST, GENRE, AGE) VALUES('Miles', 'jazz', 90)"); - s.execute("INSERT INTO ARTISTS(ARTIST, GENRE, AGE) VALUES('Yo-yo', 'classical', 61)"); - s.execute("CREATE INDEX BYGENRE on ARTISTS(GENRE)"); - db1.commit(); - s.close(); - } catch (Exception e) { - fail("1: " + e.toString()); - } - // Get data in DB2 - logger.info(" Get data in DB2"); - try { - Statement s = db2.createStatement(); - ResultSet rs = s.executeQuery("SELECT COUNT(*) FROM ARTISTS WHERE GENRE = 'jazz'"); - if 
(rs.next()) { - int n = rs.getInt(1); - assertEquals(3, n); - } else { - fail("SELECT COUNT(*) produced no result"); - } - } catch (Exception e) { - logger.error(e); - e.printStackTrace(); - fail("2: " + e.toString()); - } - // Delete a row - try { - Statement s = db1.createStatement(); - s.execute("DELETE FROM ARTISTS WHERE ARTIST = 'Miles'"); - db1.commit(); - s.close(); - } catch (Exception e) { - fail("1: " + e.toString()); - } - // Recheck - logger.info(" Get data in DB2"); - try { - Statement s = db2.createStatement(); - ResultSet rs = s.executeQuery("SELECT COUNT(*) FROM ARTISTS WHERE GENRE = 'jazz'"); - if (rs.next()) { - int n = rs.getInt(1); - assertEquals(2, n); - } else { - fail("SELECT COUNT(*) produced no result"); - } - } catch (Exception e) { - logger.error(e); - e.printStackTrace(); - fail("2: " + e.toString()); - } - System.out.println("CrossSiteTest.testSecondaryIndex OK"); - } - - //@Test - public void testUpdate() { - String sql = "CREATE TABLE IF NOT EXISTS UPDATETEST(KEY VARCHAR(255), OTHER VARCHAR(255), PRIMARY KEY (KEY))"; - createTable(sql); - - // Put data in DB1 - try { - Statement s = db1.createStatement(); - s.execute("INSERT INTO UPDATETEST(KEY, OTHER) VALUES('foo', 'bar')"); - s.execute("INSERT INTO UPDATETEST(KEY, OTHER) VALUES('bar', 'nixon')"); - db1.commit(); - s.close(); - } catch (Exception e) { - fail("1: " + e.toString()); - } - // Get data in DB2 - logger.info(" Get data in DB2"); - try { - Statement s = db2.createStatement(); - ResultSet rs = s.executeQuery("SELECT COUNT(*) FROM UPDATETEST"); - if (rs.next()) { - int n = rs.getInt(1); - assertEquals(2, n); - } else { - fail("SELECT COUNT(*) produced no result"); - } - } catch (Exception e) { - logger.error(e); - e.printStackTrace(); - fail("2: " + e.toString()); - } - // Update a row - try { - Statement s = db2.createStatement(); - s.execute("UPDATE UPDATETEST SET OTHER = 'obama' WHERE KEY = 'bar'"); - db2.commit(); - s.close(); - } catch (Exception e) { - fail("1: " + e.toString()); - } - // Recheck - logger.info(" Get data in DB2"); - try { - Statement s = db1.createStatement(); - ResultSet rs = s.executeQuery("SELECT OTHER FROM UPDATETEST WHERE KEY = 'bar'"); - if (rs.next()) { - String str = rs.getString("OTHER"); - assertEquals("obama", str); - } else { - fail("SELECT OTHER produced no result"); - } - } catch (Exception e) { - logger.error(e); - e.printStackTrace(); - fail("2: " + e.toString()); - } - System.out.println("CrossSiteTest.testUpdate OK"); - } - - private void createTable(String sql) { - try { - for (Connection db : new Connection[] { db1, db2 }) { - logger.info(" start: "+db); - Statement s = db.createStatement(); - s.execute(sql); - db.commit(); - s.close(); - logger.info(" Tables created"); - } - } catch (Exception e) { - fail(e.toString()); - } - } -} diff --git a/src/test/java/com/att/research/mdbc/test/TestCommon.java b/src/test/java/com/att/research/mdbc/test/TestCommon.java deleted file mode 100755 index e5e85dc..0000000 --- a/src/test/java/com/att/research/mdbc/test/TestCommon.java +++ /dev/null @@ -1,25 +0,0 @@ -package com.att.research.mdbc.test; - -import java.sql.Connection; -import java.sql.DriverManager; -import java.sql.SQLException; -import java.util.Properties; - -import com.att.research.mdbc.mixins.CassandraMixin; - -public class TestCommon { - public static final String DB_DRIVER = "avatica.Driver"; - public static final String DB_USER = ""; - public static final String DB_PASSWORD = ""; - - public Connection getDBConnection(String url, String keyspace, String 
id) throws SQLException, ClassNotFoundException { - Class.forName(DB_DRIVER); - Properties driver_info = new Properties(); - driver_info.put(CassandraMixin.KEY_MY_ID, id); - driver_info.put(CassandraMixin.KEY_REPLICAS, "0,1,2"); - driver_info.put(CassandraMixin.KEY_MUSIC_ADDRESS, "localhost"); - driver_info.put("user", DB_USER); - driver_info.put("password", DB_PASSWORD); - return DriverManager.getConnection(url, driver_info); - } -} diff --git a/src/test/java/com/att/research/mdbc/test/TransactionTest.java b/src/test/java/com/att/research/mdbc/test/TransactionTest.java deleted file mode 100755 index 9d50db5..0000000 --- a/src/test/java/com/att/research/mdbc/test/TransactionTest.java +++ /dev/null @@ -1,164 +0,0 @@ -package com.att.research.mdbc.test; - -import static org.junit.Assert.fail; - -import java.sql.Connection; -import java.sql.ResultSet; -import java.sql.SQLException; -import java.sql.Statement; -import java.util.Arrays; -import java.util.HashSet; -import java.util.Set; - -import org.apache.log4j.Logger; -import org.junit.Test; - - -public class TransactionTest extends TestCommon { - private static final String DB_CONNECTION1 = "avatica://" + "mem:db1"; - private static final String DB_CONNECTION2 = "avatica://" + "mem:db2"; - private static final String KEYSPACE = "CrossSite_Test"; - private final static Logger logger = Logger.getLogger(CrossSiteTest.class); - - //@Test - public void testWithAutocommitTrue() { - System.out.println("START TransactionTest.testWithAutocommitTrue"); - Set vals = new HashSet(Arrays.asList("1", "2", "3")); - Connection db1 = null, db2 = null; - try { - db1 = getDBConnection(DB_CONNECTION1, KEYSPACE, "0"); - db2 = getDBConnection(DB_CONNECTION2, KEYSPACE, "1"); - createTable(new Connection[] { db1, db2 }); - db1.setAutoCommit(true); - insert(db1, vals); - readcheck(db2, vals); - } catch (Exception e) { - fail("Unexpected exception: "+e); - } finally { - try { - if (db1 != null) - db1.close(); - if (db2 != null) - db2.close(); - } catch (SQLException e) { - // ignore - } - } - } - //@Test - public void testCommit() { - System.out.println("START TransactionTest.testCommit"); - Set vals = new HashSet(Arrays.asList("1", "2", "3", "4")); - Set val2 = new HashSet(Arrays.asList("1", "2", "4")); - Connection db1 = null, db2 = null; - try { - db1 = getDBConnection(DB_CONNECTION1, KEYSPACE, "0"); - db2 = getDBConnection(DB_CONNECTION2, KEYSPACE, "1"); - createTable(new Connection[] { db1, db2 }); - db1.setAutoCommit(false); - insert(db1, vals); - delete(db1, new HashSet(Arrays.asList("3"))); - readcheck(db1, val2); - readcheck(db2, new HashSet()); - db1.commit(); - readcheck(db2, val2); - } catch (Exception e) { - fail("Unexpected exception: "+e); - } finally { - try { - if (db1 != null) - db1.close(); - if (db2 != null) - db2.close(); - } catch (SQLException e) { - // ignore - } - } - } - //@Test - public void testRollback() { - System.out.println("START TransactionTest.testRollback"); - Set vals = new HashSet(Arrays.asList("1", "2", "3", "4")); - Connection db1 = null, db2 = null; - try { - db1 = getDBConnection(DB_CONNECTION1, KEYSPACE, "0"); - db2 = getDBConnection(DB_CONNECTION2, KEYSPACE, "1"); - createTable(new Connection[] { db1, db2 }); - db1.setAutoCommit(false); - insert(db1, vals); - readcheck(db1, vals); - readcheck(db2, new HashSet()); - db1.rollback(); - readcheck(db1, new HashSet()); - readcheck(db2, new HashSet()); - } catch (Exception e) { - fail("Unexpected exception: "+e); - } finally { - try { - if (db1 != null) - db1.close(); - if (db2 != 
null) - db2.close(); - } catch (SQLException e) { - // ignore - } - } - } - private void createTable(Connection[] c) { - try { - for (Connection db : c) { - logger.info(" start: "+db); - Statement s = db.createStatement(); - s.execute("CREATE TABLE IF NOT EXISTS TRANSTEST(KEY VARCHAR(255), PRIMARY KEY (KEY))"); - s.close(); - logger.info(" Tables created"); - } - } catch (Exception e) { - fail(e.toString()); - } - } - private void insert(Connection db, Set vals) { - // Put data in DB1 - try { - Statement s = db.createStatement(); - for (String v : vals) - s.execute("INSERT INTO TRANSTEST(KEY) VALUES('"+v+"')"); - s.close(); - } catch (Exception e) { - fail("1: " + e.toString()); - } - } - private void delete(Connection db, Set vals) { - // Put data in DB1 - try { - Statement s = db.createStatement(); - for (String v : vals) - s.execute("DELETE FROM TRANSTEST WHERE KEY = '"+v+"'"); - s.close(); - } catch (Exception e) { - fail("1: " + e.toString()); - } - } - private void readcheck(Connection db, Set vals) { - try { - Statement s = db.createStatement(); - ResultSet rs = s.executeQuery("SELECT * FROM TRANSTEST"); - Set newset = new HashSet(); - while (rs.next()) { - String tmp = rs.getString(1); - newset.add(tmp); - } - if (vals.size() != newset.size()) { - fail("wrong number of elements, expected "+vals.size()+" got "+newset.size()); - } - for (String t : vals) { - if (!newset.contains(t)) - fail("missing element: "+t); - } - } catch (Exception e) { - logger.error(e); - e.printStackTrace(); - fail("2: " + e.toString()); - } - } -} diff --git a/src/test/java/org/onap/music/mdbc/MDBCUtilsTest.java b/src/test/java/org/onap/music/mdbc/MDBCUtilsTest.java new file mode 100644 index 0000000..5e39244 --- /dev/null +++ b/src/test/java/org/onap/music/mdbc/MDBCUtilsTest.java @@ -0,0 +1,72 @@ +package org.onap.music.mdbc; + +import org.onap.music.mdbc.tables.OperationType; +import org.onap.music.mdbc.tables.StagingTable; + +import org.json.JSONArray; +import org.json.JSONObject; +import org.junit.Test; + +import java.io.IOException; +import java.util.HashMap; + +import static org.junit.Assert.*; + +public class MDBCUtilsTest { + + @Test + public void toStringTest1() { + StagingTable table = new StagingTable(); + table.addOperation("test",OperationType.INSERT,(new JSONObject(new String[]{"test3", "Test4"})).toString()); + String output=null; + try { + output = MDBCUtils.toString(table); + } catch (IOException e) { + e.printStackTrace(); + fail(); + } + assertTrue(output!=null); + assertTrue(!output.isEmpty()); + } + + @Test + public void toStringTest2() { + HashMap mapToSerialize = new HashMap<>(); + StagingTable table = new StagingTable(); + table.addOperation("test",OperationType.INSERT,(new JSONObject(new String[]{"test3", "Test4"})).toString()); + mapToSerialize.put("table",table); + String output=null; + try { + output = MDBCUtils.toString(mapToSerialize); + } catch (IOException e) { + e.printStackTrace(); + fail(); + } + assertTrue(output!=null); + assertTrue(!output.isEmpty()); + } + + @Test + public void toStringTest3() { + String testStr = "test"; + OperationType typeTest = OperationType.INSERT; + String output=null; + try { + output = MDBCUtils.toString(testStr); + } catch (IOException e) { + e.printStackTrace(); + fail(); + } + assertTrue(output!=null); + assertTrue(!output.isEmpty()); + output=null; + try { + output = MDBCUtils.toString(typeTest); + } catch (IOException e) { + e.printStackTrace(); + fail(); + } + assertTrue(output!=null); + assertTrue(!output.isEmpty()); + } +} \ No newline 
at end of file diff --git a/src/test/java/org/onap/music/mdbc/test/ALLTESTS.java b/src/test/java/org/onap/music/mdbc/test/ALLTESTS.java new file mode 100755 index 0000000..d4ef4c3 --- /dev/null +++ b/src/test/java/org/onap/music/mdbc/test/ALLTESTS.java @@ -0,0 +1,14 @@ +package org.onap.music.mdbc.test; + +import org.junit.runner.RunWith; +import org.junit.runners.Suite; + +@RunWith(Suite.class) +@Suite.SuiteClasses({ + //BasicTest.class, + //CrossSiteTest.class, + //TransactionTest.class +}) + +public class ALLTESTS { +} diff --git a/src/test/java/org/onap/music/mdbc/test/BasicTest.java b/src/test/java/org/onap/music/mdbc/test/BasicTest.java new file mode 100755 index 0000000..b88a22e --- /dev/null +++ b/src/test/java/org/onap/music/mdbc/test/BasicTest.java @@ -0,0 +1,77 @@ +package org.onap.music.mdbc.test; + +import static org.junit.Assert.*; + +import java.sql.Connection; +import java.sql.ResultSet; +import java.sql.SQLException; +import java.sql.Statement; + + +/** + * This is a basic test which creates some tables, does a few selects, adn runs some joins. + * It is mainly intended to make sure that no exceptions are thrown in basic operation. + */ +public class BasicTest extends TestCommon { + private static final String DB_CONNECTION = "avatica://" + "mem:db1"; + private static final String KEYSPACE = "Basic_Test"; + + //@Test + public void test() { + try { + Connection connection = getDBConnection(DB_CONNECTION, KEYSPACE, "0"); + assertNotNull(connection); + System.out.println("GOT conn"); + Statement stmt = connection.createStatement(); + assertNotNull(stmt); + System.out.println("GOT stmt"); + + try { + connection.setAutoCommit(false); + stmt.execute("CREATE TABLE IF NOT EXISTS PERSON(ID_ varchar(255), NAME varchar(255), SSN varchar(255), primary key (ID_))"); + stmt.execute("INSERT INTO PERSON(ID_, NAME, SSN) VALUES('1', 'Anju', '111-22-3333')"); + stmt.execute("INSERT INTO PERSON(ID_, NAME, SSN) VALUES('2', 'Sonia', '111-22-4444')"); + stmt.execute("INSERT INTO PERSON(ID_, NAME, SSN) VALUES('3', 'Asha', '111-55-6666')"); + dumptable(connection); + + stmt.execute("DELETE FROM PERSON WHERE ID_ = '1'"); + dumptable(connection); + + stmt.execute("UPDATE PERSON SET NAME = 'foobar' WHERE ID_ = '2'"); + dumptable(connection); + + stmt.execute("CREATE TABLE IF NOT EXISTS SONG(ID_ varchar(255), PREF int, ARIA varchar(255), primary key (ID_, PREF))"); + stmt.execute("INSERT INTO SONG(ID_, PREF, ARIA) VALUES('1', 1, 'Nessun Dorma')"); + stmt.execute("INSERT INTO SONG(ID_, PREF, ARIA) VALUES('2', 5, 'O mio Bambino Caro')"); + stmt.execute("INSERT INTO SONG(ID_, PREF, ARIA) VALUES('2', 2, 'Sweet Georgia Brown')"); + stmt.execute("INSERT INTO SONG(ID_, PREF, ARIA) VALUES('3', 77, 'Mud Flats Blues')"); + stmt.execute("INSERT INTO SONG(ID_, PREF, ARIA) VALUES('3', 69, 'Me & Mr Jones')"); + ResultSet rs = stmt.executeQuery("SELECT * FROM PERSON AS P, SONG AS S WHERE P.ID_ = S.ID_"); + while (rs.next()) { + System.out.println("ID_ " + rs.getInt("ID_") + " Name: " + rs.getString("NAME") + " Aria: " + rs.getString("ARIA")); + } + rs.close(); + stmt.close(); + connection.commit(); + } catch (Exception e) { + fail(e.toString()); + } finally { + connection.close(); + } + } catch (Exception e) { + e.printStackTrace(); + fail(e.toString()); + } + System.out.println("BasicTest.test OK"); + } + + private void dumptable(Connection connection) throws SQLException { + Statement stmt = connection.createStatement(); + ResultSet rs = stmt.executeQuery("SELECT * FROM PERSON"); + while (rs.next()) { + 
System.out.println("ID_ " + rs.getInt("ID_") + " Name " + rs.getString("name")); + } + stmt.close(); + System.out.println("--"); + } +} diff --git a/src/test/java/org/onap/music/mdbc/test/CrossSiteTest.java b/src/test/java/org/onap/music/mdbc/test/CrossSiteTest.java new file mode 100755 index 0000000..d15589f --- /dev/null +++ b/src/test/java/org/onap/music/mdbc/test/CrossSiteTest.java @@ -0,0 +1,447 @@ +package org.onap.music.mdbc.test; + +import static org.junit.Assert.*; + +import java.io.Reader; +import java.io.StringReader; +import java.sql.CallableStatement; +import java.sql.Connection; +import java.sql.PreparedStatement; +import java.sql.ResultSet; +import java.sql.Statement; +import java.sql.Timestamp; +import java.util.Random; + +import org.apache.log4j.Logger; +import org.junit.After; +import org.junit.Before; +import org.junit.BeforeClass; +import org.junit.Test; + + +/** + * This test tests a copy of data from DB1 to DB2. It tests the following H2 data types: + * VARCHAR, VARBINARY, INTEGER, BOOLEAN, DOUBLE, CLOB, TIMESTAMP. + */ +public class CrossSiteTest extends TestCommon { + private static final String DB_CONNECTION1 = "avatica://" + "mem:db1"; + private static final String DB_CONNECTION2 = "avatica://" + "mem:db2"; + private static final String KEYSPACE = "CrossSite_Test"; + private final static Logger logger = Logger.getLogger(CrossSiteTest.class); + + private Connection db1, db2; + + //@BeforeClass + public static void setUpBeforeClass() throws Exception { + // drop the keyspace + } + + //@Before + public void setUp() throws Exception { + db1 = getDBConnection(DB_CONNECTION1, KEYSPACE, "0"); + db2 = getDBConnection(DB_CONNECTION2, KEYSPACE, "1"); + } + + //@After + public void tearDown() throws Exception { + db1.close(); + db2.close(); + } + + //@Test + public void testCopyOneToTwo() { + String sql = "CREATE TABLE IF NOT EXISTS DATA(KEY VARCHAR(255), PRIMARY KEY (KEY))"; + createTable(sql); + + // Put data in DB1 + try { + Statement s = db1.createStatement(); + s.execute("INSERT INTO DATA(KEY) VALUES('AAA')"); + s.execute("INSERT INTO DATA(KEY) VALUES('BBB')"); + s.execute("INSERT INTO DATA(KEY) VALUES('CCC')"); + s.execute("INSERT INTO DATA(KEY) VALUES('DDD')"); + db1.commit(); + s.close(); + } catch (Exception e) { + fail("1: " + e.toString()); + } + // Get data in DB2 + logger.info(" Get data in DB2"); + try { + Statement s = db2.createStatement(); + ResultSet rs = s.executeQuery("SELECT COUNT(*) FROM DATA"); + if (rs.next()) { + int n = rs.getInt(1); + assertEquals(4, n); + } else { + fail("SELECT COUNT(*) produced no result"); + } + } catch (Exception e) { + logger.error(e); + e.printStackTrace(); + fail("2: " + e.toString()); + } + // Delete a row + try { + Statement s = db1.createStatement(); + s.execute("DELETE FROM DATA WHERE KEY = 'CCC'"); + db1.commit(); + s.close(); + } catch (Exception e) { + fail("1: " + e.toString()); + } + // Recheck + logger.info(" Get data in DB2"); + try { + Statement s = db2.createStatement(); + ResultSet rs = s.executeQuery("SELECT COUNT(*) FROM DATA"); + if (rs.next()) { + int n = rs.getInt(1); + assertEquals(3, n); + } else { + fail("SELECT COUNT(*) produced no result"); + } + } catch (Exception e) { + logger.error(e); + e.printStackTrace(); + fail("2: " + e.toString()); + } + System.out.println("CrossSiteTest.testCopyOneToTwo OK"); + } + + //@Test + public void testCopyWithPreparedStatement() { + String sql = "CREATE TABLE IF NOT EXISTS DATA2(KEY VARCHAR(255), PRIMARY KEY (KEY))"; + createTable(sql); + + // Put data in DB1 + try 
{ + Statement s = db1.createStatement(); + PreparedStatement ps = db1.prepareStatement("INSERT INTO DATA2(KEY) VALUES(?)"); + for (String v : new String[] { "WWW", "XXX", "YYY", "ZZZ" } ) { + ps.setString(1, v); + ps.execute(); + } + db1.commit(); + s.close(); + } catch (Exception e) { + fail("1: " + e.toString()); + } + // Get data in DB2 + logger.info(" Get data in DB2"); + try { + Statement s = db2.createStatement(); + ResultSet rs = s.executeQuery("SELECT COUNT(*) FROM DATA2"); + if (rs.next()) { + int n = rs.getInt(1); + assertEquals(4, n); + } else { + fail("SELECT COUNT(*) produced no result"); + } + } catch (Exception e) { + logger.error(e); + e.printStackTrace(); + fail("2: " + e.toString()); + } + System.out.println("CrossSiteTest.testCopyWithPreparedStatement OK"); + } + + //@Test + public void testDataTypes() { + String sql = "CREATE TABLE IF NOT EXISTS DATATYPES(KEY VARCHAR(255), I1 INTEGER, B1 BOOLEAN, D1 DOUBLE, S1 VARCHAR, PRIMARY KEY (KEY))"; + createTable(sql); + + String key = "ThIs Is ThE KeY"; + String key2 = "ThIs Is another KeY"; + String s1 = "The Rain in Spain"; + int i1 = 696969; + boolean b1 = true; + double pi = Math.PI; + double e = Math.E; + + // Put data in DB1 + try { + PreparedStatement ps = db1.prepareStatement("INSERT INTO DATATYPES(KEY, I1, B1, D1, S1) VALUES(?, ?, ?, ?, ?)"); + ps.setString(1, key); + ps.setInt(2, i1); + ps.setBoolean(3, b1); + ps.setDouble(4, pi); + ps.setString(5, s1); + ps.execute(); + + ps.setString(1, key2); + ps.setInt(2, 123456); + ps.setBoolean(3, false); + ps.setDouble(4, e); + ps.setString(5, "Fee fi fo fum!"); + ps.execute(); + db1.commit(); + ps.close(); + } catch (Exception ex) { + fail("1: " + ex.toString()); + } + // Get data in DB2 + logger.info(" Get data in DB2"); + try { + Statement s = db2.createStatement(); + ResultSet rs = s.executeQuery("SELECT * FROM DATATYPES"); + if (rs.next()) { + assertEquals(key, rs.getString(1)); + assertEquals(i1, rs.getInt(2)); + assertEquals(b1, rs.getBoolean(3)); + assertEquals(pi, rs.getDouble(4), 0.0); + assertEquals(s1, rs.getString(5)); + } else { + fail("SELECT * FROM DATATYPES"); + } + } catch (Exception ex) { + logger.error(ex); + ex.printStackTrace(); + fail("2: " + ex.toString()); + } + System.out.println("CrossSiteTest.testDataTypes OK"); + } + + //@Test + public void testIdentityColumn() { + String sql = "CREATE TABLE IF NOT EXISTS IDENTITYTEST(KEY IDENTITY, S1 VARCHAR, T1 TIMESTAMP, PRIMARY KEY (KEY))"; + createTable(sql); + + String s1 = "ThIs Is ThE IDENTITY test"; + Timestamp ts = new Timestamp(-3535344000L); + + // Put data in DB1 + try { + PreparedStatement ps = db1.prepareStatement("INSERT INTO IDENTITYTEST(S1, T1) VALUES(?, ?)"); + ps.setString(1, s1); + ps.setTimestamp(2, ts); + ps.execute(); + db1.commit(); + ps.close(); + } catch (Exception ex) { + fail("testIdentity 1: " + ex.toString()); + } + // Get data in DB2 + logger.info(" Get data in DB2"); + try { + Statement s = db2.createStatement(); + ResultSet rs = s.executeQuery("SELECT * FROM IDENTITYTEST"); + if (rs.next()) { + assertEquals(s1, rs.getString("s1")); + assertEquals(ts, rs.getTimestamp("t1")); + } else { + fail("SELECT * FROM DATATYPES"); + } + } catch (Exception ex) { + logger.error(ex); + ex.printStackTrace(); + fail("testIdentity 2: " + ex.toString()); + } + System.out.println("CrossSiteTest.testIdentityColumn OK"); + } + + //@Test + public void testBLOBColumn() { + String sql = "CREATE TABLE IF NOT EXISTS BLOBTEST (KEY VARCHAR, V1 VARBINARY, C1 CLOB, PRIMARY KEY (KEY))";// add + createTable(sql); + 
+ String key = "BLOB test"; + byte[] v1 = new byte[4096]; + new Random().nextBytes(v1); + String constitution = + "We the People of the United States, in Order to form a more perfect Union, establish Justice, insure domestic Tranquility, provide for the common defense, promote the "+ + "general Welfare, and secure the Blessings of Liberty to ourselves and our Posterity, do ordain and establish this Constitution for the United States of America."+ + "Section 1"+ + "All legislative Powers herein granted shall be vested in a Congress of the United States, which shall consist of a Senate and House of Representatives."+ + ""+ + "Section 2"+ + "1: The House of Representatives shall be composed of Members chosen every second Year by the People of the several States, and the Electors in each State shall "+ + "have the Qualifications requisite for Electors of the most numerous Branch of the State Legislature."+ + ""+ + "2: No Person shall be a Representative who shall not have attained to the Age of twenty five Years, and been seven Years a Citizen of the United States, "+ + "and who shall not, when elected, be an Inhabitant of that State in which he shall be chosen."+ + ""+ + "3: Representatives and direct Taxes shall be apportioned among the several States which may be included within this Union, according to their respective Numbers, which shall be determined "+ + "by adding to the whole Number of free Persons, including those bound to Service for a Term of Years, and excluding Indians not taxed, three fifths of all other Persons. "+ + "2 The actual Enumeration shall be made within three Years after the first Meeting of the Congress of the United States, and within every subsequent Term of ten Years, in such Manner as "+ + "they shall by Law direct. The Number of Representatives shall not exceed one for every thirty Thousand, but each State shall have at Least one Representative; and until such enumeration "+ + "shall be made, the State of New Hampshire shall be entitled to chuse three, Massachusetts eight, Rhode-Island and Providence Plantations one, Connecticut five, New-York six, New Jersey four, "+ + "Pennsylvania eight, Delaware one, Maryland six, Virginia ten, North Carolina five, South Carolina five, and Georgia three."+ + ""+ + "4: When vacancies happen in the Representation from any State, the Executive Authority thereof shall issue Writs of Election to fill such Vacancies."+ + ""+ + "5: The House of Representatives shall chuse their Speaker and other Officers; and shall have the sole Power of Impeachment."+ + "etc., etc. 
..."; + Reader c1 = new StringReader(constitution); + + // Put data in DB1 + try { + CallableStatement ps = db1.prepareCall("INSERT INTO BLOBTEST(KEY, V1, C1) VALUES (?, ?, ?)"); + ps.setString(1, key); + ps.setBytes(2, v1); + ps.setClob(3, c1); + ps.execute(); + db1.commit(); + ps.close(); + } catch (Exception ex) { + ex.printStackTrace(); + fail("testBLOBColumn 1: " + ex.toString()); + } + // Get data in DB2 + logger.info(" Get data in DB2"); + try { + Statement s = db2.createStatement(); + ResultSet rs = s.executeQuery("SELECT * FROM BLOBTEST"); + if (rs.next()) { + String v1s = new String(v1); + assertEquals(key, rs.getString("key")); + assertEquals(v1s, new String(rs.getBytes("v1"))); + assertEquals(constitution, new String(rs.getBytes("c1"))); + } else { + fail("SELECT * FROM BLOBTEST"); + } + } catch (Exception ex) { + logger.error(ex); + ex.printStackTrace(); + fail("testBLOBColumn 2: " + ex.toString()); + } + System.out.println("CrossSiteTest.testBLOBColumn OK"); + } + + //@Test + public void testSecondaryIndex() { + String sql = "CREATE TABLE IF NOT EXISTS ARTISTS (ARTIST VARCHAR, GENRE VARCHAR, AGE INT, PRIMARY KEY (ARTIST))"; + createTable(sql); + + // Put data in DB1 + try { + Statement s = db1.createStatement(); + s.execute("INSERT INTO ARTISTS(ARTIST, GENRE, AGE) VALUES('Anne-Sophie', 'classical', 53)"); + s.execute("INSERT INTO ARTISTS(ARTIST, GENRE, AGE) VALUES('Dizz', 'jazz', 99)"); + s.execute("INSERT INTO ARTISTS(ARTIST, GENRE, AGE) VALUES('Esperanza', 'jazz', 32)"); + s.execute("INSERT INTO ARTISTS(ARTIST, GENRE, AGE) VALUES('Miles', 'jazz', 90)"); + s.execute("INSERT INTO ARTISTS(ARTIST, GENRE, AGE) VALUES('Yo-yo', 'classical', 61)"); + s.execute("CREATE INDEX BYGENRE on ARTISTS(GENRE)"); + db1.commit(); + s.close(); + } catch (Exception e) { + fail("1: " + e.toString()); + } + // Get data in DB2 + logger.info(" Get data in DB2"); + try { + Statement s = db2.createStatement(); + ResultSet rs = s.executeQuery("SELECT COUNT(*) FROM ARTISTS WHERE GENRE = 'jazz'"); + if (rs.next()) { + int n = rs.getInt(1); + assertEquals(3, n); + } else { + fail("SELECT COUNT(*) produced no result"); + } + } catch (Exception e) { + logger.error(e); + e.printStackTrace(); + fail("2: " + e.toString()); + } + // Delete a row + try { + Statement s = db1.createStatement(); + s.execute("DELETE FROM ARTISTS WHERE ARTIST = 'Miles'"); + db1.commit(); + s.close(); + } catch (Exception e) { + fail("1: " + e.toString()); + } + // Recheck + logger.info(" Get data in DB2"); + try { + Statement s = db2.createStatement(); + ResultSet rs = s.executeQuery("SELECT COUNT(*) FROM ARTISTS WHERE GENRE = 'jazz'"); + if (rs.next()) { + int n = rs.getInt(1); + assertEquals(2, n); + } else { + fail("SELECT COUNT(*) produced no result"); + } + } catch (Exception e) { + logger.error(e); + e.printStackTrace(); + fail("2: " + e.toString()); + } + System.out.println("CrossSiteTest.testSecondaryIndex OK"); + } + + //@Test + public void testUpdate() { + String sql = "CREATE TABLE IF NOT EXISTS UPDATETEST(KEY VARCHAR(255), OTHER VARCHAR(255), PRIMARY KEY (KEY))"; + createTable(sql); + + // Put data in DB1 + try { + Statement s = db1.createStatement(); + s.execute("INSERT INTO UPDATETEST(KEY, OTHER) VALUES('foo', 'bar')"); + s.execute("INSERT INTO UPDATETEST(KEY, OTHER) VALUES('bar', 'nixon')"); + db1.commit(); + s.close(); + } catch (Exception e) { + fail("1: " + e.toString()); + } + // Get data in DB2 + logger.info(" Get data in DB2"); + try { + Statement s = db2.createStatement(); + ResultSet rs = s.executeQuery("SELECT 
COUNT(*) FROM UPDATETEST"); + if (rs.next()) { + int n = rs.getInt(1); + assertEquals(2, n); + } else { + fail("SELECT COUNT(*) produced no result"); + } + } catch (Exception e) { + logger.error(e); + e.printStackTrace(); + fail("2: " + e.toString()); + } + // Update a row + try { + Statement s = db2.createStatement(); + s.execute("UPDATE UPDATETEST SET OTHER = 'obama' WHERE KEY = 'bar'"); + db2.commit(); + s.close(); + } catch (Exception e) { + fail("1: " + e.toString()); + } + // Recheck + logger.info(" Get data in DB2"); + try { + Statement s = db1.createStatement(); + ResultSet rs = s.executeQuery("SELECT OTHER FROM UPDATETEST WHERE KEY = 'bar'"); + if (rs.next()) { + String str = rs.getString("OTHER"); + assertEquals("obama", str); + } else { + fail("SELECT OTHER produced no result"); + } + } catch (Exception e) { + logger.error(e); + e.printStackTrace(); + fail("2: " + e.toString()); + } + System.out.println("CrossSiteTest.testUpdate OK"); + } + + private void createTable(String sql) { + try { + for (Connection db : new Connection[] { db1, db2 }) { + logger.info(" start: "+db); + Statement s = db.createStatement(); + s.execute(sql); + db.commit(); + s.close(); + logger.info(" Tables created"); + } + } catch (Exception e) { + fail(e.toString()); + } + } +} diff --git a/src/test/java/org/onap/music/mdbc/test/TestCommon.java b/src/test/java/org/onap/music/mdbc/test/TestCommon.java new file mode 100755 index 0000000..3067b01 --- /dev/null +++ b/src/test/java/org/onap/music/mdbc/test/TestCommon.java @@ -0,0 +1,25 @@ +package org.onap.music.mdbc.test; + +import java.sql.Connection; +import java.sql.DriverManager; +import java.sql.SQLException; +import java.util.Properties; + +import org.onap.music.mdbc.mixins.CassandraMixin; + +public class TestCommon { + public static final String DB_DRIVER = "avatica.Driver"; + public static final String DB_USER = ""; + public static final String DB_PASSWORD = ""; + + public Connection getDBConnection(String url, String keyspace, String id) throws SQLException, ClassNotFoundException { + Class.forName(DB_DRIVER); + Properties driver_info = new Properties(); + driver_info.put(CassandraMixin.KEY_MY_ID, id); + driver_info.put(CassandraMixin.KEY_REPLICAS, "0,1,2"); + driver_info.put(CassandraMixin.KEY_MUSIC_ADDRESS, "localhost"); + driver_info.put("user", DB_USER); + driver_info.put("password", DB_PASSWORD); + return DriverManager.getConnection(url, driver_info); + } +} diff --git a/src/test/java/org/onap/music/mdbc/test/TransactionTest.java b/src/test/java/org/onap/music/mdbc/test/TransactionTest.java new file mode 100755 index 0000000..08eba8a --- /dev/null +++ b/src/test/java/org/onap/music/mdbc/test/TransactionTest.java @@ -0,0 +1,164 @@ +package org.onap.music.mdbc.test; + +import static org.junit.Assert.fail; + +import java.sql.Connection; +import java.sql.ResultSet; +import java.sql.SQLException; +import java.sql.Statement; +import java.util.Arrays; +import java.util.HashSet; +import java.util.Set; + +import org.apache.log4j.Logger; +import org.junit.Test; + + +public class TransactionTest extends TestCommon { + private static final String DB_CONNECTION1 = "avatica://" + "mem:db1"; + private static final String DB_CONNECTION2 = "avatica://" + "mem:db2"; + private static final String KEYSPACE = "CrossSite_Test"; + private final static Logger logger = Logger.getLogger(CrossSiteTest.class); + + //@Test + public void testWithAutocommitTrue() { + System.out.println("START TransactionTest.testWithAutocommitTrue"); + Set vals = new HashSet(Arrays.asList("1", 
"2", "3")); + Connection db1 = null, db2 = null; + try { + db1 = getDBConnection(DB_CONNECTION1, KEYSPACE, "0"); + db2 = getDBConnection(DB_CONNECTION2, KEYSPACE, "1"); + createTable(new Connection[] { db1, db2 }); + db1.setAutoCommit(true); + insert(db1, vals); + readcheck(db2, vals); + } catch (Exception e) { + fail("Unexpected exception: "+e); + } finally { + try { + if (db1 != null) + db1.close(); + if (db2 != null) + db2.close(); + } catch (SQLException e) { + // ignore + } + } + } + //@Test + public void testCommit() { + System.out.println("START TransactionTest.testCommit"); + Set vals = new HashSet(Arrays.asList("1", "2", "3", "4")); + Set val2 = new HashSet(Arrays.asList("1", "2", "4")); + Connection db1 = null, db2 = null; + try { + db1 = getDBConnection(DB_CONNECTION1, KEYSPACE, "0"); + db2 = getDBConnection(DB_CONNECTION2, KEYSPACE, "1"); + createTable(new Connection[] { db1, db2 }); + db1.setAutoCommit(false); + insert(db1, vals); + delete(db1, new HashSet(Arrays.asList("3"))); + readcheck(db1, val2); + readcheck(db2, new HashSet()); + db1.commit(); + readcheck(db2, val2); + } catch (Exception e) { + fail("Unexpected exception: "+e); + } finally { + try { + if (db1 != null) + db1.close(); + if (db2 != null) + db2.close(); + } catch (SQLException e) { + // ignore + } + } + } + //@Test + public void testRollback() { + System.out.println("START TransactionTest.testRollback"); + Set vals = new HashSet(Arrays.asList("1", "2", "3", "4")); + Connection db1 = null, db2 = null; + try { + db1 = getDBConnection(DB_CONNECTION1, KEYSPACE, "0"); + db2 = getDBConnection(DB_CONNECTION2, KEYSPACE, "1"); + createTable(new Connection[] { db1, db2 }); + db1.setAutoCommit(false); + insert(db1, vals); + readcheck(db1, vals); + readcheck(db2, new HashSet()); + db1.rollback(); + readcheck(db1, new HashSet()); + readcheck(db2, new HashSet()); + } catch (Exception e) { + fail("Unexpected exception: "+e); + } finally { + try { + if (db1 != null) + db1.close(); + if (db2 != null) + db2.close(); + } catch (SQLException e) { + // ignore + } + } + } + private void createTable(Connection[] c) { + try { + for (Connection db : c) { + logger.info(" start: "+db); + Statement s = db.createStatement(); + s.execute("CREATE TABLE IF NOT EXISTS TRANSTEST(KEY VARCHAR(255), PRIMARY KEY (KEY))"); + s.close(); + logger.info(" Tables created"); + } + } catch (Exception e) { + fail(e.toString()); + } + } + private void insert(Connection db, Set vals) { + // Put data in DB1 + try { + Statement s = db.createStatement(); + for (String v : vals) + s.execute("INSERT INTO TRANSTEST(KEY) VALUES('"+v+"')"); + s.close(); + } catch (Exception e) { + fail("1: " + e.toString()); + } + } + private void delete(Connection db, Set vals) { + // Put data in DB1 + try { + Statement s = db.createStatement(); + for (String v : vals) + s.execute("DELETE FROM TRANSTEST WHERE KEY = '"+v+"'"); + s.close(); + } catch (Exception e) { + fail("1: " + e.toString()); + } + } + private void readcheck(Connection db, Set vals) { + try { + Statement s = db.createStatement(); + ResultSet rs = s.executeQuery("SELECT * FROM TRANSTEST"); + Set newset = new HashSet(); + while (rs.next()) { + String tmp = rs.getString(1); + newset.add(tmp); + } + if (vals.size() != newset.size()) { + fail("wrong number of elements, expected "+vals.size()+" got "+newset.size()); + } + for (String t : vals) { + if (!newset.contains(t)) + fail("missing element: "+t); + } + } catch (Exception e) { + logger.error(e); + e.printStackTrace(); + fail("2: " + e.toString()); + } + } +} -- 