aboutsummaryrefslogtreecommitdiffstats
path: root/src/main/java/com/att/research
diff options
context:
space:
mode:
authorTschaen, Brendan <ctschaen@att.com>2018-10-16 15:13:32 -0400
committerTschaen, Brendan <ctschaen@att.com>2018-10-16 15:15:16 -0400
commit73f8de325b31d350883f6752fb04d63c41112e8f (patch)
treea786305e313d8cdc040b50f995ed9342173ee571 /src/main/java/com/att/research
parent389265e6342302ce5a8db5d4a3cc215e2c24c97a (diff)
Initial commit
Change-Id: I510baf73e4d35b651fb04e5b8e038dacb6a5a130 Issue-ID: MUSIC-149 Signed-off-by: Tschaen, Brendan <ctschaen@att.com>
Diffstat (limited to 'src/main/java/com/att/research')
-rw-r--r--src/main/java/com/att/research/exceptions/MDBCServiceException.java88
-rw-r--r--src/main/java/com/att/research/exceptions/QueryException.java90
-rw-r--r--src/main/java/com/att/research/logging/EELFLoggerDelegate.java339
-rw-r--r--src/main/java/com/att/research/logging/format/AppMessages.java156
-rw-r--r--src/main/java/com/att/research/logging/format/ErrorSeverity.java37
-rw-r--r--src/main/java/com/att/research/logging/format/ErrorTypes.java44
-rw-r--r--src/main/java/com/att/research/mdbc/ArchiveProcess.java43
-rw-r--r--src/main/java/com/att/research/mdbc/Configuration.java18
-rw-r--r--src/main/java/com/att/research/mdbc/DatabaseOperations.java443
-rw-r--r--src/main/java/com/att/research/mdbc/DatabasePartition.java190
-rw-r--r--src/main/java/com/att/research/mdbc/LockId.java46
-rw-r--r--src/main/java/com/att/research/mdbc/MDBCUtils.java62
-rw-r--r--src/main/java/com/att/research/mdbc/MdbcCallableStatement.java738
-rw-r--r--src/main/java/com/att/research/mdbc/MdbcConnection.java419
-rw-r--r--src/main/java/com/att/research/mdbc/MdbcPreparedStatement.java743
-rw-r--r--src/main/java/com/att/research/mdbc/MdbcServer.java162
-rw-r--r--src/main/java/com/att/research/mdbc/MdbcServerLogic.java312
-rw-r--r--src/main/java/com/att/research/mdbc/MdbcStatement.java416
-rwxr-xr-xsrc/main/java/com/att/research/mdbc/MusicSqlManager.java300
-rwxr-xr-xsrc/main/java/com/att/research/mdbc/ProxyStatement.java1262
-rw-r--r--src/main/java/com/att/research/mdbc/Range.java34
-rw-r--r--src/main/java/com/att/research/mdbc/RedoRow.java29
-rw-r--r--src/main/java/com/att/research/mdbc/StateManager.java205
-rwxr-xr-xsrc/main/java/com/att/research/mdbc/TableInfo.java75
-rw-r--r--src/main/java/com/att/research/mdbc/configurations/NodeConfiguration.java71
-rw-r--r--src/main/java/com/att/research/mdbc/configurations/TablesConfiguration.java180
-rw-r--r--src/main/java/com/att/research/mdbc/configurations/config-0.json16
-rw-r--r--src/main/java/com/att/research/mdbc/configurations/ranges.json14
-rw-r--r--src/main/java/com/att/research/mdbc/configurations/tableConfiguration.json19
-rw-r--r--src/main/java/com/att/research/mdbc/examples/EtdbTestClient.java125
-rwxr-xr-xsrc/main/java/com/att/research/mdbc/mixins/Cassandra2Mixin.java287
-rwxr-xr-xsrc/main/java/com/att/research/mdbc/mixins/CassandraMixin.java1288
-rwxr-xr-xsrc/main/java/com/att/research/mdbc/mixins/DBInterface.java91
-rwxr-xr-xsrc/main/java/com/att/research/mdbc/mixins/MixinFactory.java125
-rwxr-xr-xsrc/main/java/com/att/research/mdbc/mixins/MusicConnector.java124
-rwxr-xr-xsrc/main/java/com/att/research/mdbc/mixins/MusicInterface.java178
-rw-r--r--src/main/java/com/att/research/mdbc/mixins/MusicMixin.java249
-rwxr-xr-xsrc/main/java/com/att/research/mdbc/mixins/MySQLMixin.java784
-rw-r--r--src/main/java/com/att/research/mdbc/mixins/Operation.java31
-rw-r--r--src/main/java/com/att/research/mdbc/mixins/OperationType.java5
-rw-r--r--src/main/java/com/att/research/mdbc/mixins/PartitionInformation.java19
-rw-r--r--src/main/java/com/att/research/mdbc/mixins/RedoHistoryElement.java15
-rw-r--r--src/main/java/com/att/research/mdbc/mixins/RedoRecordId.java15
-rw-r--r--src/main/java/com/att/research/mdbc/mixins/StagingTable.java50
-rw-r--r--src/main/java/com/att/research/mdbc/mixins/TablePartitionInformation.java15
-rw-r--r--src/main/java/com/att/research/mdbc/mixins/TitReference.java12
-rw-r--r--src/main/java/com/att/research/mdbc/mixins/TransactionInformationElement.java19
-rw-r--r--src/main/java/com/att/research/mdbc/mixins/TxCommitProgress.java206
-rwxr-xr-xsrc/main/java/com/att/research/mdbc/mixins/Utils.java220
-rwxr-xr-xsrc/main/java/com/att/research/mdbc/mixins/package-info.java47
-rwxr-xr-xsrc/main/java/com/att/research/mdbc/package-info.java87
-rw-r--r--src/main/java/com/att/research/mdbc/tests/ConnectionTest.java419
-rwxr-xr-xsrc/main/java/com/att/research/mdbc/tests/MAIN.java106
-rwxr-xr-xsrc/main/java/com/att/research/mdbc/tests/Test.java105
-rwxr-xr-xsrc/main/java/com/att/research/mdbc/tests/Test_Delete.java70
-rwxr-xr-xsrc/main/java/com/att/research/mdbc/tests/Test_Insert.java94
-rwxr-xr-xsrc/main/java/com/att/research/mdbc/tests/Test_Transactions.java74
-rwxr-xr-xsrc/main/java/com/att/research/mdbc/tests/package-info.java165
-rw-r--r--src/main/java/com/att/research/mdbc/tools/CreateNodeConfigurations.java71
-rw-r--r--src/main/java/com/att/research/mdbc/tools/CreatePartition.java66
60 files changed, 11713 insertions, 0 deletions
diff --git a/src/main/java/com/att/research/exceptions/MDBCServiceException.java b/src/main/java/com/att/research/exceptions/MDBCServiceException.java
new file mode 100644
index 0000000..46cc1f7
--- /dev/null
+++ b/src/main/java/com/att/research/exceptions/MDBCServiceException.java
@@ -0,0 +1,88 @@
+/*
+ * ============LICENSE_START==========================================
+ * org.onap.music
+ * ===================================================================
+ * Copyright (c) 2017 AT&T Intellectual Property
+ * ===================================================================
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ * ============LICENSE_END=============================================
+ * ====================================================================
+ */
+
+package com.att.research.exceptions;
+
/**
 * Checked exception raised by MDBC service-layer operations. In addition to
 * the standard {@link Exception} message/cause, it carries an optional numeric
 * error code and a supplementary error message that callers may set after
 * construction.
 *
 * @author inam
 */
public class MDBCServiceException extends Exception {

	private static final long serialVersionUID = 1L;

	/** Numeric code classifying the failure; 0 until {@link #setErrorCode(int)} is called. */
	private int errorCode;
	/** Supplementary error text, distinct from {@link #getMessage()}; null until set. */
	private String errorMessage;

	/** Creates an exception with no detail message and no cause. */
	public MDBCServiceException() {
		super();
	}

	/**
	 * Creates an exception with a detail message.
	 *
	 * @param message detail message describing the failure
	 */
	public MDBCServiceException(String message) {
		super(message);
	}

	/**
	 * Creates an exception wrapping an underlying cause.
	 *
	 * @param cause the underlying throwable being wrapped
	 */
	public MDBCServiceException(Throwable cause) {
		super(cause);
	}

	/**
	 * Creates an exception with both a detail message and a cause.
	 *
	 * @param message detail message describing the failure
	 * @param cause the underlying throwable being wrapped
	 */
	public MDBCServiceException(String message, Throwable cause) {
		super(message, cause);
	}

	/**
	 * Full-control constructor mirroring {@link Exception}'s protected variant.
	 *
	 * @param message detail message describing the failure
	 * @param cause the underlying throwable being wrapped
	 * @param enableSuppression whether suppression is enabled
	 * @param writableStackTrace whether the stack trace should be writable
	 */
	public MDBCServiceException(String message, Throwable cause, boolean enableSuppression,
			boolean writableStackTrace) {
		super(message, cause, enableSuppression, writableStackTrace);
	}

	/** @return the numeric error code associated with this exception */
	public int getErrorCode() {
		return errorCode;
	}

	/** @param errorCode numeric code to associate with this exception */
	public void setErrorCode(int errorCode) {
		this.errorCode = errorCode;
	}

	/** @return the supplementary error message, possibly null */
	public String getErrorMessage() {
		return errorMessage;
	}

	/** @param errorMessage supplementary error text to store */
	public void setErrorMessage(String errorMessage) {
		this.errorMessage = errorMessage;
	}

}
diff --git a/src/main/java/com/att/research/exceptions/QueryException.java b/src/main/java/com/att/research/exceptions/QueryException.java
new file mode 100644
index 0000000..77445e5
--- /dev/null
+++ b/src/main/java/com/att/research/exceptions/QueryException.java
@@ -0,0 +1,90 @@
+/*
+ * ============LICENSE_START==========================================
+ * org.onap.music
+ * ===================================================================
+ * Copyright (c) 2017 AT&T Intellectual Property
+ * ===================================================================
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ * ============LICENSE_END=============================================
+ * ====================================================================
+ */
+package com.att.research.exceptions;
+
+
+
/**
 * Checked exception signalling a failure while building or executing a query.
 * Optionally records a numeric error code supplied at construction time (the
 * code is currently write-only; no accessor is exposed).
 *
 * @author inam
 */
public class QueryException extends Exception {

	private static final long serialVersionUID = 1L;

	/** Numeric code supplied by the (message, code) constructor; not otherwise read. */
	@SuppressWarnings("unused")
	private int errorCode;

	/** Creates an exception with no detail message and no cause. */
	public QueryException() {
		super();
	}

	/**
	 * Creates an exception with a detail message.
	 *
	 * @param message detail message describing the query failure
	 */
	public QueryException(String message) {
		super(message);
	}

	/**
	 * Creates an exception with a detail message and a numeric error code.
	 *
	 * @param message detail message describing the query failure
	 * @param errorCode numeric code classifying the failure
	 */
	public QueryException(String message, int errorCode) {
		super(message);
		this.errorCode = errorCode;
	}

	/**
	 * Creates an exception wrapping an underlying cause.
	 *
	 * @param cause the underlying throwable being wrapped
	 */
	public QueryException(Throwable cause) {
		super(cause);
	}

	/**
	 * Creates an exception with both a detail message and a cause.
	 *
	 * @param message detail message describing the query failure
	 * @param cause the underlying throwable being wrapped
	 */
	public QueryException(String message, Throwable cause) {
		super(message, cause);
	}

	/**
	 * Full-control constructor mirroring {@link Exception}'s protected variant.
	 *
	 * @param message detail message describing the query failure
	 * @param cause the underlying throwable being wrapped
	 * @param enableSuppression whether suppression is enabled
	 * @param writableStackTrace whether the stack trace should be writable
	 */
	public QueryException(String message, Throwable cause, boolean enableSuppression,
			boolean writableStackTrace) {
		super(message, cause, enableSuppression, writableStackTrace);
	}

}
diff --git a/src/main/java/com/att/research/logging/EELFLoggerDelegate.java b/src/main/java/com/att/research/logging/EELFLoggerDelegate.java
new file mode 100644
index 0000000..4e29a75
--- /dev/null
+++ b/src/main/java/com/att/research/logging/EELFLoggerDelegate.java
@@ -0,0 +1,339 @@
+
+package com.att.research.logging;
+
+import static com.att.eelf.configuration.Configuration.MDC_SERVER_FQDN;
+import static com.att.eelf.configuration.Configuration.MDC_SERVER_IP_ADDRESS;
+import static com.att.eelf.configuration.Configuration.MDC_SERVICE_INSTANCE_ID;
+import static com.att.eelf.configuration.Configuration.MDC_SERVICE_NAME;
+
+import java.net.InetAddress;
+import java.text.MessageFormat;
+import java.util.concurrent.ConcurrentHashMap;
+import java.util.concurrent.ConcurrentMap;
+
+import javax.servlet.http.HttpServletRequest;
+
+import org.slf4j.MDC;
+
+import com.att.eelf.configuration.EELFLogger;
+import com.att.eelf.configuration.EELFManager;
+import com.att.eelf.configuration.SLF4jWrapper;
+
+public class EELFLoggerDelegate extends SLF4jWrapper implements EELFLogger {
+
+ public static final EELFLogger errorLogger = EELFManager.getInstance().getErrorLogger();
+ public static final EELFLogger applicationLogger = EELFManager.getInstance().getApplicationLogger();
+ public static final EELFLogger auditLogger = EELFManager.getInstance().getAuditLogger();
+ public static final EELFLogger metricsLogger = EELFManager.getInstance().getMetricsLogger();
+ public static final EELFLogger debugLogger = EELFManager.getInstance().getDebugLogger();
+
+ private String className;
+ private static ConcurrentMap<String, EELFLoggerDelegate> classMap = new ConcurrentHashMap<>();
+
+ public EELFLoggerDelegate(final String className) {
+ super(className);
+ this.className = className;
+ }
+
+ /**
+ * Convenience method that gets a logger for the specified class.
+ *
+ * @see #getLogger(String)
+ *
+ * @param clazz
+ * @return Instance of EELFLoggerDelegate
+ */
+ public static EELFLoggerDelegate getLogger(Class<?> clazz) {
+ return getLogger(clazz.getName());
+ }
+
+ /**
+ * Gets a logger for the specified class name. If the logger does not already
+ * exist in the map, this creates a new logger.
+ *
+ * @param className
+ * If null or empty, uses EELFLoggerDelegate as the class name.
+ * @return Instance of EELFLoggerDelegate
+ */
+ public static EELFLoggerDelegate getLogger(final String className) {
+ String classNameNeverNull = className == null || "".equals(className) ? EELFLoggerDelegate.class.getName()
+ : className;
+ EELFLoggerDelegate delegate = classMap.get(classNameNeverNull);
+ if (delegate == null) {
+ delegate = new EELFLoggerDelegate(className);
+ classMap.put(className, delegate);
+ }
+ return delegate;
+ }
+
+ /**
+ * Logs a message at the lowest level: trace.
+ *
+ * @param logger
+ * @param msg
+ */
+ public void trace(EELFLogger logger, String msg) {
+ if (logger.isTraceEnabled()) {
+ logger.trace(msg);
+ }
+ }
+
+ /**
+ * Logs a message with parameters at the lowest level: trace.
+ *
+ * @param logger
+ * @param msg
+ * @param arguments
+ */
+ public void trace(EELFLogger logger, String msg, Object... arguments) {
+ if (logger.isTraceEnabled()) {
+ logger.trace(msg, arguments);
+ }
+ }
+
+ /**
+ * Logs a message and throwable at the lowest level: trace.
+ *
+ * @param logger
+ * @param msg
+ * @param th
+ */
+ public void trace(EELFLogger logger, String msg, Throwable th) {
+ if (logger.isTraceEnabled()) {
+ logger.trace(msg, th);
+ }
+ }
+
+ /**
+ * Logs a message at the second-lowest level: debug.
+ *
+ * @param logger
+ * @param msg
+ */
+ public void debug(EELFLogger logger, String msg) {
+ if (logger.isDebugEnabled()) {
+ logger.debug(msg);
+ }
+ }
+
+ /**
+ * Logs a message with parameters at the second-lowest level: debug.
+ *
+ * @param logger
+ * @param msg
+ * @param arguments
+ */
+ public void debug(EELFLogger logger, String msg, Object... arguments) {
+ if (logger.isDebugEnabled()) {
+ logger.debug(msg, arguments);
+ }
+ }
+
+ /**
+ * Logs a message and throwable at the second-lowest level: debug.
+ *
+ * @param logger
+ * @param msg
+ * @param th
+ */
+ public void debug(EELFLogger logger, String msg, Throwable th) {
+ if (logger.isDebugEnabled()) {
+ logger.debug(msg, th);
+ }
+ }
+
+ /**
+ * Logs a message at info level.
+ *
+ * @param logger
+ * @param msg
+ */
+ public void info(EELFLogger logger, String msg) {
+ logger.info(className + " - "+msg);
+ }
+
+ /**
+ * Logs a message with parameters at info level.
+ *
+ * @param logger
+ * @param msg
+ * @param arguments
+ */
+ public void info(EELFLogger logger, String msg, Object... arguments) {
+ logger.info(msg, arguments);
+ }
+
+ /**
+ * Logs a message and throwable at info level.
+ *
+ * @param logger
+ * @param msg
+ * @param th
+ */
+ public void info(EELFLogger logger, String msg, Throwable th) {
+ logger.info(msg, th);
+ }
+
+ /**
+ * Logs a message at warn level.
+ *
+ * @param logger
+ * @param msg
+ */
+ public void warn(EELFLogger logger, String msg) {
+ logger.warn(msg);
+ }
+
+ /**
+ * Logs a message with parameters at warn level.
+ *
+ * @param logger
+ * @param msg
+ * @param arguments
+ */
+ public void warn(EELFLogger logger, String msg, Object... arguments) {
+ logger.warn(msg, arguments);
+ }
+
+ /**
+ * Logs a message and throwable at warn level.
+ *
+ * @param logger
+ * @param msg
+ * @param th
+ */
+ public void warn(EELFLogger logger, String msg, Throwable th) {
+ logger.warn(msg, th);
+ }
+
+ /**
+ * Logs a message at error level.
+ *
+ * @param logger
+ * @param msg
+ */
+ public void error(EELFLogger logger, String msg) {
+ logger.error(className+ " - " + msg);
+ }
+
+ /**
+ * Logs a message with parameters at error level.
+ *
+ * @param logger
+ * @param msg
+ * @param arguments
+ */
+ public void error(EELFLogger logger, String msg, Object... arguments) {
+ logger.error(msg, arguments);
+ }
+
+ /**
+ * Logs a message and throwable at error level.
+ *
+ * @param logger
+ * @param msg
+ * @param th
+ */
+ public void error(EELFLogger logger, String msg, Throwable th) {
+ logger.error(msg, th);
+ }
+
+ /**
+ * Logs a message with the associated alarm severity at error level.
+ *
+ * @param logger
+ * @param msg
+ * @param severtiy
+ */
+ public void error(EELFLogger logger, String msg, Object /*AlarmSeverityEnum*/ severtiy) {
+ logger.error(msg);
+ }
+
+ /**
+ * Initializes the logger context.
+ */
+ public void init() {
+ setGlobalLoggingContext();
+ final String msg = "############################ Logging is started. ############################";
+ // These loggers emit the current date-time without being told.
+ info(applicationLogger, msg);
+ error(errorLogger, msg);
+ debug(debugLogger, msg);
+ info(auditLogger, msg);
+ info(metricsLogger, msg);
+ }
+
+
+ /**
+ * Builds a message using a template string and the arguments.
+ *
+ * @param message
+ * @param args
+ * @return
+ */
+ @SuppressWarnings("unused")
+ private String formatMessage(String message, Object... args) {
+ StringBuilder sbFormattedMessage = new StringBuilder();
+ if (args != null && args.length > 0 && message != null && message != "") {
+ MessageFormat mf = new MessageFormat(message);
+ sbFormattedMessage.append(mf.format(args));
+ } else {
+ sbFormattedMessage.append(message);
+ }
+
+ return sbFormattedMessage.toString();
+ }
+
+ /**
+ * Loads all the default logging fields into the MDC context.
+ */
+ private void setGlobalLoggingContext() {
+ MDC.put(MDC_SERVICE_INSTANCE_ID, "");
+ try {
+ MDC.put(MDC_SERVER_FQDN, InetAddress.getLocalHost().getHostName());
+ MDC.put(MDC_SERVER_IP_ADDRESS, InetAddress.getLocalHost().getHostAddress());
+ } catch (Exception e) {
+ errorLogger.error("setGlobalLoggingContext failed", e);
+ }
+ }
+
+ public static void mdcPut(String key, String value) {
+ MDC.put(key, value);
+ }
+
+ public static String mdcGet(String key) {
+ return MDC.get(key);
+ }
+
+ public static void mdcRemove(String key) {
+ MDC.remove(key);
+ }
+
+ /**
+ * Loads the RequestId/TransactionId into the MDC which it should be receiving
+ * with an each incoming REST API request. Also, configures few other request
+ * based logging fields into the MDC context.
+ *
+ * @param req
+ * @param appName
+ */
+ public void setRequestBasedDefaultsIntoGlobalLoggingContext(HttpServletRequest req, String appName) {
+ // Load the default fields
+ setGlobalLoggingContext();
+
+ // Load the request based fields
+ if (req != null) {
+
+
+ // Rest Path
+ MDC.put(MDC_SERVICE_NAME, req.getServletPath());
+
+ // Client IPAddress i.e. IPAddress of the remote host who is making
+ // this request.
+ String clientIPAddress = req.getHeader("X-FORWARDED-FOR");
+ if (clientIPAddress == null) {
+ clientIPAddress = req.getRemoteAddr();
+ }
+ }
+ }
+}
diff --git a/src/main/java/com/att/research/logging/format/AppMessages.java b/src/main/java/com/att/research/logging/format/AppMessages.java
new file mode 100644
index 0000000..a5de413
--- /dev/null
+++ b/src/main/java/com/att/research/logging/format/AppMessages.java
@@ -0,0 +1,156 @@
+/*
+ * ============LICENSE_START==========================================
+ * org.onap.music
+ * ===================================================================
+ * Copyright (c) 2017 AT&T Intellectual Property
+ * ===================================================================
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ * ============LICENSE_END=============================================
+ * ====================================================================
+ */
+
+package com.att.research.logging.format;
+
+/**
+ * @author inam
+ *
+ */
+public enum AppMessages {
+
+
+
+ /*
+ * 100-199 Security/Permission Related - Authentication problems
+ * [ERR100E] Missing Information
+ * [ERR101E] Authentication error occured
+ *
+ * 200-299 Availability/Timeout Related/IO - connectivity error - connection timeout
+ * [ERR200E] Connectivity
+ * [ERR201E] Host not available
+ * [ERR202E] Error while connecting
+ * [ERR203E] IO Error has occured
+ * [ERR204E] Execution Interrupted
+ * [ERR205E] Session Expired
+ *
+ *
+ *
+ * 300-399 Data Access/Integrity Related
+ * [ERR300E] Incorrect data
+ *
+ * 400-499 - Cassandra Query Related
+ *
+ *
+ * 500-599 - Zookeepr/Locking Related
+
+ *
+ *
+ * 600 - 699 - MDBC Service Errors
+ * [ERR600E] Error initializing the MDBC
+ *
+ * 700-799 Schema Interface Type/Validation - received Pay-load checksum is
+ * invalid - received JSON is not valid
+ *
+ * 800-899 Business/Flow Processing Related - check out to service is not
+ * allowed - Roll-back is done - failed to generate heat file
+ *
+ *
+ * 900-999 Unknown Errors - Unexpected exception
+ * [ERR900E] Unexpected error occured
+ * [ERR901E] Number format exception
+ *
+ *
+ * 1000-1099 Reserved - do not use
+ *
+ */
+
+
+
+
+ MISSINGINFO("[ERR100E]", "Missing Information ","Details: NA", "Please check application credentials and/or headers"),
+ AUTHENTICATIONERROR("[ERR101E]", "Authentication error occured ","Details: NA", "Please verify application credentials"),
+
+ CONNCECTIVITYERROR("[ERR200E]"," Connectivity error","Details: NA ","Please check connectivity to external resources"),
+ HOSTUNAVAILABLE("[ERR201E]","Host not available","Details: NA","Please verify the host details"),
+ IOERROR("[ERR203E]","IO Error has occured","","Please check IO"),
+ EXECUTIONINTERRUPTED("[ERR204E]"," Execution Interrupted","",""),
+
+
+ INCORRECTDATA("[ERR300E]"," Incorrect data",""," Please verify the request payload and try again"),
+ MULTIPLERECORDS("[ERR301E]"," Multiple records found",""," Please verify the request payload and try again"),
+ ALREADYEXIST("[ERR302E]"," Record already exist",""," Please verify the request payload and try again"),
+ MISSINGDATA("[ERR300E]"," Incorrect data",""," Please verify the request payload and try again"),
+
+ QUERYERROR("[ERR400E]","Error while processing query",""," Please verify the query"),
+
+
+ UNKNOWNERROR("[ERR900E]"," Unexpected error occured",""," Please check logs for details");
+
+
+
+ ErrorTypes eType;
+ ErrorSeverity alarmSeverity;
+ ErrorSeverity errorSeverity;
+ String errorCode;
+ String errorDescription;
+ String details;
+ String resolution;
+
+
+ AppMessages(String errorCode, String errorDescription, String details,String resolution) {
+
+ this.errorCode = errorCode;
+ this.errorDescription = errorDescription;
+ this.details = details;
+ this.resolution = resolution;
+ }
+
+
+
+
+ AppMessages(ErrorTypes eType, ErrorSeverity alarmSeverity,
+ ErrorSeverity errorSeverity, String errorCode, String errorDescription, String details,
+ String resolution) {
+
+ this.eType = eType;
+ this.alarmSeverity = alarmSeverity;
+ this.errorSeverity = errorSeverity;
+ this.errorCode = errorCode;
+ this.errorDescription = errorDescription;
+ this.details = details;
+ this.resolution = resolution;
+ }
+
+ public String getDetails() {
+ return this.details;
+ }
+
+ public String getResolution() {
+ return this.resolution;
+ }
+
+ public String getErrorCode() {
+ return this.errorCode;
+ }
+
+ public String getErrorDescription() {
+ return this.errorDescription;
+ }
+
+
+
+
+
+
+
+}
diff --git a/src/main/java/com/att/research/logging/format/ErrorSeverity.java b/src/main/java/com/att/research/logging/format/ErrorSeverity.java
new file mode 100644
index 0000000..dbe3e54
--- /dev/null
+++ b/src/main/java/com/att/research/logging/format/ErrorSeverity.java
@@ -0,0 +1,37 @@
+/*
+ * ============LICENSE_START==========================================
+ * org.onap.music
+ * ===================================================================
+ * Copyright (c) 2017 AT&T Intellectual Property
+ * ===================================================================
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ * ============LICENSE_END=============================================
+ * ====================================================================
+ */
+package com.att.research.logging.format;
+
/**
 * Severity labels attached to application error/alarm messages; AppMessages in
 * this package declares fields of this type (alarmSeverity, errorSeverity).
 * Constants are self-describing. Do not reorder: declaration order fixes the
 * enum ordinals.
 *
 * @author inam
 */
public enum ErrorSeverity {
	INFO,
	WARN,
	ERROR,
	FATAL,
	CRITICAL,
	MAJOR,
	MINOR,
	NONE,
}
diff --git a/src/main/java/com/att/research/logging/format/ErrorTypes.java b/src/main/java/com/att/research/logging/format/ErrorTypes.java
new file mode 100644
index 0000000..620528d
--- /dev/null
+++ b/src/main/java/com/att/research/logging/format/ErrorTypes.java
@@ -0,0 +1,44 @@
+/*
+ * ============LICENSE_START==========================================
+ * org.onap.music
+ * ===================================================================
+ * Copyright (c) 2017 AT&T Intellectual Property
+ * ===================================================================
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ * ============LICENSE_END=============================================
+ * ====================================================================
+ */
+package com.att.research.logging.format;
+
+import com.att.eelf.i18n.EELFResolvableErrorEnum;
+
/**
 * Broad categories for application errors; AppMessages in this package declares
 * a field of this type (eType). Implements EELFResolvableErrorEnum (from
 * com.att.eelf.i18n) — presumably so constants can be resolved through EELF's
 * i18n machinery; no resolution bundle is visible here, so confirm before
 * depending on that. Do not reorder: declaration order fixes the enum ordinals.
 *
 * @author inam
 */
public enum ErrorTypes implements EELFResolvableErrorEnum {


	CONNECTIONERROR,
	SESSIONEXPIRED,
	AUTHENTICATIONERROR,
	SERVICEUNAVAILABLE,
	QUERYERROR,
	DATAERROR,
	GENERALSERVICEERROR,
	MUSICSERVICEERROR,
	LOCKINGERROR,
	UNKNOWN,

}
diff --git a/src/main/java/com/att/research/mdbc/ArchiveProcess.java b/src/main/java/com/att/research/mdbc/ArchiveProcess.java
new file mode 100644
index 0000000..f192430
--- /dev/null
+++ b/src/main/java/com/att/research/mdbc/ArchiveProcess.java
@@ -0,0 +1,43 @@
+package com.att.research.mdbc;
+
+import org.json.JSONObject;
+
+import com.att.research.mdbc.mixins.DBInterface;
+import com.att.research.mdbc.mixins.MusicInterface;
+
/**
 * Placeholder for the archival process that will take snapshots and move data
 * from the redo record into the actual tables (see TODO below). Currently it
 * only holds the two mixin handles and two private, unused propagation helpers.
 */
public class ArchiveProcess {
	// Handle to MUSIC/Cassandra; receives the propagated deletes/updates.
	// NOTE(review): no assignment is visible in this class — presumably set by a
	// subclass or injector; confirm before use.
	protected MusicInterface mi;
	// Handle to the local SQL database; used here only to look up table metadata.
	protected DBInterface dbi;

	//TODO: This is a place holder for taking snapshots and moving data from redo record into actual tables

	/**
	 * This method is called whenever there is a DELETE on a local SQL table, and should be called by the underlying databases
	 * triggering mechanism. It updates the MUSIC/Cassandra tables (both dirty bits and actual data) corresponding to the SQL DELETE.
	 * Music propagates it to the other replicas. If the local database is in the middle of a transaction, the DELETEs to MUSIC are
	 * delayed until the transaction is either committed or rolled back.
	 * @param tableName This is the table on which the select is being performed
	 * @param oldRow This is information about the row that is being deleted
	 */
	@SuppressWarnings("unused")
	private void deleteFromEntityTableInMusic(String tableName, JSONObject oldRow) {
		// Resolve table metadata locally, then delegate the actual delete to MUSIC.
		TableInfo ti = dbi.getTableInfo(tableName);
		mi.deleteFromEntityTableInMusic(ti,tableName, oldRow);
	}

	/**
	 * This method is called whenever there is an INSERT or UPDATE to a local SQL table, and should be called by the underlying databases
	 * triggering mechanism. It updates the MUSIC/Cassandra tables (both dirty bits and actual data) corresponding to the SQL write.
	 * Music propagates it to the other replicas. If the local database is in the middle of a transaction, the updates to MUSIC are
	 * delayed until the transaction is either committed or rolled back.
	 *
	 * @param tableName This is the table that has changed.
	 * @param changedRow This is information about the row that has changed, an array of objects representing the data being inserted/updated
	 */
	@SuppressWarnings("unused")
	private void updateDirtyRowAndEntityTableInMusic(String tableName, JSONObject changedRow) {
		//TODO: is this right? should we be saving updates at the client? we should leverage JDBC to handle this
		TableInfo ti = dbi.getTableInfo(tableName);
		mi.updateDirtyRowAndEntityTableInMusic(ti,tableName, changedRow);
	}
}
diff --git a/src/main/java/com/att/research/mdbc/Configuration.java b/src/main/java/com/att/research/mdbc/Configuration.java
new file mode 100644
index 0000000..23aa6af
--- /dev/null
+++ b/src/main/java/com/att/research/mdbc/Configuration.java
@@ -0,0 +1,18 @@
+package com.att.research.mdbc;
+
/**
 * Registry of the configuration property names (and their defaults) used to
 * configure MDBC. Constants-only holder; never instantiated.
 */
public class Configuration {
	/** The property name to use to connect to cassandra. */
	public static final String KEY_CASSANDRA_URL = "CASSANDRA_URL";
	/** The property name to use to enable/disable the MusicSqlManager entirely. */
	public static final String KEY_DISABLED = "disabled";
	/** The property name to use to select the DB 'mixin'. */
	public static final String KEY_DB_MIXIN_NAME = "MDBC_DB_MIXIN";
	/** The property name to use to select the MUSIC 'mixin'. */
	public static final String KEY_MUSIC_MIXIN_NAME = "MDBC_MUSIC_MIXIN";
	/** The name of the default mixin to use for the DBInterface. */
	public static final String DB_MIXIN_DEFAULT = "mysql";//"h2";
	/** The name of the default mixin to use for the MusicInterface. */
	public static final String MUSIC_MIXIN_DEFAULT = "cassandra2";//"cassandra2";
	/** Default cassandra url. */
	public static final String CASSANDRA_URL_DEFAULT = "localhost";//"cassandra2";

	/** Utility class: suppress the implicit public constructor. */
	private Configuration() {
		throw new UnsupportedOperationException("Configuration is a constants holder");
	}
}
diff --git a/src/main/java/com/att/research/mdbc/DatabaseOperations.java b/src/main/java/com/att/research/mdbc/DatabaseOperations.java
new file mode 100644
index 0000000..406152e
--- /dev/null
+++ b/src/main/java/com/att/research/mdbc/DatabaseOperations.java
@@ -0,0 +1,443 @@
+package com.att.research.mdbc;
+
+import com.att.research.exceptions.MDBCServiceException;
+import com.att.research.logging.EELFLoggerDelegate;
+import org.onap.music.datastore.PreparedQueryObject;
+import org.onap.music.exceptions.MusicLockingException;
+import org.onap.music.exceptions.MusicQueryException;
+import org.onap.music.exceptions.MusicServiceException;
+import org.onap.music.main.MusicPureCassaCore;
+import org.onap.music.main.ResultType;
+import org.onap.music.main.ReturnType;
+
+import java.util.*;
+
+public class DatabaseOperations {
+ private static EELFLoggerDelegate logger = EELFLoggerDelegate.getLogger(DatabaseOperations.class);
+ /**
+ * This functions is used to generate cassandra uuid
+ * @return a random UUID that can be used for fields of type uuid
+ */
+ public static String generateUniqueKey() {
+ return UUID.randomUUID().toString();
+ }
+
+ /**
+ * This functions returns the primary key used to managed a specific row in the TableToPartition tables in Music
+ * @param namespace namespace where the TableToPartition resides
+ * @param tableToPartitionTableName name of the tableToPartition table
+ * @param tableName name of the application table that is being added to the system
+ * @return primary key to be used with MUSIC
+ */
+ public static String getTableToPartitionPrimaryKey(String namespace, String tableToPartitionTableName, String tableName){
+ return namespace+"."+tableToPartitionTableName+"."+tableName;
+ }
+
+ /**
+ * Create a new row for a table, with not assigned partition
+ * @param namespace namespace where the TableToPartition resides
+ * @param tableToPartitionTableName name of the tableToPartition table
+ * @param tableName name of the application table that is being added to the system
+ * @param lockId if the lock for this key is already hold, this is the id of that lock. May be <code>null</code> if lock is not hold for the corresponding key
+ */
+ public static void createNewTableToPartitionRow(String namespace, String tableToPartitionTableName, String tableName,String lockId) throws MDBCServiceException {
+ final String primaryKey = getTableToPartitionPrimaryKey(namespace,tableToPartitionTableName,tableName);
+ StringBuilder insert = new StringBuilder("INSERT INTO ")
+ .append(namespace)
+ .append('.')
+ .append(tableToPartitionTableName)
+ .append(" (tablename) VALUES ")
+ .append("('")
+ .append(tableName)
+ .append("');");
+ PreparedQueryObject query = new PreparedQueryObject();
+ query.appendQueryString(insert.toString());
+ try {
+ executedLockedPut(namespace,tableToPartitionTableName,tableName,query,lockId,null);
+ } catch (MDBCServiceException e) {
+ logger.error("Initialization error: Failure to create new row table to partition table ");
+ throw new MDBCServiceException("Initialization error: Failure to create new row table to partition table");
+ }
+ }
+
+ /**
+ * Update the partition to which a table belongs
+ * @param namespace namespace where the TableToPartition resides
+ * @param tableToPartitionTableName name of the tableToPartition table
+ * @param table name of the application table that is being added to the system
+ * @param newPartition partition to which the application table is assigned
+ * @param lockId if the lock for this key is already hold, this is the id of that lock. May be <code>null</code> if lock is not hold for the corresponding key
+ */
+ public static void updateTableToPartition(String namespace, String tableToPartitionTableName, String table, String newPartition, String lockId) throws MDBCServiceException {
+ final String primaryKey = getTableToPartitionPrimaryKey(namespace,tableToPartitionTableName,table);
+ PreparedQueryObject query = new PreparedQueryObject();
+ StringBuilder update = new StringBuilder("UPDATE ")
+ .append(namespace)
+ .append('.')
+ .append(tableToPartitionTableName)
+ .append(" SET previouspartitions = previouspartitions + {")
+ .append(newPartition)
+ .append("}, partition = " )
+ .append(newPartition)
+ .append(" WHERE tablename = '")
+ .append(table)
+ .append("';");
+ query.appendQueryString(update.toString());
+ try {
+ executedLockedPut(namespace,tableToPartitionTableName,table,query,lockId,null);
+ } catch (MDBCServiceException e) {
+ logger.error("Initialization error: Failure to update a row in table to partition table ");
+ throw new MDBCServiceException("Initialization error: Failure to update a row in table to partition table");
+ }
+ }
+
+
+ public static String getPartitionInformationPrimaryKey(String namespace, String partitionInformationTable, String partition){
+ return namespace+"."+partitionInformationTable+"."+partition;
+ }
+
+ /**
+ * Create a new row, when a new partition is initialized
+ * @param namespace namespace to which the partition info table resides in Cassandra
+ * @param partitionInfoTableName name of the partition information table
+ * @param replicationFactor associated replicated factor for the partition (max of all the tables)
+ * @param tables list of tables that are within this partitoin
+ * @param lockId if the lock for this key is already hold, this is the id of that lock. May be <code>null</code> if lock is not hold for the corresponding key
+ * @return the partition uuid associated to the new row
+ */
+ public static String createPartitionInfoRow(String namespace, String partitionInfoTableName, int replicationFactor, List<String> tables, String lockId) throws MDBCServiceException {
+ String id = generateUniqueKey();
+ final String primaryKey = getPartitionInformationPrimaryKey(namespace,partitionInfoTableName,id);
+ StringBuilder insert = new StringBuilder("INSERT INTO ")
+ .append(namespace)
+ .append('.')
+ .append(partitionInfoTableName)
+ .append(" (partition,replicationfactor,tables) VALUES ")
+ .append("(")
+ .append(id)
+ .append(",")
+ .append(replicationFactor)
+ .append(",{");
+ boolean first = true;
+ for(String table: tables){
+ if(!first){
+ insert.append(",");
+ }
+ first = false;
+ insert.append("'")
+ .append(table)
+ .append("'");
+ }
+ insert.append("});");
+ PreparedQueryObject query = new PreparedQueryObject();
+ query.appendQueryString(insert.toString());
+ try {
+ executedLockedPut(namespace,partitionInfoTableName,id,query,lockId,null);
+ } catch (MDBCServiceException e) {
+ logger.error("Initialization error: Failure to create new row in partition information table ");
+ throw new MDBCServiceException("Initialization error: Failure to create new row in partition information table");
+ }
+ return id;
+ }
+
+ /**
+ * Update the TIT row and table that currently handles the partition
+ * @param namespace namespace to which the partition info table resides in Cassandra
+ * @param partitionInfoTableName name of the partition information table
+ * @param partitionId row identifier for the partition being modiefd
+ * @param newTitRow new TIT row and table that are handling this partition
+ * @param owner owner that is handling the new tit row (url to the corresponding etdb nodej
+ * @param lockId if the lock for this key is already hold, this is the id of that lock. May be <code>null</code> if lock is not hold for the corresponding key
+ */
+ public static void updateRedoRow(String namespace, String partitionInfoTableName, String partitionId, RedoRow newTitRow, String owner, String lockId) throws MDBCServiceException {
+ final String primaryKey = getTableToPartitionPrimaryKey(namespace,partitionInfoTableName,partitionId);
+ PreparedQueryObject query = new PreparedQueryObject();
+ String newOwner = (owner==null)?"":owner;
+ StringBuilder update = new StringBuilder("UPDATE ")
+ .append(namespace)
+ .append('.')
+ .append(partitionInfoTableName)
+ .append(" SET currentowner='")
+ .append(newOwner)
+ .append("', latesttitindex=")
+ .append(newTitRow.getRedoRowIndex())
+ .append(", latesttittable='")
+ .append(newTitRow.getRedoTableName())
+ .append("' WHERE partition = ")
+ .append(partitionId)
+ .append(";");
+ query.appendQueryString(update.toString());
+ try {
+ executedLockedPut(namespace,partitionInfoTableName,partitionId,query,lockId,null);
+ } catch (MDBCServiceException e) {
+ logger.error("Initialization error: Failure to add new owner to partition in music table ");
+ throw new MDBCServiceException("Initialization error:Failure to add new owner to partition in music table ");
+ }
+ }
+
+ /**
+ * Create the first row in the history of the redo history table for a given partition
+ * @param namespace namespace to which the redo history table resides in Cassandra
+ * @param redoHistoryTableName name of the table where the row is being created
+ * @param firstTitRow first tit associated to the partition
+ * @param partitionId partition for which a history is created
+ */
+ public static void createRedoHistoryBeginRow(String namespace, String redoHistoryTableName, RedoRow firstTitRow, String partitionId, String lockId) throws MDBCServiceException {
+ createRedoHistoryRow(namespace,redoHistoryTableName,firstTitRow,partitionId, new ArrayList<>(),lockId);
+ }
+
+ /**
+ * Create a new row on the history for a given partition
+ * @param namespace namespace to which the redo history table resides in Cassandra
+ * @param redoHistoryTableName name of the table where the row is being created
+ * @param currentRow new tit row associated to the partition
+ * @param partitionId partition for which a history is created
+ * @param parentsRows parent tit rows associated to this partition
+ */
+ public static void createRedoHistoryRow(String namespace, String redoHistoryTableName, RedoRow currentRow, String partitionId, List<RedoRow> parentsRows, String lockId) throws MDBCServiceException {
+ final String primaryKey = partitionId+"-"+currentRow.getRedoTableName()+"-"+currentRow.getRedoRowIndex();
+ StringBuilder insert = new StringBuilder("INSERT INTO ")
+ .append(namespace)
+ .append('.')
+ .append(redoHistoryTableName)
+ .append(" (partition,redotable,redoindex,previousredo) VALUES ")
+ .append("(")
+ .append(partitionId)
+ .append(",'")
+ .append(currentRow.getRedoTableName())
+ .append("',")
+ .append(currentRow.getRedoRowIndex())
+ .append(",{");
+ boolean first = true;
+ for(RedoRow parent: parentsRows){
+ if(!first){
+ insert.append(",");
+ }
+ else{
+ first = false;
+ }
+ insert.append("('")
+ .append(parent.getRedoTableName())
+ .append("',")
+ .append(parent.getRedoRowIndex())
+ .append("),");
+ }
+ insert.append("});");
+ PreparedQueryObject query = new PreparedQueryObject();
+ query.appendQueryString(insert.toString());
+ try {
+ executedLockedPut(namespace,redoHistoryTableName,primaryKey,query,lockId,null);
+ } catch (MDBCServiceException e) {
+ logger.error("Initialization error: Failure to add new row to redo history");
+ throw new MDBCServiceException("Initialization error:Failure to add new row to redo history");
+ }
+ }
+
+ /**
+ * Creates a new empty tit row
+ * @param namespace namespace where the tit table is located
+ * @param titTableName name of the corresponding tit table where the new row is added
+ * @param partitionId partition to which the redo log is hold
+ * @return uuid associated to the new row
+ */
+ public static String CreateEmptyTitRow(String namespace, String titTableName, String partitionId, String lockId) throws MDBCServiceException {
+ String id = generateUniqueKey();
+ StringBuilder insert = new StringBuilder("INSERT INTO ")
+ .append(namespace)
+ .append('.')
+ .append(titTableName)
+ .append(" (id,applied,latestapplied,partition,redo) VALUES ")
+ .append("(")
+ .append(id)
+ .append(",false,-1,")
+ .append(partitionId)
+ .append(",[]);");
+ PreparedQueryObject query = new PreparedQueryObject();
+ query.appendQueryString(insert.toString());
+ try {
+ executedLockedPut(namespace,titTableName,id,query,lockId,null);
+ } catch (MDBCServiceException e) {
+ logger.error("Initialization error: Failure to add new row to transaction information");
+ throw new MDBCServiceException("Initialization error:Failure to add new row to transaction information");
+ }
+ return id;
+ }
+
+ /**
+ * This function creates the TransactionInformation table. It contain information related
+ * to the transactions happening in a given partition.
+ * * The schema of the table is
+ * * Id, uiid.
+ * * Partition, uuid id of the partition
+ * * LatestApplied, int indicates which values from the redologtable wast the last to be applied to the data tables
+ * * Applied: boolean, indicates if all the values in this redo log table where already applied to data tables
+ * * Redo: list of uiids associated to the Redo Records Table
+ *
+ */
+ public static void CreateTransactionInformationTable( String musicNamespace, String transactionInformationTableName) throws MDBCServiceException {
+ String tableName = transactionInformationTableName;
+ String priKey = "id";
+ StringBuilder fields = new StringBuilder();
+ fields.append("id uuid, ");
+ fields.append("partition uuid, ");
+ fields.append("latestapplied int, ");
+ fields.append("applied boolean, ");
+ //TODO: Frozen is only needed for old versions of cassandra, please update correspondingly
+ fields.append("redo list<frozen<tuple<text,tuple<text,varint>>>> ");
+ String cql = String.format("CREATE TABLE IF NOT EXISTS %s.%s (%s, PRIMARY KEY (%s));", musicNamespace, tableName, fields, priKey);
+ try {
+ executeMusicWriteQuery(musicNamespace,tableName,cql);
+ } catch (MDBCServiceException e) {
+ logger.error("Initialization error: Failure to create transaction information table");
+ throw(e);
+ }
+ }
+
+ /**
+ * This function creates the RedoRecords table. It contain information related to each transaction committed
+ * * LeaseId: id associated with the lease, text
+ * * LeaseCounter: transaction number under this lease, bigint \TODO this may need to be a varint later
+ * * TransactionDigest: text that contains all the changes in the transaction
+ */
+ public static void CreateRedoRecordsTable(int redoTableNumber, String musicNamespace, String redoRecordTableName) throws MDBCServiceException {
+ String tableName = redoRecordTableName;
+ if(redoTableNumber >= 0) {
+ StringBuilder table = new StringBuilder();
+ table.append(tableName);
+ table.append("-");
+ table.append(Integer.toString(redoTableNumber));
+ tableName=table.toString();
+ }
+ String priKey = "leaseid,leasecounter";
+ StringBuilder fields = new StringBuilder();
+ fields.append("leaseid text, ");
+ fields.append("leasecounter varint, ");
+ fields.append("transactiondigest text ");//notice lack of ','
+ String cql = String.format("CREATE TABLE IF NOT EXISTS %s.%s (%s, PRIMARY KEY (%s));", musicNamespace, tableName, fields, priKey);
+ try {
+ executeMusicWriteQuery(musicNamespace,tableName,cql);
+ } catch (MDBCServiceException e) {
+ logger.error("Initialization error: Failure to create redo records table");
+ throw(e);
+ }
+ }
+
+ /**
+ * This function creates the Table To Partition table. It contain information related to
+ */
+ public static void CreateTableToPartitionTable(String musicNamespace, String tableToPartitionTableName) throws MDBCServiceException {
+ String tableName = tableToPartitionTableName;
+ String priKey = "tablename";
+ StringBuilder fields = new StringBuilder();
+ fields.append("tablename text, ");
+ fields.append("partition uuid, ");
+ fields.append("previouspartitions set<uuid> ");
+ String cql = String.format("CREATE TABLE IF NOT EXISTS %s.%s (%s, PRIMARY KEY (%s));", musicNamespace, tableName, fields, priKey);
+ try {
+ executeMusicWriteQuery(musicNamespace,tableName,cql);
+ } catch (MDBCServiceException e) {
+ logger.error("Initialization error: Failure to create table to partition table");
+ throw(e);
+ }
+ }
+
+ public static void CreatePartitionInfoTable(String musicNamespace, String partitionInformationTableName) throws MDBCServiceException {
+ String tableName = partitionInformationTableName;
+ String priKey = "partition";
+ StringBuilder fields = new StringBuilder();
+ fields.append("partition uuid, ");
+ fields.append("latesttittable text, ");
+ fields.append("latesttitindex uuid, ");
+ fields.append("tables set<text>, ");
+ fields.append("replicationfactor int, ");
+ fields.append("currentowner text");
+ String cql = String.format("CREATE TABLE IF NOT EXISTS %s.%s (%s, PRIMARY KEY (%s));", musicNamespace, tableName, fields, priKey);
+ try {
+ executeMusicWriteQuery(musicNamespace,tableName,cql);
+ } catch (MDBCServiceException e) {
+ logger.error("Initialization error: Failure to create partition information table");
+ throw(e);
+ }
+ }
+
+ public static void CreateRedoHistoryTable(String musicNamespace, String redoHistoryTableName) throws MDBCServiceException {
+ String tableName = redoHistoryTableName;
+ String priKey = "partition,redotable,redoindex";
+ StringBuilder fields = new StringBuilder();
+ fields.append("partition uuid, ");
+ fields.append("redotable text, ");
+ fields.append("redoindex uuid, ");
+ //TODO: Frozen is only needed for old versions of cassandra, please update correspondingly
+ fields.append("previousredo set<frozen<tuple<text,uuid>>>");
+ String cql = String.format("CREATE TABLE IF NOT EXISTS %s.%s (%s, PRIMARY KEY (%s));", musicNamespace, tableName, fields, priKey);
+ try {
+ executeMusicWriteQuery(musicNamespace,tableName,cql);
+ } catch (MDBCServiceException e) {
+ logger.error("Initialization error: Failure to create redo history table");
+ throw(e);
+ }
+ }
+
+ /**
+ * This method executes a write query in Music
+ * @param cql the CQL to be sent to Cassandra
+ */
+ protected static void executeMusicWriteQuery(String keyspace, String table, String cql) throws MDBCServiceException {
+ PreparedQueryObject pQueryObject = new PreparedQueryObject();
+ pQueryObject.appendQueryString(cql);
+ ResultType rt = null;
+ try {
+ rt = MusicPureCassaCore.createTable(keyspace,table,pQueryObject,"critical");
+ } catch (MusicServiceException e) {
+ e.printStackTrace();
+ }
+ if (rt.getResult().toLowerCase().equals("failure")) {
+ throw new MDBCServiceException("Music eventual put failed");
+ }
+ }
+
+ protected static void executedLockedPut(String namespace, String tableName, String primaryKeyWithoutDomain, PreparedQueryObject queryObject, String lockId, MusicPureCassaCore.Condition conditionInfo) throws MDBCServiceException {
+ ReturnType rt ;
+ if(lockId==null) {
+ try {
+ rt = MusicPureCassaCore.atomicPut(namespace, tableName, primaryKeyWithoutDomain, queryObject, conditionInfo);
+ } catch (MusicLockingException e) {
+ logger.error("Music locked put failed");
+ throw new MDBCServiceException("Music locked put failed");
+ } catch (MusicServiceException e) {
+ logger.error("Music service fail: Music locked put failed");
+ throw new MDBCServiceException("Music service fail: Music locked put failed");
+ } catch (MusicQueryException e) {
+ logger.error("Music query fail: locked put failed");
+ throw new MDBCServiceException("Music query fail: Music locked put failed");
+ }
+ }
+ else {
+ rt = MusicPureCassaCore.criticalPut(namespace, tableName, primaryKeyWithoutDomain, queryObject, lockId, conditionInfo);
+ }
+ if (rt.getResult().getResult().toLowerCase().equals("failure")) {
+ throw new MDBCServiceException("Music locked put failed");
+ }
+ }
+
+ public static void createNamespace(String namespace, int replicationFactor) throws MDBCServiceException {
+ Map<String,Object> replicationInfo = new HashMap<String, Object>();
+ replicationInfo.put("'class'", "'SimpleStrategy'");
+ replicationInfo.put("'replication_factor'", replicationFactor);
+
+ PreparedQueryObject queryObject = new PreparedQueryObject();
+ queryObject.appendQueryString(
+ "CREATE KEYSPACE " + namespace + " WITH REPLICATION = " + replicationInfo.toString().replaceAll("=", ":"));
+
+ try {
+ MusicPureCassaCore.nonKeyRelatedPut(queryObject, "critical");
+ } catch (MusicServiceException e) {
+ if (e.getMessage().equals("Keyspace "+namespace+" already exists")) {
+ // ignore
+ } else {
+ logger.error("Error creating namespace: "+namespace);
+ throw new MDBCServiceException("Error creating namespace: "+namespace+". Internal error:"+e.getErrorMessage());
+ }
+ }
+ }
+}
diff --git a/src/main/java/com/att/research/mdbc/DatabasePartition.java b/src/main/java/com/att/research/mdbc/DatabasePartition.java
new file mode 100644
index 0000000..6046801
--- /dev/null
+++ b/src/main/java/com/att/research/mdbc/DatabasePartition.java
@@ -0,0 +1,190 @@
+package com.att.research.mdbc;
+
+import java.io.BufferedReader;
+import java.io.FileNotFoundException;
+import java.io.FileReader;
+import java.util.HashSet;
+import java.util.Set;
+
+import com.att.research.logging.EELFLoggerDelegate;
+import com.att.research.mdbc.mixins.CassandraMixin;
+import com.google.gson.Gson;
+import com.google.gson.GsonBuilder;
+
+/**
+ * A database range contain information about what ranges should be hosted in the current MDBC instance
+ * A database range with an empty map, is supposed to contain all the tables in Music.
+ * @author Enrique Saurez
+ */
+public class DatabasePartition {
+ private transient static EELFLoggerDelegate logger = EELFLoggerDelegate.getLogger(DatabasePartition.class);
+
+ private String transactionInformationTable;//Table that currently contains the REDO log for this partition
+ private String transactionInformationIndex;//Index that can be obtained either from
+ private String redoRecordsTable;
+ private String partitionId;
+ private String lockId;
+ protected Set<Range> ranges;
+
+ /**
+ * Each range represents a partition of the database, a database partition is a union of this partitions.
+ * The only requirement is that the ranges are not overlapping.
+ */
+
+ public DatabasePartition() {
+ ranges = new HashSet<>();
+ }
+
+ public DatabasePartition(Set<Range> knownRanges, String titIndex, String titTable, String partitionId, String lockId, String redoRecordsTable) {
+ if(knownRanges != null) {
+ ranges = knownRanges;
+ }
+ else {
+ ranges = new HashSet<>();
+ }
+
+ if(redoRecordsTable != null) {
+ this.setRedoRecordsTable(redoRecordsTable);
+ }
+ else{
+ this.setRedoRecordsTable("");
+ }
+
+ if(titIndex != null) {
+ this.setTransactionInformationIndex(titIndex);
+ }
+ else {
+ this.setTransactionInformationIndex("");
+ }
+
+ if(titTable != null) {
+ this.setTransactionInformationTable(titTable);
+ }
+ else {
+ this.setTransactionInformationTable("");
+ }
+
+ if(partitionId != null) {
+ this.setPartitionId(partitionId);
+ }
+ else {
+ this.setPartitionId("");
+ }
+
+ if(lockId != null) {
+ this.setLockId(lockId);
+ }
+ else {
+ this.setLockId("");
+ }
+ }
+
+ public String getTransactionInformationTable() {
+ return transactionInformationTable;
+ }
+
+ public void setTransactionInformationTable(String transactionInformationTable) {
+ this.transactionInformationTable = transactionInformationTable;
+ }
+
+ public String getTransactionInformationIndex() {
+ return transactionInformationIndex;
+ }
+
+ public void setTransactionInformationIndex(String transactionInformationIndex) {
+ this.transactionInformationIndex = transactionInformationIndex;
+ }
+
+ /**
+ * Add a new range to the ones own by the local MDBC
+ * @param newRange range that is being added
+ * @throws IllegalArgumentException
+ */
+ public synchronized void addNewRange(Range newRange) {
+ //Check overlap
+ for(Range r : ranges) {
+ if(r.overlaps(newRange)) {
+ throw new IllegalArgumentException("Range is already contain by a previous range");
+ }
+ }
+ if(!ranges.contains(newRange)) {
+ ranges.add(newRange);
+ }
+ }
+
+ /**
+ * Delete a range that is being modified
+ * @param rangeToDel limits of the range
+ */
+ public synchronized void deleteRange(Range rangeToDel) {
+ if(!ranges.contains(rangeToDel)) {
+ logger.error(EELFLoggerDelegate.errorLogger,"Range doesn't exist");
+ throw new IllegalArgumentException("Invalid table");
+ }
+ ranges.remove(rangeToDel);
+ }
+
+ /**
+ * Get all the ranges that are currently owned
+ * @return ranges
+ */
+ public synchronized Range[] getSnapshot() {
+ return (Range[]) ranges.toArray();
+ }
+
+ /**
+ * Serialize the ranges
+ * @return serialized ranges
+ */
+ public String toJson() {
+ GsonBuilder builder = new GsonBuilder();
+ builder.setPrettyPrinting().serializeNulls();;
+ Gson gson = builder.create();
+ return gson.toJson(this);
+ }
+
+ /**
+ * Function to obtain the configuration
+ * @param filepath path to the database range
+ * @return a new object of type DatabaseRange
+ * @throws FileNotFoundException
+ */
+
+ public static DatabasePartition readJsonFromFile( String filepath) throws FileNotFoundException {
+ BufferedReader br;
+ try {
+ br = new BufferedReader(
+ new FileReader(filepath));
+ } catch (FileNotFoundException e) {
+ logger.error(EELFLoggerDelegate.errorLogger,"File was not found when reading json"+e);
+ throw e;
+ }
+ Gson gson = new Gson();
+ DatabasePartition range = gson.fromJson(br, DatabasePartition.class);
+ return range;
+ }
+
+ public String getPartitionId() {
+ return partitionId;
+ }
+
+ public void setPartitionId(String partitionId) {
+ this.partitionId = partitionId;
+ }
+
+ public String getLockId() {
+ return lockId;
+ }
+
+ public void setLockId(String lockId) {
+ this.lockId = lockId;
+ }
+
+ public String getRedoRecordsTable() {
+ return redoRecordsTable;
+ }
+
+ public void setRedoRecordsTable(String redoRecordsTable) {
+ this.redoRecordsTable = redoRecordsTable;
+ }
+}
diff --git a/src/main/java/com/att/research/mdbc/LockId.java b/src/main/java/com/att/research/mdbc/LockId.java
new file mode 100644
index 0000000..a1de21a
--- /dev/null
+++ b/src/main/java/com/att/research/mdbc/LockId.java
@@ -0,0 +1,46 @@
+package com.att.research.mdbc;
+
/**
 * Identifies a MUSIC lock: the primary key of the locked row, the domain (qualifier) it belongs
 * to, and the lock reference returned by MUSIC. The lock reference is normalized so it is never
 * <code>null</code> — a missing reference is stored as the empty string.
 */
public class LockId {
	private String primaryKey;
	private String domain;
	private String lockReference;

	/**
	 * @param primaryKey primary key of the locked row
	 * @param domain domain/qualifier the key belongs to
	 * @param lockReference lock reference from MUSIC; may be <code>null</code>, stored as ""
	 */
	public LockId(String primaryKey, String domain, String lockReference){
		this.primaryKey = primaryKey;
		this.domain = domain;
		this.lockReference = (lockReference == null) ? "" : lockReference;
	}

	/** @return the fully qualified lock key in the form <code>domain.primaryKey</code> */
	public String getFullyQualifiedLockKey(){
		return this.domain+"."+this.primaryKey;
	}

	public String getPrimaryKey() {
		return primaryKey;
	}

	public void setPrimaryKey(String primaryKey) {
		this.primaryKey = primaryKey;
	}

	public String getDomain() {
		return domain;
	}

	public void setDomain(String domain) {
		this.domain = domain;
	}

	public String getLockReference() {
		return lockReference;
	}

	/**
	 * Sets the lock reference; <code>null</code> is normalized to "" for consistency with the
	 * constructor (previously a null slipped through here).
	 */
	public void setLockReference(String lockReference) {
		this.lockReference = (lockReference == null) ? "" : lockReference;
	}
}
diff --git a/src/main/java/com/att/research/mdbc/MDBCUtils.java b/src/main/java/com/att/research/mdbc/MDBCUtils.java
new file mode 100644
index 0000000..411be8d
--- /dev/null
+++ b/src/main/java/com/att/research/mdbc/MDBCUtils.java
@@ -0,0 +1,62 @@
+package com.att.research.mdbc;
+
+import java.io.*;
+import java.util.Base64;
+
+import com.att.research.logging.EELFLoggerDelegate;
+import com.att.research.logging.format.AppMessages;
+import com.att.research.logging.format.ErrorSeverity;
+import com.att.research.logging.format.ErrorTypes;
+import org.json.JSONObject;
+
+public class MDBCUtils {
+ /** Read the object from Base64 string. */
+ public static Object fromString( String s ) throws IOException ,
+ ClassNotFoundException {
+ byte [] data = Base64.getDecoder().decode( s );
+ ObjectInputStream ois = new ObjectInputStream(
+ new ByteArrayInputStream( data ) );
+ Object o = ois.readObject();
+ ois.close();
+ return o;
+ }
+
+ /** Write the object to a Base64 string. */
+ public static String toString( Serializable o ) throws IOException {
+ //TODO We may want to also compress beside serialize
+ ByteArrayOutputStream baos = new ByteArrayOutputStream();
+ try {
+ ObjectOutputStream oos = new ObjectOutputStream(baos);
+ oos.writeObject(o);
+ oos.close();
+ return Base64.getEncoder().encodeToString(baos.toByteArray());
+ }
+ finally{
+ baos.close();
+ }
+ }
+
+ public static String toString( JSONObject o) throws IOException {
+ //TODO We may want to also compress beside serialize
+ ByteArrayOutputStream baos = new ByteArrayOutputStream();
+ ObjectOutputStream oos = new ObjectOutputStream( baos );
+ oos.writeObject( o );
+ oos.close();
+ return Base64.getEncoder().encodeToString(baos.toByteArray());
+ }
+
+ public static void saveToFile(String serializedContent, String filename, EELFLoggerDelegate logger) throws IOException {
+ try (PrintWriter fout = new PrintWriter(filename)) {
+ fout.println(serializedContent);
+ } catch (FileNotFoundException e) {
+ if(logger!=null){
+ logger.error(EELFLoggerDelegate.errorLogger, e.getMessage(), AppMessages.IOERROR, ErrorTypes.UNKNOWN, ErrorSeverity.CRITICAL);
+ }
+ else {
+ e.printStackTrace();
+ }
+ throw e;
+ }
+ }
+
+}
diff --git a/src/main/java/com/att/research/mdbc/MdbcCallableStatement.java b/src/main/java/com/att/research/mdbc/MdbcCallableStatement.java
new file mode 100644
index 0000000..fefce21
--- /dev/null
+++ b/src/main/java/com/att/research/mdbc/MdbcCallableStatement.java
@@ -0,0 +1,738 @@
+package com.att.research.mdbc;
+
+import java.io.InputStream;
+import java.io.Reader;
+import java.math.BigDecimal;
+import java.net.URL;
+import java.sql.Array;
+import java.sql.Blob;
+import java.sql.CallableStatement;
+import java.sql.Clob;
+import java.sql.Date;
+import java.sql.NClob;
+import java.sql.ParameterMetaData;
+import java.sql.Ref;
+import java.sql.RowId;
+import java.sql.SQLException;
+import java.sql.SQLXML;
+import java.sql.Statement;
+import java.sql.Time;
+import java.sql.Timestamp;
+import java.util.Calendar;
+import java.util.Map;
+
+import com.att.research.logging.EELFLoggerDelegate;
+
+/**
+ * MdbcCallableStatement is a proxy CallableStatement that front ends statements from the underlying JDBC driver.
+ * It passes all operations through, and invokes the MusicSqlManager when there is the possibility that database
+ * tables have been created or dropped.
+ *
+ * @author Robert Eby
+ */
+public class MdbcCallableStatement extends MdbcPreparedStatement implements CallableStatement {
+    // Per-instance logger; the inherited field 'stmt' (from MdbcStatement via MdbcPreparedStatement)
+    // is assumed to actually be a CallableStatement — every method below casts it. TODO confirm
+    // callers never construct this wrapper around a plain Statement.
+    private EELFLoggerDelegate logger = EELFLoggerDelegate.getLogger(MdbcCallableStatement.class);
+    @SuppressWarnings("unused")
+    private static final String DATASTAX_PREFIX = "com.datastax.driver";
+
+    /** Wrap an existing driver statement; delegates to the MdbcPreparedStatement constructor. */
+    public MdbcCallableStatement(Statement stmt, MusicSqlManager m) {
+        super(stmt, m);
+    }
+
+    /** Wrap an existing driver statement, remembering the SQL text it was prepared from. */
+    public MdbcCallableStatement(Statement stmt, String sql, MusicSqlManager mgr) {
+        super(stmt, sql, mgr);
+    }
+
+    @Override
+    public <T> T unwrap(Class<T> iface) throws SQLException {
+        // NOTE(review): logged at error level although unwrap is a routine operation — consider debug
+        logger.error(EELFLoggerDelegate.errorLogger, "proxystatement unwrap: " + iface.getName());
+        return stmt.unwrap(iface);
+    }
+
+    @Override
+    public boolean isWrapperFor(Class<?> iface) throws SQLException {
+        // NOTE(review): same error-level logging of a routine call as unwrap above
+        logger.error(EELFLoggerDelegate.errorLogger, "proxystatement isWrapperFor: " + iface.getName());
+        return stmt.isWrapperFor(iface);
+    }
+
+    // ------------------------------------------------------------------
+    // Pure pass-through parameter setters (by index). Each simply casts the
+    // inherited 'stmt' to CallableStatement and forwards the call unchanged.
+    // ------------------------------------------------------------------
+
+    @Override
+    public void setTimestamp(int parameterIndex, Timestamp x, Calendar cal) throws SQLException {
+        ((CallableStatement)stmt).setTimestamp(parameterIndex, x, cal);
+    }
+
+    @Override
+    public void setNull(int parameterIndex, int sqlType, String typeName) throws SQLException {
+        ((CallableStatement)stmt).setNull(parameterIndex, sqlType, typeName);
+    }
+
+    @Override
+    public void setURL(int parameterIndex, URL x) throws SQLException {
+        ((CallableStatement)stmt).setURL(parameterIndex, x);
+    }
+
+    @Override
+    public ParameterMetaData getParameterMetaData() throws SQLException {
+        return ((CallableStatement)stmt).getParameterMetaData();
+    }
+
+    @Override
+    public void setRowId(int parameterIndex, RowId x) throws SQLException {
+        ((CallableStatement)stmt).setRowId(parameterIndex, x);
+    }
+
+    @Override
+    public void setNString(int parameterIndex, String value) throws SQLException {
+        ((CallableStatement)stmt).setNString(parameterIndex, value);
+    }
+
+    @Override
+    public void setNCharacterStream(int parameterIndex, Reader value, long length) throws SQLException {
+        ((CallableStatement)stmt).setNCharacterStream(parameterIndex, value, length);
+    }
+
+    @Override
+    public void setNClob(int parameterIndex, NClob value) throws SQLException {
+        ((CallableStatement)stmt).setNClob(parameterIndex, value);
+    }
+
+    @Override
+    public void setClob(int parameterIndex, Reader reader, long length) throws SQLException {
+        ((CallableStatement)stmt).setClob(parameterIndex, reader, length);
+    }
+
+    @Override
+    public void setBlob(int parameterIndex, InputStream inputStream, long length) throws SQLException {
+        ((CallableStatement)stmt).setBlob(parameterIndex, inputStream, length);
+    }
+
+    @Override
+    public void setNClob(int parameterIndex, Reader reader, long length) throws SQLException {
+        ((CallableStatement)stmt).setNClob(parameterIndex, reader, length);
+    }
+
+    @Override
+    public void setSQLXML(int parameterIndex, SQLXML xmlObject) throws SQLException {
+        ((CallableStatement)stmt).setSQLXML(parameterIndex, xmlObject);
+    }
+
+    @Override
+    public void setObject(int parameterIndex, Object x, int targetSqlType, int scaleOrLength) throws SQLException {
+        ((CallableStatement)stmt).setObject(parameterIndex, x, targetSqlType, scaleOrLength);
+    }
+
+    @Override
+    public void setAsciiStream(int parameterIndex, InputStream x, long length) throws SQLException {
+        ((CallableStatement)stmt).setAsciiStream(parameterIndex, x, length);
+    }
+
+    @Override
+    public void setBinaryStream(int parameterIndex, InputStream x, long length) throws SQLException {
+        ((CallableStatement)stmt).setBinaryStream(parameterIndex, x, length);
+    }
+
+    @Override
+    public void setCharacterStream(int parameterIndex, Reader reader, long length) throws SQLException {
+        ((CallableStatement)stmt).setCharacterStream(parameterIndex, reader, length);
+    }
+
+    @Override
+    public void setAsciiStream(int parameterIndex, InputStream x) throws SQLException {
+        ((CallableStatement)stmt).setAsciiStream(parameterIndex, x);
+    }
+
+    @Override
+    public void setBinaryStream(int parameterIndex, InputStream x) throws SQLException {
+        ((CallableStatement)stmt).setBinaryStream(parameterIndex, x);
+    }
+
+    @Override
+    public void setCharacterStream(int parameterIndex, Reader reader) throws SQLException {
+        ((CallableStatement)stmt).setCharacterStream(parameterIndex, reader);
+    }
+
+    @Override
+    public void setNCharacterStream(int parameterIndex, Reader value) throws SQLException {
+        ((CallableStatement)stmt).setNCharacterStream(parameterIndex, value);
+    }
+
+    @Override
+    public void setClob(int parameterIndex, Reader reader) throws SQLException {
+        ((CallableStatement)stmt).setClob(parameterIndex, reader);
+    }
+
+    @Override
+    public void setBlob(int parameterIndex, InputStream inputStream) throws SQLException {
+        ((CallableStatement)stmt).setBlob(parameterIndex, inputStream);
+    }
+
+    @Override
+    public void setNClob(int parameterIndex, Reader reader) throws SQLException {
+        ((CallableStatement)stmt).setNClob(parameterIndex, reader);
+    }
+
+    @Override
+    public void registerOutParameter(int parameterIndex, int sqlType) throws SQLException {
+        ((CallableStatement)stmt).registerOutParameter(parameterIndex, sqlType);
+    }
+
+    @Override
+    public void registerOutParameter(int parameterIndex, int sqlType, int scale) throws SQLException {
+        ((CallableStatement)stmt).registerOutParameter(parameterIndex, sqlType, scale);
+    }
+
+    @Override
+    public boolean wasNull() throws SQLException {
+        return ((CallableStatement)stmt).wasNull();
+    }
+
+    // ------------------------------------------------------------------
+    // Pure pass-through OUT-parameter getters (by index), plus the
+    // registerOutParameter overloads by name. All forward unchanged.
+    // ------------------------------------------------------------------
+
+    @Override
+    public String getString(int parameterIndex) throws SQLException {
+        return ((CallableStatement)stmt).getString(parameterIndex);
+    }
+
+    @Override
+    public boolean getBoolean(int parameterIndex) throws SQLException {
+        return ((CallableStatement)stmt).getBoolean(parameterIndex);
+    }
+
+    @Override
+    public byte getByte(int parameterIndex) throws SQLException {
+        return ((CallableStatement)stmt).getByte(parameterIndex);
+    }
+
+    @Override
+    public short getShort(int parameterIndex) throws SQLException {
+        return ((CallableStatement)stmt).getShort(parameterIndex);
+    }
+
+    @Override
+    public int getInt(int parameterIndex) throws SQLException {
+        return ((CallableStatement)stmt).getInt(parameterIndex);
+    }
+
+    @Override
+    public long getLong(int parameterIndex) throws SQLException {
+        return ((CallableStatement)stmt).getLong(parameterIndex);
+    }
+
+    @Override
+    public float getFloat(int parameterIndex) throws SQLException {
+        return ((CallableStatement)stmt).getFloat(parameterIndex);
+    }
+
+    @Override
+    public double getDouble(int parameterIndex) throws SQLException {
+        return ((CallableStatement)stmt).getDouble(parameterIndex);
+    }
+
+    // Deprecated in JDBC but required by the interface; forwarded as-is.
+    @SuppressWarnings("deprecation")
+    @Override
+    public BigDecimal getBigDecimal(int parameterIndex, int scale) throws SQLException {
+        return ((CallableStatement)stmt).getBigDecimal(parameterIndex, scale);
+    }
+
+    @Override
+    public byte[] getBytes(int parameterIndex) throws SQLException {
+        return ((CallableStatement)stmt).getBytes(parameterIndex);
+    }
+
+    @Override
+    public Date getDate(int parameterIndex) throws SQLException {
+        return ((CallableStatement)stmt).getDate(parameterIndex);
+    }
+
+    @Override
+    public Time getTime(int parameterIndex) throws SQLException {
+        return ((CallableStatement)stmt).getTime(parameterIndex);
+    }
+
+    @Override
+    public Timestamp getTimestamp(int parameterIndex) throws SQLException {
+        return ((CallableStatement)stmt).getTimestamp(parameterIndex);
+    }
+
+    @Override
+    public Object getObject(int parameterIndex) throws SQLException {
+        return ((CallableStatement)stmt).getObject(parameterIndex);
+    }
+
+    @Override
+    public BigDecimal getBigDecimal(int parameterIndex) throws SQLException {
+        return ((CallableStatement)stmt).getBigDecimal(parameterIndex);
+    }
+
+    @Override
+    public Object getObject(int parameterIndex, Map<String, Class<?>> map) throws SQLException {
+        return ((CallableStatement)stmt).getObject(parameterIndex, map);
+    }
+
+    @Override
+    public Ref getRef(int parameterIndex) throws SQLException {
+        return ((CallableStatement)stmt).getRef(parameterIndex);
+    }
+
+    @Override
+    public Blob getBlob(int parameterIndex) throws SQLException {
+        return ((CallableStatement)stmt).getBlob(parameterIndex);
+    }
+
+    @Override
+    public Clob getClob(int parameterIndex) throws SQLException {
+        return ((CallableStatement)stmt).getClob(parameterIndex);
+    }
+
+    @Override
+    public Array getArray(int parameterIndex) throws SQLException {
+        return ((CallableStatement)stmt).getArray(parameterIndex);
+    }
+
+    @Override
+    public Date getDate(int parameterIndex, Calendar cal) throws SQLException {
+        return ((CallableStatement)stmt).getDate(parameterIndex, cal);
+    }
+
+    @Override
+    public Time getTime(int parameterIndex, Calendar cal) throws SQLException {
+        return ((CallableStatement)stmt).getTime(parameterIndex, cal);
+    }
+
+    @Override
+    public Timestamp getTimestamp(int parameterIndex, Calendar cal) throws SQLException {
+        return ((CallableStatement)stmt).getTimestamp(parameterIndex, cal);
+    }
+
+    @Override
+    public void registerOutParameter(int parameterIndex, int sqlType, String typeName) throws SQLException {
+        ((CallableStatement)stmt).registerOutParameter(parameterIndex, sqlType, typeName);
+    }
+
+    @Override
+    public void registerOutParameter(String parameterName, int sqlType) throws SQLException {
+        ((CallableStatement)stmt).registerOutParameter(parameterName, sqlType);
+    }
+
+    @Override
+    public void registerOutParameter(String parameterName, int sqlType, int scale) throws SQLException {
+        ((CallableStatement)stmt).registerOutParameter(parameterName, sqlType, scale);
+    }
+
+    @Override
+    public void registerOutParameter(String parameterName, int sqlType, String typeName) throws SQLException {
+        ((CallableStatement)stmt).registerOutParameter(parameterName, sqlType, typeName);
+    }
+
+    @Override
+    public URL getURL(int parameterIndex) throws SQLException {
+        return ((CallableStatement)stmt).getURL(parameterIndex);
+    }
+
+    // ------------------------------------------------------------------
+    // Pure pass-through parameter setters (by name). All forward unchanged.
+    // ------------------------------------------------------------------
+
+    @Override
+    public void setURL(String parameterName, URL val) throws SQLException {
+        ((CallableStatement)stmt).setURL(parameterName, val);
+    }
+
+    @Override
+    public void setNull(String parameterName, int sqlType) throws SQLException {
+        ((CallableStatement)stmt).setNull(parameterName, sqlType);
+    }
+
+    @Override
+    public void setBoolean(String parameterName, boolean x) throws SQLException {
+        ((CallableStatement)stmt).setBoolean(parameterName, x);
+    }
+
+    @Override
+    public void setByte(String parameterName, byte x) throws SQLException {
+        ((CallableStatement)stmt).setByte(parameterName, x);
+    }
+
+    @Override
+    public void setShort(String parameterName, short x) throws SQLException {
+        ((CallableStatement)stmt).setShort(parameterName, x);
+    }
+
+    @Override
+    public void setInt(String parameterName, int x) throws SQLException {
+        ((CallableStatement)stmt).setInt(parameterName, x);
+    }
+
+    @Override
+    public void setLong(String parameterName, long x) throws SQLException {
+        ((CallableStatement)stmt).setLong(parameterName, x);
+    }
+
+    @Override
+    public void setFloat(String parameterName, float x) throws SQLException {
+        ((CallableStatement)stmt).setFloat(parameterName, x);
+    }
+
+    @Override
+    public void setDouble(String parameterName, double x) throws SQLException {
+        ((CallableStatement)stmt).setDouble(parameterName, x);
+    }
+
+    @Override
+    public void setBigDecimal(String parameterName, BigDecimal x) throws SQLException {
+        ((CallableStatement)stmt).setBigDecimal(parameterName, x);
+    }
+
+    @Override
+    public void setString(String parameterName, String x) throws SQLException {
+        ((CallableStatement)stmt).setString(parameterName, x);
+    }
+
+    @Override
+    public void setBytes(String parameterName, byte[] x) throws SQLException {
+        ((CallableStatement)stmt).setBytes(parameterName, x);
+    }
+
+    @Override
+    public void setDate(String parameterName, Date x) throws SQLException {
+        ((CallableStatement)stmt).setDate(parameterName, x);
+    }
+
+    @Override
+    public void setTime(String parameterName, Time x) throws SQLException {
+        ((CallableStatement)stmt).setTime(parameterName, x);
+    }
+
+    @Override
+    public void setTimestamp(String parameterName, Timestamp x) throws SQLException {
+        ((CallableStatement)stmt).setTimestamp(parameterName, x);
+    }
+
+    @Override
+    public void setAsciiStream(String parameterName, InputStream x, int length) throws SQLException {
+        ((CallableStatement)stmt).setAsciiStream(parameterName, x, length);
+    }
+
+    @Override
+    public void setBinaryStream(String parameterName, InputStream x, int length) throws SQLException {
+        ((CallableStatement)stmt).setBinaryStream(parameterName, x, length);
+    }
+
+    @Override
+    public void setObject(String parameterName, Object x, int targetSqlType, int scale) throws SQLException {
+        ((CallableStatement)stmt).setObject(parameterName, x, targetSqlType, scale);
+    }
+
+    @Override
+    public void setObject(String parameterName, Object x, int targetSqlType) throws SQLException {
+        ((CallableStatement)stmt).setObject(parameterName, x, targetSqlType);
+    }
+
+    @Override
+    public void setObject(String parameterName, Object x) throws SQLException {
+        ((CallableStatement)stmt).setObject(parameterName, x);
+    }
+
+    @Override
+    public void setCharacterStream(String parameterName, Reader reader, int length) throws SQLException {
+        ((CallableStatement)stmt).setCharacterStream(parameterName, reader, length);
+    }
+
+    @Override
+    public void setDate(String parameterName, Date x, Calendar cal) throws SQLException {
+        ((CallableStatement)stmt).setDate(parameterName, x, cal);
+    }
+
+    @Override
+    public void setTime(String parameterName, Time x, Calendar cal) throws SQLException {
+        ((CallableStatement)stmt).setTime(parameterName, x, cal);
+    }
+
+    @Override
+    public void setTimestamp(String parameterName, Timestamp x, Calendar cal) throws SQLException {
+        ((CallableStatement)stmt).setTimestamp(parameterName, x, cal);
+    }
+
+    @Override
+    public void setNull(String parameterName, int sqlType, String typeName) throws SQLException {
+        ((CallableStatement)stmt).setNull(parameterName, sqlType, typeName);
+    }
+
+    // ------------------------------------------------------------------
+    // Pure pass-through OUT-parameter getters (by name), plus getRowId.
+    // All forward unchanged.
+    // ------------------------------------------------------------------
+
+    @Override
+    public String getString(String parameterName) throws SQLException {
+        return ((CallableStatement)stmt).getString(parameterName);
+    }
+
+    @Override
+    public boolean getBoolean(String parameterName) throws SQLException {
+        return ((CallableStatement)stmt).getBoolean(parameterName);
+    }
+
+    @Override
+    public byte getByte(String parameterName) throws SQLException {
+        return ((CallableStatement)stmt).getByte(parameterName);
+    }
+
+    @Override
+    public short getShort(String parameterName) throws SQLException {
+        return ((CallableStatement)stmt).getShort(parameterName);
+    }
+
+    @Override
+    public int getInt(String parameterName) throws SQLException {
+        return ((CallableStatement)stmt).getInt(parameterName);
+    }
+
+    @Override
+    public long getLong(String parameterName) throws SQLException {
+        return ((CallableStatement)stmt).getLong(parameterName);
+    }
+
+    @Override
+    public float getFloat(String parameterName) throws SQLException {
+        return ((CallableStatement)stmt).getFloat(parameterName);
+    }
+
+    @Override
+    public double getDouble(String parameterName) throws SQLException {
+        return ((CallableStatement)stmt).getDouble(parameterName);
+    }
+
+    @Override
+    public byte[] getBytes(String parameterName) throws SQLException {
+        return ((CallableStatement)stmt).getBytes(parameterName);
+    }
+
+    @Override
+    public Date getDate(String parameterName) throws SQLException {
+        return ((CallableStatement)stmt).getDate(parameterName);
+    }
+
+    @Override
+    public Time getTime(String parameterName) throws SQLException {
+        return ((CallableStatement)stmt).getTime(parameterName);
+    }
+
+    @Override
+    public Timestamp getTimestamp(String parameterName) throws SQLException {
+        return ((CallableStatement)stmt).getTimestamp(parameterName);
+    }
+
+    @Override
+    public Object getObject(String parameterName) throws SQLException {
+        return ((CallableStatement)stmt).getObject(parameterName);
+    }
+
+    @Override
+    public BigDecimal getBigDecimal(String parameterName) throws SQLException {
+        return ((CallableStatement)stmt).getBigDecimal(parameterName);
+    }
+
+    @Override
+    public Object getObject(String parameterName, Map<String, Class<?>> map) throws SQLException {
+        return ((CallableStatement)stmt).getObject(parameterName, map);
+    }
+
+    @Override
+    public Ref getRef(String parameterName) throws SQLException {
+        return ((CallableStatement)stmt).getRef(parameterName);
+    }
+
+    @Override
+    public Blob getBlob(String parameterName) throws SQLException {
+        return ((CallableStatement)stmt).getBlob(parameterName);
+    }
+
+    @Override
+    public Clob getClob(String parameterName) throws SQLException {
+        return ((CallableStatement)stmt).getClob(parameterName);
+    }
+
+    @Override
+    public Array getArray(String parameterName) throws SQLException {
+        return ((CallableStatement)stmt).getArray(parameterName);
+    }
+
+    @Override
+    public Date getDate(String parameterName, Calendar cal) throws SQLException {
+        return ((CallableStatement)stmt).getDate(parameterName, cal);
+    }
+
+    @Override
+    public Time getTime(String parameterName, Calendar cal) throws SQLException {
+        return ((CallableStatement)stmt).getTime(parameterName, cal);
+    }
+
+    @Override
+    public Timestamp getTimestamp(String parameterName, Calendar cal) throws SQLException {
+        return ((CallableStatement)stmt).getTimestamp(parameterName, cal);
+    }
+
+    @Override
+    public URL getURL(String parameterName) throws SQLException {
+        return ((CallableStatement)stmt).getURL(parameterName);
+    }
+
+    @Override
+    public RowId getRowId(int parameterIndex) throws SQLException {
+        return ((CallableStatement)stmt).getRowId(parameterIndex);
+    }
+
+    @Override
+    public RowId getRowId(String parameterName) throws SQLException {
+        return ((CallableStatement)stmt).getRowId(parameterName);
+    }
+
+    // ------------------------------------------------------------------
+    // Remaining pass-through LOB/stream setters and getters. All forward
+    // unchanged to the wrapped CallableStatement.
+    // ------------------------------------------------------------------
+
+    @Override
+    public void setRowId(String parameterName, RowId x) throws SQLException {
+        ((CallableStatement)stmt).setRowId(parameterName, x);
+    }
+
+    @Override
+    public void setNString(String parameterName, String value) throws SQLException {
+        ((CallableStatement)stmt).setNString(parameterName, value);
+    }
+
+    @Override
+    public void setNCharacterStream(String parameterName, Reader value, long length) throws SQLException {
+        ((CallableStatement)stmt).setNCharacterStream(parameterName, value, length);
+    }
+
+    @Override
+    public void setNClob(String parameterName, NClob value) throws SQLException {
+        ((CallableStatement)stmt).setNClob(parameterName, value);
+    }
+
+    @Override
+    public void setClob(String parameterName, Reader reader, long length) throws SQLException {
+        ((CallableStatement)stmt).setClob(parameterName, reader, length);
+    }
+
+    @Override
+    public void setBlob(String parameterName, InputStream inputStream, long length) throws SQLException {
+        ((CallableStatement)stmt).setBlob(parameterName, inputStream, length);
+    }
+
+    @Override
+    public void setNClob(String parameterName, Reader reader, long length) throws SQLException {
+        ((CallableStatement)stmt).setNClob(parameterName, reader, length);
+    }
+
+    @Override
+    public NClob getNClob(int parameterIndex) throws SQLException {
+        return ((CallableStatement)stmt).getNClob(parameterIndex);
+    }
+
+    @Override
+    public NClob getNClob(String parameterName) throws SQLException {
+        return ((CallableStatement)stmt).getNClob(parameterName);
+    }
+
+    @Override
+    public void setSQLXML(String parameterName, SQLXML xmlObject) throws SQLException {
+        ((CallableStatement)stmt).setSQLXML(parameterName, xmlObject);
+    }
+
+    @Override
+    public SQLXML getSQLXML(int parameterIndex) throws SQLException {
+        return ((CallableStatement)stmt).getSQLXML(parameterIndex);
+    }
+
+    @Override
+    public SQLXML getSQLXML(String parameterName) throws SQLException {
+        return ((CallableStatement)stmt).getSQLXML(parameterName);
+    }
+
+    @Override
+    public String getNString(int parameterIndex) throws SQLException {
+        return ((CallableStatement)stmt).getNString(parameterIndex);
+    }
+
+    @Override
+    public String getNString(String parameterName) throws SQLException {
+        return ((CallableStatement)stmt).getNString(parameterName);
+    }
+
+    @Override
+    public Reader getNCharacterStream(int parameterIndex) throws SQLException {
+        return ((CallableStatement)stmt).getNCharacterStream(parameterIndex);
+    }
+
+    @Override
+    public Reader getNCharacterStream(String parameterName) throws SQLException {
+        return ((CallableStatement)stmt).getNCharacterStream(parameterName);
+    }
+
+    @Override
+    public Reader getCharacterStream(int parameterIndex) throws SQLException {
+        return ((CallableStatement)stmt).getCharacterStream(parameterIndex);
+    }
+
+    @Override
+    public Reader getCharacterStream(String parameterName) throws SQLException {
+        return ((CallableStatement)stmt).getCharacterStream(parameterName);
+    }
+
+    @Override
+    public void setBlob(String parameterName, Blob x) throws SQLException {
+        ((CallableStatement)stmt).setBlob(parameterName, x);
+    }
+
+    @Override
+    public void setClob(String parameterName, Clob x) throws SQLException {
+        ((CallableStatement)stmt).setClob(parameterName, x);
+    }
+
+    @Override
+    public void setAsciiStream(String parameterName, InputStream x, long length) throws SQLException {
+        ((CallableStatement)stmt).setAsciiStream(parameterName, x, length);
+    }
+
+    @Override
+    public void setBinaryStream(String parameterName, InputStream x, long length) throws SQLException {
+        ((CallableStatement)stmt).setBinaryStream(parameterName, x, length);
+    }
+
+    @Override
+    public void setCharacterStream(String parameterName, Reader reader, long length) throws SQLException {
+        ((CallableStatement)stmt).setCharacterStream(parameterName, reader, length);
+    }
+
+    @Override
+    public void setAsciiStream(String parameterName, InputStream x) throws SQLException {
+        ((CallableStatement)stmt).setAsciiStream(parameterName, x);
+    }
+
+    @Override
+    public void setBinaryStream(String parameterName, InputStream x) throws SQLException {
+        ((CallableStatement)stmt).setBinaryStream(parameterName, x);
+    }
+
+    @Override
+    public void setCharacterStream(String parameterName, Reader reader) throws SQLException {
+        ((CallableStatement)stmt).setCharacterStream(parameterName, reader);
+    }
+
+    @Override
+    public void setNCharacterStream(String parameterName, Reader value) throws SQLException {
+        ((CallableStatement)stmt).setNCharacterStream(parameterName, value);
+    }
+
+    @Override
+    public void setClob(String parameterName, Reader reader) throws SQLException {
+        ((CallableStatement)stmt).setClob(parameterName, reader);
+    }
+
+    @Override
+    public void setBlob(String parameterName, InputStream inputStream) throws SQLException {
+        ((CallableStatement)stmt).setBlob(parameterName, inputStream);
+    }
+
+    @Override
+    public void setNClob(String parameterName, Reader reader) throws SQLException {
+        ((CallableStatement)stmt).setNClob(parameterName, reader);
+    }
+
+    @Override
+    public <T> T getObject(int parameterIndex, Class<T> type) throws SQLException {
+        return ((CallableStatement)stmt).getObject(parameterIndex, type);
+    }
+
+    @Override
+    public <T> T getObject(String parameterName, Class<T> type) throws SQLException {
+        return ((CallableStatement)stmt).getObject(parameterName, type);
+    }
+
+}
diff --git a/src/main/java/com/att/research/mdbc/MdbcConnection.java b/src/main/java/com/att/research/mdbc/MdbcConnection.java
new file mode 100644
index 0000000..d471522
--- /dev/null
+++ b/src/main/java/com/att/research/mdbc/MdbcConnection.java
@@ -0,0 +1,419 @@
+package com.att.research.mdbc;
+
+import java.sql.Array;
+import java.sql.Blob;
+import java.sql.CallableStatement;
+import java.sql.Clob;
+import java.sql.Connection;
+import java.sql.DatabaseMetaData;
+import java.sql.NClob;
+import java.sql.PreparedStatement;
+import java.sql.SQLClientInfoException;
+import java.sql.SQLException;
+import java.sql.SQLWarning;
+import java.sql.SQLXML;
+import java.sql.Savepoint;
+import java.sql.Statement;
+import java.sql.Struct;
+import java.util.Map;
+import java.util.Properties;
+import java.util.concurrent.Executor;
+
+import com.att.research.exceptions.MDBCServiceException;
+import com.att.research.exceptions.QueryException;
+import com.att.research.logging.EELFLoggerDelegate;
+import com.att.research.logging.format.AppMessages;
+import com.att.research.logging.format.ErrorSeverity;
+import com.att.research.logging.format.ErrorTypes;
+import com.att.research.mdbc.mixins.MusicInterface;
+import com.att.research.mdbc.mixins.TxCommitProgress;
+
+
+/**
+ * MdbcConnection is a proxy to a JDBC driver Connection. It uses the MusicSqlManager to copy
+ * data to and from Cassandra and the underlying JDBC database as needed. It will notify the underlying
+ * MusicSqlManager of any calls to <code>commit(), rollback()</code> or <code>setAutoCommit()</code>.
+ * Otherwise it just forwards all requests to the underlying Connection of the 'real' database.
+ *
+ * @author Robert Eby
+ */
+public class MdbcConnection implements Connection {
+ private static EELFLoggerDelegate logger = EELFLoggerDelegate.getLogger(MdbcConnection.class);
+
+ private final String id; // This is the transaction id, assigned to this connection. There is no need to change the id, if connection is reused
+ private final Connection conn; // the JDBC Connection to the actual underlying database
+ private final MusicSqlManager mgr; // there should be one MusicSqlManager in use per Connection
+ private final TxCommitProgress progressKeeper;
+ private final DatabasePartition partition;
+
+ public MdbcConnection(String id, String url, Connection c, Properties info, MusicInterface mi, TxCommitProgress progressKeeper, DatabasePartition partition) throws MDBCServiceException {
+ this.id = id;
+ if (c == null) {
+ throw new MDBCServiceException("Connection is null");
+ }
+ this.conn = c;
+ try {
+ this.mgr = new MusicSqlManager(url, c, info, mi);
+ } catch (MDBCServiceException e) {
+ logger.error("Failure in creating Music SQL Manager");
+ logger.error(EELFLoggerDelegate.errorLogger, e.getMessage(), AppMessages.QUERYERROR, ErrorTypes.QUERYERROR, ErrorSeverity.CRITICAL);
+ throw e;
+ }
+ try {
+ this.mgr.setAutoCommit(c.getAutoCommit(),null,null,null);
+ } catch (SQLException e) {
+ logger.error("Failure in autocommit");
+ logger.error(EELFLoggerDelegate.errorLogger, e.getMessage(), AppMessages.QUERYERROR, ErrorTypes.QUERYERROR, ErrorSeverity.CRITICAL);
+ }
+
+ // Verify the tables in MUSIC match the tables in the database
+ // and create triggers on any tables that need them
+ //mgr.synchronizeTableData();
+ if ( mgr != null ) try {
+ mgr.synchronizeTables();
+ } catch (QueryException e) {
+ logger.error("Error syncrhonizing tables");
+ logger.error(EELFLoggerDelegate.errorLogger, e.getMessage(), AppMessages.QUERYERROR, ErrorTypes.QUERYERROR, ErrorSeverity.CRITICAL);
+ }
+ else {
+ logger.error(EELFLoggerDelegate.errorLogger, "MusicSqlManager was not correctly created", AppMessages.UNKNOWNERROR, ErrorTypes.UNKNOWN, ErrorSeverity.FATAL);
+ throw new MDBCServiceException("Music SQL Manager object is null or invalid");
+ }
+ this.progressKeeper = progressKeeper;
+ this.partition = partition;
+ logger.debug("Mdbc connection created with id: "+id);
+ }
+
+    @Override
+    public <T> T unwrap(Class<T> iface) throws SQLException {
+        // NOTE(review): error-level logging of a routine unwrap call — consider debug
+        logger.error(EELFLoggerDelegate.errorLogger, "proxyconn unwrap: " + iface.getName());
+        return conn.unwrap(iface);
+    }
+
+    @Override
+    public boolean isWrapperFor(Class<?> iface) throws SQLException {
+        logger.error(EELFLoggerDelegate.errorLogger, "proxystatement iswrapperfor: " + iface.getName());
+        return conn.isWrapperFor(iface);
+    }
+
+    @Override
+    public Statement createStatement() throws SQLException {
+        // NOTE(review): wraps a plain Statement in MdbcCallableStatement; calling any
+        // CallableStatement-specific method on the result would ClassCastException —
+        // confirm callers only use it through the Statement interface.
+        return new MdbcCallableStatement(conn.createStatement(), mgr);
+    }
+
+    @Override
+    public PreparedStatement prepareStatement(String sql) throws SQLException {
+        //TODO: grab the sql call from here and all the other preparestatement calls
+        return new MdbcPreparedStatement(conn.prepareStatement(sql), sql, mgr);
+    }
+
+    @Override
+    public CallableStatement prepareCall(String sql) throws SQLException {
+        return new MdbcCallableStatement(conn.prepareCall(sql), mgr);
+    }
+
+    @Override
+    public String nativeSQL(String sql) throws SQLException {
+        // Pure pass-through to the underlying driver
+        return conn.nativeSQL(sql);
+    }
+
+    /**
+     * Change the autocommit mode. When the mode actually changes, this acts as a commit point:
+     * MUSIC is committed first, then the underlying SQL connection, with progressKeeper
+     * recording each phase so a partially-committed transaction can be detected.
+     *
+     * @param autoCommit the new autocommit mode
+     * @throws SQLException if the MUSIC commit or the driver call fails
+     */
+    @Override
+    public void setAutoCommit(boolean autoCommit) throws SQLException {
+        boolean b = conn.getAutoCommit();
+        if (b != autoCommit) {
+            if(progressKeeper!=null) progressKeeper.commitRequested(id);
+            try {
+                // Phase 1: push the transaction into MUSIC before touching the SQL side
+                mgr.setAutoCommit(autoCommit,id,progressKeeper,partition);
+                if(progressKeeper!=null)
+                    progressKeeper.setMusicDone(id);
+            } catch (MDBCServiceException e) {
+                logger.error(EELFLoggerDelegate.errorLogger, "Commit to music failed", AppMessages.UNKNOWNERROR, ErrorTypes.UNKNOWN, ErrorSeverity.FATAL);
+                throw new SQLException("Failure committing to MUSIC"); // fixed typo: was "commiting"
+            }
+            // Phase 2: commit/flip mode on the underlying SQL connection
+            conn.setAutoCommit(autoCommit);
+            if(progressKeeper!=null) {
+                progressKeeper.setSQLDone(id);
+            }
+            if(progressKeeper!=null&&progressKeeper.isComplete(id)){
+                progressKeeper.reinitializeTxProgress(id);
+            }
+        }
+    }
+
+    @Override
+    public boolean getAutoCommit() throws SQLException {
+        // Pure pass-through: report the driver's autocommit mode
+        return conn.getAutoCommit();
+    }
+
+    /**
+     * Two-phase commit of the current transaction: MUSIC first, then the underlying SQL
+     * connection, with progressKeeper recording each completed phase.
+     *
+     * @throws SQLException if the MUSIC commit or the SQL commit fails
+     */
+    @Override
+    public void commit() throws SQLException {
+        // BUGFIX: progressKeeper was dereferenced here (and at the bottom) without the
+        // null guard that every other use of it in this class has — NPE when it is null.
+        if(progressKeeper != null && progressKeeper.isComplete(id)) {
+            // Transaction already fully committed; nothing to do
+            return;
+        }
+        if(progressKeeper != null) {
+            progressKeeper.commitRequested(id);
+        }
+
+        try {
+            // Phase 1: commit to MUSIC
+            mgr.commit(id,progressKeeper,partition);
+        } catch (MDBCServiceException e) {
+            //If the commit fail, then a new commitId should be used
+            logger.error(EELFLoggerDelegate.errorLogger, "Commit to music failed", AppMessages.UNKNOWNERROR, ErrorTypes.UNKNOWN, ErrorSeverity.FATAL);
+            throw new SQLException("Failure committing to MUSIC"); // fixed typo: was "commiting"
+        }
+
+        if(progressKeeper != null) {
+            progressKeeper.setMusicDone(id);
+        }
+
+        // Phase 2: commit the underlying SQL connection
+        conn.commit();
+
+        if(progressKeeper != null) {
+            progressKeeper.setSQLDone(id);
+        }
+        //MusicMixin.releaseZKLocks(MusicMixin.currentLockMap.get(getConnID()));
+        if(progressKeeper != null && progressKeeper.isComplete(id)){
+            progressKeeper.reinitializeTxProgress(id);
+        }
+    }
+
+    /**
+     * Roll back the current transaction on both the MUSIC side and the underlying SQL
+     * connection, then reset the transaction-progress record.
+     *
+     * @throws SQLException if the driver rollback fails
+     */
+    @Override
+    public void rollback() throws SQLException {
+        mgr.rollback();
+        conn.rollback();
+        // BUGFIX: guard progressKeeper as every other method does — NPE when it is null
+        if (progressKeeper != null) {
+            progressKeeper.reinitializeTxProgress(id);
+        }
+    }
+
+    /**
+     * Close the MusicSqlManager first, then the underlying JDBC connection (if it is
+     * still open). Safe to call more than once.
+     */
+    @Override
+    public void close() throws SQLException {
+        logger.debug("Closing mdbc connection with id:"+id);
+        if (mgr != null) {
+            logger.debug("Closing mdbc manager with id:"+id);
+            mgr.close();
+        }
+        if (conn != null && !conn.isClosed()) {
+            logger.debug("Closing jdbc from mdbc with id:"+id);
+            conn.close();
+            logger.debug("Connection was closed for id:" + id);
+        }
+    }
+
+    @Override
+    public boolean isClosed() throws SQLException {
+        return conn.isClosed();
+    }
+
+ @Override
+ public DatabaseMetaData getMetaData() throws SQLException {
+ return conn.getMetaData();
+ }
+
+ @Override
+ public void setReadOnly(boolean readOnly) throws SQLException {
+ conn.setReadOnly(readOnly);
+ }
+
+ @Override
+ public boolean isReadOnly() throws SQLException {
+ return conn.isReadOnly();
+ }
+
+ @Override
+ public void setCatalog(String catalog) throws SQLException {
+ conn.setCatalog(catalog);
+ }
+
+ @Override
+ public String getCatalog() throws SQLException {
+ return conn.getCatalog();
+ }
+
+ @Override
+ public void setTransactionIsolation(int level) throws SQLException {
+ conn.setTransactionIsolation(level);
+ }
+
+ @Override
+ public int getTransactionIsolation() throws SQLException {
+ return conn.getTransactionIsolation();
+ }
+
+ @Override
+ public SQLWarning getWarnings() throws SQLException {
+ return conn.getWarnings();
+ }
+
+ @Override
+ public void clearWarnings() throws SQLException {
+ conn.clearWarnings();
+ }
+
+ @Override
+ public Statement createStatement(int resultSetType, int resultSetConcurrency) throws SQLException {
+ return new MdbcCallableStatement(conn.createStatement(resultSetType, resultSetConcurrency), mgr);
+ }
+
+ @Override
+ public PreparedStatement prepareStatement(String sql, int resultSetType, int resultSetConcurrency)
+ throws SQLException {
+ return new MdbcCallableStatement(conn.prepareStatement(sql, resultSetType, resultSetConcurrency), sql, mgr);
+ }
+
+ @Override
+ public CallableStatement prepareCall(String sql, int resultSetType, int resultSetConcurrency) throws SQLException {
+ return new MdbcCallableStatement(conn.prepareCall(sql, resultSetType, resultSetConcurrency), mgr);
+ }
+
+	// More pass-throughs: type maps, holdability, and savepoint management are
+	// handled entirely by the underlying JDBC connection.
+	@Override
+	public Map<String, Class<?>> getTypeMap() throws SQLException {
+		return conn.getTypeMap();
+	}
+
+	@Override
+	public void setTypeMap(Map<String, Class<?>> map) throws SQLException {
+		conn.setTypeMap(map);
+	}
+
+	@Override
+	public void setHoldability(int holdability) throws SQLException {
+		conn.setHoldability(holdability);
+	}
+
+	@Override
+	public int getHoldability() throws SQLException {
+		return conn.getHoldability();
+	}
+
+	@Override
+	public Savepoint setSavepoint() throws SQLException {
+		return conn.setSavepoint();
+	}
+
+	@Override
+	public Savepoint setSavepoint(String name) throws SQLException {
+		return conn.setSavepoint(name);
+	}
+
+	// NOTE(review): rollback(Savepoint) bypasses the MUSIC-side rollback done
+	// in rollback() — verify partial rollbacks keep MUSIC state consistent.
+	@Override
+	public void rollback(Savepoint savepoint) throws SQLException {
+		conn.rollback(savepoint);
+	}
+
+	@Override
+	public void releaseSavepoint(Savepoint savepoint) throws SQLException {
+		conn.releaseSavepoint(savepoint);
+	}
+
+	// Statement factories with explicit holdability / generated-key options.
+	// Each wraps the driver statement so MDBC hooks see every execution.
+	@Override
+	public Statement createStatement(int resultSetType, int resultSetConcurrency, int resultSetHoldability)
+			throws SQLException {
+		return new MdbcCallableStatement(conn.createStatement(resultSetType, resultSetConcurrency, resultSetHoldability), mgr);
+	}
+
+	@Override
+	public PreparedStatement prepareStatement(String sql, int resultSetType, int resultSetConcurrency,
+			int resultSetHoldability) throws SQLException {
+		return new MdbcCallableStatement(conn.prepareStatement(sql, resultSetType, resultSetConcurrency, resultSetHoldability), sql, mgr);
+	}
+
+	@Override
+	public CallableStatement prepareCall(String sql, int resultSetType, int resultSetConcurrency,
+			int resultSetHoldability) throws SQLException {
+		return new MdbcCallableStatement(conn.prepareCall(sql, resultSetType, resultSetConcurrency, resultSetHoldability), mgr);
+	}
+
+	// These three use MdbcPreparedStatement (not MdbcCallableStatement),
+	// matching the PreparedStatement delegate type.
+	@Override
+	public PreparedStatement prepareStatement(String sql, int autoGeneratedKeys) throws SQLException {
+		return new MdbcPreparedStatement(conn.prepareStatement(sql, autoGeneratedKeys), sql, mgr);
+	}
+
+	@Override
+	public PreparedStatement prepareStatement(String sql, int[] columnIndexes) throws SQLException {
+		return new MdbcPreparedStatement(conn.prepareStatement(sql, columnIndexes), sql, mgr);
+	}
+
+	@Override
+	public PreparedStatement prepareStatement(String sql, String[] columnNames) throws SQLException {
+		return new MdbcPreparedStatement(conn.prepareStatement(sql, columnNames), sql, mgr);
+	}
+
+	// Remaining JDBC 4.x Connection methods: straight delegation, no MDBC
+	// interception is required for these.
+	@Override
+	public Clob createClob() throws SQLException {
+		return conn.createClob();
+	}
+
+	@Override
+	public Blob createBlob() throws SQLException {
+		return conn.createBlob();
+	}
+
+	@Override
+	public NClob createNClob() throws SQLException {
+		return conn.createNClob();
+	}
+
+	@Override
+	public SQLXML createSQLXML() throws SQLException {
+		return conn.createSQLXML();
+	}
+
+	@Override
+	public boolean isValid(int timeout) throws SQLException {
+		return conn.isValid(timeout);
+	}
+
+	@Override
+	public void setClientInfo(String name, String value) throws SQLClientInfoException {
+		conn.setClientInfo(name, value);
+	}
+
+	@Override
+	public void setClientInfo(Properties properties) throws SQLClientInfoException {
+		conn.setClientInfo(properties);
+	}
+
+	@Override
+	public String getClientInfo(String name) throws SQLException {
+		return conn.getClientInfo(name);
+	}
+
+	@Override
+	public Properties getClientInfo() throws SQLException {
+		return conn.getClientInfo();
+	}
+
+	@Override
+	public Array createArrayOf(String typeName, Object[] elements) throws SQLException {
+		return conn.createArrayOf(typeName, elements);
+	}
+
+	@Override
+	public Struct createStruct(String typeName, Object[] attributes) throws SQLException {
+		return conn.createStruct(typeName, attributes);
+	}
+
+	@Override
+	public void setSchema(String schema) throws SQLException {
+		conn.setSchema(schema);
+	}
+
+	@Override
+	public String getSchema() throws SQLException {
+		return conn.getSchema();
+	}
+
+	@Override
+	public void abort(Executor executor) throws SQLException {
+		conn.abort(executor);
+	}
+
+	@Override
+	public void setNetworkTimeout(Executor executor, int milliseconds) throws SQLException {
+		conn.setNetworkTimeout(executor, milliseconds);
+	}
+
+	@Override
+	public int getNetworkTimeout() throws SQLException {
+		return conn.getNetworkTimeout();
+	}
+}
diff --git a/src/main/java/com/att/research/mdbc/MdbcPreparedStatement.java b/src/main/java/com/att/research/mdbc/MdbcPreparedStatement.java
new file mode 100644
index 0000000..d35a20a
--- /dev/null
+++ b/src/main/java/com/att/research/mdbc/MdbcPreparedStatement.java
@@ -0,0 +1,743 @@
+package com.att.research.mdbc;
+
+import java.io.InputStream;
+import java.io.Reader;
+import java.math.BigDecimal;
+import java.net.URL;
+import java.sql.Array;
+import java.sql.Blob;
+import java.sql.CallableStatement;
+import java.sql.Clob;
+import java.sql.Connection;
+import java.sql.Date;
+import java.sql.NClob;
+import java.sql.ParameterMetaData;
+import java.sql.PreparedStatement;
+import java.sql.Ref;
+import java.sql.ResultSet;
+import java.sql.ResultSetMetaData;
+import java.sql.RowId;
+import java.sql.SQLException;
+import java.sql.SQLWarning;
+import java.sql.SQLXML;
+import java.sql.Statement;
+import java.sql.Time;
+import java.sql.Timestamp;
+import java.util.Calendar;
+
+import org.apache.commons.lang3.StringUtils;
+
+import com.att.research.logging.EELFLoggerDelegate;
+
+/**
+ * MdbcPreparedStatement is a proxy PreparedStatement that front ends PreparedStatements from the underlying JDBC driver. It passes all operations through,
+ * and invokes the MusicSqlManager when there is the possibility that database tables have been created or dropped.
+ *
+ * @author Robert Eby
+ */
+public class MdbcPreparedStatement extends MdbcStatement implements PreparedStatement {
+	private EELFLoggerDelegate logger = EELFLoggerDelegate.getLogger(MdbcPreparedStatement.class);
+	private static final String DATASTAX_PREFIX = "com.datastax.driver";
+
+	final String sql;			// holds the sql statement if prepared statement
+	String[] params;			// holds the parameters if prepared statement, indexing starts at 1
+
+
+	public MdbcPreparedStatement(Statement stmt, MusicSqlManager m) {
+		super(stmt, m);
+		this.sql = null;
+		// Fixed: params was left null here, so any parameter setter that
+		// records its value (e.g. setString) threw a NullPointerException.
+		params = new String[1];
+	}
+
+	public MdbcPreparedStatement(Statement stmt, String sql, MusicSqlManager mgr) {
+		super(stmt, sql, mgr);
+		this.sql = sql;
+		//indexing starts at 1
+		params = new String[StringUtils.countMatches(sql, "?")+1];
+	}
+
+	// Wrapper-interface plumbing delegated to the underlying statement.
+	@Override
+	public <T> T unwrap(Class<T> iface) throws SQLException {
+		return stmt.unwrap(iface);
+	}
+
+	@Override
+	public boolean isWrapperFor(Class<?> iface) throws SQLException {
+		return stmt.isWrapperFor(iface);
+	}
+
+	/**
+	 * Runs the given query, surrounded by the MDBC pre/post statement hooks so
+	 * MUSIC state stays in sync; afterwards refreshes the known-table cache.
+	 * NOTE(review): datastax-driver exceptions are deliberately swallowed,
+	 * in which case the returned ResultSet is null — confirm callers handle that.
+	 */
+	@Override
+	public ResultSet executeQuery(String sql) throws SQLException {
+		logger.debug(EELFLoggerDelegate.applicationLogger,"executeQuery: "+sql);
+		ResultSet r = null;
+		try {
+			mgr.preStatementHook(sql);
+			r = stmt.executeQuery(sql);
+			mgr.postStatementHook(sql);
+			synchronizeTables(sql);
+		} catch (Exception e) {
+			String nm = e.getClass().getName();
+			logger.error(EELFLoggerDelegate.errorLogger, "executeQuery: exception "+nm);
+			if (!nm.startsWith(DATASTAX_PREFIX))
+				throw e;
+		}
+		return r;
+	}
+
+	/**
+	 * Runs the given update, surrounded by the MDBC pre/post statement hooks.
+	 * Returns 0 when a datastax-driver exception was swallowed.
+	 */
+	@Override
+	public int executeUpdate(String sql) throws SQLException {
+		logger.debug(EELFLoggerDelegate.applicationLogger,"executeUpdate: "+sql);
+
+		int n = 0;
+		try {
+			mgr.preStatementHook(sql);
+			n = stmt.executeUpdate(sql);
+			mgr.postStatementHook(sql);
+			synchronizeTables(sql);
+		} catch (Exception e) {
+			String nm = e.getClass().getName();
+			logger.error(EELFLoggerDelegate.errorLogger, "executeUpdate: exception "+nm+" "+e);
+			if (!nm.startsWith(DATASTAX_PREFIX))
+				throw e;
+		}
+		return n;
+	}
+
+	// --- Plain delegations to the wrapped java.sql.Statement ---
+	@Override
+	public void close() throws SQLException {
+		logger.debug(EELFLoggerDelegate.applicationLogger,"Statement close: ");
+		stmt.close();
+	}
+
+	@Override
+	public int getMaxFieldSize() throws SQLException {
+		logger.debug(EELFLoggerDelegate.applicationLogger,"getMaxFieldSize");
+		return stmt.getMaxFieldSize();
+	}
+
+	@Override
+	public void setMaxFieldSize(int max) throws SQLException {
+		stmt.setMaxFieldSize(max);
+	}
+
+	@Override
+	public int getMaxRows() throws SQLException {
+		return stmt.getMaxRows();
+	}
+
+	@Override
+	public void setMaxRows(int max) throws SQLException {
+		stmt.setMaxRows(max);
+	}
+
+	@Override
+	public void setEscapeProcessing(boolean enable) throws SQLException {
+		stmt.setEscapeProcessing(enable);
+	}
+
+	@Override
+	public int getQueryTimeout() throws SQLException {
+		return stmt.getQueryTimeout();
+	}
+
+	@Override
+	public void setQueryTimeout(int seconds) throws SQLException {
+		logger.debug(EELFLoggerDelegate.applicationLogger,"setQueryTimeout seconds "+ seconds);
+		stmt.setQueryTimeout(seconds);
+	}
+
+	@Override
+	public void cancel() throws SQLException {
+		stmt.cancel();
+	}
+
+	@Override
+	public SQLWarning getWarnings() throws SQLException {
+		return stmt.getWarnings();
+	}
+
+	@Override
+	public void clearWarnings() throws SQLException {
+		stmt.clearWarnings();
+	}
+
+	@Override
+	public void setCursorName(String name) throws SQLException {
+		stmt.setCursorName(name);
+	}
+
+	/**
+	 * Executes arbitrary SQL through the MDBC hooks. Datastax-driver
+	 * exceptions are swallowed (returning false); everything else rethrows.
+	 */
+	@Override
+	public boolean execute(String sql) throws SQLException {
+		logger.debug(EELFLoggerDelegate.applicationLogger,"execute: "+sql);
+		boolean b = false;
+		try {
+			mgr.preStatementHook(sql);
+			b = stmt.execute(sql);
+			mgr.postStatementHook(sql);
+			synchronizeTables(sql);
+		} catch (Exception e) {
+			String nm = e.getClass().getName();
+			logger.error(EELFLoggerDelegate.errorLogger, "execute: exception "+nm);
+			// Note: this seems to be the only call Camunda uses, so it is the only one I am fixing for now.
+			boolean ignore = nm.startsWith(DATASTAX_PREFIX);
+//			ignore |= (nm.startsWith("org.h2.jdbc.JdbcSQLException") && e.getMessage().contains("already exists"));
+			if (ignore) {
+				logger.warn("execute: exception (IGNORED) "+nm);
+			} else {
+				logger.error(EELFLoggerDelegate.errorLogger, " Exception "+nm+" "+e);
+				throw e;
+			}
+		}
+		return b;
+	}
+
+	@Override
+	public ResultSet getResultSet() throws SQLException {
+		return stmt.getResultSet();
+	}
+
+	@Override
+	public int getUpdateCount() throws SQLException {
+		return stmt.getUpdateCount();
+	}
+
+	@Override
+	public boolean getMoreResults() throws SQLException {
+		return stmt.getMoreResults();
+	}
+
+	@Override
+	public void setFetchDirection(int direction) throws SQLException {
+		stmt.setFetchDirection(direction);
+	}
+
+	@Override
+	public int getFetchDirection() throws SQLException {
+		return stmt.getFetchDirection();
+	}
+
+	@Override
+	public void setFetchSize(int rows) throws SQLException {
+		stmt.setFetchSize(rows);
+	}
+
+	@Override
+	public int getFetchSize() throws SQLException {
+		return stmt.getFetchSize();
+	}
+
+	@Override
+	public int getResultSetConcurrency() throws SQLException {
+		return stmt.getResultSetConcurrency();
+	}
+
+	@Override
+	public int getResultSetType() throws SQLException {
+		return stmt.getResultSetType();
+	}
+
+	@Override
+	public void addBatch(String sql) throws SQLException {
+		stmt.addBatch(sql);
+	}
+
+	@Override
+	public void clearBatch() throws SQLException {
+		stmt.clearBatch();
+	}
+
+	/**
+	 * Batch execution is passed straight through WITHOUT the MDBC statement
+	 * hooks (only table synchronization runs afterwards), so replicated state
+	 * may be incomplete — hence the warning logged below.
+	 */
+	@Override
+	public int[] executeBatch() throws SQLException {
+		logger.debug(EELFLoggerDelegate.applicationLogger,"executeBatch: ");
+		int[] n = null;
+		try {
+			logger.debug(EELFLoggerDelegate.applicationLogger,"executeBatch() is not supported by MDBC; your results may be incorrect as a result.");
+			n = stmt.executeBatch();
+			synchronizeTables(null);
+		} catch (Exception e) {
+			String nm = e.getClass().getName();
+			logger.error(EELFLoggerDelegate.errorLogger,"executeBatch: exception "+nm);
+			if (!nm.startsWith(DATASTAX_PREFIX))
+				throw e;
+		}
+		return n;
+	}
+
+	@Override
+	public Connection getConnection() throws SQLException {
+		return stmt.getConnection();
+	}
+
+	@Override
+	public boolean getMoreResults(int current) throws SQLException {
+		return stmt.getMoreResults(current);
+	}
+
+	@Override
+	public ResultSet getGeneratedKeys() throws SQLException {
+		return stmt.getGeneratedKeys();
+	}
+
+	// The executeUpdate/execute overloads below all follow the same pattern:
+	// preStatementHook -> driver call -> postStatementHook -> synchronizeTables,
+	// swallowing datastax-driver exceptions only.
+	@Override
+	public int executeUpdate(String sql, int autoGeneratedKeys) throws SQLException {
+		logger.debug(EELFLoggerDelegate.applicationLogger,"executeUpdate: "+sql);
+		int n = 0;
+		try {
+			mgr.preStatementHook(sql);
+			n = stmt.executeUpdate(sql, autoGeneratedKeys);
+			mgr.postStatementHook(sql);
+			synchronizeTables(sql);
+		} catch (Exception e) {
+			String nm = e.getClass().getName();
+			logger.error(EELFLoggerDelegate.errorLogger,"executeUpdate: exception "+nm);
+			if (!nm.startsWith(DATASTAX_PREFIX))
+				throw e;
+		}
+		return n;
+	}
+
+	@Override
+	public int executeUpdate(String sql, int[] columnIndexes) throws SQLException {
+		logger.debug(EELFLoggerDelegate.applicationLogger,"executeUpdate: "+sql);
+		int n = 0;
+		try {
+			mgr.preStatementHook(sql);
+			n = stmt.executeUpdate(sql, columnIndexes);
+			mgr.postStatementHook(sql);
+			synchronizeTables(sql);
+		} catch (Exception e) {
+			String nm = e.getClass().getName();
+			logger.error(EELFLoggerDelegate.errorLogger,"executeUpdate: exception "+nm);
+			if (!nm.startsWith(DATASTAX_PREFIX))
+				throw e;
+		}
+		return n;
+	}
+
+	@Override
+	public int executeUpdate(String sql, String[] columnNames) throws SQLException {
+		logger.debug(EELFLoggerDelegate.applicationLogger,"executeUpdate: "+sql);
+		int n = 0;
+		try {
+			mgr.preStatementHook(sql);
+			n = stmt.executeUpdate(sql, columnNames);
+			mgr.postStatementHook(sql);
+			synchronizeTables(sql);
+		} catch (Exception e) {
+			String nm = e.getClass().getName();
+			logger.error(EELFLoggerDelegate.errorLogger,"executeUpdate: exception "+nm);
+			if (!nm.startsWith(DATASTAX_PREFIX))
+				throw e;
+		}
+		return n;
+	}
+
+	@Override
+	public boolean execute(String sql, int autoGeneratedKeys) throws SQLException {
+		logger.debug(EELFLoggerDelegate.applicationLogger,"execute: "+sql);
+		boolean b = false;
+		try {
+			mgr.preStatementHook(sql);
+			b = stmt.execute(sql, autoGeneratedKeys);
+			mgr.postStatementHook(sql);
+			synchronizeTables(sql);
+		} catch (Exception e) {
+			String nm = e.getClass().getName();
+			logger.error(EELFLoggerDelegate.errorLogger,"execute: exception "+nm);
+			if (!nm.startsWith(DATASTAX_PREFIX))
+				throw e;
+		}
+		return b;
+	}
+
+	@Override
+	public boolean execute(String sql, int[] columnIndexes) throws SQLException {
+		logger.debug(EELFLoggerDelegate.applicationLogger,"execute: "+sql);
+		boolean b = false;
+		try {
+			mgr.preStatementHook(sql);
+			b = stmt.execute(sql, columnIndexes);
+			mgr.postStatementHook(sql);
+			synchronizeTables(sql);
+		} catch (Exception e) {
+			String nm = e.getClass().getName();
+			logger.error(EELFLoggerDelegate.errorLogger,"execute: exception "+nm);
+			if (!nm.startsWith(DATASTAX_PREFIX))
+				throw e;
+		}
+		return b;
+	}
+
+	@Override
+	public boolean execute(String sql, String[] columnNames) throws SQLException {
+		logger.debug(EELFLoggerDelegate.applicationLogger,"execute: "+sql);
+		boolean b = false;
+		try {
+			mgr.preStatementHook(sql);
+			b = stmt.execute(sql, columnNames);
+			mgr.postStatementHook(sql);
+			synchronizeTables(sql);
+		} catch (Exception e) {
+			String nm = e.getClass().getName();
+			logger.error(EELFLoggerDelegate.errorLogger,"execute: exception "+nm);
+			if (!nm.startsWith(DATASTAX_PREFIX))
+				throw e;
+		}
+		return b;
+	}
+
+	// Remaining Statement lifecycle/pooling accessors: straight delegation.
+	@Override
+	public int getResultSetHoldability() throws SQLException {
+		return stmt.getResultSetHoldability();
+	}
+
+	@Override
+	public boolean isClosed() throws SQLException {
+		return stmt.isClosed();
+	}
+
+	@Override
+	public void setPoolable(boolean poolable) throws SQLException {
+		stmt.setPoolable(poolable);
+	}
+
+	@Override
+	public boolean isPoolable() throws SQLException {
+		return stmt.isPoolable();
+	}
+
+	@Override
+	public void closeOnCompletion() throws SQLException {
+		stmt.closeOnCompletion();
+	}
+
+	@Override
+	public boolean isCloseOnCompletion() throws SQLException {
+		return stmt.isCloseOnCompletion();
+	}
+
+ @Override
+ public ResultSet executeQuery() throws SQLException {
+ logger.debug(EELFLoggerDelegate.applicationLogger,"executeQuery: "+sql);
+ ResultSet r = null;
+ try {
+ mgr.preStatementHook(sql);
+ r = ((PreparedStatement)stmt).executeQuery();;
+ mgr.postStatementHook(sql);
+ synchronizeTables(sql);
+ } catch (Exception e) {
+ e.printStackTrace();
+ String nm = e.getClass().getName();
+ logger.error(EELFLoggerDelegate.errorLogger,"executeQuery: exception "+nm);
+ if (!nm.startsWith(DATASTAX_PREFIX))
+ throw e;
+ }
+ return r;
+ }
+
+ @Override
+ public int executeUpdate() throws SQLException {
+ logger.debug(EELFLoggerDelegate.applicationLogger,"executeUpdate: "+sql);
+ int n = 0;
+ try {
+ mgr.preStatementHook(sql);
+ n = ((PreparedStatement)stmt).executeUpdate();
+ mgr.postStatementHook(sql);
+ synchronizeTables(sql);
+ } catch (Exception e) {
+ e.printStackTrace();
+ String nm = e.getClass().getName();
+ logger.error(EELFLoggerDelegate.errorLogger,"executeUpdate: exception "+nm);
+ if (!nm.startsWith(DATASTAX_PREFIX))
+ throw e;
+ }
+ return n;
+ }
+
+	// Parameter setters: delegated to the wrapped PreparedStatement.
+	// NOTE(review): only setString records its value into params[]; the other
+	// setters leave params untouched — confirm whether that is intentional.
+	@Override
+	public void setNull(int parameterIndex, int sqlType) throws SQLException {
+		((PreparedStatement)stmt).setNull(parameterIndex, sqlType);
+	}
+
+	@Override
+	public void setBoolean(int parameterIndex, boolean x) throws SQLException {
+		((PreparedStatement)stmt).setBoolean(parameterIndex, x);
+	}
+
+	@Override
+	public void setByte(int parameterIndex, byte x) throws SQLException {
+		((PreparedStatement)stmt).setByte(parameterIndex, x);
+	}
+
+	@Override
+	public void setShort(int parameterIndex, short x) throws SQLException {
+		((PreparedStatement)stmt).setShort(parameterIndex, x);
+	}
+
+	@Override
+	public void setInt(int parameterIndex, int x) throws SQLException {
+		((PreparedStatement)stmt).setInt(parameterIndex, x);
+	}
+
+	@Override
+	public void setLong(int parameterIndex, long x) throws SQLException {
+		((PreparedStatement)stmt).setLong(parameterIndex, x);
+	}
+
+	@Override
+	public void setFloat(int parameterIndex, float x) throws SQLException {
+		((PreparedStatement)stmt).setFloat(parameterIndex, x);
+	}
+
+	@Override
+	public void setDouble(int parameterIndex, double x) throws SQLException {
+		((PreparedStatement)stmt).setDouble(parameterIndex, x);
+	}
+
+	@Override
+	public void setBigDecimal(int parameterIndex, BigDecimal x) throws SQLException {
+		((PreparedStatement)stmt).setBigDecimal(parameterIndex, x);
+	}
+
+	@Override
+	public void setString(int parameterIndex, String x) throws SQLException {
+		((PreparedStatement)stmt).setString(parameterIndex, x);
+		params[parameterIndex] = x;
+	}
+
+	@Override
+	public void setBytes(int parameterIndex, byte[] x) throws SQLException {
+		((PreparedStatement)stmt).setBytes(parameterIndex, x);
+	}
+
+	@Override
+	public void setDate(int parameterIndex, Date x) throws SQLException {
+		((PreparedStatement)stmt).setDate(parameterIndex, x);
+	}
+
+	@Override
+	public void setTime(int parameterIndex, Time x) throws SQLException {
+		((PreparedStatement)stmt).setTime(parameterIndex, x);
+	}
+
+	@Override
+	public void setTimestamp(int parameterIndex, Timestamp x) throws SQLException {
+		((PreparedStatement)stmt).setTimestamp(parameterIndex, x);
+	}
+
+	@Override
+	public void setAsciiStream(int parameterIndex, InputStream x, int length) throws SQLException {
+		((PreparedStatement)stmt).setAsciiStream(parameterIndex, x, length);
+	}
+
+	@SuppressWarnings("deprecation")
+	@Override
+	public void setUnicodeStream(int parameterIndex, InputStream x, int length) throws SQLException {
+		((PreparedStatement)stmt).setUnicodeStream(parameterIndex, x, length);
+	}
+
+	@Override
+	public void setBinaryStream(int parameterIndex, InputStream x, int length) throws SQLException {
+		((PreparedStatement)stmt).setBinaryStream(parameterIndex, x, length);
+	}
+
+	@Override
+	public void clearParameters() throws SQLException {
+		((PreparedStatement)stmt).clearParameters();
+	}
+
+	@Override
+	public void setObject(int parameterIndex, Object x, int targetSqlType) throws SQLException {
+		((PreparedStatement)stmt).setObject(parameterIndex, x, targetSqlType);
+	}
+
+	@Override
+	public void setObject(int parameterIndex, Object x) throws SQLException {
+		((PreparedStatement)stmt).setObject(parameterIndex, x);
+	}
+
+ @Override
+ public boolean execute() throws SQLException {
+ logger.debug(EELFLoggerDelegate.applicationLogger,"execute: "+sql);
+ boolean b = false;
+ try {
+ mgr.preStatementHook(sql);
+ b = ((PreparedStatement)stmt).execute();
+ mgr.postStatementHook(sql);
+ synchronizeTables(sql);
+ } catch (Exception e) {
+ e.printStackTrace();
+ String nm = e.getClass().getName();
+ // Note: this seems to be the only call Camunda uses, so it is the only one I am fixing for now.
+ boolean ignore = nm.startsWith(DATASTAX_PREFIX);
+// ignore |= (nm.startsWith("org.h2.jdbc.JdbcSQLException") && e.getMessage().contains("already exists"));
+ if (ignore) {
+ logger.warn("execute: exception (IGNORED) "+nm);
+ } else {
+ logger.error(EELFLoggerDelegate.errorLogger,"execute: exception "+nm);
+ throw e;
+ }
+ }
+ return b;
+ }
+
+	// More PreparedStatement delegations (batching, LOB and reference setters).
+	@Override
+	public void addBatch() throws SQLException {
+		((PreparedStatement)stmt).addBatch();
+	}
+
+	@Override
+	public void setCharacterStream(int parameterIndex, Reader reader, int length) throws SQLException {
+		((PreparedStatement)stmt).setCharacterStream(parameterIndex, reader, length);
+	}
+
+	@Override
+	public void setRef(int parameterIndex, Ref x) throws SQLException {
+		((PreparedStatement)stmt).setRef(parameterIndex, x);
+	}
+
+	@Override
+	public void setBlob(int parameterIndex, Blob x) throws SQLException {
+		((PreparedStatement)stmt).setBlob(parameterIndex, x);
+	}
+
+	@Override
+	public void setClob(int parameterIndex, Clob x) throws SQLException {
+		((PreparedStatement)stmt).setClob(parameterIndex, x);
+	}
+
+	@Override
+	public void setArray(int parameterIndex, Array x) throws SQLException {
+		((PreparedStatement)stmt).setArray(parameterIndex, x);
+	}
+
+	@Override
+	public ResultSetMetaData getMetaData() throws SQLException {
+		return ((PreparedStatement)stmt).getMetaData();
+	}
+
+	@Override
+	public void setDate(int parameterIndex, Date x, Calendar cal) throws SQLException {
+		((PreparedStatement)stmt).setDate(parameterIndex, x, cal);
+	}
+
+	@Override
+	public void setTime(int parameterIndex, Time x, Calendar cal) throws SQLException {
+		((PreparedStatement)stmt).setTime(parameterIndex, x, cal);
+	}
+
+	// Fixed: the setters below previously cast stmt to CallableStatement, which
+	// throws ClassCastException for the common case where the delegate is a
+	// plain PreparedStatement (see Connection.prepareStatement wrapping). All
+	// of these methods are declared on PreparedStatement, so cast to that.
+	@Override
+	public void setTimestamp(int parameterIndex, Timestamp x, Calendar cal) throws SQLException {
+		((PreparedStatement)stmt).setTimestamp(parameterIndex, x, cal);
+	}
+
+	@Override
+	public void setNull(int parameterIndex, int sqlType, String typeName) throws SQLException {
+		((PreparedStatement)stmt).setNull(parameterIndex, sqlType, typeName);
+	}
+
+	@Override
+	public void setURL(int parameterIndex, URL x) throws SQLException {
+		((PreparedStatement)stmt).setURL(parameterIndex, x);
+	}
+
+	@Override
+	public ParameterMetaData getParameterMetaData() throws SQLException {
+		return ((PreparedStatement)stmt).getParameterMetaData();
+	}
+
+	@Override
+	public void setRowId(int parameterIndex, RowId x) throws SQLException {
+		((PreparedStatement)stmt).setRowId(parameterIndex, x);
+	}
+
+	@Override
+	public void setNString(int parameterIndex, String value) throws SQLException {
+		((PreparedStatement)stmt).setNString(parameterIndex, value);
+	}
+
+	@Override
+	public void setNCharacterStream(int parameterIndex, Reader value, long length) throws SQLException {
+		((PreparedStatement)stmt).setNCharacterStream(parameterIndex, value, length);
+	}
+
+	@Override
+	public void setNClob(int parameterIndex, NClob value) throws SQLException {
+		((PreparedStatement)stmt).setNClob(parameterIndex, value);
+	}
+
+	@Override
+	public void setClob(int parameterIndex, Reader reader, long length) throws SQLException {
+		((PreparedStatement)stmt).setClob(parameterIndex, reader, length);
+	}
+
+	@Override
+	public void setBlob(int parameterIndex, InputStream inputStream, long length) throws SQLException {
+		((PreparedStatement)stmt).setBlob(parameterIndex, inputStream, length);
+	}
+
+	@Override
+	public void setNClob(int parameterIndex, Reader reader, long length) throws SQLException {
+		((PreparedStatement)stmt).setNClob(parameterIndex, reader, length);
+	}
+
+	@Override
+	public void setSQLXML(int parameterIndex, SQLXML xmlObject) throws SQLException {
+		((PreparedStatement)stmt).setSQLXML(parameterIndex, xmlObject);
+	}
+
+	@Override
+	public void setObject(int parameterIndex, Object x, int targetSqlType, int scaleOrLength) throws SQLException {
+		((PreparedStatement)stmt).setObject(parameterIndex, x, targetSqlType, scaleOrLength);
+	}
+
+	@Override
+	public void setAsciiStream(int parameterIndex, InputStream x, long length) throws SQLException {
+		((PreparedStatement)stmt).setAsciiStream(parameterIndex, x, length);
+	}
+
+	@Override
+	public void setBinaryStream(int parameterIndex, InputStream x, long length) throws SQLException {
+		((PreparedStatement)stmt).setBinaryStream(parameterIndex, x, length);
+	}
+
+	@Override
+	public void setCharacterStream(int parameterIndex, Reader reader, long length) throws SQLException {
+		((PreparedStatement)stmt).setCharacterStream(parameterIndex, reader, length);
+	}
+
+	@Override
+	public void setAsciiStream(int parameterIndex, InputStream x) throws SQLException {
+		((PreparedStatement)stmt).setAsciiStream(parameterIndex, x);
+	}
+
+	@Override
+	public void setBinaryStream(int parameterIndex, InputStream x) throws SQLException {
+		((PreparedStatement)stmt).setBinaryStream(parameterIndex, x);
+	}
+
+	@Override
+	public void setCharacterStream(int parameterIndex, Reader reader) throws SQLException {
+		((PreparedStatement)stmt).setCharacterStream(parameterIndex, reader);
+	}
+
+	@Override
+	public void setNCharacterStream(int parameterIndex, Reader value) throws SQLException {
+		((PreparedStatement)stmt).setNCharacterStream(parameterIndex, value);
+	}
+
+	@Override
+	public void setClob(int parameterIndex, Reader reader) throws SQLException {
+		((PreparedStatement)stmt).setClob(parameterIndex, reader);
+	}
+
+	@Override
+	public void setBlob(int parameterIndex, InputStream inputStream) throws SQLException {
+		((PreparedStatement)stmt).setBlob(parameterIndex, inputStream);
+	}
+
+	@Override
+	public void setNClob(int parameterIndex, Reader reader) throws SQLException {
+		((PreparedStatement)stmt).setNClob(parameterIndex, reader);
+	}
+
+}
diff --git a/src/main/java/com/att/research/mdbc/MdbcServer.java b/src/main/java/com/att/research/mdbc/MdbcServer.java
new file mode 100644
index 0000000..54accaa
--- /dev/null
+++ b/src/main/java/com/att/research/mdbc/MdbcServer.java
@@ -0,0 +1,162 @@
+package com.att.research.mdbc;
+
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to you under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import com.att.research.mdbc.configurations.NodeConfiguration;
+import org.apache.calcite.avatica.remote.Driver.Serialization;
+import org.apache.calcite.avatica.remote.LocalService;
+import org.apache.calcite.avatica.server.HttpServer;
+import org.apache.calcite.avatica.util.Unsafe;
+
+import com.att.research.logging.EELFLoggerDelegate;
+import com.beust.jcommander.IStringConverter;
+import com.beust.jcommander.JCommander;
+import com.beust.jcommander.Parameter;
+
+import java.util.Locale;
+import java.util.Properties;
+
+public class MdbcServer {
+ public static final EELFLoggerDelegate LOG = EELFLoggerDelegate.getLogger(MdbcStatement.class);
+
+ @Parameter(names = { "-c", "--configuration" }, required = true,
+ description = "This is the file that contains the ranges that are assigned to this MDBC server")
+ private String configurationFile;
+
+ @Parameter(names = { "-u", "--url" }, required = true,
+ description = "JDBC driver url for the server")
+ private String url;
+
+ @Parameter(names = { "-p", "--port" }, required = true,
+ description = "Port the server should bind")
+ private int port;
+
+ @Parameter(names = { "-s", "--user" }, required = true,
+ description = "Mysql usr")
+ private String user;
+
+ @Parameter(names = { "-a", "--pass" }, required = true,
+ description = "Mysql password")
+ private String password;
+
+ final private Serialization serialization = Serialization.PROTOBUF;
+
+ @Parameter(names = { "-h", "-help", "--help" }, help = true,
+ description = "Print the help message")
+ private boolean help = false;
+
+ private NodeConfiguration config;
+ private HttpServer server;
+
+ public void start() {
+ if (null != server) {
+ LOG.error("The server was already started");
+ Unsafe.systemExit(ExitCodes.ALREADY_STARTED.ordinal());
+ return;
+ }
+
+ try {
+ config = NodeConfiguration.readJsonFromFile(configurationFile);
+ //\TODO Add configuration file with Server Info
+ Properties connectionProps = new Properties();
+ connectionProps.put("user", user);
+ connectionProps.put("password", password);
+ MdbcServerLogic meta = new MdbcServerLogic(url,connectionProps,config);
+ LocalService service = new LocalService(meta);
+
+ // Construct the server
+ this.server = new HttpServer.Builder<>()
+ .withHandler(service, serialization)
+ .withPort(port)
+ .build();
+
+ // Then start it
+ server.start();
+
+ LOG.info("Started Avatica server on port {} with serialization {}", server.getPort(),
+ serialization);
+ } catch (Exception e) {
+ LOG.error("Failed to start Avatica server", e);
+ Unsafe.systemExit(ExitCodes.START_FAILED.ordinal());
+ }
+ }
+
+ public void stop() {
+ if (null != server) {
+ server.stop();
+ server = null;
+ }
+ }
+
+ public void join() throws InterruptedException {
+ server.join();
+ }
+
+ public static void main(String[] args) {
+ final MdbcServer server = new MdbcServer();
+ @SuppressWarnings("deprecation")
+ JCommander jc = new JCommander(server, args);
+ if (server.help) {
+ jc.usage();
+ Unsafe.systemExit(ExitCodes.USAGE.ordinal());
+ return;
+ }
+
+ server.start();
+
+ // Try to clean up when the server is stopped.
+ Runtime.getRuntime().addShutdownHook(
+ new Thread(new Runnable() {
+ @Override public void run() {
+ LOG.info("Stopping server");
+ server.stop();
+ LOG.info("Server stopped");
+ }
+ }));
+
+ try {
+ server.join();
+ } catch (InterruptedException e) {
+ // Reset interruption
+ Thread.currentThread().interrupt();
+ // And exit now.
+ return;
+ }
+ }
+
+ /**
+ * Converter from String to Serialization. Must be public for JCommander.
+ */
+ public static class SerializationConverter implements IStringConverter<Serialization> {
+ @Override public Serialization convert(String value) {
+ return Serialization.valueOf(value.toUpperCase(Locale.ROOT));
+ }
+ }
+
+ /**
+ * Codes for exit conditions
+ */
+ private enum ExitCodes {
+ NORMAL,
+ ALREADY_STARTED, // 1
+ START_FAILED, // 2
+ USAGE; // 3
+ }
+}
+
+// End StandaloneServer.java
diff --git a/src/main/java/com/att/research/mdbc/MdbcServerLogic.java b/src/main/java/com/att/research/mdbc/MdbcServerLogic.java
new file mode 100644
index 0000000..72cc73c
--- /dev/null
+++ b/src/main/java/com/att/research/mdbc/MdbcServerLogic.java
@@ -0,0 +1,312 @@
+package com.att.research.mdbc;
+
+import java.sql.Connection;
+import java.sql.SQLException;
+import java.util.List;
+import java.util.Map;
+import java.util.Properties;
+import java.util.concurrent.ConcurrentMap;
+import java.util.concurrent.TimeUnit;
+
+import com.att.research.exceptions.MDBCServiceException;
+import com.att.research.mdbc.configurations.NodeConfiguration;
+import com.google.common.cache.Cache;
+import com.google.common.cache.CacheBuilder;
+import com.google.common.cache.RemovalListener;
+import com.google.common.cache.RemovalNotification;
+import org.apache.calcite.avatica.MissingResultsException;
+import org.apache.calcite.avatica.NoSuchStatementException;
+import org.apache.calcite.avatica.jdbc.JdbcMeta;
+import org.apache.calcite.avatica.remote.TypedValue;
+
+import com.att.research.logging.EELFLoggerDelegate;
+import com.att.research.logging.format.AppMessages;
+import com.att.research.logging.format.ErrorSeverity;
+import com.att.research.logging.format.ErrorTypes;
+
+public class MdbcServerLogic extends JdbcMeta{
+
+ private static EELFLoggerDelegate logger = EELFLoggerDelegate.getLogger(MdbcServerLogic.class);
+
+ StateManager manager;
+ DatabasePartition ranges;
+ String name;
+ String sqlDatabase;
+
+ //TODO: Delete this properties after debugging
+ private final Properties info;
+ private final Cache<String, Connection> connectionCache;
+
+ public MdbcServerLogic(String Url, Properties info,NodeConfiguration config) throws SQLException, MDBCServiceException {
+ super(Url,info);
+ this.ranges = config.partition;
+ this.name = config.nodeName;
+ this.sqlDatabase = config.sqlDatabaseName;
+ this.manager = new StateManager(Url,info,this.ranges,this.sqlDatabase);
+ this.info = info;
+ int concurrencyLevel = Integer.parseInt(
+ info.getProperty(ConnectionCacheSettings.CONCURRENCY_LEVEL.key(),
+ ConnectionCacheSettings.CONCURRENCY_LEVEL.defaultValue()));
+ int initialCapacity = Integer.parseInt(
+ info.getProperty(ConnectionCacheSettings.INITIAL_CAPACITY.key(),
+ ConnectionCacheSettings.INITIAL_CAPACITY.defaultValue()));
+ long maxCapacity = Long.parseLong(
+ info.getProperty(ConnectionCacheSettings.MAX_CAPACITY.key(),
+ ConnectionCacheSettings.MAX_CAPACITY.defaultValue()));
+ long connectionExpiryDuration = Long.parseLong(
+ info.getProperty(ConnectionCacheSettings.EXPIRY_DURATION.key(),
+ ConnectionCacheSettings.EXPIRY_DURATION.defaultValue()));
+ TimeUnit connectionExpiryUnit = TimeUnit.valueOf(
+ info.getProperty(ConnectionCacheSettings.EXPIRY_UNIT.key(),
+ ConnectionCacheSettings.EXPIRY_UNIT.defaultValue()));
+ this.connectionCache = CacheBuilder.newBuilder()
+ .concurrencyLevel(concurrencyLevel)
+ .initialCapacity(initialCapacity)
+ .maximumSize(maxCapacity)
+ .expireAfterAccess(connectionExpiryDuration, connectionExpiryUnit)
+ .removalListener(new ConnectionExpiryHandler())
+ .build();
+ }
+
+ @Override
+ protected Connection getConnection(String id) throws SQLException {
+ if (id == null) {
+ throw new NullPointerException("Connection id is null");
+ }
+ //\TODO: don't use connectionCache, use this.manager internal state
+ Connection conn = connectionCache.getIfPresent(id);
+ if (conn == null) {
+ this.manager.CloseConnection(id);
+ logger.error(EELFLoggerDelegate.errorLogger,"Connection not found: invalid id, closed, or expired: "
+ + id);
+ throw new RuntimeException(" Connection not found: invalid id, closed, or expired: " + id);
+ }
+ return conn;
+ }
+
+ @Override
+ public void openConnection(ConnectionHandle ch, Map<String, String> information) {
+ Properties fullInfo = new Properties();
+ fullInfo.putAll(this.info);
+ if (information != null) {
+ fullInfo.putAll(information);
+ }
+
+ final ConcurrentMap<String, Connection> cacheAsMap = this.connectionCache.asMap();
+ if (cacheAsMap.containsKey(ch.id)) {
+ throw new RuntimeException("Connection already exists: " + ch.id);
+ }
+ // Avoid global synchronization of connection opening
+ try {
+ this.manager.OpenConnection(ch.id, info);
+ Connection conn = this.manager.GetConnection(ch.id);
+ if(conn == null) {
+ logger.error(EELFLoggerDelegate.errorLogger, "Connection created was null");
+ throw new RuntimeException("Connection created was null for connection: " + ch.id);
+ }
+ Connection loadedConn = cacheAsMap.putIfAbsent(ch.id, conn);
+ logger.info("connection created with id {}", ch.id);
+ // Race condition: someone beat us to storing the connection in the cache.
+ if (loadedConn != null) {
+ //\TODO check if we added an additional race condition for this
+ this.manager.CloseConnection(ch.id);
+ conn.close();
+ throw new RuntimeException("Connection already exists: " + ch.id);
+ }
+ } catch (SQLException e) {
+ logger.error(EELFLoggerDelegate.errorLogger, e.getMessage(), AppMessages.QUERYERROR, ErrorTypes.QUERYERROR, ErrorSeverity.CRITICAL);
+ throw new RuntimeException(e);
+ }
+ }
+
+ @Override
+ public void closeConnection(ConnectionHandle ch) {
+ //\TODO use state connection instead
+ Connection conn = connectionCache.getIfPresent(ch.id);
+ if (conn == null) {
+ logger.debug("client requested close unknown connection {}", ch);
+ return;
+ }
+ logger.trace("closing connection {}", ch);
+ try {
+ conn.close();
+ } catch (SQLException e) {
+ logger.error(EELFLoggerDelegate.errorLogger, e.getMessage(), AppMessages.QUERYERROR, ErrorTypes.QUERYERROR, ErrorSeverity.CRITICAL);
+ throw new RuntimeException(e.getMessage());
+ } finally {
+ connectionCache.invalidate(ch.id);
+ this.manager.CloseConnection(ch.id);
+ logger.info("connection closed with id {}", ch.id);
+ }
+ }
+
+ @Override
+ public void commit(ConnectionHandle ch) {
+ try {
+ super.commit(ch);
+ logger.debug("connection commited with id {}", ch.id);
+ } catch (Exception err ) {
+ logger.error(EELFLoggerDelegate.errorLogger, err.getMessage(), AppMessages.QUERYERROR, ErrorTypes.QUERYERROR, ErrorSeverity.CRITICAL);
+ throw(err);
+ }
+ }
+
+ //\TODO All the following functions can be deleted
+ // Added for two reasons: debugging and logging
+ @Override
+ public StatementHandle prepare(ConnectionHandle ch, String sql, long maxRowCount) {
+ StatementHandle h;
+ try {
+ h = super.prepare(ch, sql, maxRowCount);
+ logger.debug("prepared statement {}", h);
+ } catch (Exception e ) {
+ logger.error(EELFLoggerDelegate.errorLogger, e.getMessage(), AppMessages.QUERYERROR, ErrorTypes.QUERYERROR, ErrorSeverity.CRITICAL);
+ throw(e);
+ }
+ return h;
+ }
+
+ @Override
+ public ExecuteResult prepareAndExecute(StatementHandle h, String sql, long maxRowCount, int maxRowsInFirstFrame,
+ PrepareCallback callback) throws NoSuchStatementException {
+ ExecuteResult e;
+ try {
+ e = super.prepareAndExecute(h, sql, maxRowCount,maxRowsInFirstFrame,callback);
+ logger.debug("prepare and execute statement {}", h);
+ } catch (Exception err ) {
+ logger.error(EELFLoggerDelegate.errorLogger, err.getMessage(), AppMessages.QUERYERROR, ErrorTypes.QUERYERROR, ErrorSeverity.CRITICAL);
+ throw(err);
+ }
+ return e;
+ }
+
+ @Override
+ public ExecuteBatchResult prepareAndExecuteBatch(StatementHandle h, List<String> sqlCommands)
+ throws NoSuchStatementException {
+ ExecuteBatchResult e;
+ try {
+ e = super.prepareAndExecuteBatch(h, sqlCommands);
+ logger.debug("prepare and execute batch statement {}", h);
+ } catch (Exception err ) {
+ logger.error(EELFLoggerDelegate.errorLogger, err.getMessage(), AppMessages.QUERYERROR, ErrorTypes.QUERYERROR, ErrorSeverity.CRITICAL);
+ throw(err);
+ }
+ return e;
+ }
+
+ @Override
+ public ExecuteBatchResult executeBatch(StatementHandle h, List<List<TypedValue>> parameterValues)
+ throws NoSuchStatementException {
+ ExecuteBatchResult e;
+ try {
+ e = super.executeBatch(h, parameterValues);
+ logger.debug("execute batch statement {}", h);
+ } catch (Exception err ) {
+ logger.error(EELFLoggerDelegate.errorLogger, err.getMessage(), AppMessages.QUERYERROR, ErrorTypes.QUERYERROR, ErrorSeverity.CRITICAL);
+ throw(err);
+ }
+ return e;
+ }
+
+ @Override
+ public Frame fetch(StatementHandle h, long offset, int fetchMaxRowCount)
+ throws NoSuchStatementException, MissingResultsException {
+ Frame f;
+ try {
+ f = super.fetch(h, offset, fetchMaxRowCount);
+ logger.debug("fetch statement {}", h);
+ } catch (Exception err ) {
+ logger.error(EELFLoggerDelegate.errorLogger, err.getMessage(), AppMessages.QUERYERROR, ErrorTypes.QUERYERROR, ErrorSeverity.CRITICAL);
+ throw(err);
+ }
+ return f;
+ }
+
+ @Override
+ public ExecuteResult execute(StatementHandle h, List<TypedValue> parameterValues, long maxRowCount)
+ throws NoSuchStatementException {
+ ExecuteResult e;
+ try {
+ e = super.execute(h, parameterValues, maxRowCount);
+ logger.debug("fetch statement {}", h);
+ } catch (Exception err ) {
+ logger.error(EELFLoggerDelegate.errorLogger, err.getMessage(), AppMessages.QUERYERROR, ErrorTypes.QUERYERROR, ErrorSeverity.CRITICAL);
+ throw(err);
+ }
+ return e;
+ }
+
+ @Override
+ public ExecuteResult execute(StatementHandle h, List<TypedValue> parameterValues, int maxRowsInFirstFrame)
+ throws NoSuchStatementException {
+ ExecuteResult e;
+ try {
+ e = super.execute(h, parameterValues, maxRowsInFirstFrame);
+ logger.debug("fetch statement {}", h);
+ } catch (Exception err ) {
+ logger.error(EELFLoggerDelegate.errorLogger, err.getMessage(), AppMessages.QUERYERROR, ErrorTypes.QUERYERROR, ErrorSeverity.CRITICAL);
+ throw(err);
+ }
+ return e;
+ }
+
+ @Override
+ public StatementHandle createStatement(ConnectionHandle ch) {
+ StatementHandle h;
+ try {
+ h = super.createStatement(ch);
+ logger.debug("create statement {}", h);
+ } catch (Exception err ) {
+ logger.error(EELFLoggerDelegate.errorLogger, err.getMessage(), AppMessages.QUERYERROR, ErrorTypes.QUERYERROR, ErrorSeverity.CRITICAL);
+ throw(err);
+ }
+ return h;
+ }
+
+ @Override
+ public void closeStatement(StatementHandle h) {
+ try {
+ super.closeStatement(h);
+ logger.debug("statement closed {}", h);
+ } catch (Exception err ) {
+ logger.error(EELFLoggerDelegate.errorLogger, err.getMessage(), AppMessages.QUERYERROR, ErrorTypes.QUERYERROR, ErrorSeverity.CRITICAL);
+ throw(err);
+ }
+ }
+
+
+
+
+
+
+
+ @Override
+ public void rollback(ConnectionHandle ch) {
+ try {
+ super.rollback(ch);
+ logger.debug("connection rollback with id {}", ch.id);
+ } catch (Exception err ) {
+ logger.error(EELFLoggerDelegate.errorLogger, err.getMessage(), AppMessages.QUERYERROR, ErrorTypes.QUERYERROR, ErrorSeverity.CRITICAL);
+ throw(err);
+ }
+ }
+
+ private class ConnectionExpiryHandler
+ implements RemovalListener<String, Connection> {
+
+ public void onRemoval(RemovalNotification<String, Connection> notification) {
+ String connectionId = notification.getKey();
+ Connection doomed = notification.getValue();
+ logger.debug("Expiring connection {} because {}", connectionId, notification.getCause());
+ try {
+ if (doomed != null) {
+ doomed.close();
+ }
+ } catch (Throwable t) {
+ logger.warn("Exception thrown while expiring connection {}", connectionId, t);
+ }
+ }
+ }
+}
+
+
diff --git a/src/main/java/com/att/research/mdbc/MdbcStatement.java b/src/main/java/com/att/research/mdbc/MdbcStatement.java
new file mode 100644
index 0000000..e03fbda
--- /dev/null
+++ b/src/main/java/com/att/research/mdbc/MdbcStatement.java
@@ -0,0 +1,416 @@
+package com.att.research.mdbc;
+
+import java.sql.Connection;
+import java.sql.ResultSet;
+import java.sql.SQLException;
+import java.sql.SQLWarning;
+import java.sql.Statement;
+
+import com.att.research.exceptions.QueryException;
+import com.att.research.logging.EELFLoggerDelegate;
+import com.att.research.logging.format.AppMessages;
+import com.att.research.logging.format.ErrorSeverity;
+import com.att.research.logging.format.ErrorTypes;
+
/**
 * MdbcStatement is a proxy Statement that front ends Statements from the underlying JDBC driver. It passes all operations through,
 * and invokes the MusicSqlManager when there is the possibility that database tables have been created or dropped.
 *
 * <p>Execution methods deliberately swallow exceptions whose class name starts with
 * {@code com.datastax.driver} (Cassandra driver errors); all other exceptions propagate.
 * When an exception is swallowed, the method returns its default value
 * (null ResultSet, 0, or false) — callers should be prepared for that.</p>
 *
 * @author Robert Eby
 */
public class MdbcStatement implements Statement {
	private EELFLoggerDelegate logger = EELFLoggerDelegate.getLogger(MdbcStatement.class);
	// Exceptions from classes under this package prefix are logged and suppressed
	// rather than rethrown (see the catch blocks in the execute* methods).
	private static final String DATASTAX_PREFIX = "com.datastax.driver";

	final Statement stmt;		// the Statement that we are proxying
	final MusicSqlManager mgr;
	//\TODO We may need to all pass the connection object to support autocommit

	// Standard wrapping constructor: delegate to s, coordinate replication via m.
	public MdbcStatement(Statement s, MusicSqlManager m) {
		this.stmt = s;
		this.mgr = m;
	}

	// NOTE(review): the sql argument is accepted but ignored here.
	public MdbcStatement(Statement stmt, String sql, MusicSqlManager mgr) {
		//\TODO why there is a constructor with a sql parameter in a not PreparedStatement
		this.stmt = stmt;
		this.mgr = mgr;
	}

	@Override
	public <T> T unwrap(Class<T> iface) throws SQLException {
		logger.error(EELFLoggerDelegate.errorLogger, "proxystatement unwrap: " + iface.getName());
		return stmt.unwrap(iface);
	}

	@Override
	public boolean isWrapperFor(Class<?> iface) throws SQLException {
		logger.error(EELFLoggerDelegate.errorLogger, "proxystatement isWrapperFor: " + iface.getName());
		return stmt.isWrapperFor(iface);
	}

	/**
	 * Runs the MUSIC pre/post statement hooks around the delegate's executeQuery.
	 * May return null if a Cassandra-driver exception was suppressed.
	 */
	@Override
	public ResultSet executeQuery(String sql) throws SQLException {
		logger.debug(EELFLoggerDelegate.applicationLogger,"executeQuery: "+sql);
		ResultSet r = null;
		try {
			mgr.preStatementHook(sql);
			r = stmt.executeQuery(sql);
			mgr.postStatementHook(sql);
			synchronizeTables(sql);
		} catch (Exception e) {
			String nm = e.getClass().getName();
			logger.error(EELFLoggerDelegate.errorLogger, "executeQuery: exception "+nm);
			if (!nm.startsWith(DATASTAX_PREFIX))
				throw e;
		}
		return r;
	}

	/**
	 * Runs the MUSIC pre/post statement hooks around the delegate's executeUpdate.
	 * Returns 0 if a Cassandra-driver exception was suppressed.
	 */
	@Override
	public int executeUpdate(String sql) throws SQLException {
		logger.debug(EELFLoggerDelegate.applicationLogger,"executeUpdate: "+sql);

		int n = 0;
		try {
			mgr.preStatementHook(sql);
			n = stmt.executeUpdate(sql);
			mgr.postStatementHook(sql);
			synchronizeTables(sql);
		} catch (Exception e) {
			String nm = e.getClass().getName();
			logger.error(EELFLoggerDelegate.errorLogger, "executeUpdate: exception "+nm+" "+e);
			if (!nm.startsWith(DATASTAX_PREFIX))
				throw e;
		}
		return n;
	}

	@Override
	public void close() throws SQLException {
		logger.debug(EELFLoggerDelegate.applicationLogger,"Statement close: ");
		stmt.close();
	}

	@Override
	public int getMaxFieldSize() throws SQLException {
		logger.debug(EELFLoggerDelegate.applicationLogger,"getMaxFieldSize");
		return stmt.getMaxFieldSize();
	}

	@Override
	public void setMaxFieldSize(int max) throws SQLException {
		stmt.setMaxFieldSize(max);
	}

	@Override
	public int getMaxRows() throws SQLException {
		return stmt.getMaxRows();
	}

	@Override
	public void setMaxRows(int max) throws SQLException {
		stmt.setMaxRows(max);
	}

	@Override
	public void setEscapeProcessing(boolean enable) throws SQLException {
		stmt.setEscapeProcessing(enable);
	}

	@Override
	public int getQueryTimeout() throws SQLException {
		return stmt.getQueryTimeout();
	}

	@Override
	public void setQueryTimeout(int seconds) throws SQLException {
		//\TODO: we also need to implement a higher level timeout in MDBC
		logger.debug(EELFLoggerDelegate.applicationLogger,"setQueryTimeout seconds "+ seconds);
		stmt.setQueryTimeout(seconds);
	}

	@Override
	public void cancel() throws SQLException {
		stmt.cancel();
	}

	@Override
	public SQLWarning getWarnings() throws SQLException {
		return stmt.getWarnings();
	}

	@Override
	public void clearWarnings() throws SQLException {
		stmt.clearWarnings();
	}

	@Override
	public void setCursorName(String name) throws SQLException {
		stmt.setCursorName(name);
	}

	/**
	 * Runs the MUSIC pre/post statement hooks around the delegate's execute.
	 * Returns false if a Cassandra-driver exception was suppressed.
	 */
	@Override
	public boolean execute(String sql) throws SQLException {
		logger.debug(EELFLoggerDelegate.applicationLogger,"execute: "+sql);
		boolean b = false;
		//\TODO Add the result of the postStatementHook to b
		try {
			mgr.preStatementHook(sql);
			b = stmt.execute(sql);
			mgr.postStatementHook(sql);
			synchronizeTables(sql);
		} catch (Exception e) {
			String nm = e.getClass().getName();
			logger.error(EELFLoggerDelegate.errorLogger, "execute: exception "+nm+" "+e);
			// Note: this seems to be the only call Camunda uses, so it is the only one I am fixing for now.
			boolean ignore = nm.startsWith(DATASTAX_PREFIX);
//			ignore |= (nm.startsWith("org.h2.jdbc.JdbcSQLException") && e.getMessage().contains("already exists"));
			if (ignore) {
				logger.warn("execute: exception (IGNORED) "+nm);
			} else {
				logger.error(EELFLoggerDelegate.errorLogger, " Exception "+nm+" "+e);
				throw e;
			}
		}
		return b;
	}

	@Override
	public ResultSet getResultSet() throws SQLException {
		return stmt.getResultSet();
	}

	@Override
	public int getUpdateCount() throws SQLException {
		return stmt.getUpdateCount();
	}

	@Override
	public boolean getMoreResults() throws SQLException {
		return stmt.getMoreResults();
	}

	@Override
	public void setFetchDirection(int direction) throws SQLException {
		stmt.setFetchDirection(direction);
	}

	@Override
	public int getFetchDirection() throws SQLException {
		return stmt.getFetchDirection();
	}

	@Override
	public void setFetchSize(int rows) throws SQLException {
		stmt.setFetchSize(rows);
	}

	@Override
	public int getFetchSize() throws SQLException {
		return stmt.getFetchSize();
	}

	@Override
	public int getResultSetConcurrency() throws SQLException {
		return stmt.getResultSetConcurrency();
	}

	@Override
	public int getResultSetType() throws SQLException {
		return stmt.getResultSetType();
	}

	@Override
	public void addBatch(String sql) throws SQLException {
		stmt.addBatch(sql);
	}

	@Override
	public void clearBatch() throws SQLException {
		stmt.clearBatch();
	}

	// NOTE(review): batch execution bypasses the MUSIC pre/post hooks entirely,
	// so replicated state may diverge — see the warning logged below.
	@Override
	public int[] executeBatch() throws SQLException {
		logger.debug(EELFLoggerDelegate.applicationLogger,"executeBatch: ");
		int[] n = null;
		try {
			logger.debug(EELFLoggerDelegate.applicationLogger,"executeBatch() is not supported by MDBC; your results may be incorrect as a result.");
			n = stmt.executeBatch();
			synchronizeTables(null);
		} catch (Exception e) {
			String nm = e.getClass().getName();
			logger.error(EELFLoggerDelegate.errorLogger,"executeBatch: exception "+nm);
			if (!nm.startsWith(DATASTAX_PREFIX))
				throw e;
		}
		return n;
	}

	@Override
	public Connection getConnection() throws SQLException {
		return stmt.getConnection();
	}

	@Override
	public boolean getMoreResults(int current) throws SQLException {
		return stmt.getMoreResults(current);
	}

	@Override
	public ResultSet getGeneratedKeys() throws SQLException {
		return stmt.getGeneratedKeys();
	}

	@Override
	public int executeUpdate(String sql, int autoGeneratedKeys) throws SQLException {
		logger.debug(EELFLoggerDelegate.applicationLogger,"executeUpdate: "+sql);
		int n = 0;
		try {
			mgr.preStatementHook(sql);
			n = stmt.executeUpdate(sql, autoGeneratedKeys);
			mgr.postStatementHook(sql);
			synchronizeTables(sql);
		} catch (Exception e) {
			String nm = e.getClass().getName();
			logger.error(EELFLoggerDelegate.errorLogger,"executeUpdate: exception "+nm);
			if (!nm.startsWith(DATASTAX_PREFIX))
				throw e;
		}
		return n;
	}

	@Override
	public int executeUpdate(String sql, int[] columnIndexes) throws SQLException {
		logger.debug(EELFLoggerDelegate.applicationLogger,"executeUpdate: "+sql);
		int n = 0;
		try {
			mgr.preStatementHook(sql);
			n = stmt.executeUpdate(sql, columnIndexes);
			mgr.postStatementHook(sql);
			synchronizeTables(sql);
		} catch (Exception e) {
			String nm = e.getClass().getName();
			logger.error(EELFLoggerDelegate.errorLogger,"executeUpdate: exception "+nm);
			if (!nm.startsWith(DATASTAX_PREFIX))
				throw e;
		}
		return n;
	}

	@Override
	public int executeUpdate(String sql, String[] columnNames) throws SQLException {
		logger.debug(EELFLoggerDelegate.applicationLogger,"executeUpdate: "+sql);
		int n = 0;
		try {
			mgr.preStatementHook(sql);
			n = stmt.executeUpdate(sql, columnNames);
			mgr.postStatementHook(sql);
			synchronizeTables(sql);
		} catch (Exception e) {
			String nm = e.getClass().getName();
			logger.error(EELFLoggerDelegate.errorLogger,"executeUpdate: exception "+nm);
			if (!nm.startsWith(DATASTAX_PREFIX))
				throw e;
		}
		return n;
	}

	@Override
	public boolean execute(String sql, int autoGeneratedKeys) throws SQLException {
		logger.debug(EELFLoggerDelegate.applicationLogger,"execute: "+sql);
		boolean b = false;
		try {
			mgr.preStatementHook(sql);
			b = stmt.execute(sql, autoGeneratedKeys);
			mgr.postStatementHook(sql);
			synchronizeTables(sql);
		} catch (Exception e) {
			String nm = e.getClass().getName();
			logger.error(EELFLoggerDelegate.errorLogger,"execute: exception "+nm);
			if (!nm.startsWith(DATASTAX_PREFIX))
				throw e;
		}
		return b;
	}

	@Override
	public boolean execute(String sql, int[] columnIndexes) throws SQLException {
		logger.debug(EELFLoggerDelegate.applicationLogger,"execute: "+sql);
		boolean b = false;
		try {
			mgr.preStatementHook(sql);
			b = stmt.execute(sql, columnIndexes);
			mgr.postStatementHook(sql);
			synchronizeTables(sql);
		} catch (Exception e) {
			String nm = e.getClass().getName();
			logger.error(EELFLoggerDelegate.errorLogger,"execute: exception "+nm);
			if (!nm.startsWith(DATASTAX_PREFIX))
				throw e;
		}
		return b;
	}

	@Override
	public boolean execute(String sql, String[] columnNames) throws SQLException {
		logger.debug(EELFLoggerDelegate.applicationLogger,"execute: "+sql);
		//\TODO Idem to the other execute without columnNames
		boolean b = false;
		try {
			mgr.preStatementHook(sql);
			b = stmt.execute(sql, columnNames);
			mgr.postStatementHook(sql);
			synchronizeTables(sql);
		} catch (Exception e) {
			String nm = e.getClass().getName();
			logger.error(EELFLoggerDelegate.errorLogger,"execute: exception "+nm);
			if (!nm.startsWith(DATASTAX_PREFIX))
				throw e;
		}
		return b;
	}

	@Override
	public int getResultSetHoldability() throws SQLException {
		return stmt.getResultSetHoldability();
	}

	@Override
	public boolean isClosed() throws SQLException {
		return stmt.isClosed();
	}

	@Override
	public void setPoolable(boolean poolable) throws SQLException {
		stmt.setPoolable(poolable);
	}

	@Override
	public boolean isPoolable() throws SQLException {
		return stmt.isPoolable();
	}

	@Override
	public void closeOnCompletion() throws SQLException {
		stmt.closeOnCompletion();
	}

	@Override
	public boolean isCloseOnCompletion() throws SQLException {
		return stmt.isCloseOnCompletion();
	}

	/**
	 * Re-syncs MUSIC table metadata after a statement that may have created tables.
	 * Triggered when sql is null (batch path) or starts with "create" (case-insensitive).
	 *
	 * @param sql the statement just executed, or null to force a sync
	 */
	protected void synchronizeTables(String sql)  {
		if (sql == null || sql.trim().toLowerCase().startsWith("create")) {
			if (mgr != null) {
				try {
					mgr.synchronizeTables();
				} catch (QueryException e) {
					logger.error(EELFLoggerDelegate.errorLogger, e.getMessage(),AppMessages.UNKNOWNERROR, ErrorSeverity.CRITICAL, ErrorTypes.QUERYERROR);
				}
			}
		}
	}
}
diff --git a/src/main/java/com/att/research/mdbc/MusicSqlManager.java b/src/main/java/com/att/research/mdbc/MusicSqlManager.java
new file mode 100755
index 0000000..4330cfe
--- /dev/null
+++ b/src/main/java/com/att/research/mdbc/MusicSqlManager.java
@@ -0,0 +1,300 @@
+package com.att.research.mdbc;
+
+import java.sql.Connection;
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.Properties;
+import java.util.Set;
+
+import org.json.JSONObject;
+
+import com.att.research.mdbc.mixins.DBInterface;
+import com.att.research.mdbc.mixins.MixinFactory;
+import com.att.research.mdbc.mixins.MusicInterface;
+import com.att.research.mdbc.mixins.StagingTable;
+import com.att.research.mdbc.mixins.TxCommitProgress;
+import com.att.research.mdbc.mixins.Utils;
+
+import com.att.research.exceptions.MDBCServiceException;
+import com.att.research.exceptions.QueryException;
+import com.att.research.logging.*;
+import com.att.research.logging.format.AppMessages;
+import com.att.research.logging.format.ErrorSeverity;
+import com.att.research.logging.format.ErrorTypes;
+
+/**
+* <p>
+* MUSIC SQL Manager - code that helps take data written to a SQL database and seamlessly integrates it
+* with <a href="https://github.com/att/music">MUSIC</a> that maintains data in a No-SQL data-store
+* (<a href="http://cassandra.apache.org/">Cassandra</a>) and protects access to it with a distributed
+* locking service (based on <a href="https://zookeeper.apache.org/">Zookeeper</a>).
+* </p>
+* <p>
+* This code will support transactions by taking note of the value of the autoCommit flag, and of calls
+* to <code>commit()</code> and <code>rollback()</code>. These calls should be made by the user's JDBC
+* client.
+* </p>
+*
+* @author Bharath Balasubramanian, Robert Eby
+*/
+public class MusicSqlManager {
+
+ private static EELFLoggerDelegate logger = EELFLoggerDelegate.getLogger(MusicSqlManager.class);
+
+ private final DBInterface dbi;
+ private final MusicInterface mi;
+ private final Set<String> table_set;
+ private final HashMap<Range,StagingTable> transactionDigest;
+ private boolean autocommit; // a copy of the autocommit flag from the JDBC Connection
+
+ /**
+ * Build a MusicSqlManager for a DB connection. This construct may only be called by getMusicSqlManager(),
+ * which will ensure that only one MusicSqlManager is created per URL.
+ * This is the location where the appropriate mixins to use for the MusicSqlManager should be determined.
+ * They should be picked based upon the URL and the properties passed to this constructor.
+ * <p>
+ * At the present time, we only support the use of the H2Mixin (for access to a local H2 database),
+ * with the CassandraMixin (for direct access to a Cassandra noSQL DB as the persistence layer).
+ * </p>
+ *
+ * @param url the JDBC URL which was used to connection to the database
+ * @param conn the actual connection to the database
+ * @param info properties passed from the initial JDBC connect() call
+ * @throws MDBCServiceException
+ */
+ public MusicSqlManager(String url, Connection conn, Properties info, MusicInterface mi) throws MDBCServiceException {
+ try {
+ info.putAll(Utils.getMdbcProperties());
+ String mixinDb = info.getProperty(Configuration.KEY_DB_MIXIN_NAME, Configuration.DB_MIXIN_DEFAULT);
+ this.dbi = MixinFactory.createDBInterface(mixinDb, this, url, conn, info);
+ this.mi = mi;
+ this.table_set = Collections.synchronizedSet(new HashSet<String>());
+ this.autocommit = true;
+ this.transactionDigest = new HashMap<Range,StagingTable>();
+
+ }catch(Exception e) {
+ throw new MDBCServiceException(e.getMessage());
+ }
+ }
+
+ public void setAutoCommit(boolean b,String txId, TxCommitProgress progressKeeper, DatabasePartition partition) throws MDBCServiceException {
+ if (b != autocommit) {
+ autocommit = b;
+ logger.debug(EELFLoggerDelegate.applicationLogger,"autocommit changed to "+b);
+ if (b) {
+ // My reading is that turning autoCOmmit ON should automatically commit any outstanding transaction
+ if(txId == null || txId.isEmpty()) {
+ logger.error(EELFLoggerDelegate.errorLogger, "Connection ID is null",AppMessages.UNKNOWNERROR, ErrorSeverity.CRITICAL, ErrorTypes.QUERYERROR);
+ throw new MDBCServiceException("tx id is null");
+ }
+ commit(txId,progressKeeper,partition);
+ }
+ }
+ }
+
+ /**
+ * Close this MusicSqlManager.
+ */
+ public void close() {
+ if (dbi != null) {
+ dbi.close();
+ }
+ }
+
+ /**
+ * Code to be run within the DB driver before a SQL statement is executed. This is where tables
+ * can be synchronized before a SELECT, for those databases that do not support SELECT triggers.
+ * @param sql the SQL statement that is about to be executed
+ */
+ public void preStatementHook(final String sql) {
+ dbi.preStatementHook(sql);
+ }
+ /**
+ * Code to be run within the DB driver after a SQL statement has been executed. This is where remote
+ * statement actions can be copied back to Cassandra/MUSIC.
+ * @param sql the SQL statement that was executed
+ */
+ public void postStatementHook(final String sql) {
+ dbi.postStatementHook(sql,transactionDigest);
+ }
+ /**
+ * Synchronize the list of tables in SQL with the list in MUSIC. This function should be called when the
+ * proxy first starts, and whenever there is the possibility that tables were created or dropped. It is synchronized
+ * in order to prevent multiple threads from running this code in parallel.
+ */
+ public synchronized void synchronizeTables() throws QueryException {
+ Set<String> set1 = dbi.getSQLTableSet(); // set of tables in the database
+ logger.debug(EELFLoggerDelegate.applicationLogger, "synchronizing tables:" + set1);
+ for (String tableName : set1) {
+ // This map will be filled in if this table was previously discovered
+ if (!table_set.contains(tableName) && !dbi.getReservedTblNames().contains(tableName)) {
+ logger.info(EELFLoggerDelegate.applicationLogger, "New table discovered: "+tableName);
+ try {
+ TableInfo ti = dbi.getTableInfo(tableName);
+ mi.initializeMusicForTable(ti,tableName);
+ //\TODO Verify if table info can be modify in the previous step, if not this step can be deleted
+ ti = dbi.getTableInfo(tableName);
+ mi.createDirtyRowTable(ti,tableName);
+ dbi.createSQLTriggers(tableName);
+ table_set.add(tableName);
+ synchronizeTableData(tableName);
+ logger.debug(EELFLoggerDelegate.applicationLogger, "synchronized tables:" +
+ table_set.size() + "/" + set1.size() + "tables uploaded");
+ } catch (Exception e) {
+ logger.error(EELFLoggerDelegate.errorLogger, e.getMessage(),AppMessages.UNKNOWNERROR, ErrorSeverity.CRITICAL, ErrorTypes.QUERYERROR);
+ //logger.error(EELFLoggerDelegate.errorLogger, "Exception synchronizeTables: "+e);
+ throw new QueryException();
+ }
+ }
+ }
+
+// Set<String> set2 = getMusicTableSet(music_ns);
+ // not working - fix later
+// for (String tbl : set2) {
+// if (!set1.contains(tbl)) {
+// logger.debug("Old table dropped: "+tbl);
+// dropSQLTriggers(tbl, conn);
+// // ZZTODO drop camunda table ?
+// }
+// }
+ }
+
+ /**
+ * On startup, copy dirty data from Cassandra to H2. May not be needed.
+ * @param tableName
+ */
+ public void synchronizeTableData(String tableName) {
+ // TODO - copy MUSIC -> H2
+ dbi.synchronizeData(tableName);
+ }
+ /**
+ * This method is called whenever there is a SELECT on a local SQL table, and should be called by the underlying databases
+ * triggering mechanism. It first checks the local dirty bits table to see if there are any keys in Cassandra whose value
+ * has not yet been sent to SQL. If there are, the appropriate values are copied from Cassandra to the local database.
+ * Under normal execution, this function behaves as a NOP operation.
+ * @param tableName This is the table on which the SELECT is being performed
+ */
+ public void readDirtyRowsAndUpdateDb(String tableName) {
+ mi.readDirtyRowsAndUpdateDb(dbi,tableName);
+ }
+
+
+
+
+ /**
+ * This method gets the primary key that the music interfaces uses by default.
+ * If the front end uses a primary key, this will not match what is used in the MUSIC interface
+ * @return
+ */
+ public String getMusicDefaultPrimaryKeyName() {
+ return mi.getMusicDefaultPrimaryKeyName();
+ }
+
+ /**
+ * Asks music interface to provide the function to create a primary key
+ * e.g. uuid(), 1, "unique_aksd419fjc"
+ * @return
+ */
+ public String generateUniqueKey() {
+ //
+ return mi.generateUniqueKey();
+ }
+
+
+ /**
+ * Perform a commit, as requested by the JDBC driver. If any row updates have been delayed,
+ * they are performed now and copied into MUSIC.
+ * @throws MDBCServiceException
+ */
+ public synchronized void commit(String txId, TxCommitProgress progressKeeper, DatabasePartition partition) throws MDBCServiceException {
+ logger.debug(EELFLoggerDelegate.applicationLogger, " commit ");
+ // transaction was committed -- add all the updates into the REDO-Log in MUSIC
+ try {
+ mi.commitLog(dbi, partition, transactionDigest, txId, progressKeeper);
+ }catch(MDBCServiceException e) {
+ logger.error(EELFLoggerDelegate.errorLogger, e.getMessage(), AppMessages.QUERYERROR, ErrorTypes.QUERYERROR, ErrorSeverity.CRITICAL);
+ throw e;
+ }
+ }
+
+ /**
+ * Perform a rollback, as requested by the JDBC driver. If any row updates have been delayed,
+ * they are discarded.
+ */
+ public synchronized void rollback() {
+ // transaction was rolled back - discard the updates
+ logger.debug(EELFLoggerDelegate.applicationLogger, "Rollback");;
+ transactionDigest.clear();
+ }
+
+ /**
+ * Get all
+ * @param table
+ * @param dbRow
+ * @return
+ */
+ public String getMusicKeyFromRowWithoutPrimaryIndexes(String table, JSONObject dbRow) {
+ TableInfo ti = dbi.getTableInfo(table);
+ return mi.getMusicKeyFromRowWithoutPrimaryIndexes(ti,table, dbRow);
+ }
+
+ public String getMusicKeyFromRow(String table, JSONObject dbRow) {
+ TableInfo ti = dbi.getTableInfo(table);
+ return mi.getMusicKeyFromRow(ti,table, dbRow);
+ }
+
+ /**
+ * Returns all keys that matches the current sql statement, and not in already updated keys.
+ *
+ * @param sql the query that we are getting keys for
+ * @deprecated
+ */
+ public ArrayList<String> getMusicKeys(String sql) {
+ ArrayList<String> musicKeys = new ArrayList<String>();
+ //\TODO See if this is required
+ /*
+ try {
+ net.sf.jsqlparser.statement.Statement stmt = CCJSqlParserUtil.parse(sql);
+ if (stmt instanceof Insert) {
+ Insert s = (Insert) stmt;
+ String tbl = s.getTable().getName();
+ musicKeys.add(generatePrimaryKey());
+ } else {
+ String tbl;
+ String where = "";
+ if (stmt instanceof Update){
+ Update u = (Update) stmt;
+ tbl = u.getTables().get(0).getName();
+ where = u.getWhere().toString();
+ } else if (stmt instanceof Delete) {
+ Delete d = (Delete) stmt;
+ tbl = d.getTable().getName();
+ if (d.getWhere()!=null) {
+ where = d.getWhere().toString();
+ }
+ } else {
+ System.err.println("Not recognized sql type");
+ tbl = "";
+ }
+ String dbiSelect = "SELECT * FROM " + tbl;
+ if (!where.equals("")) {
+ dbiSelect += "WHERE" + where;
+ }
+ ResultSet rs = dbi.executeSQLRead(dbiSelect);
+ musicKeys.addAll(getMusicKeysWhere(tbl, Utils.parseResults(dbi.getTableInfo(tbl), rs)));
+ rs.getStatement().close();
+ }
+ } catch (JSQLParserException | SQLException e) {
+
+ e.printStackTrace();
+ }
+ System.err.print("MusicKeys:");
+ for(String musicKey:musicKeys) {
+ System.out.print(musicKey + ",");
+ }
+ */
+ return musicKeys;
+ }
+}
diff --git a/src/main/java/com/att/research/mdbc/ProxyStatement.java b/src/main/java/com/att/research/mdbc/ProxyStatement.java
new file mode 100755
index 0000000..0b5edd8
--- /dev/null
+++ b/src/main/java/com/att/research/mdbc/ProxyStatement.java
@@ -0,0 +1,1262 @@
+package com.att.research.mdbc;
+
+import java.io.InputStream;
+import java.io.Reader;
+import java.math.BigDecimal;
+import java.net.URL;
+import java.sql.Array;
+import java.sql.Blob;
+import java.sql.CallableStatement;
+import java.sql.Clob;
+import java.sql.Connection;
+import java.sql.Date;
+import java.sql.NClob;
+import java.sql.ParameterMetaData;
+import java.sql.PreparedStatement;
+import java.sql.Ref;
+import java.sql.ResultSet;
+import java.sql.ResultSetMetaData;
+import java.sql.RowId;
+import java.sql.SQLException;
+import java.sql.SQLWarning;
+import java.sql.SQLXML;
+import java.sql.Statement;
+import java.sql.Time;
+import java.sql.Timestamp;
+import java.util.Calendar;
+import java.util.Map;
+
+import org.apache.log4j.Logger;
+
+import com.att.research.exceptions.QueryException;
+
+/**
+ * ProxyStatement is a proxy Statement that front ends Statements from the underlying JDBC driver. It passes all operations through,
+ * and invokes the MusicSqlManager when there is the possibility that database tables have been created or dropped.
+ *
+ * @author Robert Eby
+ */
+public class ProxyStatement implements CallableStatement {
+ private static final Logger logger = Logger.getLogger(ProxyStatement.class);
+ private static final String DATASTAX_PREFIX = "com.datastax.driver";
+
+ private final Statement stmt; // the Statement that we are proxying
+ private final MusicSqlManager mgr;
+
+	/**
+	 * Creates a proxy around the given JDBC Statement.
+	 *
+	 * @param s the Statement (possibly a PreparedStatement or
+	 *          CallableStatement) whose calls are forwarded
+	 * @param m the MusicSqlManager invoked around intercepted executions
+	 */
+	public ProxyStatement(Statement s, MusicSqlManager m) {
+		this.stmt = s;
+		this.mgr = m;
+	}
+
+	// java.sql.Wrapper interface: forwarded unchanged to the proxied Statement.
+	@Override
+	public <T> T unwrap(Class<T> iface) throws SQLException {
+		return stmt.unwrap(iface);
+	}
+
+	@Override
+	public boolean isWrapperFor(Class<?> iface) throws SQLException {
+		return stmt.isWrapperFor(iface);
+	}
+
+	/**
+	 * Executes a query through the underlying Statement, invoking the
+	 * MusicSqlManager pre/post hooks and synchronizing tables afterwards.
+	 * Exceptions whose class name starts with the DataStax driver prefix are
+	 * deliberately swallowed (the returned ResultSet may then be null); all
+	 * other exceptions propagate to the caller.
+	 *
+	 * @param sql the SQL text to execute
+	 * @return the ResultSet, or null if a DataStax exception was swallowed
+	 */
+	@Override
+	public ResultSet executeQuery(String sql) throws SQLException {
+		logger.debug("executeQuery: "+sql);
+		ResultSet r = null;
+		try {
+			mgr.preStatementHook(sql);
+			r = stmt.executeQuery(sql);
+			mgr.postStatementHook(sql);
+			synchronizeTables(sql);
+		} catch (Exception e) {
+			String nm = e.getClass().getName();
+			// pass the throwable so the stack trace is not lost
+			logger.warn("executeQuery: exception "+nm, e);
+			if (!nm.startsWith(DATASTAX_PREFIX))
+				throw e;
+		}
+		return r;
+	}
+
+	/**
+	 * Executes an update through the underlying Statement, invoking the
+	 * MusicSqlManager pre/post hooks and synchronizing tables afterwards.
+	 * DataStax-driver exceptions are swallowed (0 is then returned); all
+	 * other exceptions propagate.
+	 *
+	 * @param sql the SQL text to execute
+	 * @return the update count, or 0 if a DataStax exception was swallowed
+	 */
+	@Override
+	public int executeUpdate(String sql) throws SQLException {
+		logger.debug("executeUpdate: "+sql);
+		int n = 0;
+		try {
+			mgr.preStatementHook(sql);
+			n = stmt.executeUpdate(sql);
+			mgr.postStatementHook(sql);
+			synchronizeTables(sql);
+		} catch (Exception e) {
+			String nm = e.getClass().getName();
+			// pass the throwable so the stack trace is not lost
+			logger.warn("executeUpdate: exception "+nm, e);
+			if (!nm.startsWith(DATASTAX_PREFIX))
+				throw e;
+		}
+		return n;
+	}
+
+	// ---- Plain delegation to the underlying Statement (no MDBC hooks) ----
+	@Override
+	public void close() throws SQLException {
+		stmt.close();
+	}
+
+	@Override
+	public int getMaxFieldSize() throws SQLException {
+		return stmt.getMaxFieldSize();
+	}
+
+	@Override
+	public void setMaxFieldSize(int max) throws SQLException {
+		stmt.setMaxFieldSize(max);
+	}
+
+	@Override
+	public int getMaxRows() throws SQLException {
+		return stmt.getMaxRows();
+	}
+
+	@Override
+	public void setMaxRows(int max) throws SQLException {
+		stmt.setMaxRows(max);
+	}
+
+	@Override
+	public void setEscapeProcessing(boolean enable) throws SQLException {
+		stmt.setEscapeProcessing(enable);
+	}
+
+	@Override
+	public int getQueryTimeout() throws SQLException {
+		return stmt.getQueryTimeout();
+	}
+
+	@Override
+	public void setQueryTimeout(int seconds) throws SQLException {
+		stmt.setQueryTimeout(seconds);
+	}
+
+	@Override
+	public void cancel() throws SQLException {
+		stmt.cancel();
+	}
+
+	@Override
+	public SQLWarning getWarnings() throws SQLException {
+		return stmt.getWarnings();
+	}
+
+	@Override
+	public void clearWarnings() throws SQLException {
+		stmt.clearWarnings();
+	}
+
+	@Override
+	public void setCursorName(String name) throws SQLException {
+		stmt.setCursorName(name);
+	}
+
+	/**
+	 * Executes arbitrary SQL through the underlying Statement, invoking the
+	 * MusicSqlManager pre/post hooks and synchronizing tables afterwards.
+	 * DataStax-driver exceptions are logged and swallowed (false is then
+	 * returned); all other exceptions propagate.
+	 *
+	 * @param sql the SQL text to execute
+	 * @return true if the result is a ResultSet; false otherwise or if a
+	 *         DataStax exception was swallowed
+	 */
+	@Override
+	public boolean execute(String sql) throws SQLException {
+		logger.debug("execute: "+sql);
+		boolean b = false;
+		try {
+			mgr.preStatementHook(sql);
+			b = stmt.execute(sql);
+			mgr.postStatementHook(sql);
+			synchronizeTables(sql);
+		} catch (Exception e) {
+			String nm = e.getClass().getName();
+			// Note: this seems to be the only call Camunda uses, so it is the only one I am fixing for now.
+			boolean ignore = nm.startsWith(DATASTAX_PREFIX);
+//			ignore |= (nm.startsWith("org.h2.jdbc.JdbcSQLException") && e.getMessage().contains("already exists"));
+			if (ignore) {
+				// pass the throwable so the stack trace is not lost
+				logger.warn("execute: exception (IGNORED) "+nm, e);
+			} else {
+				logger.warn("execute: exception "+nm, e);
+				throw e;
+			}
+		}
+		return b;
+	}
+
+	// ---- Plain delegation to the underlying Statement (no MDBC hooks) ----
+	@Override
+	public ResultSet getResultSet() throws SQLException {
+		return stmt.getResultSet();
+	}
+
+	@Override
+	public int getUpdateCount() throws SQLException {
+		return stmt.getUpdateCount();
+	}
+
+	@Override
+	public boolean getMoreResults() throws SQLException {
+		return stmt.getMoreResults();
+	}
+
+	@Override
+	public void setFetchDirection(int direction) throws SQLException {
+		stmt.setFetchDirection(direction);
+	}
+
+	@Override
+	public int getFetchDirection() throws SQLException {
+		return stmt.getFetchDirection();
+	}
+
+	@Override
+	public void setFetchSize(int rows) throws SQLException {
+		stmt.setFetchSize(rows);
+	}
+
+	@Override
+	public int getFetchSize() throws SQLException {
+		return stmt.getFetchSize();
+	}
+
+	@Override
+	public int getResultSetConcurrency() throws SQLException {
+		return stmt.getResultSetConcurrency();
+	}
+
+	@Override
+	public int getResultSetType() throws SQLException {
+		return stmt.getResultSetType();
+	}
+
+	// NOTE(review): batched SQL bypasses the MusicSqlManager hooks entirely;
+	// see the warning issued by executeBatch().
+	@Override
+	public void addBatch(String sql) throws SQLException {
+		stmt.addBatch(sql);
+	}
+
+	@Override
+	public void clearBatch() throws SQLException {
+		stmt.clearBatch();
+	}
+
+	/**
+	 * Executes the batch on the underlying Statement. MDBC cannot intercept
+	 * batched SQL (no per-statement hooks run), so the results may be
+	 * inconsistent with MUSIC; a warning is logged on every call. Tables are
+	 * still synchronized afterwards. DataStax-driver exceptions are swallowed
+	 * (null is then returned); all others propagate.
+	 *
+	 * @return the per-statement update counts, or null if a DataStax
+	 *         exception was swallowed
+	 */
+	@Override
+	public int[] executeBatch() throws SQLException {
+		logger.debug("executeBatch");
+		int[] n = null;
+		try {
+			logger.warn("executeBatch() is not supported by MDBC; your results may be incorrect as a result.");
+			n = stmt.executeBatch();
+			synchronizeTables(null);
+		} catch (Exception e) {
+			String nm = e.getClass().getName();
+			// pass the throwable so the stack trace is not lost
+			logger.warn("executeBatch: exception "+nm, e);
+			if (!nm.startsWith(DATASTAX_PREFIX))
+				throw e;
+		}
+		return n;
+	}
+
+	// ---- Plain delegation to the underlying Statement (no MDBC hooks) ----
+	@Override
+	public Connection getConnection() throws SQLException {
+		return stmt.getConnection();
+	}
+
+	@Override
+	public boolean getMoreResults(int current) throws SQLException {
+		return stmt.getMoreResults(current);
+	}
+
+	@Override
+	public ResultSet getGeneratedKeys() throws SQLException {
+		return stmt.getGeneratedKeys();
+	}
+
+	/**
+	 * Executes an update with the auto-generated-keys flag, invoking the MDBC
+	 * hooks. DataStax-driver exceptions are swallowed (0 is then returned).
+	 */
+	@Override
+	public int executeUpdate(String sql, int autoGeneratedKeys) throws SQLException {
+		logger.debug("executeUpdate: "+sql);
+		int n = 0;
+		try {
+			mgr.preStatementHook(sql);
+			n = stmt.executeUpdate(sql, autoGeneratedKeys);
+			mgr.postStatementHook(sql);
+			synchronizeTables(sql);
+		} catch (Exception e) {
+			String nm = e.getClass().getName();
+			// pass the throwable so the stack trace is not lost
+			logger.warn("executeUpdate: exception "+nm, e);
+			if (!nm.startsWith(DATASTAX_PREFIX))
+				throw e;
+		}
+		return n;
+	}
+
+	/**
+	 * Executes an update returning keys for the given column indexes,
+	 * invoking the MDBC hooks. DataStax-driver exceptions are swallowed
+	 * (0 is then returned).
+	 */
+	@Override
+	public int executeUpdate(String sql, int[] columnIndexes) throws SQLException {
+		logger.debug("executeUpdate: "+sql);
+		int n = 0;
+		try {
+			mgr.preStatementHook(sql);
+			n = stmt.executeUpdate(sql, columnIndexes);
+			mgr.postStatementHook(sql);
+			synchronizeTables(sql);
+		} catch (Exception e) {
+			String nm = e.getClass().getName();
+			// pass the throwable so the stack trace is not lost
+			logger.warn("executeUpdate: exception "+nm, e);
+			if (!nm.startsWith(DATASTAX_PREFIX))
+				throw e;
+		}
+		return n;
+	}
+
+	/**
+	 * Executes an update returning keys for the given column names, invoking
+	 * the MDBC hooks. DataStax-driver exceptions are swallowed (0 is then
+	 * returned).
+	 */
+	@Override
+	public int executeUpdate(String sql, String[] columnNames) throws SQLException {
+		logger.debug("executeUpdate: "+sql);
+		int n = 0;
+		try {
+			mgr.preStatementHook(sql);
+			n = stmt.executeUpdate(sql, columnNames);
+			mgr.postStatementHook(sql);
+			synchronizeTables(sql);
+		} catch (Exception e) {
+			String nm = e.getClass().getName();
+			// pass the throwable so the stack trace is not lost
+			logger.warn("executeUpdate: exception "+nm, e);
+			if (!nm.startsWith(DATASTAX_PREFIX))
+				throw e;
+		}
+		return n;
+	}
+
+	/**
+	 * Executes SQL with the auto-generated-keys flag, invoking the MDBC
+	 * hooks. DataStax-driver exceptions are swallowed (false is then
+	 * returned).
+	 */
+	@Override
+	public boolean execute(String sql, int autoGeneratedKeys) throws SQLException {
+		logger.debug("execute: "+sql);
+		boolean b = false;
+		try {
+			mgr.preStatementHook(sql);
+			b = stmt.execute(sql, autoGeneratedKeys);
+			mgr.postStatementHook(sql);
+			synchronizeTables(sql);
+		} catch (Exception e) {
+			String nm = e.getClass().getName();
+			// pass the throwable so the stack trace is not lost
+			logger.warn("execute: exception "+nm, e);
+			if (!nm.startsWith(DATASTAX_PREFIX))
+				throw e;
+		}
+		return b;
+	}
+
+	/**
+	 * Executes SQL returning keys for the given column indexes, invoking the
+	 * MDBC hooks. DataStax-driver exceptions are swallowed (false is then
+	 * returned).
+	 */
+	@Override
+	public boolean execute(String sql, int[] columnIndexes) throws SQLException {
+		logger.debug("execute: "+sql);
+		boolean b = false;
+		try {
+			mgr.preStatementHook(sql);
+			b = stmt.execute(sql, columnIndexes);
+			mgr.postStatementHook(sql);
+			synchronizeTables(sql);
+		} catch (Exception e) {
+			String nm = e.getClass().getName();
+			// pass the throwable so the stack trace is not lost
+			logger.warn("execute: exception "+nm, e);
+			if (!nm.startsWith(DATASTAX_PREFIX))
+				throw e;
+		}
+		return b;
+	}
+
+	/**
+	 * Executes SQL returning keys for the given column names, invoking the
+	 * MDBC hooks. DataStax-driver exceptions are swallowed (false is then
+	 * returned).
+	 */
+	@Override
+	public boolean execute(String sql, String[] columnNames) throws SQLException {
+		logger.debug("execute: "+sql);
+		boolean b = false;
+		try {
+			mgr.preStatementHook(sql);
+			b = stmt.execute(sql, columnNames);
+			mgr.postStatementHook(sql);
+			synchronizeTables(sql);
+		} catch (Exception e) {
+			String nm = e.getClass().getName();
+			// pass the throwable so the stack trace is not lost
+			logger.warn("execute: exception "+nm, e);
+			if (!nm.startsWith(DATASTAX_PREFIX))
+				throw e;
+		}
+		return b;
+	}
+
+	// ---- Plain delegation to the underlying Statement (no MDBC hooks) ----
+	@Override
+	public int getResultSetHoldability() throws SQLException {
+		return stmt.getResultSetHoldability();
+	}
+
+	@Override
+	public boolean isClosed() throws SQLException {
+		return stmt.isClosed();
+	}
+
+	@Override
+	public void setPoolable(boolean poolable) throws SQLException {
+		stmt.setPoolable(poolable);
+	}
+
+	@Override
+	public boolean isPoolable() throws SQLException {
+		return stmt.isPoolable();
+	}
+
+	@Override
+	public void closeOnCompletion() throws SQLException {
+		stmt.closeOnCompletion();
+	}
+
+	@Override
+	public boolean isCloseOnCompletion() throws SQLException {
+		return stmt.isCloseOnCompletion();
+	}
+
+	// ---- PreparedStatement delegation ----
+	// These methods cast the proxied Statement to PreparedStatement, so they
+	// throw ClassCastException if this proxy wraps a plain Statement.
+	// NOTE(review): unlike the String-argument execute* methods above, these
+	// no-argument variants do NOT invoke the MusicSqlManager hooks or
+	// synchronizeTables — presumably because the SQL text is not available
+	// here; confirm whether prepared executions should be intercepted too.
+	@Override
+	public ResultSet executeQuery() throws SQLException {
+		logger.debug("executeQuery");
+		return ((PreparedStatement)stmt).executeQuery();
+	}
+
+	@Override
+	public int executeUpdate() throws SQLException {
+		logger.debug("executeUpdate");
+		return ((PreparedStatement)stmt).executeUpdate();
+	}
+
+	@Override
+	public void setNull(int parameterIndex, int sqlType) throws SQLException {
+		((PreparedStatement)stmt).setNull(parameterIndex, sqlType);
+	}
+
+	@Override
+	public void setBoolean(int parameterIndex, boolean x) throws SQLException {
+		((PreparedStatement)stmt).setBoolean(parameterIndex, x);
+	}
+
+	@Override
+	public void setByte(int parameterIndex, byte x) throws SQLException {
+		((PreparedStatement)stmt).setByte(parameterIndex, x);
+	}
+
+	@Override
+	public void setShort(int parameterIndex, short x) throws SQLException {
+		((PreparedStatement)stmt).setShort(parameterIndex, x);
+	}
+
+	@Override
+	public void setInt(int parameterIndex, int x) throws SQLException {
+		((PreparedStatement)stmt).setInt(parameterIndex, x);
+	}
+
+	@Override
+	public void setLong(int parameterIndex, long x) throws SQLException {
+		((PreparedStatement)stmt).setLong(parameterIndex, x);
+	}
+
+	@Override
+	public void setFloat(int parameterIndex, float x) throws SQLException {
+		((PreparedStatement)stmt).setFloat(parameterIndex, x);
+	}
+
+	@Override
+	public void setDouble(int parameterIndex, double x) throws SQLException {
+		((PreparedStatement)stmt).setDouble(parameterIndex, x);
+	}
+
+	@Override
+	public void setBigDecimal(int parameterIndex, BigDecimal x) throws SQLException {
+		((PreparedStatement)stmt).setBigDecimal(parameterIndex, x);
+	}
+
+	@Override
+	public void setString(int parameterIndex, String x) throws SQLException {
+		((PreparedStatement)stmt).setString(parameterIndex, x);
+	}
+
+	@Override
+	public void setBytes(int parameterIndex, byte[] x) throws SQLException {
+		((PreparedStatement)stmt).setBytes(parameterIndex, x);
+	}
+
+	@Override
+	public void setDate(int parameterIndex, Date x) throws SQLException {
+		((PreparedStatement)stmt).setDate(parameterIndex, x);
+	}
+
+	@Override
+	public void setTime(int parameterIndex, Time x) throws SQLException {
+		((PreparedStatement)stmt).setTime(parameterIndex, x);
+	}
+
+	@Override
+	public void setTimestamp(int parameterIndex, Timestamp x) throws SQLException {
+		((PreparedStatement)stmt).setTimestamp(parameterIndex, x);
+	}
+
+	@Override
+	public void setAsciiStream(int parameterIndex, InputStream x, int length) throws SQLException {
+		((PreparedStatement)stmt).setAsciiStream(parameterIndex, x, length);
+	}
+
+	@SuppressWarnings("deprecation")
+	@Override
+	public void setUnicodeStream(int parameterIndex, InputStream x, int length) throws SQLException {
+		((PreparedStatement)stmt).setUnicodeStream(parameterIndex, x, length);
+	}
+
+	@Override
+	public void setBinaryStream(int parameterIndex, InputStream x, int length) throws SQLException {
+		((PreparedStatement)stmt).setBinaryStream(parameterIndex, x, length);
+	}
+
+	@Override
+	public void clearParameters() throws SQLException {
+		((PreparedStatement)stmt).clearParameters();
+	}
+
+	@Override
+	public void setObject(int parameterIndex, Object x, int targetSqlType) throws SQLException {
+		((PreparedStatement)stmt).setObject(parameterIndex, x, targetSqlType);
+	}
+
+	@Override
+	public void setObject(int parameterIndex, Object x) throws SQLException {
+		((PreparedStatement)stmt).setObject(parameterIndex, x);
+	}
+
+	@Override
+	public boolean execute() throws SQLException {
+		return ((PreparedStatement)stmt).execute();
+	}
+
+	@Override
+	public void addBatch() throws SQLException {
+		((PreparedStatement)stmt).addBatch();
+	}
+
+	@Override
+	public void setCharacterStream(int parameterIndex, Reader reader, int length) throws SQLException {
+		((PreparedStatement)stmt).setCharacterStream(parameterIndex, reader, length);
+	}
+
+	@Override
+	public void setRef(int parameterIndex, Ref x) throws SQLException {
+		((PreparedStatement)stmt).setRef(parameterIndex, x);
+	}
+
+	@Override
+	public void setBlob(int parameterIndex, Blob x) throws SQLException {
+		((PreparedStatement)stmt).setBlob(parameterIndex, x);
+	}
+
+	@Override
+	public void setClob(int parameterIndex, Clob x) throws SQLException {
+		((PreparedStatement)stmt).setClob(parameterIndex, x);
+	}
+
+	@Override
+	public void setArray(int parameterIndex, Array x) throws SQLException {
+		((PreparedStatement)stmt).setArray(parameterIndex, x);
+	}
+
+	@Override
+	public ResultSetMetaData getMetaData() throws SQLException {
+		return ((PreparedStatement)stmt).getMetaData();
+	}
+
+	@Override
+	public void setDate(int parameterIndex, Date x, Calendar cal) throws SQLException {
+		((PreparedStatement)stmt).setDate(parameterIndex, x, cal);
+	}
+
+	@Override
+	public void setTime(int parameterIndex, Time x, Calendar cal) throws SQLException {
+		((PreparedStatement)stmt).setTime(parameterIndex, x, cal);
+	}
+
+	// ---- CallableStatement delegation ----
+	// These methods cast the proxied Statement to CallableStatement, so they
+	// throw ClassCastException if this proxy wraps a plain Statement or a
+	// PreparedStatement. (Some of the methods below are declared by
+	// PreparedStatement but cast to CallableStatement anyway; the call is
+	// forwarded identically either way.)
+	@Override
+	public void setTimestamp(int parameterIndex, Timestamp x, Calendar cal) throws SQLException {
+		((CallableStatement)stmt).setTimestamp(parameterIndex, x, cal);
+	}
+
+	@Override
+	public void setNull(int parameterIndex, int sqlType, String typeName) throws SQLException {
+		((CallableStatement)stmt).setNull(parameterIndex, sqlType, typeName);
+	}
+
+	@Override
+	public void setURL(int parameterIndex, URL x) throws SQLException {
+		((CallableStatement)stmt).setURL(parameterIndex, x);
+	}
+
+	@Override
+	public ParameterMetaData getParameterMetaData() throws SQLException {
+		return ((CallableStatement)stmt).getParameterMetaData();
+	}
+
+	@Override
+	public void setRowId(int parameterIndex, RowId x) throws SQLException {
+		((CallableStatement)stmt).setRowId(parameterIndex, x);
+	}
+
+	@Override
+	public void setNString(int parameterIndex, String value) throws SQLException {
+		((CallableStatement)stmt).setNString(parameterIndex, value);
+	}
+
+	@Override
+	public void setNCharacterStream(int parameterIndex, Reader value, long length) throws SQLException {
+		((CallableStatement)stmt).setNCharacterStream(parameterIndex, value, length);
+	}
+
+	@Override
+	public void setNClob(int parameterIndex, NClob value) throws SQLException {
+		((CallableStatement)stmt).setNClob(parameterIndex, value);
+	}
+
+	@Override
+	public void setClob(int parameterIndex, Reader reader, long length) throws SQLException {
+		((CallableStatement)stmt).setClob(parameterIndex, reader, length);
+	}
+
+	@Override
+	public void setBlob(int parameterIndex, InputStream inputStream, long length) throws SQLException {
+		((CallableStatement)stmt).setBlob(parameterIndex, inputStream, length);
+	}
+
+	@Override
+	public void setNClob(int parameterIndex, Reader reader, long length) throws SQLException {
+		((CallableStatement)stmt).setNClob(parameterIndex, reader, length);
+	}
+
+	@Override
+	public void setSQLXML(int parameterIndex, SQLXML xmlObject) throws SQLException {
+		((CallableStatement)stmt).setSQLXML(parameterIndex, xmlObject);
+	}
+
+	@Override
+	public void setObject(int parameterIndex, Object x, int targetSqlType, int scaleOrLength) throws SQLException {
+		((CallableStatement)stmt).setObject(parameterIndex, x, targetSqlType, scaleOrLength);
+	}
+
+	@Override
+	public void setAsciiStream(int parameterIndex, InputStream x, long length) throws SQLException {
+		((CallableStatement)stmt).setAsciiStream(parameterIndex, x, length);
+	}
+
+	@Override
+	public void setBinaryStream(int parameterIndex, InputStream x, long length) throws SQLException {
+		((CallableStatement)stmt).setBinaryStream(parameterIndex, x, length);
+	}
+
+	@Override
+	public void setCharacterStream(int parameterIndex, Reader reader, long length) throws SQLException {
+		((CallableStatement)stmt).setCharacterStream(parameterIndex, reader, length);
+	}
+
+	@Override
+	public void setAsciiStream(int parameterIndex, InputStream x) throws SQLException {
+		((CallableStatement)stmt).setAsciiStream(parameterIndex, x);
+	}
+
+	@Override
+	public void setBinaryStream(int parameterIndex, InputStream x) throws SQLException {
+		((CallableStatement)stmt).setBinaryStream(parameterIndex, x);
+	}
+
+	@Override
+	public void setCharacterStream(int parameterIndex, Reader reader) throws SQLException {
+		((CallableStatement)stmt).setCharacterStream(parameterIndex, reader);
+	}
+
+	@Override
+	public void setNCharacterStream(int parameterIndex, Reader value) throws SQLException {
+		((CallableStatement)stmt).setNCharacterStream(parameterIndex, value);
+	}
+
+	@Override
+	public void setClob(int parameterIndex, Reader reader) throws SQLException {
+		((CallableStatement)stmt).setClob(parameterIndex, reader);
+	}
+
+	@Override
+	public void setBlob(int parameterIndex, InputStream inputStream) throws SQLException {
+		((CallableStatement)stmt).setBlob(parameterIndex, inputStream);
+	}
+
+	@Override
+	public void setNClob(int parameterIndex, Reader reader) throws SQLException {
+		((CallableStatement)stmt).setNClob(parameterIndex, reader);
+	}
+
+	@Override
+	public void registerOutParameter(int parameterIndex, int sqlType) throws SQLException {
+		((CallableStatement)stmt).registerOutParameter(parameterIndex, sqlType);
+	}
+
+	@Override
+	public void registerOutParameter(int parameterIndex, int sqlType, int scale) throws SQLException {
+		((CallableStatement)stmt).registerOutParameter(parameterIndex, sqlType, scale);
+	}
+
+	@Override
+	public boolean wasNull() throws SQLException {
+		return ((CallableStatement)stmt).wasNull();
+	}
+
+	@Override
+	public String getString(int parameterIndex) throws SQLException {
+		return ((CallableStatement)stmt).getString(parameterIndex);
+	}
+
+	@Override
+	public boolean getBoolean(int parameterIndex) throws SQLException {
+		return ((CallableStatement)stmt).getBoolean(parameterIndex);
+	}
+
+	@Override
+	public byte getByte(int parameterIndex) throws SQLException {
+		return ((CallableStatement)stmt).getByte(parameterIndex);
+	}
+
+	@Override
+	public short getShort(int parameterIndex) throws SQLException {
+		return ((CallableStatement)stmt).getShort(parameterIndex);
+	}
+
+	@Override
+	public int getInt(int parameterIndex) throws SQLException {
+		return ((CallableStatement)stmt).getInt(parameterIndex);
+	}
+
+	@Override
+	public long getLong(int parameterIndex) throws SQLException {
+		return ((CallableStatement)stmt).getLong(parameterIndex);
+	}
+
+	@Override
+	public float getFloat(int parameterIndex) throws SQLException {
+		return ((CallableStatement)stmt).getFloat(parameterIndex);
+	}
+
+	@Override
+	public double getDouble(int parameterIndex) throws SQLException {
+		return ((CallableStatement)stmt).getDouble(parameterIndex);
+	}
+
+	@SuppressWarnings("deprecation")
+	@Override
+	public BigDecimal getBigDecimal(int parameterIndex, int scale) throws SQLException {
+		return ((CallableStatement)stmt).getBigDecimal(parameterIndex, scale);
+	}
+
+	@Override
+	public byte[] getBytes(int parameterIndex) throws SQLException {
+		return ((CallableStatement)stmt).getBytes(parameterIndex);
+	}
+
+	@Override
+	public Date getDate(int parameterIndex) throws SQLException {
+		return ((CallableStatement)stmt).getDate(parameterIndex);
+	}
+
+	@Override
+	public Time getTime(int parameterIndex) throws SQLException {
+		return ((CallableStatement)stmt).getTime(parameterIndex);
+	}
+
+	@Override
+	public Timestamp getTimestamp(int parameterIndex) throws SQLException {
+		return ((CallableStatement)stmt).getTimestamp(parameterIndex);
+	}
+
+	@Override
+	public Object getObject(int parameterIndex) throws SQLException {
+		return ((CallableStatement)stmt).getObject(parameterIndex);
+	}
+
+	@Override
+	public BigDecimal getBigDecimal(int parameterIndex) throws SQLException {
+		return ((CallableStatement)stmt).getBigDecimal(parameterIndex);
+	}
+
+	@Override
+	public Object getObject(int parameterIndex, Map<String, Class<?>> map) throws SQLException {
+		return ((CallableStatement)stmt).getObject(parameterIndex, map);
+	}
+
+	@Override
+	public Ref getRef(int parameterIndex) throws SQLException {
+		return ((CallableStatement)stmt).getRef(parameterIndex);
+	}
+
+	@Override
+	public Blob getBlob(int parameterIndex) throws SQLException {
+		return ((CallableStatement)stmt).getBlob(parameterIndex);
+	}
+
+	@Override
+	public Clob getClob(int parameterIndex) throws SQLException {
+		return ((CallableStatement)stmt).getClob(parameterIndex);
+	}
+
+	@Override
+	public Array getArray(int parameterIndex) throws SQLException {
+		return ((CallableStatement)stmt).getArray(parameterIndex);
+	}
+
+	@Override
+	public Date getDate(int parameterIndex, Calendar cal) throws SQLException {
+		return ((CallableStatement)stmt).getDate(parameterIndex, cal);
+	}
+
+	@Override
+	public Time getTime(int parameterIndex, Calendar cal) throws SQLException {
+		return ((CallableStatement)stmt).getTime(parameterIndex, cal);
+	}
+
+	@Override
+	public Timestamp getTimestamp(int parameterIndex, Calendar cal) throws SQLException {
+		return ((CallableStatement)stmt).getTimestamp(parameterIndex, cal);
+	}
+
+	@Override
+	public void registerOutParameter(int parameterIndex, int sqlType, String typeName) throws SQLException {
+		((CallableStatement)stmt).registerOutParameter(parameterIndex, sqlType, typeName);
+	}
+
+	@Override
+	public void registerOutParameter(String parameterName, int sqlType) throws SQLException {
+		((CallableStatement)stmt).registerOutParameter(parameterName, sqlType);
+	}
+
+	@Override
+	public void registerOutParameter(String parameterName, int sqlType, int scale) throws SQLException {
+		((CallableStatement)stmt).registerOutParameter(parameterName, sqlType, scale);
+	}
+
+	@Override
+	public void registerOutParameter(String parameterName, int sqlType, String typeName) throws SQLException {
+		((CallableStatement)stmt).registerOutParameter(parameterName, sqlType, typeName);
+	}
+
+	@Override
+	public URL getURL(int parameterIndex) throws SQLException {
+		return ((CallableStatement)stmt).getURL(parameterIndex);
+	}
+
+	@Override
+	public void setURL(String parameterName, URL val) throws SQLException {
+		((CallableStatement)stmt).setURL(parameterName, val);
+	}
+
+	@Override
+	public void setNull(String parameterName, int sqlType) throws SQLException {
+		((CallableStatement)stmt).setNull(parameterName, sqlType);
+	}
+
+	@Override
+	public void setBoolean(String parameterName, boolean x) throws SQLException {
+		((CallableStatement)stmt).setBoolean(parameterName, x);
+	}
+
+	@Override
+	public void setByte(String parameterName, byte x) throws SQLException {
+		((CallableStatement)stmt).setByte(parameterName, x);
+	}
+
+	@Override
+	public void setShort(String parameterName, short x) throws SQLException {
+		((CallableStatement)stmt).setShort(parameterName, x);
+	}
+
+	@Override
+	public void setInt(String parameterName, int x) throws SQLException {
+		((CallableStatement)stmt).setInt(parameterName, x);
+	}
+
+	@Override
+	public void setLong(String parameterName, long x) throws SQLException {
+		((CallableStatement)stmt).setLong(parameterName, x);
+	}
+
+	@Override
+	public void setFloat(String parameterName, float x) throws SQLException {
+		((CallableStatement)stmt).setFloat(parameterName, x);
+	}
+
+	@Override
+	public void setDouble(String parameterName, double x) throws SQLException {
+		((CallableStatement)stmt).setDouble(parameterName, x);
+	}
+
+	@Override
+	public void setBigDecimal(String parameterName, BigDecimal x) throws SQLException {
+		((CallableStatement)stmt).setBigDecimal(parameterName, x);
+	}
+
+	@Override
+	public void setString(String parameterName, String x) throws SQLException {
+		((CallableStatement)stmt).setString(parameterName, x);
+	}
+
+	@Override
+	public void setBytes(String parameterName, byte[] x) throws SQLException {
+		((CallableStatement)stmt).setBytes(parameterName, x);
+	}
+
+	@Override
+	public void setDate(String parameterName, Date x) throws SQLException {
+		((CallableStatement)stmt).setDate(parameterName, x);
+	}
+
+	@Override
+	public void setTime(String parameterName, Time x) throws SQLException {
+		((CallableStatement)stmt).setTime(parameterName, x);
+	}
+
+	@Override
+	public void setTimestamp(String parameterName, Timestamp x) throws SQLException {
+		((CallableStatement)stmt).setTimestamp(parameterName, x);
+	}
+
+	@Override
+	public void setAsciiStream(String parameterName, InputStream x, int length) throws SQLException {
+		((CallableStatement)stmt).setAsciiStream(parameterName, x, length);
+	}
+
+	@Override
+	public void setBinaryStream(String parameterName, InputStream x, int length) throws SQLException {
+		((CallableStatement)stmt).setBinaryStream(parameterName, x, length);
+	}
+
+	@Override
+	public void setObject(String parameterName, Object x, int targetSqlType, int scale) throws SQLException {
+		((CallableStatement)stmt).setObject(parameterName, x, targetSqlType, scale);
+	}
+
+	@Override
+	public void setObject(String parameterName, Object x, int targetSqlType) throws SQLException {
+		((CallableStatement)stmt).setObject(parameterName, x, targetSqlType);
+	}
+
+	@Override
+	public void setObject(String parameterName, Object x) throws SQLException {
+		((CallableStatement)stmt).setObject(parameterName, x);
+	}
+
+	@Override
+	public void setCharacterStream(String parameterName, Reader reader, int length) throws SQLException {
+		((CallableStatement)stmt).setCharacterStream(parameterName, reader, length);
+	}
+
+	@Override
+	public void setDate(String parameterName, Date x, Calendar cal) throws SQLException {
+		((CallableStatement)stmt).setDate(parameterName, x, cal);
+	}
+
+	@Override
+	public void setTime(String parameterName, Time x, Calendar cal) throws SQLException {
+		((CallableStatement)stmt).setTime(parameterName, x, cal);
+	}
+
+	@Override
+	public void setTimestamp(String parameterName, Timestamp x, Calendar cal) throws SQLException {
+		((CallableStatement)stmt).setTimestamp(parameterName, x, cal);
+	}
+
+	@Override
+	public void setNull(String parameterName, int sqlType, String typeName) throws SQLException {
+		((CallableStatement)stmt).setNull(parameterName, sqlType, typeName);
+	}
+
+	@Override
+	public String getString(String parameterName) throws SQLException {
+		return ((CallableStatement)stmt).getString(parameterName);
+	}
+
+	@Override
+	public boolean getBoolean(String parameterName) throws SQLException {
+		return ((CallableStatement)stmt).getBoolean(parameterName);
+	}
+
+	@Override
+	public byte getByte(String parameterName) throws SQLException {
+		return ((CallableStatement)stmt).getByte(parameterName);
+	}
+
+	@Override
+	public short getShort(String parameterName) throws SQLException {
+		return ((CallableStatement)stmt).getShort(parameterName);
+	}
+
+	@Override
+	public int getInt(String parameterName) throws SQLException {
+		return ((CallableStatement)stmt).getInt(parameterName);
+	}
+
+	@Override
+	public long getLong(String parameterName) throws SQLException {
+		return ((CallableStatement)stmt).getLong(parameterName);
+	}
+
+	@Override
+	public float getFloat(String parameterName) throws SQLException {
+		return ((CallableStatement)stmt).getFloat(parameterName);
+	}
+
+	@Override
+	public double getDouble(String parameterName) throws SQLException {
+		return ((CallableStatement)stmt).getDouble(parameterName);
+	}
+
+	@Override
+	public byte[] getBytes(String parameterName) throws SQLException {
+		return ((CallableStatement)stmt).getBytes(parameterName);
+	}
+
+	@Override
+	public Date getDate(String parameterName) throws SQLException {
+		return ((CallableStatement)stmt).getDate(parameterName);
+	}
+
+	@Override
+	public Time getTime(String parameterName) throws SQLException {
+		return ((CallableStatement)stmt).getTime(parameterName);
+	}
+
+	@Override
+	public Timestamp getTimestamp(String parameterName) throws SQLException {
+		return ((CallableStatement)stmt).getTimestamp(parameterName);
+	}
+
+	@Override
+	public Object getObject(String parameterName) throws SQLException {
+		return ((CallableStatement)stmt).getObject(parameterName);
+	}
+
+	@Override
+	public BigDecimal getBigDecimal(String parameterName) throws SQLException {
+		return ((CallableStatement)stmt).getBigDecimal(parameterName);
+	}
+
+	@Override
+	public Object getObject(String parameterName, Map<String, Class<?>> map) throws SQLException {
+		return ((CallableStatement)stmt).getObject(parameterName, map);
+	}
+
+	@Override
+	public Ref getRef(String parameterName) throws SQLException {
+		return ((CallableStatement)stmt).getRef(parameterName);
+	}
+
+	@Override
+	public Blob getBlob(String parameterName) throws SQLException {
+		return ((CallableStatement)stmt).getBlob(parameterName);
+	}
+
+	@Override
+	public Clob getClob(String parameterName) throws SQLException {
+		return ((CallableStatement)stmt).getClob(parameterName);
+	}
+
+	@Override
+	public Array getArray(String parameterName) throws SQLException {
+		return ((CallableStatement)stmt).getArray(parameterName);
+	}
+
+	@Override
+	public Date getDate(String parameterName, Calendar cal) throws SQLException {
+		return ((CallableStatement)stmt).getDate(parameterName, cal);
+	}
+
+	@Override
+	public Time getTime(String parameterName, Calendar cal) throws SQLException {
+		return ((CallableStatement)stmt).getTime(parameterName, cal);
+	}
+
+	@Override
+	public Timestamp getTimestamp(String parameterName, Calendar cal) throws SQLException {
+		return ((CallableStatement)stmt).getTimestamp(parameterName, cal);
+	}
+
+	@Override
+	public URL getURL(String parameterName) throws SQLException {
+		return ((CallableStatement)stmt).getURL(parameterName);
+	}
+
+	@Override
+	public RowId getRowId(int parameterIndex) throws SQLException {
+		return ((CallableStatement)stmt).getRowId(parameterIndex);
+	}
+
+	@Override
+	public RowId getRowId(String parameterName) throws SQLException {
+		return ((CallableStatement)stmt).getRowId(parameterName);
+	}
+
+	@Override
+	public void setRowId(String parameterName, RowId x) throws SQLException {
+		((CallableStatement)stmt).setRowId(parameterName, x);
+	}
+
+	@Override
+	public void setNString(String parameterName, String value) throws SQLException {
+		((CallableStatement)stmt).setNString(parameterName, value);
+	}
+
+	@Override
+	public void setNCharacterStream(String parameterName, Reader value, long length) throws SQLException {
+		((CallableStatement)stmt).setNCharacterStream(parameterName, value, length);
+	}
+
+	@Override
+	public void setNClob(String parameterName, NClob value) throws SQLException {
+		((CallableStatement)stmt).setNClob(parameterName, value);
+	}
+
+	@Override
+	public void setClob(String parameterName, Reader reader, long length) throws SQLException {
+		((CallableStatement)stmt).setClob(parameterName, reader, length);
+	}
+
+	@Override
+	public void setBlob(String parameterName, InputStream inputStream, long length) throws SQLException {
+		((CallableStatement)stmt).setBlob(parameterName, inputStream, length);
+	}
+
+	@Override
+	public void setNClob(String parameterName, Reader reader, long length) throws SQLException {
+		((CallableStatement)stmt).setNClob(parameterName, reader, length);
+	}
+
+	@Override
+	public NClob getNClob(int parameterIndex) throws SQLException {
+		return ((CallableStatement)stmt).getNClob(parameterIndex);
+	}
+
+	@Override
+	public NClob getNClob(String parameterName) throws SQLException {
+		return ((CallableStatement)stmt).getNClob(parameterName);
+	}
+
+	@Override
+	public void setSQLXML(String parameterName, SQLXML xmlObject) throws SQLException {
+		((CallableStatement)stmt).setSQLXML(parameterName, xmlObject);
+	}
+
+	@Override
+	public SQLXML getSQLXML(int parameterIndex) throws SQLException {
+		return ((CallableStatement)stmt).getSQLXML(parameterIndex);
+	}
+
+	@Override
+	public SQLXML getSQLXML(String parameterName) throws SQLException {
+		return ((CallableStatement)stmt).getSQLXML(parameterName);
+	}
+
+	@Override
+	public String getNString(int parameterIndex) throws SQLException {
+		return ((CallableStatement)stmt).getNString(parameterIndex);
+	}
+
+	@Override
+	public String getNString(String parameterName) throws SQLException {
+		return ((CallableStatement)stmt).getNString(parameterName);
+	}
+
+ @Override
+ public Reader getNCharacterStream(int parameterIndex) throws SQLException {
+ return ((CallableStatement)stmt).getNCharacterStream(parameterIndex);
+ }
+
+ @Override
+ public Reader getNCharacterStream(String parameterName) throws SQLException {
+ return ((CallableStatement)stmt).getNCharacterStream(parameterName);
+ }
+
+ @Override
+ public Reader getCharacterStream(int parameterIndex) throws SQLException {
+ return ((CallableStatement)stmt).getCharacterStream(parameterIndex);
+ }
+
+ @Override
+ public Reader getCharacterStream(String parameterName) throws SQLException {
+ return ((CallableStatement)stmt).getCharacterStream(parameterName);
+ }
+
+ @Override
+ public void setBlob(String parameterName, Blob x) throws SQLException {
+ ((CallableStatement)stmt).setBlob(parameterName, x);
+ }
+
+ @Override
+ public void setClob(String parameterName, Clob x) throws SQLException {
+ ((CallableStatement)stmt).setClob(parameterName, x);
+ }
+
+ @Override
+ public void setAsciiStream(String parameterName, InputStream x, long length) throws SQLException {
+ ((CallableStatement)stmt).setAsciiStream(parameterName, x, length);
+ }
+
+ @Override
+ public void setBinaryStream(String parameterName, InputStream x, long length) throws SQLException {
+ ((CallableStatement)stmt).setBinaryStream(parameterName, x, length);
+ }
+
+ @Override
+ public void setCharacterStream(String parameterName, Reader reader, long length) throws SQLException {
+ ((CallableStatement)stmt).setCharacterStream(parameterName, reader, length);
+ }
+
+ @Override
+ public void setAsciiStream(String parameterName, InputStream x) throws SQLException {
+ ((CallableStatement)stmt).setAsciiStream(parameterName, x);
+ }
+
+ @Override
+ public void setBinaryStream(String parameterName, InputStream x) throws SQLException {
+ ((CallableStatement)stmt).setBinaryStream(parameterName, x);
+ }
+
+ @Override
+ public void setCharacterStream(String parameterName, Reader reader) throws SQLException {
+ ((CallableStatement)stmt).setCharacterStream(parameterName, reader);
+ }
+
+ @Override
+ public void setNCharacterStream(String parameterName, Reader value) throws SQLException {
+ ((CallableStatement)stmt).setNCharacterStream(parameterName, value);
+ }
+
+ @Override
+ public void setClob(String parameterName, Reader reader) throws SQLException {
+ ((CallableStatement)stmt).setClob(parameterName, reader);
+ }
+
+ @Override
+ public void setBlob(String parameterName, InputStream inputStream) throws SQLException {
+ ((CallableStatement)stmt).setBlob(parameterName, inputStream);
+ }
+
+ @Override
+ public void setNClob(String parameterName, Reader reader) throws SQLException {
+ ((CallableStatement)stmt).setNClob(parameterName, reader);
+ }
+
+ @Override
+ public <T> T getObject(int parameterIndex, Class<T> type) throws SQLException {
+ return ((CallableStatement)stmt).getObject(parameterIndex, type);
+ }
+
+ @Override
+ public <T> T getObject(String parameterName, Class<T> type) throws SQLException {
+ return ((CallableStatement)stmt).getObject(parameterName, type);
+ }
+
+ private void synchronizeTables(String sql) {
+ if (sql == null || sql.trim().toLowerCase().startsWith("create")) {
+ if (mgr != null) {
+ try {
+ mgr.synchronizeTables();
+ } catch (QueryException e) {
+
+ e.printStackTrace();
+ }
+ }
+ }
+ }
+}
diff --git a/src/main/java/com/att/research/mdbc/Range.java b/src/main/java/com/att/research/mdbc/Range.java
new file mode 100644
index 0000000..4d80a51
--- /dev/null
+++ b/src/main/java/com/att/research/mdbc/Range.java
@@ -0,0 +1,34 @@
+package com.att.research.mdbc;
+
+import java.io.Serializable;
+
+
+/**
+ * This class represent a range of the whole database
+ * For now a range represents directly a table in Cassandra
+ * In the future we may decide to partition ranges differently
+ * @author Enrique Saurez
+ */
+public class Range implements Serializable {
+
+ private static final long serialVersionUID = 1610744496930800088L;
+
+ final public String table;
+
+ public Range(String table) {
+ this.table = table;
+ }
+
+ /**
+ * Compares to Range types
+ * @param other the other range against which this is compared
+ * @return the equality result
+ */
+ public boolean equal(Range other) {
+ return (table == other.table);
+ }
+
+ public boolean overlaps(Range other) {
+ return table == other.table;
+ }
+} \ No newline at end of file
diff --git a/src/main/java/com/att/research/mdbc/RedoRow.java b/src/main/java/com/att/research/mdbc/RedoRow.java
new file mode 100644
index 0000000..c024fe7
--- /dev/null
+++ b/src/main/java/com/att/research/mdbc/RedoRow.java
@@ -0,0 +1,29 @@
+package com.att.research.mdbc;
+
+public class RedoRow {
+ private String redoTableName;
+ private String redoRowIndex;
+
+ public RedoRow(){}
+
+ public RedoRow(String redoTableName, String redoRowIndex){
+ this.redoRowIndex = redoRowIndex;
+ this.redoTableName = redoTableName;
+ }
+
+ public String getRedoTableName() {
+ return redoTableName;
+ }
+
+ public void setRedoTableName(String redoTableName) {
+ this.redoTableName = redoTableName;
+ }
+
+ public String getRedoRowIndex() {
+ return redoRowIndex;
+ }
+
+ public void setRedoRowIndex(String redoRowIndex) {
+ this.redoRowIndex = redoRowIndex;
+ }
+}
diff --git a/src/main/java/com/att/research/mdbc/StateManager.java b/src/main/java/com/att/research/mdbc/StateManager.java
new file mode 100644
index 0000000..accd13a
--- /dev/null
+++ b/src/main/java/com/att/research/mdbc/StateManager.java
@@ -0,0 +1,205 @@
+package com.att.research.mdbc;
+
+import com.att.research.exceptions.MDBCServiceException;
+import com.att.research.logging.EELFLoggerDelegate;
+import com.att.research.logging.format.AppMessages;
+import com.att.research.logging.format.ErrorSeverity;
+import com.att.research.logging.format.ErrorTypes;
+import com.att.research.mdbc.mixins.MixinFactory;
+import com.att.research.mdbc.mixins.MusicInterface;
+import com.att.research.mdbc.mixins.MusicMixin;
+import com.att.research.mdbc.mixins.TxCommitProgress;
+
+import java.sql.Connection;
+import java.sql.DriverManager;
+import java.sql.SQLException;
+import java.sql.Statement;
+import java.util.HashMap;
+import java.util.Map;
+import java.util.Properties;
+
+/**
+ * \TODO Implement an interface for the server logic and a factory
+ * @author Enrique Saurez
+ */
+public class StateManager {
+
+ //\TODO We need to fix the auto-commit mode and multiple transactions with the same connection
+
+ private static EELFLoggerDelegate logger = EELFLoggerDelegate.getLogger(StateManager.class);
+
+ /**
+ * This is the interface used by all the MusicSqlManagers,
+ * that are created by the MDBC Server
+ * @see MusicInterface
+ */
+ private MusicInterface musicManager;
+ /**
+ * This is the Running Queries information table.
+ * It mainly contains information about the entities
+ * that have being committed so far.
+ */
+ private TxCommitProgress transactionInfo;
+
+ private Map<String,MdbcConnection> mdbcConnections;
+
+ private String sqlDatabase;
+
+ private String url;
+
+ private Properties info;
+
+ @SuppressWarnings("unused")
+ private DatabasePartition ranges;
+
+ public StateManager(String url, Properties info, DatabasePartition ranges, String sqlDatabase) throws MDBCServiceException {
+ this.sqlDatabase=sqlDatabase;
+ this.ranges=ranges;
+ this.url = url;
+ this.info = info;
+ this.transactionInfo = new TxCommitProgress();
+ //\fixme this is not really used, delete!
+ String cassandraUrl = info.getProperty(Configuration.KEY_CASSANDRA_URL, Configuration.CASSANDRA_URL_DEFAULT);
+ String mixin = info.getProperty(Configuration.KEY_MUSIC_MIXIN_NAME, Configuration.MUSIC_MIXIN_DEFAULT);
+ this.musicManager = MixinFactory.createMusicInterface(mixin, cassandraUrl, info,ranges);
+ this.musicManager.createKeyspace();
+ try {
+ this.musicManager.initializeMdbcDataStructures();
+ } catch (MDBCServiceException e) {
+ logger.error(EELFLoggerDelegate.errorLogger, e.getMessage(),AppMessages.UNKNOWNERROR, ErrorSeverity.CRITICAL, ErrorTypes.GENERALSERVICEERROR);
+ throw(e);
+ }
+ MusicMixin.loadProperties();
+ this.mdbcConnections = new HashMap<>();
+ initSqlDatabase();
+ }
+
+ protected void initSqlDatabase() throws MDBCServiceException {
+ try {
+ //\TODO: pass the driver as a variable
+ Class.forName("org.mariadb.jdbc.Driver");
+ }
+ catch (ClassNotFoundException e) {
+ logger.error(EELFLoggerDelegate.errorLogger, e.getMessage(),AppMessages.UNKNOWNERROR, ErrorSeverity.CRITICAL, ErrorTypes.GENERALSERVICEERROR);
+ return;
+ }
+ try {
+ Connection sqlConnection = DriverManager.getConnection(this.url, this.info);
+ StringBuilder sql = new StringBuilder("CREATE DATABASE IF NOT EXISTS ")
+ .append(sqlDatabase)
+ .append(";");
+ Statement stmt = sqlConnection.createStatement();
+ stmt.execute(sql.toString());
+ } catch (SQLException e) {
+ logger.error(EELFLoggerDelegate.errorLogger, e.getMessage(),AppMessages.UNKNOWNERROR, ErrorSeverity.CRITICAL, ErrorTypes.GENERALSERVICEERROR);
+ throw new MDBCServiceException(e.getMessage());
+ }
+ }
+
+ public void CloseConnection(String connectionId){
+ //\TODO check if there is a race condition
+ if(mdbcConnections.containsKey(connectionId)) {
+ transactionInfo.deleteTxProgress(connectionId);
+ try {
+ Connection conn = mdbcConnections.get(connectionId);
+ if(conn!=null)
+ conn.close();
+ } catch (SQLException e) {
+ logger.error(EELFLoggerDelegate.errorLogger, e.getMessage(),AppMessages.UNKNOWNERROR, ErrorSeverity.CRITICAL, ErrorTypes.GENERALSERVICEERROR);
+ }
+ mdbcConnections.remove(connectionId);
+ }
+ }
+
+ public void OpenConnection(String id, Properties information){
+ if(!mdbcConnections.containsKey(id)){
+ Connection sqlConnection;
+ MdbcConnection newConnection;
+ //Create connection to local SQL DB
+ //\TODO: create function to generate connection outside of open connection and get connection
+ try {
+ //\TODO: pass the driver as a variable
+ Class.forName("org.mariadb.jdbc.Driver");
+ }
+ catch (ClassNotFoundException e) {
+ // TODO Auto-generated catch block
+ logger.error(EELFLoggerDelegate.errorLogger, e.getMessage(),AppMessages.UNKNOWNERROR, ErrorSeverity.CRITICAL, ErrorTypes.GENERALSERVICEERROR);
+ return;
+ }
+ try {
+ sqlConnection = DriverManager.getConnection(this.url+"/"+this.sqlDatabase, this.info);
+ } catch (SQLException e) {
+ logger.error(EELFLoggerDelegate.errorLogger, e.getMessage(),AppMessages.QUERYERROR, ErrorSeverity.CRITICAL, ErrorTypes.QUERYERROR);
+ sqlConnection = null;
+ }
+ //Create MDBC connection
+ try {
+ newConnection = new MdbcConnection(id, this.url+"/"+this.sqlDatabase, sqlConnection, info, this.musicManager, transactionInfo,ranges);
+ } catch (MDBCServiceException e) {
+ logger.error(EELFLoggerDelegate.errorLogger, e.getMessage(),AppMessages.UNKNOWNERROR, ErrorSeverity.CRITICAL, ErrorTypes.QUERYERROR);
+ newConnection = null;
+ return;
+ }
+ logger.info(EELFLoggerDelegate.applicationLogger,"Connection created for connection: "+id);
+ transactionInfo.createNewTransactionTracker(id, sqlConnection);
+ if(newConnection != null) {
+ mdbcConnections.put(id,newConnection);
+ }
+ }
+ }
+
+ /**
+ * This function returns the connection to the corresponding transaction
+ * @param id of the transaction, created using
+ * @return
+ */
+ public Connection GetConnection(String id) {
+ if(mdbcConnections.containsKey(id)) {
+ //\TODO: Verify if this make sense
+ // Intent: reinitialize transaction progress, when it already completed the previous tx for the same connection
+ if(transactionInfo.isComplete(id)) {
+ transactionInfo.reinitializeTxProgress(id);
+ }
+ return mdbcConnections.get(id);
+ }
+
+ Connection sqlConnection;
+ MdbcConnection newConnection;
+ try {
+ //TODO: pass the driver as a variable
+ Class.forName("org.mariadb.jdbc.Driver");
+ }
+ catch (ClassNotFoundException e) {
+ // TODO Auto-generated catch block
+ logger.error(EELFLoggerDelegate.errorLogger, e.getMessage(),AppMessages.QUERYERROR, ErrorSeverity.CRITICAL, ErrorTypes.QUERYERROR);
+ }
+
+ //Create connection to local SQL DB
+ try {
+ sqlConnection = DriverManager.getConnection(this.url+"/"+this.sqlDatabase, this.info);
+ } catch (SQLException e) {
+ logger.error("sql connection was not created correctly");
+ logger.error(EELFLoggerDelegate.errorLogger, e.getMessage(),AppMessages.QUERYERROR, ErrorSeverity.CRITICAL, ErrorTypes.QUERYERROR);
+ sqlConnection = null;
+ }
+ //Create MDBC connection
+ try {
+ newConnection = new MdbcConnection(id,this.url+"/"+this.sqlDatabase, sqlConnection, info, this.musicManager, transactionInfo,ranges);
+ } catch (MDBCServiceException e) {
+ logger.error(EELFLoggerDelegate.errorLogger, e.getMessage(),AppMessages.UNKNOWNERROR, ErrorSeverity.CRITICAL, ErrorTypes.QUERYERROR);
+ newConnection = null;
+ }
+ logger.info(EELFLoggerDelegate.applicationLogger,"Connection created for connection: "+id);
+
+ transactionInfo.createNewTransactionTracker(id, sqlConnection);
+ if(newConnection != null) {
+ mdbcConnections.put(id,newConnection);
+ }
+ return newConnection;
+ }
+
+ public void InitializeSystem() {
+ //\TODO Prefetch data to system using the data ranges as guide
+ throw new UnsupportedOperationException("Function initialize system needs to be implemented id MdbcStateManager");
+ }
+}
diff --git a/src/main/java/com/att/research/mdbc/TableInfo.java b/src/main/java/com/att/research/mdbc/TableInfo.java
new file mode 100755
index 0000000..583ba73
--- /dev/null
+++ b/src/main/java/com/att/research/mdbc/TableInfo.java
@@ -0,0 +1,75 @@
+package com.att.research.mdbc;
+
+import java.sql.Types;
+import java.util.ArrayList;
+import java.util.List;
+
+/**
+ * Information about a table in the local database. It consists of three ordered list, which should all have the
+ * same length. A list of column names, a list of DB column types, and a list of booleans specifying which columns are keys.
+ * @author Robert P. Eby
+ */
+public class TableInfo {
+ /** An ordered list of the column names in this table */
+ public List<String> columns;
+ /** An ordered list of the column types in this table; the types are integers taken from {@link java.sql.Types}. */
+ public List<Integer> coltype;
+ /** An ordered list of booleans indicating if a column is a primary key column or not. */
+ public List<Boolean> iskey;
+
+ /** Construct an (initially) empty TableInfo. */
+ public TableInfo() {
+ columns = new ArrayList<String>();
+ coltype = new ArrayList<Integer>();
+ iskey = new ArrayList<Boolean>();
+ }
+ /**
+ * Check whether the column whose name is <i>name</i> is a primary key column.
+ * @param name the column name
+ * @return true if it is, false otherwise
+ */
+ public boolean iskey(String name) {
+ for (int i = 0; i < columns.size(); i++) {
+ if (this.columns.get(i).equalsIgnoreCase(name))
+ return this.iskey.get(i);
+ }
+ return false;
+ }
+ /**
+ * Get the type of the column whose name is <i>name</i>.
+ * @param name the column name
+ * @return the column type or Types.NULL
+ */
+ public int getColType(String name) {
+ for (int i = 0; i < columns.size(); i++) {
+ if (this.columns.get(i).equalsIgnoreCase(name))
+ return this.coltype.get(i);
+ }
+ return Types.NULL;
+ }
+
+ /**
+ * Checks if this table has a primary key
+ * @return
+ */
+ public boolean hasKey() {
+ for (Boolean b: iskey) {
+ if (b) {
+ return true;
+ }
+ }
+ return false;
+ }
+
+ public List<String> getKeyColumns(){
+ List<String> keys = new ArrayList<String>();
+ int idx = 0;
+ for (Boolean b: iskey) {
+ if (b) {
+ keys.add(this.columns.get(idx));
+ }
+ idx++;
+ }
+ return keys;
+ }
+}
diff --git a/src/main/java/com/att/research/mdbc/configurations/NodeConfiguration.java b/src/main/java/com/att/research/mdbc/configurations/NodeConfiguration.java
new file mode 100644
index 0000000..78850e3
--- /dev/null
+++ b/src/main/java/com/att/research/mdbc/configurations/NodeConfiguration.java
@@ -0,0 +1,71 @@
+package com.att.research.mdbc.configurations;
+
+import com.att.research.logging.EELFLoggerDelegate;
+import com.att.research.mdbc.DatabasePartition;
+import com.att.research.mdbc.MDBCUtils;
+import com.att.research.mdbc.Range;
+import com.google.gson.Gson;
+import com.google.gson.GsonBuilder;
+
+import java.io.BufferedReader;
+import java.io.FileNotFoundException;
+import java.io.FileReader;
+import java.io.IOException;
+import java.util.HashSet;
+import java.util.Set;
+
+public class NodeConfiguration {
+
+ private static transient final EELFLoggerDelegate LOG = EELFLoggerDelegate.getLogger(NodeConfiguration.class);
+
+ public String sqlDatabaseName;
+ public DatabasePartition partition;
+ public String nodeName;
+
+ public NodeConfiguration(String tables, String titIndex, String titTableName, String partitionId, String sqlDatabaseName, String node, String redoRecordsTable){
+ partition = new DatabasePartition(toRanges(tables), titIndex, titTableName, partitionId, null, redoRecordsTable) ;
+ this.sqlDatabaseName = sqlDatabaseName;
+ this.nodeName = node;
+ }
+
+ protected Set<Range> toRanges(String tables){
+ Set<Range> newRange = new HashSet<>();
+ String[] tablesArray=tables.split(",");
+ for(String table: tablesArray) {
+ newRange.add(new Range(table));
+ }
+ return newRange;
+ }
+
+ public String toJson() {
+ GsonBuilder builder = new GsonBuilder();
+ builder.setPrettyPrinting().serializeNulls();;
+ Gson gson = builder.create();
+ return gson.toJson(this);
+ }
+
+ public void saveToFile(String file){
+ try {
+ String serialized = this.toJson();
+ MDBCUtils.saveToFile(serialized,file,LOG);
+ } catch (IOException e) {
+ e.printStackTrace();
+ // Exit with error
+ System.exit(1);
+ }
+ }
+
+ public static NodeConfiguration readJsonFromFile( String filepath) throws FileNotFoundException {
+ BufferedReader br;
+ try {
+ br = new BufferedReader(
+ new FileReader(filepath));
+ } catch (FileNotFoundException e) {
+ LOG.error(EELFLoggerDelegate.errorLogger,"File was not found when reading json"+e);
+ throw e;
+ }
+ Gson gson = new Gson();
+ NodeConfiguration config = gson.fromJson(br, NodeConfiguration.class);
+ return config;
+ }
+}
diff --git a/src/main/java/com/att/research/mdbc/configurations/TablesConfiguration.java b/src/main/java/com/att/research/mdbc/configurations/TablesConfiguration.java
new file mode 100644
index 0000000..0d28b51
--- /dev/null
+++ b/src/main/java/com/att/research/mdbc/configurations/TablesConfiguration.java
@@ -0,0 +1,180 @@
+package com.att.research.mdbc.configurations;
+
+import com.att.research.exceptions.MDBCServiceException;
+import com.att.research.logging.EELFLoggerDelegate;
+import com.att.research.mdbc.DatabaseOperations;
+import com.att.research.mdbc.RedoRow;
+import com.att.research.mdbc.mixins.CassandraMixin;
+import com.google.gson.Gson;
+import org.onap.music.datastore.PreparedQueryObject;
+import org.onap.music.exceptions.MusicServiceException;
+import org.onap.music.main.MusicPureCassaCore;
+
+import java.io.BufferedReader;
+import java.io.FileNotFoundException;
+import java.io.FileReader;
+import java.util.ArrayList;
+import java.util.List;
+
+public class TablesConfiguration {
+
+ private final String TIT_TABLE_NAME = "transactioninformation";
+ private final String REDO_RECORDS_NAME = "redorecords";
+
+ private transient static EELFLoggerDelegate logger = EELFLoggerDelegate.getLogger(TablesConfiguration.class);
+ private List<PartitionInformation> partitions;
+ private String internalNamespace;
+ private int internalReplicationFactor;
+ private String musicNamespace;
+ private String tableToPartitionName;
+ private String partitionInformationTableName;
+ private String redoHistoryTableName;
+ private String sqlDatabaseName;
+
+ public TablesConfiguration(){}
+
+ /**
+ * This functions initalize all the corresponding tables and rows
+ * @return a list of node configurations to be used when starting each of the servers
+ * @throws MDBCServiceException
+ * @apiNote This function assumes that when used, there is not associated redo history in the tables to the tables that are going to be managed by this configuration file
+ */
+ public List<NodeConfiguration> initializeAndCreateNodeConfigurations() throws MDBCServiceException {
+ initInternalNamespace();
+ DatabaseOperations.createNamespace(musicNamespace, internalReplicationFactor);
+ List<NodeConfiguration> nodeConfigs = new ArrayList<>();
+ String ttpName = (tableToPartitionName==null || tableToPartitionName.isEmpty())?CassandraMixin.TABLE_TO_PARTITION_TABLE_NAME:tableToPartitionName;
+ DatabaseOperations.CreateTableToPartitionTable(musicNamespace,ttpName);
+ String pitName = (partitionInformationTableName==null || partitionInformationTableName.isEmpty())?CassandraMixin.PARTITION_INFORMATION_TABLE_NAME:partitionInformationTableName;
+ DatabaseOperations.CreatePartitionInfoTable(musicNamespace,pitName);
+ String rhName = (redoHistoryTableName==null || redoHistoryTableName.isEmpty())?CassandraMixin.REDO_HISTORY_TABLE_NAME:redoHistoryTableName;
+ DatabaseOperations.CreateRedoHistoryTable(musicNamespace,rhName);
+ if(partitions == null){
+ logger.error("Partitions was not correctly initialized");
+ throw new MDBCServiceException("Partition was not correctly initialized");
+ }
+ for(PartitionInformation partitionInfo : partitions){
+ String titTableName = partitionInfo.titTableName;
+ titTableName = (titTableName==null || titTableName.isEmpty())?TIT_TABLE_NAME:titTableName;
+ //0) Create the corresponding TIT table
+ DatabaseOperations.CreateTransactionInformationTable(musicNamespace,titTableName);
+ String redoRecordsName = partitionInfo.rrtTableName;
+ redoRecordsName = (redoRecordsName==null || redoRecordsName.isEmpty())?REDO_RECORDS_NAME:redoRecordsName;
+ DatabaseOperations.CreateRedoRecordsTable(-1,musicNamespace,redoRecordsName);
+ //0) Create the corresponding TIT table
+ String partitionId;
+ if(partitionInfo.partitionId==null || partitionInfo.partitionId.isEmpty()){
+ if(partitionInfo.replicationFactor==0){
+ logger.error("Replication factor and partition id are both empty, and this is an invalid configuration" );
+ throw new MDBCServiceException("Replication factor and partition id are both empty, and this is an invalid configuration");
+ }
+ //1) Create a row in the partition info table
+ partitionId = DatabaseOperations.createPartitionInfoRow(musicNamespace,pitName,partitionInfo.replicationFactor,partitionInfo.tables,null);
+
+ }
+ else{
+ partitionId = partitionInfo.partitionId;
+ }
+ //2) Create a row in the transaction information table
+ String titIndex = DatabaseOperations.CreateEmptyTitRow(musicNamespace,titTableName,partitionId,null);
+ //3) Add owner and tit information to partition info table
+ RedoRow newRedoRow = new RedoRow(titTableName,titIndex);
+ DatabaseOperations.updateRedoRow(musicNamespace,pitName,partitionId,newRedoRow,partitionInfo.owner,null);
+ //4) Update ttp with the new partition
+ for(String table: partitionInfo.tables) {
+ DatabaseOperations.updateTableToPartition(musicNamespace, ttpName, table, partitionId, null);
+ }
+ //5) Add it to the redo history table
+ DatabaseOperations.createRedoHistoryBeginRow(musicNamespace,rhName,newRedoRow,partitionId,null);
+ //6) Create config for this node
+ nodeConfigs.add(new NodeConfiguration(String.join(",",partitionInfo.tables),titIndex,titTableName,partitionId,sqlDatabaseName,partitionInfo.owner,redoRecordsName));
+ }
+ return nodeConfigs;
+ }
+
+ private void initInternalNamespace() throws MDBCServiceException {
+ DatabaseOperations.createNamespace(internalNamespace,internalReplicationFactor);
+ StringBuilder createKeysTableCql = new StringBuilder("CREATE TABLE IF NOT EXISTS ")
+ .append(internalNamespace)
+ .append(".unsynced_keys (key text PRIMARY KEY);");
+ PreparedQueryObject queryObject = new PreparedQueryObject();
+ queryObject.appendQueryString(createKeysTableCql.toString());
+ try {
+ MusicPureCassaCore.createTable(internalNamespace,"unsynced_keys", queryObject,"critical");
+ } catch (MusicServiceException e) {
+ logger.error("Error creating unsynced keys table" );
+ throw new MDBCServiceException("Error creating unsynced keys table");
+ }
+ }
+
+ public static TablesConfiguration readJsonFromFile(String filepath) throws FileNotFoundException {
+ BufferedReader br;
+ try {
+ br = new BufferedReader(
+ new FileReader(filepath));
+ } catch (FileNotFoundException e) {
+ logger.error(EELFLoggerDelegate.errorLogger,"File was not found when reading json"+e);
+ throw e;
+ }
+ Gson gson = new Gson();
+ TablesConfiguration config = gson.fromJson(br, TablesConfiguration.class);
+ return config;
+ }
+
+ public class PartitionInformation{
+ private List<String> tables;
+ private String owner;
+ private String titTableName;
+ private String rrtTableName;
+ private String partitionId;
+ private int replicationFactor;
+
+ public List<String> getTables() {
+ return tables;
+ }
+
+ public void setTables(List<String> tables) {
+ this.tables = tables;
+ }
+
+ public String getOwner() {
+ return owner;
+ }
+
+ public void setOwner(String owner) {
+ this.owner = owner;
+ }
+
+ public String getTitTableName() {
+ return titTableName;
+ }
+
+ public void setTitTableName(String titTableName) {
+ this.titTableName = titTableName;
+ }
+
+ public String getPartitionId() {
+ return partitionId;
+ }
+
+ public void setPartitionId(String partitionId) {
+ this.partitionId = partitionId;
+ }
+
+ public int getReplicationFactor() {
+ return replicationFactor;
+ }
+
+ public void setReplicationFactor(int replicationFactor) {
+ this.replicationFactor = replicationFactor;
+ }
+
+ public String getRrtTableName(){
+ return rrtTableName;
+ }
+
+ public void setRrtTableName(String rrtTableName) {
+ this.rrtTableName = rrtTableName;
+ }
+ }
+}
diff --git a/src/main/java/com/att/research/mdbc/configurations/config-0.json b/src/main/java/com/att/research/mdbc/configurations/config-0.json
new file mode 100644
index 0000000..96d947c
--- /dev/null
+++ b/src/main/java/com/att/research/mdbc/configurations/config-0.json
@@ -0,0 +1,16 @@
+{
+ "sqlDatabaseName": "test",
+ "partition": {
+ "transactionInformationTable": "transactioninformation",
+ "transactionInformationIndex": "259a7a7c-f741-44ae-8d6e-227a02ddc96e",
+ "redoRecordsTable": "redorecords",
+ "partitionId": "ad766447-1adf-4800-aade-9f31a356ab4b",
+ "lockId": "",
+ "ranges": [
+ {
+ "table": "table11"
+ }
+ ]
+ },
+ "nodeName": ""
+}
diff --git a/src/main/java/com/att/research/mdbc/configurations/ranges.json b/src/main/java/com/att/research/mdbc/configurations/ranges.json
new file mode 100644
index 0000000..afa343b
--- /dev/null
+++ b/src/main/java/com/att/research/mdbc/configurations/ranges.json
@@ -0,0 +1,14 @@
+{
+ "transactionInformationTable": "transactioninformation",
+ "transactionInformationIndex": "d0e8ef2e-aeca-4261-8d9d-1679f560b85b",
+ "partitionId": "798110cf-9c61-4db2-9446-cb2dbab5a143",
+ "lockId": "",
+ "ranges": [
+ {
+ "table": "table1"
+ },
+ {
+ "table": "table2"
+ }
+ ]
+}
diff --git a/src/main/java/com/att/research/mdbc/configurations/tableConfiguration.json b/src/main/java/com/att/research/mdbc/configurations/tableConfiguration.json
new file mode 100644
index 0000000..b3c6224
--- /dev/null
+++ b/src/main/java/com/att/research/mdbc/configurations/tableConfiguration.json
@@ -0,0 +1,19 @@
+{
+ "partitions" : [
+ {
+ "tables":["table11"],
+ "owner":"",
+ "titTableName":"transactioninformation",
+ "rrtTableName":"redorecords",
+ "partitionId":"",
+ "replicationFactor":1
+ }
+ ],
+ "musicNamespace":"namespace",
+ "tableToPartitionName":"tabletopartition",
+ "partitionInformationTableName":"partitioninfo",
+ "redoHistoryTableName":"redohistory",
+ "sqlDatabaseName":"test",
+ "internalNamespace":"music_internal",
+ "internalReplicationFactor":1
+}
diff --git a/src/main/java/com/att/research/mdbc/examples/EtdbTestClient.java b/src/main/java/com/att/research/mdbc/examples/EtdbTestClient.java
new file mode 100644
index 0000000..cb43efe
--- /dev/null
+++ b/src/main/java/com/att/research/mdbc/examples/EtdbTestClient.java
@@ -0,0 +1,125 @@
+package com.att.research.mdbc.examples;
+
+import java.sql.*;
+import org.apache.calcite.avatica.remote.Driver;
+
+public class EtdbTestClient {
+
+ public static class Hr {
+ public final Employee[] emps = {
+ new Employee(100, "Bill"),
+ new Employee(200, "Eric"),
+ new Employee(150, "Sebastian"),
+ };
+ }
+
+ public static class Employee {
+ public final int empid;
+ public final String name;
+
+ public Employee(int empid, String name) {
+ this.empid = empid;
+ this.name = name;
+ }
+ }
+
+ public static void main(String[] args){
+ try {
+ Class.forName("org.apache.calcite.avatica.remote.Driver");
+ } catch (ClassNotFoundException e) {
+ e.printStackTrace();
+ System.exit(1);
+ }
+ Connection connection;
+ try {
+ connection = DriverManager.getConnection("jdbc:avatica:remote:url=http://localhost:30000;serialization=protobuf");
+ } catch (SQLException e) {
+ e.printStackTrace();
+ return;
+ }
+
+ try {
+ connection.setAutoCommit(false);
+ } catch (SQLException e) {
+ e.printStackTrace();
+ return;
+ }
+
+
+ final String sql = "CREATE TABLE IF NOT EXISTS Persons (\n" +
+ " PersonID int,\n" +
+ " LastName varchar(255),\n" +
+ " FirstName varchar(255),\n" +
+ " Address varchar(255),\n" +
+ " City varchar(255)\n" +
+ ");";
+ Statement stmt;
+ try {
+ stmt = connection.createStatement();
+ } catch (SQLException e) {
+ e.printStackTrace();
+ return;
+ }
+
+ boolean execute;
+ try {
+ execute = stmt.execute(sql);
+ } catch (SQLException e) {
+ e.printStackTrace();
+ return;
+ }
+
+ if (execute) {
+ try {
+ connection.commit();
+ } catch (SQLException e) {
+ e.printStackTrace();
+ }
+ }
+
+ try {
+ stmt.close();
+ } catch (SQLException e) {
+ e.printStackTrace();
+ }
+
+ final String insertSQL = "INSERT INTO Persons VALUES (1, 'Martinez', 'Juan', 'KACB', 'ATLANTA');";
+ Statement insertStmt;
+ try {
+ insertStmt = connection.createStatement();
+ } catch (SQLException e) {
+ e.printStackTrace();
+ return;
+ }
+
+ try {
+ execute = insertStmt.execute(insertSQL);
+ } catch (SQLException e) {
+ e.printStackTrace();
+ return;
+ }
+
+ try {
+ connection.commit();
+ } catch (SQLException e) {
+ e.printStackTrace();
+ return;
+ }
+
+ try {
+ stmt.close();
+ insertStmt.close();
+ } catch (SQLException e) {
+ e.printStackTrace();
+ }
+
+ try {
+ connection.commit();
+ connection.close();
+ } catch (SQLException e) {
+ e.printStackTrace();
+ }
+
+
+ }
+}
diff --git a/src/main/java/com/att/research/mdbc/mixins/Cassandra2Mixin.java b/src/main/java/com/att/research/mdbc/mixins/Cassandra2Mixin.java
new file mode 100755
index 0000000..cc67edf
--- /dev/null
+++ b/src/main/java/com/att/research/mdbc/mixins/Cassandra2Mixin.java
@@ -0,0 +1,287 @@
+package com.att.research.mdbc.mixins;
+
+import java.sql.Types;
+import java.util.ArrayList;
+import java.util.Date;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.Properties;
+
+import org.json.JSONObject;
+import org.json.JSONTokener;
+import org.onap.music.datastore.PreparedQueryObject;
+import org.onap.music.exceptions.MusicServiceException;
+import org.onap.music.main.MusicPureCassaCore;
+import org.onap.music.main.ReturnType;
+
+import com.att.research.logging.EELFLoggerDelegate;
+import com.att.research.mdbc.DatabasePartition;
+import com.att.research.mdbc.TableInfo;
+import com.datastax.driver.core.ResultSet;
+import com.datastax.driver.core.Row;
+
+/**
+ * This class provides the methods that MDBC needs to access Cassandra directly in order to provide persistence
+ * to calls to the user's DB. It stores dirty row references in one table (called DIRTY____) rather than one dirty
+ * table per real table (as {@link com.att.research.mdbc.mixins.CassandraMixin} does).
+ *
+ * @author Robert P. Eby
+ */
+public class Cassandra2Mixin extends CassandraMixin {
+ private static final String DIRTY_TABLE = "DIRTY____"; // it seems Cassandra won't allow __DIRTY__
+ private boolean dirty_table_created = false;
+
+ private EELFLoggerDelegate logger = EELFLoggerDelegate.getLogger(Cassandra2Mixin.class);
+
+ public Cassandra2Mixin() {
+ super();
+ }
+
+ public Cassandra2Mixin(String url, Properties info, DatabasePartition ranges) throws MusicServiceException {
+ super(url, info,ranges);
+ }
+
+ /**
+ * Get the name of this MusicInterface mixin object.
+ * @return the name
+ */
+ @Override
+ public String getMixinName() {
+ return "cassandra2";
+ }
+ /**
+ * Do what is needed to close down the MUSIC connection.
+ */
+ @Override
+ public void close() {
+ super.close();
+ }
+
+ /**
+ * This method creates a keyspace in Music/Cassandra to store the data corresponding to the SQL tables.
+ * The keyspace name comes from the initialization properties passed to the JDBC driver.
+ */
+ @Override
+ public void createKeyspace() {
+ super.createKeyspace();
+ }
+
+ /**
+ * This method performs all necessary initialization in Music/Cassandra to store the table <i>tableName</i>.
+ * @param tableName the table to initialize MUSIC for
+ */
+ @Override
+ public void initializeMusicForTable(TableInfo ti, String tableName) {
+ super.initializeMusicForTable(ti, tableName);
+ }
+
+ /**
+ * Create a <i>dirty row</i> table for the real table <i>tableName</i>. The primary keys columns from the real table are recreated in
+ * the dirty table, along with a "REPLICA__" column that names the replica that should update it's internal state from MUSIC.
+ * @param tableName the table to create a "dirty" table for
+ */
+ @Override
+ public void createDirtyRowTable(TableInfo ti, String tableName) {
+ if (!dirty_table_created) {
+ String cql = String.format("CREATE TABLE IF NOT EXISTS %s.%s (tablename TEXT, replica TEXT, keyset TEXT, PRIMARY KEY(tablename, replica, keyset));", music_ns, DIRTY_TABLE);
+ executeMusicWriteQuery(cql);
+ dirty_table_created = true;
+ }
+ }
+ /**
+ * Drop the dirty row table for <i>tableName</i> from MUSIC.
+ * @param tableName the table being dropped
+ */
+ @Override
+ public void dropDirtyRowTable(String tableName) {
+ // no-op
+ }
+
+ private String buildJSON(TableInfo ti, String tableName, Object[] keys) {
+ // Build JSON string representing this keyset
+ JSONObject jo = new JSONObject();
+ int j = 0;
+ for (int i = 0; i < ti.columns.size(); i++) {
+ if (ti.iskey.get(i)) {
+ jo.put(ti.columns.get(i), keys[j++]);
+ }
+ }
+ return jo.toString();
+ }
+ /**
+ * Remove the entries from the dirty row (for this replica) that correspond to a set of primary keys
+ * @param tableName the table we are removing dirty entries from
+ * @param keys the primary key values to use in the DELETE. Note: this is *only* the primary keys, not a full table row.
+ */
+ @Override
+ public void cleanDirtyRow(TableInfo ti, String tableName, JSONObject keys) {
+ String cql = String.format("DELETE FROM %s.%s WHERE tablename = ? AND replica = ? AND keyset = ?;", music_ns, DIRTY_TABLE);
+ //Session sess = getMusicSession();
+ //PreparedStatement ps = getPreparedStatementFromCache(cql);
+ Object[] values = new Object[] { tableName, myId, keys };
+ logger.debug(EELFLoggerDelegate.applicationLogger,"Executing MUSIC write:"+ cql + " with values " + values[0] + " " + values[1] + " " + values[2]);
+
+ PreparedQueryObject pQueryObject = new PreparedQueryObject();
+ pQueryObject.appendQueryString(cql);
+ pQueryObject.addValue(tableName);
+ pQueryObject.addValue(myId);
+ pQueryObject.addValue(keys);
+ ReturnType rt = MusicPureCassaCore.eventualPut(pQueryObject);
+ if(rt.getResult().getResult().toLowerCase().equals("failure")) {
+ logger.error(EELFLoggerDelegate.errorLogger, "Failure while eventualPut...: "+rt.getMessage());
+ }
+ /*BoundStatement bound = ps.bind(values);
+ bound.setReadTimeoutMillis(60000);
+ synchronized (sess) {
+ sess.execute(bound);
+ }*/
+ }
+ /**
+ * Get a list of "dirty rows" for a table. The dirty rows returned apply only to this replica,
+ * and consist of a Map of primary key column names and values.
+ * @param tableName the table we are querying for
+ * @return a list of maps; each list item is a map of the primary key names and values for that "dirty row".
+ */
+ @SuppressWarnings("deprecation")
+ @Override
+ public List<Map<String,Object>> getDirtyRows(TableInfo ti, String tableName) {
+ String cql = String.format("SELECT keyset FROM %s.%s WHERE tablename = ? AND replica = ?;", music_ns, DIRTY_TABLE);
+ logger.debug(EELFLoggerDelegate.applicationLogger,"Executing MUSIC write:"+ cql + " with values " + tableName + " " + myId);
+
+ PreparedQueryObject pQueryObject = new PreparedQueryObject();
+ pQueryObject.appendQueryString(cql);
+ pQueryObject.addValue(tableName);
+ pQueryObject.addValue(myId);
+ ResultSet results = null;
+ try {
+ results = MusicPureCassaCore.get(pQueryObject);
+ } catch (MusicServiceException e) {
+ e.printStackTrace();
+ }
+ /*Session sess = getMusicSession();
+ PreparedStatement ps = getPreparedStatementFromCache(cql);
+ BoundStatement bound = ps.bind(new Object[] { tableName, myId });
+ bound.setReadTimeoutMillis(60000);
+ ResultSet results = null;
+ synchronized (sess) {
+ results = sess.execute(bound);
+ }*/
+ List<Map<String,Object>> list = new ArrayList<Map<String,Object>>();
+ for (Row row : results) {
+ String json = row.getString("keyset");
+ JSONObject jo = new JSONObject(new JSONTokener(json));
+ Map<String,Object> objs = new HashMap<String,Object>();
+ for (String colname : jo.keySet()) {
+ int coltype = ti.getColType(colname);
+ switch (coltype) {
+ case Types.BIGINT:
+ objs.put(colname, jo.getLong(colname));
+ break;
+ case Types.BOOLEAN:
+ objs.put(colname, jo.getBoolean(colname));
+ break;
+ case Types.BLOB:
+ logger.error(EELFLoggerDelegate.errorLogger,"WE DO NOT SUPPORT BLOBS AS PRIMARY KEYS!! COLUMN NAME="+colname);
+ // throw an exception here???
+ break;
+ case Types.DOUBLE:
+ objs.put(colname, jo.getDouble(colname));
+ break;
+ case Types.INTEGER:
+ objs.put(colname, jo.getInt(colname));
+ break;
+ case Types.TIMESTAMP:
+ objs.put(colname, new Date(jo.getString(colname)));
+ break;
+ case Types.VARCHAR:
+ default:
+ objs.put(colname, jo.getString(colname));
+ break;
+ }
+ }
+ list.add(objs);
+ }
+ return list;
+ }
+
+ /**
+ * Drops the named table and its dirty row table (for all replicas) from MUSIC. The dirty row table is dropped first.
+ * @param tableName This is the table that has been dropped
+ */
+ @Override
+ public void clearMusicForTable(String tableName) {
+ super.clearMusicForTable(tableName);
+ }
+ /**
+ * This function is called whenever there is a DELETE to a row on a local SQL table, wherein it updates the
+ * MUSIC/Cassandra tables (both dirty bits and actual data) corresponding to the SQL write. MUSIC propagates
+ * it to the other replicas.
+ *
+ * @param tableName This is the table that has changed.
+ * @param oldRow This is a copy of the old row being deleted
+ */
+ public void deleteFromEntityTableInMusic(TableInfo ti, String tableName, JSONObject oldRow) {
+ super.deleteFromEntityTableInMusic(ti, tableName, oldRow);
+ }
+ /**
+ * This method is called whenever there is a SELECT on a local SQL table, wherein it first checks the local
+ * dirty bits table to see if there are any keys in Cassandra whose value has not yet been sent to SQL
+ * @param tableName This is the table on which the select is being performed
+ */
+ @Override
+ public void readDirtyRowsAndUpdateDb(DBInterface dbi, String tableName) {
+ super.readDirtyRowsAndUpdateDb(dbi, tableName);
+ }
+
+ /**
+ * This method is called whenever there is an INSERT or UPDATE to a local SQL table, wherein it updates the
+ * MUSIC/Cassandra tables (both dirty bits and actual data) corresponding to the SQL write. Music propagates
+ * it to the other replicas.
+ *
+ * @param tableName This is the table that has changed.
+ * @param changedRow This is information about the row that has changed
+ */
+ @Override
+ public void updateDirtyRowAndEntityTableInMusic(TableInfo ti, String tableName, JSONObject changedRow) {
+ super.updateDirtyRowAndEntityTableInMusic(ti, tableName, changedRow);
+ }
+
+ /**
+ * Mark rows as "dirty" in the dirty rows table for <i>tableName</i>. Rows are marked for all replicas but
+ * this one (this replica already has the up to date data).
+ * @param tableName the table we are marking dirty
+ * @param keys an ordered list of the values being put into the table. The values that correspond to the tables'
+ * primary key are copied into the dirty row table.
+ */
+ @Deprecated
+ public void markDirtyRow(TableInfo ti, String tableName, Object[] keys) {
+ String cql = String.format("INSERT INTO %s.%s (tablename, replica, keyset) VALUES (?, ?, ?);", music_ns, DIRTY_TABLE);
+ /*Session sess = getMusicSession();
+ PreparedStatement ps = getPreparedStatementFromCache(cql);*/
+ @SuppressWarnings("unused")
+ Object[] values = new Object[] { tableName, "", buildJSON(ti, tableName, keys) };
+ PreparedQueryObject pQueryObject = null;
+ for (String repl : allReplicaIds) {
+ /*if (!repl.equals(myId)) {
+ values[1] = repl;
+ logger.info(EELFLoggerDelegate.applicationLogger,"Executing MUSIC write:"+ cql + " with values " + values[0] + " " + values[1] + " " + values[2]);
+
+ BoundStatement bound = ps.bind(values);
+ bound.setReadTimeoutMillis(60000);
+ synchronized (sess) {
+ sess.execute(bound);
+ }
+ }*/
+ pQueryObject = new PreparedQueryObject();
+ pQueryObject.appendQueryString(cql);
+ pQueryObject.addValue(tableName);
+ pQueryObject.addValue(repl);
+ pQueryObject.addValue(buildJSON(ti, tableName, keys));
+ ReturnType rt = MusicPureCassaCore.eventualPut(pQueryObject);
+ if(rt.getResult().getResult().toLowerCase().equals("failure")) {
+ System.out.println("Failure while critical put..."+rt.getMessage());
+ }
+ }
+ }
+}
diff --git a/src/main/java/com/att/research/mdbc/mixins/CassandraMixin.java b/src/main/java/com/att/research/mdbc/mixins/CassandraMixin.java
new file mode 100755
index 0000000..6684fe6
--- /dev/null
+++ b/src/main/java/com/att/research/mdbc/mixins/CassandraMixin.java
@@ -0,0 +1,1288 @@
+package com.att.research.mdbc.mixins;
+
+import java.io.IOException;
+import java.io.Reader;
+import java.nio.ByteBuffer;
+import java.sql.Types;
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Map;
+import java.util.Properties;
+import java.util.Set;
+import java.util.TreeSet;
+import java.util.UUID;
+
+import com.att.research.mdbc.*;
+import org.json.JSONObject;
+import org.onap.music.datastore.CassaLockStore;
+import org.onap.music.datastore.PreparedQueryObject;
+import org.onap.music.exceptions.MusicLockingException;
+import org.onap.music.exceptions.MusicQueryException;
+import org.onap.music.exceptions.MusicServiceException;
+import org.onap.music.main.MusicPureCassaCore;
+import org.onap.music.main.ResultType;
+import org.onap.music.main.ReturnType;
+
+import com.att.research.exceptions.MDBCServiceException;
+import com.att.research.logging.EELFLoggerDelegate;
+import com.datastax.driver.core.BoundStatement;
+import com.datastax.driver.core.ColumnDefinitions;
+import com.datastax.driver.core.DataType;
+import com.datastax.driver.core.PreparedStatement;
+import com.datastax.driver.core.ResultSet;
+import com.datastax.driver.core.Row;
+import com.datastax.driver.core.Session;
+
+/**
+ * This class provides the methods that MDBC needs to access Cassandra directly in order to provide persistence
+ * to calls to the user's DB. It does not do any table or row locking.
+ *
+ * <p>This code only supports the following limited list of H2 and Cassandra data types:</p>
+ * <table summary="">
+ * <tr><th>H2 Data Type</th><th>Mapped to Cassandra Data Type</th></tr>
+ * <tr><td>BIGINT</td><td>BIGINT</td></tr>
+ * <tr><td>BOOLEAN</td><td>BOOLEAN</td></tr>
+ * <tr><td>CLOB</td><td>BLOB</td></tr>
+ * <tr><td>DOUBLE</td><td>DOUBLE</td></tr>
+ * <tr><td>INTEGER</td><td>INT</td></tr>
+ * <tr><td>TIMESTAMP</td><td>TIMESTAMP</td></tr>
+ * <tr><td>VARBINARY</td><td>BLOB</td></tr>
+ * <tr><td>VARCHAR</td><td>VARCHAR</td></tr>
+ * </table>
+ *
+ * @author Robert P. Eby
+ */
+public class CassandraMixin implements MusicInterface {
+ /** The property name to use to identify this replica to MusicSqlManager */
+ public static final String KEY_MY_ID = "myid";
+ /** The property name to use for the comma-separated list of replica IDs. */
+ public static final String KEY_REPLICAS = "replica_ids";
+ /** The property name to use to identify the IP address for Cassandra. */
+ public static final String KEY_MUSIC_ADDRESS = "music_address";
+ /** The property name to use to provide the replication factor for Cassandra. */
+ public static final String KEY_MUSIC_RFACTOR = "music_rfactor";
+ /** The property name to use to provide the replication factor for Cassandra. */
+ public static final String KEY_MUSIC_NAMESPACE = "music_namespace";
+ /** The default property value to use for the Cassandra keyspace. */
+ public static final String DEFAULT_MUSIC_KEYSPACE = "mdbc";
+ /** The default property value to use for the Cassandra IP address. */
+ public static final String DEFAULT_MUSIC_ADDRESS = "localhost";
+ /** The default property value to use for the Cassandra replication factor. */
+ public static final int DEFAULT_MUSIC_RFACTOR = 1;
+ /** The default primary string column, if none is provided. */
+ public static final String MDBC_PRIMARYKEY_NAME = "mdbc_cuid";
+ /** Type of the primary key, if none is defined by the user */
+ public static final String MDBC_PRIMARYKEY_TYPE = "uuid";
+ /** Namespace for the tables in MUSIC (Cassandra) */
+ public static final String DEFAULT_MUSIC_NAMESPACE = "namespace";
+
+ /** Name of the tables required for MDBC */
+ public static final String TABLE_TO_PARTITION_TABLE_NAME = "tabletopartition";
+ public static final String PARTITION_INFORMATION_TABLE_NAME = "partitioninfo";
+ public static final String REDO_HISTORY_TABLE_NAME= "redohistory";
+ //\TODO Add logic to change the names when required and create the tables when necessary
+ private String redoRecordTableName = "redorecords";
+ private String transactionInformationTableName = "transactioninformation";
+
+ private EELFLoggerDelegate logger = EELFLoggerDelegate.getLogger(CassandraMixin.class);
+
+ private static final Map<Integer, String> typemap = new HashMap<>();
+ static {
+ // We only support the following type mappings currently (from DB -> Cassandra).
+ // Anything else will likely cause a NullPointerException
+ typemap.put(Types.BIGINT, "BIGINT"); // aka. IDENTITY
+ typemap.put(Types.BLOB, "VARCHAR");
+ typemap.put(Types.BOOLEAN, "BOOLEAN");
+ typemap.put(Types.CLOB, "BLOB");
+ typemap.put(Types.DATE, "VARCHAR");
+ typemap.put(Types.DOUBLE, "DOUBLE");
+ typemap.put(Types.DECIMAL, "DECIMAL");
+ typemap.put(Types.INTEGER, "INT");
+ //typemap.put(Types.TIMESTAMP, "TIMESTAMP");
+ typemap.put(Types.SMALLINT, "SMALLINT");
+ typemap.put(Types.TIMESTAMP, "VARCHAR");
+ typemap.put(Types.VARBINARY, "BLOB");
+ typemap.put(Types.VARCHAR, "VARCHAR");
+ typemap.put(Types.CHAR, "VARCHAR");
+ //The "Hacks", these don't have a direct mapping
+ //typemap.put(Types.DATE, "VARCHAR");
+ //typemap.put(Types.DATE, "TIMESTAMP");
+ }
+
+ protected DatabasePartition ranges;
+ protected final String music_ns;
+ protected final String myId;
+ protected final String[] allReplicaIds;
+ private final String musicAddress;
+ private final int music_rfactor;
+ private MusicConnector mCon = null;
+ private Session musicSession = null;
+ private boolean keyspace_created = false;
+ private Map<String, PreparedStatement> ps_cache = new HashMap<>();
+ private Set<String> in_progress = Collections.synchronizedSet(new HashSet<String>());
+
	/**
	 * Zero-argument constructor. Leaves every connection-related field unset (null / 0);
	 * an instance built this way cannot talk to MUSIC and is only usable where the
	 * mixin object itself is needed (e.g. name lookup).
	 */
	public CassandraMixin() {
		//this.logger = null;
		this.musicAddress = null;
		this.music_ns = null;
		this.music_rfactor = 0;
		this.myId = null;
		this.allReplicaIds = null;
	}
+
	/**
	 * Builds a Cassandra-backed MusicInterface from the JDBC connection properties.
	 * Reads the MUSIC address, replication factor, replica id, replica list and namespace from
	 * {@code info} (falling back to the KEY_* / DEFAULT_* constants) and then creates the keyspace.
	 *
	 * @param url the JDBC url (currently unused here; kept for the mixin construction contract)
	 * @param info connection properties; see the KEY_* constants for recognized keys
	 * @param ranges the partition of the database this replica owns
	 * @throws MusicServiceException if the MUSIC keyspace cannot be created
	 */
	public CassandraMixin(String url, Properties info, DatabasePartition ranges) throws MusicServiceException {
		this.ranges = ranges;
		// Default values -- should be overridden in the Properties
		// Default to using the host_ids of the various peers as the replica IDs (this is probably preferred)
		this.musicAddress = info.getProperty(KEY_MUSIC_ADDRESS, DEFAULT_MUSIC_ADDRESS);
		logger.info(EELFLoggerDelegate.applicationLogger,"MusicSqlManager: musicAddress="+musicAddress);

		String s = info.getProperty(KEY_MUSIC_RFACTOR);
		this.music_rfactor = (s == null) ? DEFAULT_MUSIC_RFACTOR : Integer.parseInt(s);

		// NOTE: getProperty(key, default) evaluates its default eagerly, so getMyHostId()/getAllHostIds()
		// always issue reads against the Cassandra system tables, even when the property overrides them.
		this.myId = info.getProperty(KEY_MY_ID, getMyHostId());
		logger.info(EELFLoggerDelegate.applicationLogger,"MusicSqlManager: myId="+myId);


		this.allReplicaIds = info.getProperty(KEY_REPLICAS, getAllHostIds()).split(",");
		logger.info(EELFLoggerDelegate.applicationLogger,"MusicSqlManager: allReplicaIds="+info.getProperty(KEY_REPLICAS, this.myId));

		this.music_ns = info.getProperty(KEY_MUSIC_NAMESPACE,DEFAULT_MUSIC_NAMESPACE);
		logger.info(EELFLoggerDelegate.applicationLogger,"MusicSqlManager: music_ns="+music_ns);
		transactionInformationTableName = "transactioninformation";
		createMusicKeyspace();
	}
+
+ private void createMusicKeyspace() throws MusicServiceException {
+
+ Map<String,Object> replicationInfo = new HashMap<>();
+ replicationInfo.put("'class'", "'SimpleStrategy'");
+ replicationInfo.put("'replication_factor'", music_rfactor);
+
+ PreparedQueryObject queryObject = new PreparedQueryObject();
+ queryObject.appendQueryString(
+ "CREATE KEYSPACE " + this.music_ns + " WITH REPLICATION = " + replicationInfo.toString().replaceAll("=", ":"));
+
+ try {
+ MusicPureCassaCore.nonKeyRelatedPut(queryObject, "eventual");
+ } catch (MusicServiceException e) {
+ if (e.getMessage().equals("Keyspace "+this.music_ns+" already exists")) {
+ // ignore
+ } else {
+ throw(e);
+ }
+ }
+ }
+
+ private String getMyHostId() {
+ ResultSet rs = executeMusicRead("SELECT HOST_ID FROM SYSTEM.LOCAL");
+ Row row = rs.one();
+ return (row == null) ? "UNKNOWN" : row.getUUID("HOST_ID").toString();
+ }
+ private String getAllHostIds() {
+ ResultSet results = executeMusicRead("SELECT HOST_ID FROM SYSTEM.PEERS");
+ StringBuilder sb = new StringBuilder(myId);
+ for (Row row : results) {
+ sb.append(",");
+ sb.append(row.getUUID("HOST_ID").toString());
+ }
+ return sb.toString();
+ }
+
+ /**
+ * Get the name of this MusicInterface mixin object.
+ * @return the name
+ */
+ @Override
+ public String getMixinName() {
+ return "cassandra";
+ }
+ /**
+ * Do what is needed to close down the MUSIC connection.
+ */
+ @Override
+ public void close() {
+ if (musicSession != null) {
+ musicSession.close();
+ musicSession = null;
+ }
+ }
+ @Override
+ public void initializeMdbcDataStructures() throws MDBCServiceException {
+ try {
+ DatabaseOperations.CreateRedoRecordsTable(-1, music_ns, redoRecordTableName);//\TODO If we start partitioning the data base, we would need to use the redotable number
+ DatabaseOperations.CreateTransactionInformationTable(music_ns, transactionInformationTableName);
+ DatabaseOperations.CreateTableToPartitionTable(music_ns, TABLE_TO_PARTITION_TABLE_NAME);
+ DatabaseOperations.CreatePartitionInfoTable(music_ns, PARTITION_INFORMATION_TABLE_NAME);
+ DatabaseOperations.CreateRedoHistoryTable(music_ns, REDO_HISTORY_TABLE_NAME);
+ }
+ catch(MDBCServiceException e){
+ logger.error(EELFLoggerDelegate.errorLogger,"Error creating tables in MUSIC");
+ }
+ }
+
+ /**
+ * This method creates a keyspace in Music/Cassandra to store the data corresponding to the SQL tables.
+ * The keyspace name comes from the initialization properties passed to the JDBC driver.
+ */
+ @Override
+ public void createKeyspace() {
+ if (keyspace_created == false) {
+ String cql = String.format("CREATE KEYSPACE IF NOT EXISTS %s WITH REPLICATION = { 'class' : 'SimpleStrategy', 'replication_factor' : %d };", music_ns, music_rfactor);
+ executeMusicWriteQuery(cql);
+ keyspace_created = true;
+ }
+ }
+
+ /**
+ * This method performs all necessary initialization in Music/Cassandra to store the table <i>tableName</i>.
+ * @param tableName the table to initialize MUSIC for
+ */
+ @Override
+ public void initializeMusicForTable(TableInfo ti, String tableName) {
+ /**
+ * This code creates two tables for every table in SQL:
+ * (i) a table with the exact same name as the SQL table storing the SQL data.
+ * (ii) a "dirty bits" table that stores the keys in the Cassandra table that are yet to be
+ * updated in the SQL table (they were written by some other node).
+ */
+ StringBuilder fields = new StringBuilder();
+ StringBuilder prikey = new StringBuilder();
+ String pfx = "", pfx2 = "";
+ for (int i = 0; i < ti.columns.size(); i++) {
+ fields.append(pfx)
+ .append(ti.columns.get(i))
+ .append(" ")
+ .append(typemap.get(ti.coltype.get(i)));
+ if (ti.iskey.get(i)) {
+ // Primary key column
+ prikey.append(pfx2).append(ti.columns.get(i));
+ pfx2 = ", ";
+ }
+ pfx = ", ";
+ }
+ if (prikey.length()==0) {
+ fields.append(pfx).append(MDBC_PRIMARYKEY_NAME)
+ .append(" ")
+ .append(MDBC_PRIMARYKEY_TYPE);
+ prikey.append("mdbc_cuid");
+ }
+ String cql = String.format("CREATE TABLE IF NOT EXISTS %s.%s (%s, PRIMARY KEY (%s));", music_ns, tableName, fields.toString(), prikey.toString());
+ executeMusicWriteQuery(cql);
+ }
+
+ // **************************************************
+ // Dirty Tables (in MUSIC) methods
+ // **************************************************
+
+ /**
+ * Create a <i>dirty row</i> table for the real table <i>tableName</i>. The primary keys columns from the real table are recreated in
+ * the dirty table, along with a "REPLICA__" column that names the replica that should update it's internal state from MUSIC.
+ * @param tableName the table to create a "dirty" table for
+ */
+ @Override
+ public void createDirtyRowTable(TableInfo ti, String tableName) {
+ // create dirtybitsTable at all replicas
+// for (String repl : allReplicaIds) {
+//// String dirtyRowsTableName = "dirty_"+tableName+"_"+allReplicaIds[i];
+//// String dirtyTableQuery = "CREATE TABLE IF NOT EXISTS "+music_ns+"."+ dirtyRowsTableName+" (dirtyRowKeys text PRIMARY KEY);";
+// cql = String.format("CREATE TABLE IF NOT EXISTS %s.DIRTY_%s_%s (dirtyRowKeys TEXT PRIMARY KEY);", music_ns, tableName, repl);
+// executeMusicWriteQuery(cql);
+// }
+ StringBuilder ddl = new StringBuilder("REPLICA__ TEXT");
+ StringBuilder cols = new StringBuilder("REPLICA__");
+ for (int i = 0; i < ti.columns.size(); i++) {
+ if (ti.iskey.get(i)) {
+ // Only use the primary keys columns in the "Dirty" table
+ ddl.append(", ")
+ .append(ti.columns.get(i))
+ .append(" ")
+ .append(typemap.get(ti.coltype.get(i)));
+ cols.append(", ").append(ti.columns.get(i));
+ }
+ }
+ if(cols.length()==0) {
+ //fixme
+ System.err.println("Create dirty row table found no primary key");
+ }
+ ddl.append(", PRIMARY KEY(").append(cols).append(")");
+ String cql = String.format("CREATE TABLE IF NOT EXISTS %s.DIRTY_%s (%s);", music_ns, tableName, ddl.toString());
+ executeMusicWriteQuery(cql);
+ }
+ /**
+ * Drop the dirty row table for <i>tableName</i> from MUSIC.
+ * @param tableName the table being dropped
+ */
+ @Override
+ public void dropDirtyRowTable(String tableName) {
+ String cql = String.format("DROP TABLE %s.DIRTY_%s;", music_ns, tableName);
+ executeMusicWriteQuery(cql);
+ }
+ /**
+ * Mark rows as "dirty" in the dirty rows table for <i>tableName</i>. Rows are marked for all replicas but
+ * this one (this replica already has the up to date data).
+ * @param tableName the table we are marking dirty
+ * @param keys an ordered list of the values being put into the table. The values that correspond to the tables'
+ * primary key are copied into the dirty row table.
+ */
+ @Override
+ public void markDirtyRow(TableInfo ti, String tableName, JSONObject keys) {
+ Object[] keyObj = getObjects(ti,tableName, keys);
+ StringBuilder cols = new StringBuilder("REPLICA__");
+ PreparedQueryObject pQueryObject = null;
+ StringBuilder vals = new StringBuilder("?");
+ List<Object> vallist = new ArrayList<Object>();
+ vallist.add(""); // placeholder for replica
+ for (int i = 0; i < ti.columns.size(); i++) {
+ if (ti.iskey.get(i)) {
+ cols.append(", ").append(ti.columns.get(i));
+ vals.append(", ").append("?");
+ vallist.add(keyObj[i]);
+ }
+ }
+ if(cols.length()==0) {
+ //FIXME
+ System.err.println("markDIrtyRow need to fix primary key");
+ }
+ String cql = String.format("INSERT INTO %s.DIRTY_%s (%s) VALUES (%s);", music_ns, tableName, cols.toString(), vals.toString());
+ /*Session sess = getMusicSession();
+ PreparedStatement ps = getPreparedStatementFromCache(cql);*/
+ String primaryKey;
+ if(ti.hasKey()) {
+ primaryKey = getMusicKeyFromRow(ti,tableName, keys);
+ }
+ else {
+ primaryKey = getMusicKeyFromRowWithoutPrimaryIndexes(ti,tableName, keys);
+ }
+ System.out.println("markDirtyRow: PK value: "+primaryKey);
+
+ Object pkObj = null;
+ for (int i = 0; i < ti.columns.size(); i++) {
+ if (ti.iskey.get(i)) {
+ pkObj = keyObj[i];
+ }
+ }
+ for (String repl : allReplicaIds) {
+ pQueryObject = new PreparedQueryObject();
+ pQueryObject.appendQueryString(cql);
+ pQueryObject.addValue(tableName);
+ pQueryObject.addValue(repl);
+ pQueryObject.addValue(pkObj);
+ updateMusicDB(tableName, primaryKey, pQueryObject);
+ //if (!repl.equals(myId)) {
+ /*logger.info(EELFLoggerDelegate.applicationLogger,"Executing MUSIC write:"+ cql);
+ vallist.set(0, repl);
+ BoundStatement bound = ps.bind(vallist.toArray());
+ bound.setReadTimeoutMillis(60000);
+ synchronized (sess) {
+ sess.execute(bound);
+ }*/
+ //}
+
+ }
+ }
+ /**
+ * Remove the entries from the dirty row (for this replica) that correspond to a set of primary keys
+ * @param tableName the table we are removing dirty entries from
+ * @param keys the primary key values to use in the DELETE. Note: this is *only* the primary keys, not a full table row.
+ */
+ @Override
+ public void cleanDirtyRow(TableInfo ti, String tableName, JSONObject keys) {
+ Object[] keysObjects = getObjects(ti,tableName,keys);
+ PreparedQueryObject pQueryObject = new PreparedQueryObject();
+ StringBuilder cols = new StringBuilder("REPLICA__=?");
+ List<Object> vallist = new ArrayList<Object>();
+ vallist.add(myId);
+ int n = 0;
+ for (int i = 0; i < ti.columns.size(); i++) {
+ if (ti.iskey.get(i)) {
+ cols.append(" AND ").append(ti.columns.get(i)).append("=?");
+ vallist.add(keysObjects[n++]);
+ pQueryObject.addValue(keysObjects[n++]);
+ }
+ }
+ String cql = String.format("DELETE FROM %s.DIRTY_%s WHERE %s;", music_ns, tableName, cols.toString());
+ logger.debug(EELFLoggerDelegate.applicationLogger,"Executing MUSIC write:"+ cql);
+ pQueryObject.appendQueryString(cql);
+ ReturnType rt = MusicPureCassaCore.eventualPut(pQueryObject);
+ if(rt.getResult().getResult().toLowerCase().equals("failure")) {
+ System.out.println("Failure while cleanDirtyRow..."+rt.getMessage());
+ }
+ /*Session sess = getMusicSession();
+ PreparedStatement ps = getPreparedStatementFromCache(cql);
+ BoundStatement bound = ps.bind(vallist.toArray());
+ bound.setReadTimeoutMillis(60000);
+ synchronized (sess) {
+ sess.execute(bound);
+ }*/
+ }
+ /**
+ * Get a list of "dirty rows" for a table. The dirty rows returned apply only to this replica,
+ * and consist of a Map of primary key column names and values.
+ * @param tableName the table we are querying for
+ * @return a list of maps; each list item is a map of the primary key names and values for that "dirty row".
+ */
+ @Override
+ public List<Map<String,Object>> getDirtyRows(TableInfo ti, String tableName) {
+ String cql = String.format("SELECT * FROM %s.DIRTY_%s WHERE REPLICA__=?;", music_ns, tableName);
+ ResultSet results = null;
+ logger.debug(EELFLoggerDelegate.applicationLogger,"Executing MUSIC write:"+ cql);
+
+ /*Session sess = getMusicSession();
+ PreparedStatement ps = getPreparedStatementFromCache(cql);
+ BoundStatement bound = ps.bind(new Object[] { myId });
+ bound.setReadTimeoutMillis(60000);
+ synchronized (sess) {
+ results = sess.execute(bound);
+ }*/
+ PreparedQueryObject pQueryObject = new PreparedQueryObject();
+ pQueryObject.appendQueryString(cql);
+ try {
+ results = MusicPureCassaCore.get(pQueryObject);
+ } catch (MusicServiceException e) {
+
+ e.printStackTrace();
+ }
+
+ ColumnDefinitions cdef = results.getColumnDefinitions();
+ List<Map<String,Object>> list = new ArrayList<Map<String,Object>>();
+ for (Row row : results) {
+ Map<String,Object> objs = new HashMap<String,Object>();
+ for (int i = 0; i < cdef.size(); i++) {
+ String colname = cdef.getName(i).toUpperCase();
+ String coltype = cdef.getType(i).getName().toString().toUpperCase();
+ if (!colname.equals("REPLICA__")) {
+ switch (coltype) {
+ case "BIGINT":
+ objs.put(colname, row.getLong(colname));
+ break;
+ case "BOOLEAN":
+ objs.put(colname, row.getBool(colname));
+ break;
+ case "BLOB":
+ objs.put(colname, row.getString(colname));
+ break;
+ case "DATE":
+ objs.put(colname, row.getString(colname));
+ break;
+ case "DOUBLE":
+ objs.put(colname, row.getDouble(colname));
+ break;
+ case "DECIMAL":
+ objs.put(colname, row.getDecimal(colname));
+ break;
+ case "INT":
+ objs.put(colname, row.getInt(colname));
+ break;
+ case "TIMESTAMP":
+ objs.put(colname, row.getTimestamp(colname));
+ break;
+ case "VARCHAR":
+ default:
+ objs.put(colname, row.getString(colname));
+ break;
+ }
+ }
+ }
+ list.add(objs);
+ }
+ return list;
+ }
+
+ /**
+ * Drops the named table and its dirty row table (for all replicas) from MUSIC. The dirty row table is dropped first.
+ * @param tableName This is the table that has been dropped
+ */
+ @Override
+ public void clearMusicForTable(String tableName) {
+ dropDirtyRowTable(tableName);
+ String cql = String.format("DROP TABLE %s.%s;", music_ns, tableName);
+ executeMusicWriteQuery(cql);
+ }
+ /**
+ * This function is called whenever there is a DELETE to a row on a local SQL table, wherein it updates the
+ * MUSIC/Cassandra tables (both dirty bits and actual data) corresponding to the SQL write. MUSIC propagates
+ * it to the other replicas.
+ *
+ * @param tableName This is the table that has changed.
+ * @param oldRow This is a copy of the old row being deleted
+ */
+ @Override
+ public void deleteFromEntityTableInMusic(TableInfo ti, String tableName, JSONObject oldRow) {
+ Object[] objects = getObjects(ti,tableName,oldRow);
+ PreparedQueryObject pQueryObject = new PreparedQueryObject();
+ if (ti.hasKey()) {
+ assert(ti.columns.size() == objects.length);
+ } else {
+ assert(ti.columns.size()+1 == objects.length);
+ }
+
+ StringBuilder where = new StringBuilder();
+ List<Object> vallist = new ArrayList<Object>();
+ String pfx = "";
+ for (int i = 0; i < ti.columns.size(); i++) {
+ if (ti.iskey.get(i)) {
+ where.append(pfx)
+ .append(ti.columns.get(i))
+ .append("=?");
+ vallist.add(objects[i]);
+ pQueryObject.addValue(objects[i]);
+ pfx = " AND ";
+ }
+ }
+ if (!ti.hasKey()) {
+ where.append(MDBC_PRIMARYKEY_NAME + "=?");
+ //\FIXME this is wrong, old row is not going to contain the UUID, this needs to be fixed
+ vallist.add(UUID.fromString((String) objects[0]));
+ pQueryObject.addValue(UUID.fromString((String) objects[0]));
+ }
+
+ String cql = String.format("DELETE FROM %s.%s WHERE %s;", music_ns, tableName, where.toString());
+ logger.error(EELFLoggerDelegate.errorLogger,"Executing MUSIC write:"+ cql);
+ pQueryObject.appendQueryString(cql);
+
+ /*PreparedStatement ps = getPreparedStatementFromCache(cql);
+ BoundStatement bound = ps.bind(vallist.toArray());
+ bound.setReadTimeoutMillis(60000);
+ Session sess = getMusicSession();
+ synchronized (sess) {
+ sess.execute(bound);
+ }*/
+ String primaryKey = getMusicKeyFromRow(ti,tableName, oldRow);
+ if(MusicMixin.criticalTables.contains(tableName)) {
+ ReturnType rt = null;
+ try {
+ rt = MusicPureCassaCore.atomicPut(music_ns, tableName, primaryKey, pQueryObject, null);
+ } catch (MusicLockingException e) {
+ e.printStackTrace();
+ } catch (MusicServiceException e) {
+ e.printStackTrace();
+ } catch (MusicQueryException e) {
+ e.printStackTrace();
+ }
+ if(rt.getResult().getResult().toLowerCase().equals("failure")) {
+ System.out.println("Failure while critical put..."+rt.getMessage());
+ }
+ } else {
+ ReturnType rt = MusicPureCassaCore.eventualPut(pQueryObject);
+ if(rt.getResult().getResult().toLowerCase().equals("failure")) {
+ System.out.println("Failure while critical put..."+rt.getMessage());
+ }
+ }
+ // Mark the dirty rows in music for all the replicas but us
+ markDirtyRow(ti,tableName, oldRow);
+ }
+
+ public Set<String> getMusicTableSet(String ns) {
+ Set<String> set = new TreeSet<String>();
+ String cql = String.format("SELECT TABLE_NAME FROM SYSTEM_SCHEMA.TABLES WHERE KEYSPACE_NAME = '%s'", ns);
+ ResultSet rs = executeMusicRead(cql);
+ for (Row row : rs) {
+ set.add(row.getString("TABLE_NAME").toUpperCase());
+ }
+ return set;
+ }
+ /**
+ * This method is called whenever there is a SELECT on a local SQL table, wherein it first checks the local
+ * dirty bits table to see if there are any keys in Cassandra whose value has not yet been sent to SQL
+ * @param tableName This is the table on which the select is being performed
+ */
+ @Override
+ public void readDirtyRowsAndUpdateDb(DBInterface dbi, String tableName) {
+ // Read dirty rows of this table from Music
+ TableInfo ti = dbi.getTableInfo(tableName);
+ List<Map<String,Object>> objlist = getDirtyRows(ti,tableName);
+ PreparedQueryObject pQueryObject = null;
+ String pre_cql = String.format("SELECT * FROM %s.%s WHERE ", music_ns, tableName);
+ List<Object> vallist = new ArrayList<Object>();
+ StringBuilder sb = new StringBuilder();
+ //\TODO Perform a batch operation instead of each row at a time
+ for (Map<String,Object> map : objlist) {
+ pQueryObject = new PreparedQueryObject();
+ sb.setLength(0);
+ vallist.clear();
+ String pfx = "";
+ for (String key : map.keySet()) {
+ sb.append(pfx).append(key).append("=?");
+ vallist.add(map.get(key));
+ pQueryObject.addValue(map.get(key));
+ pfx = " AND ";
+ }
+
+ String cql = pre_cql + sb.toString();
+ System.out.println("readDirtyRowsAndUpdateDb: cql: "+cql);
+ pQueryObject.appendQueryString(cql);
+ ResultSet dirtyRows = null;
+ try {
+ //\TODO Why is this an eventual put?, this should be an atomic
+ dirtyRows = MusicPureCassaCore.get(pQueryObject);
+ } catch (MusicServiceException e) {
+
+ e.printStackTrace();
+ }
+ /*
+ Session sess = getMusicSession();
+ PreparedStatement ps = getPreparedStatementFromCache(cql);
+ BoundStatement bound = ps.bind(vallist.toArray());
+ bound.setReadTimeoutMillis(60000);
+ ResultSet dirtyRows = null;
+ synchronized (sess) {
+ dirtyRows = sess.execute(bound);
+ }*/
+ List<Row> rows = dirtyRows.all();
+ if (rows.isEmpty()) {
+ // No rows, the row must have been deleted
+ deleteRowFromSqlDb(dbi,tableName, map);
+ } else {
+ for (Row row : rows) {
+ writeMusicRowToSQLDb(dbi,tableName, row);
+ }
+ }
+ }
+ }
+
+ private void deleteRowFromSqlDb(DBInterface dbi, String tableName, Map<String, Object> map) {
+ dbi.deleteRowFromSqlDb(tableName, map);
+ TableInfo ti = dbi.getTableInfo(tableName);
+ List<Object> vallist = new ArrayList<Object>();
+ for (int i = 0; i < ti.columns.size(); i++) {
+ if (ti.iskey.get(i)) {
+ String col = ti.columns.get(i);
+ Object val = map.get(col);
+ vallist.add(val);
+ }
+ }
+ cleanDirtyRow(ti, tableName, new JSONObject(vallist));
+ }
	/**
	 * This function copies the contents of a row in MUSIC into the corresponding row in the SQL table.
	 * While the INSERT is in flight, the synthetic row id is recorded in {@code in_progress} so that
	 * the SQL-side triggers do not propagate the same write back into MUSIC (which would loop).
	 * Afterwards the row's dirty-bit entry is cleaned.
	 * @param dbi the SQL-side mixin used to apply the insert
	 * @param tableName the name of the table in both MUSIC and SQL
	 * @param musicRow This is the row in Music that is being copied into SQL
	 */
	private void writeMusicRowToSQLDb(DBInterface dbi, String tableName, Row musicRow) {
		// First construct the map of columns and their values
		TableInfo ti = dbi.getTableInfo(tableName);
		Map<String, Object> map = new HashMap<String, Object>();
		List<Object> vallist = new ArrayList<Object>();
		// rowid is a synthetic identifier: tableName + "_" + each key value, used only for in_progress.
		String rowid = tableName;
		for (String col : ti.columns) {
			Object val = getValue(musicRow, col);
			map.put(col, val);
			if (ti.iskey(col)) {
				vallist.add(val);
				rowid += "_" + val.toString();
			}
		}

		logger.debug("Blocking rowid: "+rowid);
		in_progress.add(rowid);			// Block propagation of the following INSERT/UPDATE

		dbi.insertRowIntoSqlDb(tableName, map);

		logger.debug("Unblocking rowid: "+rowid);
		in_progress.remove(rowid);		// Unblock propagation

		// Historical direct-SQL implementation, kept for reference:
//		try {
//			String sql = String.format("INSERT INTO %s (%s) VALUES (%s);", tableName, fields.toString(), values.toString());
//			executeSQLWrite(sql);
//		} catch (SQLException e) {
//			logger.debug("Insert failed because row exists, do an update");
//			// TODO - rewrite this UPDATE command should not update key fields
//			String sql = String.format("UPDATE %s SET (%s) = (%s) WHERE %s", tableName, fields.toString(), values.toString(), where.toString());
//			try {
//				executeSQLWrite(sql);
//			} catch (SQLException e1) {
//				e1.printStackTrace();
//			}
//		}

		// Clear this row's entry from the dirty-row table (vallist holds the key values).
		ti = dbi.getTableInfo(tableName);
		cleanDirtyRow(ti, tableName, new JSONObject(vallist));

//		String selectQuery = "select "+ primaryKeyName+" FROM "+tableName+" WHERE "+primaryKeyName+"="+primaryKeyValue+";";
//		java.sql.ResultSet rs = executeSQLRead(selectQuery);
//		String dbWriteQuery=null;
//		try {
//			if(rs.next()){//this entry is there, do an update
//				dbWriteQuery = "UPDATE "+tableName+" SET "+columnNameString+" = "+ valueString +"WHERE "+primaryKeyName+"="+primaryKeyValue+";";
//			}else
//				dbWriteQuery = "INSERT INTO "+tableName+" VALUES"+valueString+";";
//			executeSQLWrite(dbWriteQuery);
//		} catch (SQLException e) {
//			// ZZTODO Auto-generated catch block
//			e.printStackTrace();
//		}

		//clean the music dirty bits table
//		String dirtyRowIdsTableName = music_ns+".DIRTY_"+tableName+"_"+myId;
//		String deleteQuery = "DELETE FROM "+dirtyRowIdsTableName+" WHERE dirtyRowKeys=$$"+primaryKeyValue+"$$;";
//		executeMusicWriteQuery(deleteQuery);
	}
+ private Object getValue(Row musicRow, String colname) {
+ ColumnDefinitions cdef = musicRow.getColumnDefinitions();
+ DataType colType;
+ try {
+ colType= cdef.getType(colname);
+ }
+ catch(IllegalArgumentException e) {
+ logger.warn("Colname is not part of table metadata: "+e);
+ throw e;
+ }
+ String typeStr = colType.getName().toString().toUpperCase();
+ switch (typeStr) {
+ case "BIGINT":
+ return musicRow.getLong(colname);
+ case "BOOLEAN":
+ return musicRow.getBool(colname);
+ case "BLOB":
+ return musicRow.getString(colname);
+ case "DATE":
+ return musicRow.getString(colname);
+ case "DECIMAL":
+ return musicRow.getDecimal(colname);
+ case "DOUBLE":
+ return musicRow.getDouble(colname);
+ case "SMALLINT":
+ case "INT":
+ return musicRow.getInt(colname);
+ case "TIMESTAMP":
+ return musicRow.getTimestamp(colname);
+ case "UUID":
+ return musicRow.getUUID(colname);
+ default:
+ logger.error(EELFLoggerDelegate.errorLogger, "UNEXPECTED COLUMN TYPE: columname="+colname+", columntype="+typeStr);
+ // fall thru
+ case "VARCHAR":
+ return musicRow.getString(colname);
+ }
+ }
+
	/**
	 * This method is called whenever there is an INSERT or UPDATE to a local SQL table, wherein it updates the
	 * MUSIC/Cassandra tables (both dirty bits and actual data) corresponding to the SQL write. Music propagates
	 * it to the other replicas. If the write originated from a MUSIC-to-SQL copy (tracked via
	 * {@code in_progress}), the MUSIC update is skipped to avoid an infinite propagation loop.
	 *
	 * @param ti column/key metadata for the changed table
	 * @param tableName This is the table that has changed.
	 * @param changedRow This is information about the row that has changed
	 */
	@Override
	public void updateDirtyRowAndEntityTableInMusic(TableInfo ti, String tableName, JSONObject changedRow) {
		// Build the CQL command
		Object[] objects = getObjects(ti,tableName,changedRow);
		StringBuilder fields = new StringBuilder();
		StringBuilder values = new StringBuilder();
		String rowid = tableName;
		Object[] newrow = new Object[objects.length];
		PreparedQueryObject pQueryObject = new PreparedQueryObject();
		String pfx = "";
		// keyoffset shifts the index into ti.columns when objects[0] holds the MDBC-generated
		// UUID (tables without a declared primary key): objects is then one longer than columns.
		int keyoffset=0;
		for (int i = 0; i < objects.length; i++) {
			if (!ti.hasKey() && i==0) {
				//We need to tack on cassandra's uid in place of a primary key
				fields.append(MDBC_PRIMARYKEY_NAME);
				values.append("?");
				newrow[i] = UUID.fromString((String) objects[i]);
				pQueryObject.addValue(newrow[i]);
				keyoffset=-1;
				pfx = ", ";
				continue;
			}
			fields.append(pfx).append(ti.columns.get(i+keyoffset));
			values.append(pfx).append("?");
			pfx = ", ";
			if (objects[i] instanceof byte[]) {
				// Cassandra doesn't seem to have a Codec to translate a byte[] to a ByteBuffer
				newrow[i] = ByteBuffer.wrap((byte[]) objects[i]);
				pQueryObject.addValue(newrow[i]);
			} else if (objects[i] instanceof Reader) {
				// Cassandra doesn't seem to have a Codec to translate a Reader to a ByteBuffer either...
				newrow[i] = ByteBuffer.wrap(readBytesFromReader((Reader) objects[i]));
				pQueryObject.addValue(newrow[i]);
			} else {
				newrow[i] = objects[i];
				pQueryObject.addValue(newrow[i]);
			}
			// Accumulate key values into the synthetic rowid used for the in_progress check below.
			if (i+keyoffset>=0 && ti.iskey.get(i+keyoffset)) {
				rowid += "_" + newrow[i].toString();
			}
		}

		if (in_progress.contains(rowid)) {
			// This call to updateDirtyRowAndEntityTableInMusic() was called as a result of a Cassandra -> H2 update; ignore
			logger.debug(EELFLoggerDelegate.applicationLogger, "updateDirtyRowAndEntityTableInMusic: bypassing MUSIC update on "+rowid);

		} else {
			// Update local MUSIC node. Note: in Cassandra you can insert again on an existing key..it becomes an update
			String cql = String.format("INSERT INTO %s.%s (%s) VALUES (%s);", music_ns, tableName, fields.toString(), values.toString());

			pQueryObject.appendQueryString(cql);
			String primaryKey = getMusicKeyFromRow(ti,tableName, changedRow);
			updateMusicDB(tableName, primaryKey, pQueryObject);

			// Historical direct-session implementation, kept for reference:
			/*PreparedStatement ps = getPreparedStatementFromCache(cql);
			BoundStatement bound = ps.bind(newrow);
			bound.setReadTimeoutMillis(60000);
			Session sess = getMusicSession();
			synchronized (sess) {
				sess.execute(bound);
			}*/
			// Mark the dirty rows in music for all the replicas but us
			markDirtyRow(ti,tableName, changedRow);
		}
	}
+
+
+
+ private byte[] readBytesFromReader(Reader rdr) {
+ StringBuilder sb = new StringBuilder();
+ try {
+ int ch;
+ while ((ch = rdr.read()) >= 0) {
+ sb.append((char)ch);
+ }
+ } catch (IOException e) {
+ logger.warn("readBytesFromReader: "+e);
+ }
+ return sb.toString().getBytes();
+ }
+
+ protected PreparedStatement getPreparedStatementFromCache(String cql) {
+ // Note: have to hope that the Session never changes!
+ if (!ps_cache.containsKey(cql)) {
+ Session sess = getMusicSession();
+ PreparedStatement ps = sess.prepare(cql);
+ ps_cache.put(cql, ps);
+ }
+ return ps_cache.get(cql);
+ }
+
	/**
	 * This method gets a connection to Music, lazily creating the Cassandra session
	 * (via a MusicConnector to {@code musicAddress}) on first call and caching it.
	 * NOTE(review): this lazy initialization is not synchronized — concurrent first calls
	 * could create two sessions; confirm whether callers are single-threaded here.
	 * @return the Cassandra Session to use
	 */
	protected Session getMusicSession() {
		// create cassandra session
		if (musicSession == null) {
			logger.info(EELFLoggerDelegate.applicationLogger, "Creating New Music Session");
			mCon = new MusicConnector(musicAddress);
			musicSession = mCon.getSession();
		}
		return musicSession;
	}
+
+ /**
+ * This method executes a write query in Music
+ * @param cql the CQL to be sent to Cassandra
+ */
+ protected void executeMusicWriteQuery(String cql) {
+ logger.debug(EELFLoggerDelegate.applicationLogger, "Executing MUSIC write:"+ cql);
+ PreparedQueryObject pQueryObject = new PreparedQueryObject();
+ pQueryObject.appendQueryString(cql);
+ ReturnType rt = MusicPureCassaCore.eventualPut(pQueryObject);
+ if(rt.getResult().getResult().toLowerCase().equals("failure")) {
+ logger.error(EELFLoggerDelegate.errorLogger, "Failure while eventualPut...: "+rt.getMessage());
+ }
+ /*Session sess = getMusicSession();
+ SimpleStatement s = new SimpleStatement(cql);
+ s.setReadTimeoutMillis(60000);
+ synchronized (sess) {
+ sess.execute(s);
+ }*/
+ }
+
+ /**
+ * This method executes a read query in Music
+ * @param cql the CQL to be sent to Cassandra
+ * @return a ResultSet containing the rows returned from the query
+ */
+ protected ResultSet executeMusicRead(String cql) {
+ logger.debug(EELFLoggerDelegate.applicationLogger, "Executing MUSIC write:"+ cql);
+ PreparedQueryObject pQueryObject = new PreparedQueryObject();
+ pQueryObject.appendQueryString(cql);
+ ResultSet results = null;
+ try {
+ results = MusicPureCassaCore.get(pQueryObject);
+ } catch (MusicServiceException e) {
+
+ e.printStackTrace();
+ }
+ return results;
+ /*Session sess = getMusicSession();
+ synchronized (sess) {
+ return sess.execute(cql);
+ }*/
+ }
+
	/**
	 * Returns the default primary key column name that this mixin uses for tables that
	 * do not declare a primary key of their own.
	 * @return the reserved MDBC primary-key column name
	 */
	public String getMusicDefaultPrimaryKeyName() {
		return MDBC_PRIMARYKEY_NAME;
	}
+
	/**
	 * Generates Cassandra-style primary-key values: a random (type 4) UUID rendered as a string.
	 * @return a freshly generated UUID string
	 */
	public String generateUniqueKey() {
		return UUID.randomUUID().toString();
	}
+
+ @Override
+ public String getMusicKeyFromRowWithoutPrimaryIndexes(TableInfo ti, String table, JSONObject dbRow) {
+ //\TODO this operation is super expensive to perform, both latency and BW
+ // it is better to add additional where clauses, and have the primary key
+ // to be composed of known columns of the table
+ // Adding this primary indexes would be an additional burden to the developers, which spanner
+ // also does, but otherwise performance is really bad
+ // At least it should have a set of columns that are guaranteed to be unique
+ StringBuilder cqlOperation = new StringBuilder();
+ cqlOperation.append("SELECT * FROM ")
+ .append(music_ns)
+ .append(".")
+ .append(table);
+ ResultSet musicResults = executeMusicRead(cqlOperation.toString());
+ Object[] dbRowObjects = getObjects(ti,table,dbRow);
+ while (!musicResults.isExhausted()) {
+ Row musicRow = musicResults.one();
+ if (rowIs(ti, musicRow, dbRowObjects)) {
+ return ((UUID)getValue(musicRow, MDBC_PRIMARYKEY_NAME)).toString();
+ }
+ }
+ //should never reach here
+ return null;
+ }
+
+ /**
+ * Checks to see if this row is in list of database entries
+ * @param ti
+ * @param musicRow
+ * @param dbRow
+ * @return
+ */
+ private boolean rowIs(TableInfo ti, Row musicRow, Object[] dbRow) {
+ //System.out.println("Comparing " + musicRow.toString());
+ boolean sameRow=true;
+ for (int i=0; i<ti.columns.size(); i++) {
+ Object val = getValue(musicRow, ti.columns.get(i));
+ if (!dbRow[i].equals(val)) {
+ sameRow=false;
+ break;
+ }
+ }
+ return sameRow;
+ }
+
+ @Override
+ public String getMusicKeyFromRow(TableInfo ti, String tableName, JSONObject row) {
+ List<String> keyCols = ti.getKeyColumns();
+ if(keyCols.isEmpty()){
+ throw new IllegalArgumentException("Table doesn't have defined primary indexes ");
+ }
+ StringBuilder key = new StringBuilder();
+ String pfx = "";
+ for(String keyCol: keyCols) {
+ key.append(pfx);
+ key.append(row.getString(keyCol));
+ pfx = ",";
+ }
+ String keyStr = key.toString();
+ return keyStr;
+ }
+
+ public void updateMusicDB(String tableName, String primaryKey, PreparedQueryObject pQObject) {
+ if(MusicMixin.criticalTables.contains(tableName)) {
+ ReturnType rt = null;
+ try {
+ rt = MusicPureCassaCore.atomicPut(music_ns, tableName, primaryKey, pQObject, null);
+ } catch (MusicLockingException e) {
+ e.printStackTrace();
+ } catch (MusicServiceException e) {
+ e.printStackTrace();
+ } catch (MusicQueryException e) {
+ e.printStackTrace();
+ }
+ if(rt.getResult().getResult().toLowerCase().equals("failure")) {
+ System.out.println("Failure while critical put..."+rt.getMessage());
+ }
+ } else {
+ ReturnType rt = MusicPureCassaCore.eventualPut(pQObject);
+ if(rt.getResult().getResult().toLowerCase().equals("failure")) {
+ System.out.println("Failure while critical put..."+rt.getMessage());
+ }
+ }
+ }
+
+
+ private PreparedQueryObject createAppendRRTIndexToTitQuery(String titTable, String uuid, String table, String redoUuid){
+ PreparedQueryObject query = new PreparedQueryObject();
+ StringBuilder appendBuilder = new StringBuilder();
+ appendBuilder.append("UPDATE ")
+ .append(music_ns)
+ .append(".")
+ .append(titTable)
+ .append(" SET redo = redo +[('")
+ .append(table)
+ .append("',")
+ .append(redoUuid)
+ .append(")] WHERE id = ")
+ .append(uuid)
+ .append(";");
+ query.appendQueryString(appendBuilder.toString());
+ return query;
+ }
+
+ protected String createAndAssignLock(String fullyQualifiedKey, DatabasePartition partition, String keyspace, String table, String key) throws MDBCServiceException {
+ String lockId;
+ lockId = MusicPureCassaCore.createLockReference(fullyQualifiedKey);
+ ReturnType lockReturn;
+ try {
+ lockReturn = MusicPureCassaCore.acquireLock(fullyQualifiedKey,lockId);
+ } catch (MusicLockingException e) {
+ logger.error(EELFLoggerDelegate.errorLogger, "Lock was not acquire correctly for key "+fullyQualifiedKey);
+ throw new MDBCServiceException("Lock was not acquire correctly for key "+fullyQualifiedKey);
+ } catch (MusicServiceException e) {
+ logger.error(EELFLoggerDelegate.errorLogger, "Error in music, when locking key: "+fullyQualifiedKey);
+ throw new MDBCServiceException("Error in music, when locking: "+fullyQualifiedKey);
+ } catch (MusicQueryException e) {
+ logger.error(EELFLoggerDelegate.errorLogger, "Error in executing query music, when locking key: "+fullyQualifiedKey);
+ throw new MDBCServiceException("Error in executing query music, when locking: "+fullyQualifiedKey);
+ }
+ //\TODO this is wrong, we should have a better way to obtain a lock forcefully, clean the queue and obtain the lock
+ if(lockReturn.getResult().compareTo(ResultType.SUCCESS) != 0 ) {
+ try {
+ MusicPureCassaCore.releaseLock(fullyQualifiedKey,lockId,false);
+ CassaLockStore lockingServiceHandle = MusicPureCassaCore.getLockingServiceHandle();
+ UUID uuid = lockingServiceHandle.peekLockQueue(keyspace, table, key);
+ String uuidStr = uuid.toString();
+ while(uuidStr != lockId) {
+ MusicPureCassaCore.releaseLock(fullyQualifiedKey, uuid.toString(), false);
+ try {
+ uuid = lockingServiceHandle.peekLockQueue(keyspace, table, key);
+ uuidStr = uuid.toString();
+ } catch(NullPointerException e){
+ //Ignore null pointer exception
+ lockId = MusicPureCassaCore.createLockReference(fullyQualifiedKey);
+ uuidStr = lockId;
+ }
+ }
+ lockReturn = MusicPureCassaCore.acquireLock(fullyQualifiedKey,lockId);
+
+ } catch (MusicLockingException e) {
+ throw new MDBCServiceException("Could not lock the corresponding lock");
+ } catch (MusicServiceException e) {
+ logger.error(EELFLoggerDelegate.errorLogger, "Error in music, when locking key: "+fullyQualifiedKey);
+ throw new MDBCServiceException("Error in music, when locking: "+fullyQualifiedKey);
+ } catch (MusicQueryException e) {
+ logger.error(EELFLoggerDelegate.errorLogger, "Error in executing query music, when locking key: "+fullyQualifiedKey);
+ throw new MDBCServiceException("Error in executing query music, when locking: "+fullyQualifiedKey);
+ }
+ }
+ if(lockReturn.getResult().compareTo(ResultType.SUCCESS) != 0 ) {
+ throw new MDBCServiceException("Could not lock the corresponding lock");
+ }
+ //TODO: Java newbie here, verify that this lockId is actually assigned to the global DatabasePartition in the StateManager instance
+ partition.setLockId(lockId);
+ return lockId;
+ }
+
	/**
	 * Pushes a new row into the Redo Record Table (RRT): the serialized transaction digest keyed
	 * by (lockId, commitId), written with a critical non-key-related put.
	 * NOTE(review): the digest is spliced into the CQL inside single quotes by string
	 * concatenation — a digest containing a quote would break the statement; verify the
	 * serialized form cannot contain one, or switch to a bound value.
	 * @param lockId the lease (lock) id owning this commit
	 * @param commitId the per-transaction commit counter
	 * @param transactionDigest the staged changes, serialized into the row
	 * @throws MDBCServiceException if serialization or the MUSIC put fails
	 */
	protected void pushRowToRRT(String lockId, String commitId, HashMap<Range,StagingTable> transactionDigest) throws MDBCServiceException{
		PreparedQueryObject query = new PreparedQueryObject();
		StringBuilder cqlQuery = new StringBuilder("INSERT INTO ")
			.append(music_ns)
			.append('.')
			.append(redoRecordTableName)
			.append(" (leaseid,leasecounter,transactiondigest) ")
			.append("VALUES ('")
			.append( lockId ).append("',")
			.append( commitId ).append(",'");
		try {
			cqlQuery.append( MDBCUtils.toString(transactionDigest) );
		} catch (IOException e) {
			// NOTE(review): the IOException cause is dropped here; consider chaining it.
			logger.error(EELFLoggerDelegate.errorLogger, "Transaction Digest serialization was invalid for commit "+commitId);
			throw new MDBCServiceException("Transaction Digest serialization was invalid for commit "+commitId);
		}
		cqlQuery.append("');");
		query.appendQueryString(cqlQuery.toString());
		//\TODO check if I am not shooting on my own foot
		try {
			MusicPureCassaCore.nonKeyRelatedPut(query,"critical");
		} catch (MusicServiceException e) {
			// NOTE(review): this message says "serialization" but the failure is the MUSIC put.
			logger.error(EELFLoggerDelegate.errorLogger, "Transaction Digest serialization was invalid for commit "+commitId);
			throw new MDBCServiceException("Transaction Digest serialization for commit "+commitId);
		}
	}
+
+ protected void appendIndexToTit(String lockId, String commitId, String TITIndex) throws MDBCServiceException{
+ StringBuilder redoUuidBuilder = new StringBuilder();
+ redoUuidBuilder.append("('")
+ .append(lockId)
+ .append("',")
+ .append(commitId)
+ .append(")");
+ PreparedQueryObject appendQuery = createAppendRRTIndexToTitQuery(transactionInformationTableName, TITIndex, redoRecordTableName, redoUuidBuilder.toString());
+ ReturnType returnType = MusicPureCassaCore.criticalPut(music_ns, transactionInformationTableName, TITIndex, appendQuery, lockId, null);
+ if(returnType.getResult().compareTo(ResultType.SUCCESS) != 0 ){
+ logger.error(EELFLoggerDelegate.errorLogger, "Error when executing append operation with return type: "+returnType.getMessage());
+ throw new MDBCServiceException("Error when executing append operation with return type: "+returnType.getMessage());
+ }
+ }
+
	/**
	 * Commits a transaction's staged changes to MUSIC: (0) ensure we hold the lock on the
	 * partition's TIT row, (1) push the serialized digest into the Redo Record Table,
	 * (2) record the redo id in the progress keeper, (3) append the redo id to the TIT row.
	 * @param dbi the SQL-side mixin (currently unused in this implementation)
	 * @param partition the partition being committed; supplies the TIT index and lock id
	 * @param transactionDigest the staged changes to persist
	 * @param txId the local transaction id, used to look up the commit counter
	 * @param progressKeeper tracks per-transaction commit progress; must already contain txId
	 * @throws MDBCServiceException if the TIT index is missing, the lock cannot be obtained,
	 *         the transaction is unknown to the progress keeper, or any MUSIC step fails
	 */
	@Override
	public void commitLog(DBInterface dbi, DatabasePartition partition, HashMap<Range,StagingTable> transactionDigest, String txId ,TxCommitProgress progressKeeper) throws MDBCServiceException{
		String TITIndex = partition.getTransactionInformationIndex();
		if(TITIndex.isEmpty()) {
			//\TODO Fetch TITIndex from the Range Information Table
			throw new MDBCServiceException("TIT Index retrieval not yet implemented");
		}
		String fullyQualifiedTitKey = music_ns+"."+ transactionInformationTableName +"."+TITIndex;
		//0. See if reference to lock was already created
		String lockId = partition.getLockId();
		if(lockId == null || lockId.isEmpty()) {
			lockId = createAndAssignLock(fullyQualifiedTitKey,partition,music_ns,transactionInformationTableName,TITIndex);
		}

		String commitId;
		//Generate a local commit id
		if(progressKeeper.containsTx(txId)) {
			commitId = progressKeeper.getCommitId(txId).toString();
		}
		else{
			logger.error(EELFLoggerDelegate.errorLogger, "Tx with id "+txId+" was not created in the TxCommitProgress ");
			throw new MDBCServiceException("Tx with id "+txId+" was not created in the TxCommitProgress ");
		}
		//Add creation type of transaction digest

		//1. Push new row to RRT and obtain its index
		pushRowToRRT(lockId, commitId, transactionDigest);

		//2. Save RRT index to RQ
		// NOTE(review): progressKeeper was already dereferenced above, so this null check is moot.
		if(progressKeeper!= null) {
			progressKeeper.setRecordId(txId,new RedoRecordId(lockId, commitId));
		}
		//3. Append RRT index into the corresponding TIT row array
		appendIndexToTit(lockId,commitId,TITIndex);
	}
+
	/**
	 * NOTE(review): this method appears unfinished — it scans the whole table but never
	 * inspects the result set, always logging an error and returning null. It is marked
	 * unused; confirm whether it should be completed or removed.
	 * @param tableName the table to scan
	 * @param string unused
	 * @param rowValues unused
	 * @return always null in the current implementation
	 */
	@SuppressWarnings("unused")
	private String getUid(String tableName, String string, Object[] rowValues) {
		//
		// Update local MUSIC node. Note: in Cassandra you can insert again on an existing key..it becomes an update
		String cql = String.format("SELECT * FROM %s.%s;", music_ns, tableName);
		PreparedStatement ps = getPreparedStatementFromCache(cql);
		BoundStatement bound = ps.bind();
		bound.setReadTimeoutMillis(60000);
		Session sess = getMusicSession();
		ResultSet rs;
		synchronized (sess) {
			rs = sess.execute(bound);
		}

		//
		//should never reach here
		logger.error(EELFLoggerDelegate.errorLogger, "Could not find the row in the primary key");

		return null;
	}
+
+ @Override
+ public Object[] getObjects(TableInfo ti, String tableName, JSONObject row) {
+ // \FIXME: we may need to add the primary key of the row if it was autogenerated by MUSIC
+ List<String> cols = ti.columns;
+ int size = cols.size();
+ boolean hasDefault = false;
+ if(row.has(getMusicDefaultPrimaryKeyName())) {
+ size++;
+ hasDefault = true;
+ }
+
+ Object[] objects = new Object[size];
+ int idx = 0;
+ if(hasDefault) {
+ objects[idx++] = row.getString(getMusicDefaultPrimaryKeyName());
+ }
+ for(String col : ti.columns) {
+ objects[idx]=row.get(col);
+ }
+ return objects;
+ }
+
	// ------------------------------------------------------------------
	// The MusicInterface operations below are not yet implemented by this
	// mixin; each one intentionally throws UnsupportedOperationException.
	// ------------------------------------------------------------------

	@Override
	public TransactionInformationElement getTransactionInformation(String id){
		throw new UnsupportedOperationException();
	}

	@Override
	public TitReference createTransactionInformationRow(TransactionInformationElement info){
		throw new UnsupportedOperationException();
	}

	@Override
	public void appendToRedoLog(TitReference titRow, DatabasePartition partition, RedoRecordId newRecord){
		throw new UnsupportedOperationException();
	}

	@Override
	public void appendRedoRecord(String redoRecordTable, RedoRecordId newRecord, String transactionDigest){
		throw new UnsupportedOperationException();
	}

	@Override
	public void updateTablePartition(String table, DatabasePartition partition){
		throw new UnsupportedOperationException();
	}

	@Override
	public TitReference createPartition(List<String> tables, int replicationFactor, String currentOwner){
		throw new UnsupportedOperationException();
	}

	@Override
	public void updatePartitionOwner(String partition, String owner){
		throw new UnsupportedOperationException();
	}

	@Override
	public void updateTitReference(String partition, TitReference tit){
		throw new UnsupportedOperationException();
	}

	@Override
	public void updatePartitionReplicationFactor(String partition, int replicationFactor){
		throw new UnsupportedOperationException();
	}

	@Override
	public void addRedoHistory(DatabasePartition partition, TitReference newTit, List<TitReference> old){
		throw new UnsupportedOperationException();
	}

	@Override
	public List<RedoHistoryElement> getHistory(DatabasePartition partition){
		throw new UnsupportedOperationException();
	}

	@Override
	public List<PartitionInformation> getPartitionInformation(DatabasePartition partition){
		throw new UnsupportedOperationException();
	}

	@Override
	public TablePartitionInformation getTablePartitionInformation(String table){
		throw new UnsupportedOperationException();
	}

	@Override
	public HashMap<Range,StagingTable> getTransactionDigest(RedoRecordId id){
		throw new UnsupportedOperationException();
	}
+
+ }
diff --git a/src/main/java/com/att/research/mdbc/mixins/DBInterface.java b/src/main/java/com/att/research/mdbc/mixins/DBInterface.java
new file mode 100755
index 0000000..9aa94f9
--- /dev/null
+++ b/src/main/java/com/att/research/mdbc/mixins/DBInterface.java
@@ -0,0 +1,91 @@
+package com.att.research.mdbc.mixins;
+
+import java.sql.ResultSet;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+
+import com.att.research.mdbc.Range;
+import com.att.research.mdbc.TableInfo;
+
+/**
+ * This Interface defines the methods that MDBC needs in order to mirror data to/from a Database instance.
+ *
+ * @author Robert P. Eby
+ */
public interface DBInterface {
	/**
	 * Get the name of this DBInterface mixin object.
	 * @return the name
	 */
	String getMixinName();
	/**
	 * Do what is needed to close down the database connection.
	 */
	void close();
	/**
	 * Get a set of the table names in the database. The table names should be returned in UPPER CASE.
	 * @return the set
	 */
	Set<String> getSQLTableSet();
	/**
	 * Return the name of the database that the driver is connected to
	 * @return the database name
	 */
	String getDatabaseName();
	/**
	 * Return a TableInfo object for the specified table.
	 * @param tableName the table to look up
	 * @return a TableInfo object containing the info we need, or null if the table does not exist
	 */
	TableInfo getTableInfo(String tableName);
	/**
	 * This method should create triggers in the database to be called for each row after every INSERT,
	 * UPDATE and DELETE, and before every SELECT.
	 * @param tableName this is the table on which triggers are being created.
	 */
	void createSQLTriggers(String tableName);
	/**
	 * This method should drop all triggers previously created in the database for the table.
	 * @param tableName this is the table on which triggers are being dropped.
	 */
	void dropSQLTriggers(String tableName);
	/**
	 * This method inserts a row into the SQL database, defined via a map of column names and values.
	 * @param tableName the table to insert the row into
	 * @param map map of column names &rarr; values to use for the keys when inserting the row
	 */
	void insertRowIntoSqlDb(String tableName, Map<String, Object> map);
	/**
	 * This method deletes a row from the SQL database, defined via a map of column names and values.
	 * @param tableName the table to delete the row from
	 * @param map map of column names &rarr; values to use for the keys when deleting the row
	 */
	void deleteRowFromSqlDb(String tableName, Map<String, Object> map);
	/**
	 * Code to be run within the DB driver before a SQL statement is executed. This is where tables
	 * can be synchronized before a SELECT, for those databases that do not support SELECT triggers.
	 * @param sql the SQL statement that is about to be executed
	 */
	void preStatementHook(final String sql);
	/**
	 * Code to be run within the DB driver after a SQL statement has been executed. This is where remote
	 * statement actions can be copied back to Cassandra/MUSIC.
	 * @param sql the SQL statement that was executed
	 * @param transactionDigest accumulates the staged changes produced by the statement, per range
	 */
	void postStatementHook(final String sql,Map<Range,StagingTable> transactionDigest);
	/**
	 * This method executes a read query in the SQL database. Methods that call this method should be sure
	 * to call resultset.getStatement().close() when done in order to free up resources.
	 * @param sql the query to run
	 * @return a ResultSet containing the rows returned from the query
	 */
	ResultSet executeSQLRead(String sql);

	/**
	 * Synchronizes the contents of the named table between MUSIC and the SQL database.
	 * @param tableName the table to synchronize
	 */
	void synchronizeData(String tableName);

	/**
	 * Lists table names reserved for internal use that must not be mirrored to MUSIC.
	 * @return the reserved table names
	 */
	List<String> getReservedTblNames();

	/**
	 * Determines the primary key referenced by the given SQL statement on the given table.
	 * @param sql the SQL statement to inspect
	 * @param tableName the table the statement operates on
	 * @return the primary key value
	 */
	String getPrimaryKey(String sql, String tableName);
}
diff --git a/src/main/java/com/att/research/mdbc/mixins/MixinFactory.java b/src/main/java/com/att/research/mdbc/mixins/MixinFactory.java
new file mode 100755
index 0000000..68d2986
--- /dev/null
+++ b/src/main/java/com/att/research/mdbc/mixins/MixinFactory.java
@@ -0,0 +1,125 @@
+package com.att.research.mdbc.mixins;
+
+import java.lang.reflect.Constructor;
+import java.lang.reflect.InvocationTargetException;
+import java.sql.Connection;
+import java.util.Properties;
+
+import com.att.research.logging.EELFLoggerDelegate;
+import com.att.research.mdbc.DatabasePartition;
+import com.att.research.mdbc.MusicSqlManager;
+
+/**
+ * This class is used to construct instances of Mixins that implement either the {@link com.att.research.mdbc.mixins.DBInterface}
+ * interface, or the {@link com.att.research.mdbc.mixins.MusicInterface} interface. The Mixins are searched for in the CLASSPATH.
+ *
+ * @author Robert P. Eby
+ */
+public class MixinFactory {
+	private static EELFLoggerDelegate logger = EELFLoggerDelegate.getLogger(MixinFactory.class);
+
+	// Only static methods...
+	private MixinFactory(){}
+
+	/**
+	 * Look for a class in CLASSPATH that implements the {@link DBInterface} interface, and has the mixin name <i>name</i>.
+	 * If one is found, construct and return it, using the other arguments for the constructor.
+	 * @param name the name of the Mixin
+	 * @param msm the MusicSqlManager to use as an argument to the constructor
+	 * @param url the URL to use as an argument to the constructor
+	 * @param conn the underlying JDBC Connection
+	 * @param info the Properties to use as an argument to the constructor
+	 * @return the newly constructed DBInterface, or null if one cannot be found.
+	 */
+	public static DBInterface createDBInterface(String name, MusicSqlManager msm, String url, Connection conn, Properties info) {
+		for (Class<?> cl : Utils.getClassesImplementing(DBInterface.class)) {
+			try {
+				// Probe each candidate through its no-arg constructor just to read the mixin name.
+				// NOTE(review): Class.getConstructor() never returns null; a missing constructor
+				// throws NoSuchMethodException, which lands in the catch below. The null checks
+				// here are therefore redundant but harmless.
+				Constructor<?> con = cl.getConstructor();
+				if (con != null) {
+					DBInterface dbi = (DBInterface) con.newInstance();
+					String miname = dbi.getMixinName();
+					logger.info(EELFLoggerDelegate.applicationLogger,"Checking "+miname);
+					if (miname.equalsIgnoreCase(name)) {
+						// Name matched: re-construct with the full 4-arg constructor.
+						con = cl.getConstructor(MusicSqlManager.class, String.class, Connection.class, Properties.class);
+						if (con != null) {
+							logger.info(EELFLoggerDelegate.applicationLogger,"Found match: "+miname);
+							return (DBInterface) con.newInstance(msm, url, conn, info);
+						}
+					}
+				}
+			} catch (Exception e) {
+				// A candidate that cannot be probed/constructed is logged and skipped.
+				logger.error(EELFLoggerDelegate.errorLogger,"createDBInterface: "+e);
+			}
+		}
+		return null;
+	}
+	/**
+	 * Look for a class in CLASSPATH that implements the {@link MusicInterface} interface, and has the mixin name <i>name</i>.
+	 * If one is found, construct and return it, using the other arguments for the constructor.
+	 * @param name the name of the Mixin
+	 * @param url the URL to use as an argument to the constructor
+	 * @param info the Properties to use as an argument to the constructor
+	 * @param ranges the DatabasePartition to use as an argument to the constructor
+	 * @return the newly constructed MusicInterface, or null if one cannot be found.
+	 */
+	public static MusicInterface createMusicInterface(String name, String url, Properties info, DatabasePartition ranges) {
+		for (Class<?> cl : Utils.getClassesImplementing(MusicInterface.class)) {
+			try {
+				Constructor<?> con = cl.getConstructor();
+				if (con != null) { //TODO: is this necessary? Don't think it could ever be null?
+					MusicInterface mi = (MusicInterface) con.newInstance();
+					String miname = mi.getMixinName();
+					logger.info(EELFLoggerDelegate.applicationLogger, "Checking "+miname);
+					if (miname.equalsIgnoreCase(name)) {
+						// Name matched: re-construct with the full 3-arg constructor.
+						con = cl.getConstructor(String.class, Properties.class, DatabasePartition.class);
+						if (con != null) {
+							logger.info(EELFLoggerDelegate.applicationLogger,"Found match: "+miname);
+							return (MusicInterface) con.newInstance(url, info, ranges);
+						}
+					}
+				}
+			} catch (InvocationTargetException e) {
+				// Unwrap the real cause thrown by the mixin's constructor for a useful log line.
+				logger.error(EELFLoggerDelegate.errorLogger,"createMusicInterface: "+e.getCause().toString());
+			}
+			catch (Exception e) {
+				logger.error(EELFLoggerDelegate.errorLogger,"createMusicInterface: "+e);
+			}
+		}
+		return null;
+	}
+
+	// Unfortunately, this version does not work when MDBC is built as a JBoss module,
+	// where something funny is happening with the classloaders
+//	@SuppressWarnings("unused")
+//	private static List<Class<?>> getClassesImplementingOld(Class<?> implx) {
+//		List<Class<?>> list = new ArrayList<Class<?>>();
+//		try {
+//			ClassLoader cldr = MixinFactory.class.getClassLoader();
+//			while (cldr != null) {
+//				ClassPath cp = ClassPath.from(cldr);
+//				for (ClassPath.ClassInfo x : cp.getAllClasses()) {
+//					if (x.toString().startsWith("com.att.")) {	// mixins must have a package starting with com.att.
+//						Class<?> cl = x.load();
+//						if (impl(cl, implx)) {
+//							list.add(cl);
+//						}
+//					}
+//				}
+//				cldr = cldr.getParent();
+//			}
+//		} catch (IOException e) {
+//			// ignore
+//		}
+//		return list;
+//	}
+	/**
+	 * Walks the superclass chain of <i>cl</i> checking whether any class in the chain
+	 * directly implements the interface <i>imp</i>.
+	 * NOTE(review): only *direct* interfaces are checked at each level; an interface that
+	 * merely extends <i>imp</i> will not match — confirm this is intended.
+	 * @param cl the class to test
+	 * @param imp the interface to look for
+	 * @return true if found anywhere in the superclass chain
+	 */
+	static boolean impl(Class<?> cl, Class<?> imp) {
+		for (Class<?> c2 : cl.getInterfaces()) {
+			if (c2 == imp) {
+				return true;
+			}
+		}
+		Class<?> c2 = cl.getSuperclass();
+		return (c2 != null) ? impl(c2, imp) : false;
+	}
+}
diff --git a/src/main/java/com/att/research/mdbc/mixins/MusicConnector.java b/src/main/java/com/att/research/mdbc/mixins/MusicConnector.java
new file mode 100755
index 0000000..ea32a85
--- /dev/null
+++ b/src/main/java/com/att/research/mdbc/mixins/MusicConnector.java
@@ -0,0 +1,124 @@
+package com.att.research.mdbc.mixins;
+
+import java.net.InetAddress;
+import java.net.NetworkInterface;
+import java.net.SocketException;
+import java.util.ArrayList;
+import java.util.Enumeration;
+import java.util.Iterator;
+import java.util.List;
+
+import com.att.research.logging.EELFLoggerDelegate;
+import com.datastax.driver.core.Cluster;
+import com.datastax.driver.core.HostDistance;
+import com.datastax.driver.core.Metadata;
+import com.datastax.driver.core.PoolingOptions;
+import com.datastax.driver.core.Session;
+import com.datastax.driver.core.exceptions.NoHostAvailableException;
+import org.onap.music.main.MusicPureCassaCore;
+
+/**
+ * This class allows for management of the Cassandra Cluster and Session objects.
+ *
+ * @author Robert P. Eby
+ */
+public class MusicConnector {
+
+	private EELFLoggerDelegate logger = EELFLoggerDelegate.getLogger(MusicConnector.class);
+
+	// Cassandra driver handles; null until a connection attempt succeeds.
+	private Session session;
+	private Cluster cluster;
+
+	protected MusicConnector() {
+		//to defeat instantiation since this is a singleton
+	}
+
+	/**
+	 * Connect using the given address (may be a comma-separated list of hosts).
+	 * @param address the Cassandra contact point(s)
+	 */
+	public MusicConnector(String address) {
+//		connectToCassaCluster(address);
+		connectToMultipleAddresses(address);
+	}
+
+	public Session getSession() {
+		return session;
+	}
+
+	/**
+	 * Close the session before the cluster (driver-recommended order) and clear both
+	 * references; safe to call when either is already null.
+	 */
+	public void close() {
+		if (session != null)
+			session.close();
+		session = null;
+		if (cluster != null)
+			cluster.close();
+		cluster = null;
+	}
+
+	/**
+	 * Enumerate every IP address bound to any local network interface.
+	 * @return the list of host addresses (possibly empty on SocketException)
+	 */
+	private List<String> getAllPossibleLocalIps(){
+		ArrayList<String> allPossibleIps = new ArrayList<String>();
+		try {
+			Enumeration<NetworkInterface> en = NetworkInterface.getNetworkInterfaces();
+			while(en.hasMoreElements()){
+				NetworkInterface ni=(NetworkInterface) en.nextElement();
+				Enumeration<InetAddress> ee = ni.getInetAddresses();
+				while(ee.hasMoreElements()) {
+					InetAddress ia= (InetAddress) ee.nextElement();
+					allPossibleIps.add(ia.getHostAddress());
+				}
+			}
+		} catch (SocketException e) {
+			e.printStackTrace();
+		}
+		return allPossibleIps;
+	}
+
+	/**
+	 * Delegates connection management to the MUSIC core library; the commented-out
+	 * code below is the former direct-driver path kept for reference.
+	 */
+	private void connectToMultipleAddresses(String address) {
+		MusicPureCassaCore.getDSHandle(address);
+		/*
+		PoolingOptions poolingOptions =
+				new PoolingOptions()
+				.setConnectionsPerHost(HostDistance.LOCAL,  4, 10)
+				.setConnectionsPerHost(HostDistance.REMOTE, 2, 4);
+		String[] music_hosts = address.split(",");
+		if (cluster == null) {
+			logger.info(EELFLoggerDelegate.applicationLogger,"Initializing MUSIC Client with endpoints "+address);
+			cluster = Cluster.builder()
+					.withPort(9042)
+					.withPoolingOptions(poolingOptions)
+					.withoutMetrics()
+					.addContactPoints(music_hosts)
+					.build();
+			Metadata metadata = cluster.getMetadata();
+			logger.info(EELFLoggerDelegate.applicationLogger,"Connected to cluster:"+metadata.getClusterName()+" at address:"+address);
+
+		}
+		session = cluster.connect();
+		*/
+	}
+
+	// Tries the supplied address first; on NoHostAvailableException it retries with each
+	// local IP in turn.
+	// NOTE(review): the next candidate is fetched via it.next() inside the catch block;
+	// when candidates are exhausted this throws NoSuchElementException instead of failing
+	// gracefully — confirm intended.
+	@SuppressWarnings("unused")
+	private void connectToCassaCluster(String address) {
+		PoolingOptions poolingOptions =
+				new PoolingOptions()
+				.setConnectionsPerHost(HostDistance.LOCAL,  4, 10)
+				.setConnectionsPerHost(HostDistance.REMOTE, 2, 4);
+		Iterator<String> it = getAllPossibleLocalIps().iterator();
+		logger.info(EELFLoggerDelegate.applicationLogger,"Iterating through possible ips:"+getAllPossibleLocalIps());
+
+		while (it.hasNext()) {
+			try {
+				cluster = Cluster.builder()
+						.withPort(9042)
+						.withPoolingOptions(poolingOptions)
+						.withoutMetrics()
+						.addContactPoint(address)
+						.build();
+				//cluster.getConfiguration().getSocketOptions().setReadTimeoutMillis(Integer.MAX_VALUE);
+				Metadata metadata = cluster.getMetadata();
+				logger.info(EELFLoggerDelegate.applicationLogger,"Connected to cluster:"+metadata.getClusterName()+" at address:"+address);
+
+				session = cluster.connect();
+				break;
+			} catch (NoHostAvailableException e) {
+				address = it.next();
+			}
+		}
+	}
+}
diff --git a/src/main/java/com/att/research/mdbc/mixins/MusicInterface.java b/src/main/java/com/att/research/mdbc/mixins/MusicInterface.java
new file mode 100755
index 0000000..94b3ac6
--- /dev/null
+++ b/src/main/java/com/att/research/mdbc/mixins/MusicInterface.java
@@ -0,0 +1,178 @@
+package com.att.research.mdbc.mixins;
+
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+
+import org.json.JSONObject;
+
+import com.att.research.exceptions.MDBCServiceException;
+import com.att.research.mdbc.DatabasePartition;
+import com.att.research.mdbc.Range;
+import com.att.research.mdbc.TableInfo;
+import org.onap.music.exceptions.MusicLockingException;
+
+/**
+ * This Interface defines the methods that MDBC needs for a class to provide access to the persistence layer of MUSIC.
+ *
+ * @author Robert P. Eby
+ */
+public interface MusicInterface {
+	/**
+	 * This function is used to create all the required data structures, both local
+	 * \TODO Check if this function is required in the MUSIC interface or could be just created on the constructor
+	 */
+	void initializeMdbcDataStructures() throws MDBCServiceException;
+	/**
+	 * Get the name of this MusicInterface mixin object.
+	 * @return the name
+	 */
+	String getMixinName();
+	/**
+	 * Gets the name of this MusicInterface mixin's default primary key name
+	 * @return default primary key name
+	 */
+	String getMusicDefaultPrimaryKeyName();
+	/**
+	 * generates a key or placeholder for what is required for a primary key
+	 * @return a primary key
+	 */
+	String generateUniqueKey();
+
+	/**
+	 * Find the key used with Music for a table that was created without a primary index
+	 * Name is long to avoid developers using it. For cassandra performance in this operation
+	 * is going to be really bad
+	 * @param ti information of the table in the SQL layer
+	 * @param table name of the table
+	 * @param dbRow row obtained from the SQL layer
+	 * @return key associated with the row
+	 */
+	String getMusicKeyFromRowWithoutPrimaryIndexes(TableInfo ti, String table, JSONObject dbRow);
+	/**
+	 * Do what is needed to close down the MUSIC connection.
+	 */
+	void close();
+	/**
+	 * This method creates a keyspace in Music/Cassandra to store the data corresponding to the SQL tables.
+	 * The keyspace name comes from the initialization properties passed to the JDBC driver.
+	 */
+	void createKeyspace();
+	/**
+	 * This method performs all necessary initialization in Music/Cassandra to store the table <i>tableName</i>.
+	 * @param tableName the table to initialize MUSIC for
+	 */
+	void initializeMusicForTable(TableInfo ti, String tableName);
+	/**
+	 * Create a <i>dirty row</i> table for the real table <i>tableName</i>. The primary keys columns from the real table are recreated in
+	 * the dirty table, along with a "REPLICA__" column that names the replica that should update its internal state from MUSIC.
+	 * @param tableName the table to create a "dirty" table for
+	 */
+	void createDirtyRowTable(TableInfo ti, String tableName);
+	/**
+	 * Drop the dirty row table for <i>tableName</i> from MUSIC.
+	 * @param tableName the table being dropped
+	 */
+	void dropDirtyRowTable(String tableName);
+	/**
+	 * Drops the named table and its dirty row table (for all replicas) from MUSIC. The dirty row table is dropped first.
+	 * @param tableName This is the table that has been dropped
+	 */
+	void clearMusicForTable(String tableName);
+	/**
+	 * Mark rows as "dirty" in the dirty rows table for <i>tableName</i>. Rows are marked for all replicas but
+	 * this one (this replica already has the up to date data).
+	 * @param tableName the table we are marking dirty
+	 * @param keys an ordered list of the values being put into the table. The values that correspond to the tables'
+	 * primary key are copied into the dirty row table.
+	 */
+	void markDirtyRow(TableInfo ti, String tableName, JSONObject keys);
+	/**
+	 * Remove the entries from the dirty row (for this replica) that correspond to a set of primary keys
+	 * @param tableName the table we are removing dirty entries from
+	 * @param keys the primary key values to use in the DELETE.  Note: this is *only* the primary keys, not a full table row.
+	 */
+	void cleanDirtyRow(TableInfo ti, String tableName, JSONObject keys);
+	/**
+	 * Get a list of "dirty rows" for a table.  The dirty rows returned apply only to this replica,
+	 * and consist of a Map of primary key column names and values.
+	 * @param tableName the table we are querying for
+	 * @return a list of maps; each list item is a map of the primary key names and values for that "dirty row".
+	 */
+	List<Map<String,Object>> getDirtyRows(TableInfo ti, String tableName);
+	/**
+	 * This method is called whenever there is a DELETE to a row on a local SQL table, wherein it updates the
+	 * MUSIC/Cassandra tables (both dirty bits and actual data) corresponding to the SQL write. MUSIC propagates
+	 * it to the other replicas.
+	 * @param tableName This is the table that has changed.
+	 * @param oldRow This is a copy of the old row being deleted
+	 */
+	void deleteFromEntityTableInMusic(TableInfo ti,String tableName, JSONObject oldRow);
+	/**
+	 * This method is called whenever there is a SELECT on a local SQL table, wherein it first checks the local
+	 * dirty bits table to see if there are any rows in Cassandra whose value needs to be copied to the local SQL DB.
+	 * @param tableName This is the table on which the select is being performed
+	 */
+	void readDirtyRowsAndUpdateDb(DBInterface dbi, String tableName);
+	/**
+	 * This method is called whenever there is an INSERT or UPDATE to a local SQL table, wherein it updates the
+	 * MUSIC/Cassandra tables (both dirty bits and actual data) corresponding to the SQL write. Music propagates
+	 * it to the other replicas.
+	 * @param tableName This is the table that has changed.
+	 * @param changedRow This is information about the row that has changed
+	 */
+	void updateDirtyRowAndEntityTableInMusic(TableInfo ti, String tableName, JSONObject changedRow);
+
+	Object[] getObjects(TableInfo ti, String tableName, JSONObject row);
+	/**
+	 * Returns the primary key associated with the given row
+	 * @param ti info of the table that is associated with the row
+	 * @param tableName name of the table that contains the row
+	 * @param changedRow row that is going to contain the information associated with the primary key
+	 * @return primary key of the row
+	 */
+	String getMusicKeyFromRow(TableInfo ti, String tableName, JSONObject changedRow);
+
+	/**
+	 * Commits the corresponding REDO-log into MUSIC
+	 *
+	 * @param dbi, the database interface use in the local SQL cache, where the music interface is being used
+	 * @param partition
+	 * @param transactionDigest digest of the transaction that is being committed into the Redo log in music. It has to be a HashMap, because it is required to be serializable
+	 * @param txId id associated with the log being send
+	 * @param progressKeeper data structure that is used to handle to detect failures, and know what to do
+	 * @throws MDBCServiceException
+	 */
+	void commitLog(DBInterface dbi, DatabasePartition partition, HashMap<Range,StagingTable> transactionDigest, String txId,TxCommitProgress progressKeeper) throws MDBCServiceException;
+
+	// --- Redo-log / partition bookkeeping operations ---
+	// NOTE(review): the javadoc below is inferred from method and type names only;
+	// confirm against the implementing mixin before relying on it.
+
+	/** Fetch the Transaction Information Table (TIT) element for the given transaction id. */
+	TransactionInformationElement getTransactionInformation(String id);
+
+	/** Create a new TIT row from <i>info</i> and return a reference to it. */
+	TitReference createTransactionInformationRow(TransactionInformationElement info);
+
+	/** Append a redo-record id to the redo log of the given TIT row/partition. */
+	void appendToRedoLog(TitReference titRow, DatabasePartition partition, RedoRecordId newRecord);
+
+	/** Store the serialized transaction digest under <i>newRecord</i> in the redo-record table. */
+	void appendRedoRecord(String redoRecordTable, RedoRecordId newRecord, String transactionDigest);
+
+	/** Associate <i>table</i> with the given partition. */
+	void updateTablePartition(String table, DatabasePartition partition);
+
+	/** Create a new partition over <i>tables</i> and return the TIT reference for it. */
+	TitReference createPartition(List<String> tables, int replicationFactor, String currentOwner);
+
+	/** Record <i>owner</i> as the current owner of the partition. */
+	void updatePartitionOwner(String partition, String owner);
+
+	/** Point the partition at a new TIT row. */
+	void updateTitReference(String partition, TitReference tit);
+
+	/** Change the replication factor recorded for the partition. */
+	void updatePartitionReplicationFactor(String partition, int replicationFactor);
+
+	/** Add a redo-history entry linking <i>newTit</i> to its predecessor TIT rows. */
+	void addRedoHistory(DatabasePartition partition, TitReference newTit, List<TitReference> old);
+
+	/** Retrieve the redo history recorded for the partition. */
+	List<RedoHistoryElement> getHistory(DatabasePartition partition);
+
+	/** Retrieve stored information about the partition. */
+	List<PartitionInformation> getPartitionInformation(DatabasePartition partition);
+
+	/** Retrieve the partition information associated with a single table. */
+	TablePartitionInformation getTablePartitionInformation(String table);
+
+	/** Deserialize and return the transaction digest stored under the given redo-record id. */
+	HashMap<Range,StagingTable> getTransactionDigest(RedoRecordId id);
+
+
+}
+
diff --git a/src/main/java/com/att/research/mdbc/mixins/MusicMixin.java b/src/main/java/com/att/research/mdbc/mixins/MusicMixin.java
new file mode 100644
index 0000000..1fee59c
--- /dev/null
+++ b/src/main/java/com/att/research/mdbc/mixins/MusicMixin.java
@@ -0,0 +1,249 @@
+package com.att.research.mdbc.mixins;
+
+import java.io.IOException;
+import java.io.InputStream;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.Properties;
+import java.util.Set;
+
+import com.att.research.mdbc.LockId;
+import org.json.JSONObject;
+import org.onap.music.exceptions.MusicLockingException;
+
+import com.att.research.exceptions.MDBCServiceException;
+import com.att.research.mdbc.DatabasePartition;
+import com.att.research.mdbc.Range;
+import com.att.research.mdbc.TableInfo;
+import org.onap.music.main.MusicPureCassaCore;
+
+/**
+ * Stub/no-op implementation of {@link MusicInterface}: every interface method either
+ * returns null or does nothing. It also carries static helpers for loading the list of
+ * "critical tables" from mdbc.properties and for releasing MUSIC lock references.
+ */
+public class MusicMixin implements MusicInterface {
+
+	public static Map<Integer, Set<String>> currentLockMap = new HashMap<>();
+	public static List<String> criticalTables = new ArrayList<>();
+
+	@Override
+	public String getMixinName() {
+		//
+		return null;
+	}
+
+	@Override
+	public String getMusicDefaultPrimaryKeyName() {
+		//
+		return null;
+	}
+
+	@Override
+	public String generateUniqueKey() {
+		//
+		return null;
+	}
+
+	@Override
+	public String getMusicKeyFromRow(TableInfo ti, String table, JSONObject dbRow) {
+		//
+		return null;
+	}
+
+	@Override
+	public void close() {
+		//
+
+	}
+
+	@Override
+	public void createKeyspace() {
+		//
+
+	}
+
+	@Override
+	public void initializeMusicForTable(TableInfo ti, String tableName) {
+		//
+
+	}
+
+	@Override
+	public void createDirtyRowTable(TableInfo ti, String tableName) {
+		//
+
+	}
+
+	@Override
+	public void dropDirtyRowTable(String tableName) {
+		//
+
+	}
+
+	@Override
+	public void clearMusicForTable(String tableName) {
+		//
+
+	}
+
+	@Override
+	public void markDirtyRow(TableInfo ti, String tableName, JSONObject keys) {
+		//
+
+	}
+
+	@Override
+	public void cleanDirtyRow(TableInfo ti, String tableName, JSONObject keys) {
+		//
+
+	}
+
+	@Override
+	public List<Map<String, Object>> getDirtyRows(TableInfo ti, String tableName) {
+		//
+		return null;
+	}
+
+	@Override
+	public void deleteFromEntityTableInMusic(TableInfo ti, String tableName, JSONObject oldRow) {
+		//
+
+	}
+
+	@Override
+	public void readDirtyRowsAndUpdateDb(DBInterface dbi, String tableName) {
+		//
+
+	}
+
+	@Override
+	public void updateDirtyRowAndEntityTableInMusic(TableInfo ti, String tableName, JSONObject changedRow) {
+		// Delegates to the 3-arg overload with isCritical=false (the overload is itself a no-op).
+		updateDirtyRowAndEntityTableInMusic(tableName, changedRow, false);
+
+	}
+
+	public void updateDirtyRowAndEntityTableInMusic(String tableName, JSONObject changedRow, boolean isCritical) { }
+
+
+	/**
+	 * Loads mdbc.properties from the classpath and populates {@link #criticalTables}
+	 * from the comma-separated "critical.tables" property.
+	 * NOTE(review): if the resource is missing, input is null and prop.load(null)
+	 * throws NullPointerException — caught by the broad catch below and only printed.
+	 */
+	public static void loadProperties() {
+		Properties prop = new Properties();
+		InputStream input = null;
+		try {
+			input = MusicMixin.class.getClassLoader().getResourceAsStream("mdbc.properties");
+			prop.load(input);
+			String crTable = prop.getProperty("critical.tables");
+			String[] tableArr = crTable.split(",");
+			criticalTables = Arrays.asList(tableArr);
+
+		}
+		catch (Exception ex) {
+			ex.printStackTrace();
+		}
+		finally {
+			if (input != null) {
+				try {
+					input.close();
+				} catch (IOException e) {
+					e.printStackTrace();
+				}
+			}
+		}
+	}
+
+	/**
+	 * Voluntarily releases and destroys each MUSIC lock reference in the set.
+	 * Locking exceptions are printed and the loop continues with the next lock.
+	 * @param lockIds the locks to release
+	 */
+	public static void releaseZKLocks(Set<LockId> lockIds) {
+		for(LockId lockId: lockIds) {
+			System.out.println("Releasing lock: "+lockId);
+			try {
+				MusicPureCassaCore.voluntaryReleaseLock(lockId.getFullyQualifiedLockKey(),lockId.getLockReference());
+				MusicPureCassaCore.destroyLockRef(lockId.getFullyQualifiedLockKey(),lockId.getLockReference());
+			} catch (MusicLockingException e) {
+				e.printStackTrace();
+			}
+		}
+	}
+
+	@Override
+	public String getMusicKeyFromRowWithoutPrimaryIndexes(TableInfo ti, String tableName, JSONObject changedRow) {
+		//
+		return null;
+	}
+
+	@Override
+	public void initializeMdbcDataStructures() {
+		//
+
+	}
+
+	@Override
+	public Object[] getObjects(TableInfo ti, String tableName, JSONObject row) {
+		return null;
+	}
+
+	@Override
+	public void commitLog(DBInterface dbi, DatabasePartition partition, HashMap<Range,StagingTable> transactionDigest, String txId,TxCommitProgress progressKeeper)
+			throws MDBCServiceException{
+		// TODO Auto-generated method stub
+	}
+
+	@Override
+	public TablePartitionInformation getTablePartitionInformation(String table){
+		return null;
+	}
+
+	@Override
+	public HashMap<Range,StagingTable> getTransactionDigest(RedoRecordId id){
+		return null;
+	}
+
+	@Override
+	public TransactionInformationElement getTransactionInformation(String id){
+		return null;
+	}
+
+	@Override
+	public void updateTitReference(String partition, TitReference tit){}
+
+	@Override
+	public List<RedoHistoryElement> getHistory(DatabasePartition partition){
+		return null;
+	}
+
+	@Override
+	public void addRedoHistory(DatabasePartition partition, TitReference newTit, List<TitReference> old){
+	}
+
+	@Override
+	public TitReference createPartition(List<String> tables, int replicationFactor, String currentOwner){
+		return null;
+	}
+
+	@Override
+	public List<PartitionInformation> getPartitionInformation(DatabasePartition partition){
+		return null;
+	}
+
+	@Override
+	public TitReference createTransactionInformationRow(TransactionInformationElement info){
+		return null;
+	}
+
+	@Override
+	public void appendToRedoLog(TitReference titRow, DatabasePartition partition, RedoRecordId newRecord){
+	}
+
+	@Override
+	public void appendRedoRecord(String redoRecordTable, RedoRecordId newRecord, String transactionDigest){
+	}
+
+	@Override
+	public void updateTablePartition(String table, DatabasePartition partition){}
+
+	@Override
+	public void updatePartitionOwner(String partition, String owner){}
+
+	@Override
+	public void updatePartitionReplicationFactor(String partition, int replicationFactor){}
+}
diff --git a/src/main/java/com/att/research/mdbc/mixins/MySQLMixin.java b/src/main/java/com/att/research/mdbc/mixins/MySQLMixin.java
new file mode 100755
index 0000000..a836a39
--- /dev/null
+++ b/src/main/java/com/att/research/mdbc/mixins/MySQLMixin.java
@@ -0,0 +1,784 @@
+package com.att.research.mdbc.mixins;
+
+import java.sql.Connection;
+import java.sql.PreparedStatement;
+import java.sql.ResultSet;
+import java.sql.SQLException;
+import java.sql.Statement;
+import java.sql.Types;
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.Properties;
+import java.util.Set;
+import java.util.TreeSet;
+
+import org.json.JSONObject;
+import org.json.JSONTokener;
+
+import com.att.research.logging.EELFLoggerDelegate;
+import com.att.research.mdbc.MusicSqlManager;
+import com.att.research.mdbc.Range;
+import com.att.research.mdbc.TableInfo;
+
+import net.sf.jsqlparser.JSQLParserException;
+import net.sf.jsqlparser.parser.CCJSqlParserUtil;
+import net.sf.jsqlparser.statement.delete.Delete;
+import net.sf.jsqlparser.statement.insert.Insert;
+import net.sf.jsqlparser.statement.update.Update;
+
+/**
+ * This class provides the methods that MDBC needs in order to mirror data to/from a
+ * <a href="https://dev.mysql.com/">MySQL</a> or <a href="http://mariadb.org/">MariaDB</a> database instance.
+ * This class uses the <code>JSON_OBJECT()</code> database function, which means it requires the following
+ * minimum versions of either database:
+ * <table summary="">
+ * <tr><th>DATABASE</th><th>VERSION</th></tr>
+ * <tr><td>MySQL</td><td>5.7.8</td></tr>
+ * <tr><td>MariaDB</td><td>10.2.3 (Note: 10.2.3 is currently (July 2017) a <i>beta</i> release)</td></tr>
+ * </table>
+ *
+ * @author Robert P. Eby
+ */
+public class MySQLMixin implements DBInterface {
+ private EELFLoggerDelegate logger = EELFLoggerDelegate.getLogger(MySQLMixin.class);
+
+ public static final String MIXIN_NAME = "mysql";
+ public static final String TRANS_TBL = "MDBC_TRANSLOG";
+ private static final String CREATE_TBL_SQL =
+ "CREATE TABLE IF NOT EXISTS "+TRANS_TBL+
+ " (IX INT AUTO_INCREMENT, OP CHAR(1), TABLENAME VARCHAR(255), NEWROWDATA VARCHAR(1024), KEYDATA VARCHAR(1024), CONNECTION_ID INT,PRIMARY KEY (IX))";
+
+ private final MusicSqlManager msm;
+ private final int connId;
+ private final String dbName;
+ private final Connection dbConnection;
+ private final Map<String, TableInfo> tables;
+ private boolean server_tbl_created = false;
+
+	/** No-arg constructor used only by {@code MixinFactory} to probe the mixin name; all fields stay null/0. */
+	public MySQLMixin() {
+		this.msm = null;
+		this.connId = 0;
+		this.dbName = null;
+		this.dbConnection = null;
+		this.tables = null;
+	}
+	/**
+	 * Working constructor: binds this mixin to an open JDBC connection.
+	 * @param msm the owning MusicSqlManager
+	 * @param url the JDBC URL (currently unused here — kept for the reflective factory signature)
+	 * @param conn the live JDBC connection to the MySQL/MariaDB instance
+	 * @param info driver properties (currently unused here)
+	 */
+	public MySQLMixin(MusicSqlManager msm, String url, Connection conn, Properties info) {
+		this.msm = msm;
+		this.connId = generateConnID(conn);
+		this.dbName = getDBName(conn);
+		this.dbConnection = conn;
+		this.tables = new HashMap<String, TableInfo>();
+	}
+	// This is used to generate a unique connId for this connection to the DB.
+	// Prefers MySQL's CONNECTION_ID(); falls back to truncated currentTimeMillis
+	// ("random-ish") when the query fails.
+	// NOTE(review): stmt/rs are not closed if executeQuery throws — minor leak on the error path.
+	private int generateConnID(Connection conn) {
+		int rv = (int) System.currentTimeMillis();	// random-ish
+		try {
+			Statement stmt = conn.createStatement();
+			ResultSet rs = stmt.executeQuery("SELECT CONNECTION_ID() AS IX");
+			if (rs.next()) {
+				rv = rs.getInt("IX");
+			}
+			stmt.close();
+		} catch (SQLException e) {
+			logger.error(EELFLoggerDelegate.errorLogger,"generateConnID: problem generating a connection ID!");
+		}
+		return rv;
+	}
+
+	/**
+	 * Get the name of this DBInterface mixin object.
+	 * @return the name
+	 */
+	@Override
+	public String getMixinName() {
+		return MIXIN_NAME;
+	}
+
+	@Override
+	public void close() {
+		// nothing yet -- the JDBC connection is owned and closed by the caller
+	}
+
+	/**
+	 * Determines the db name associated with the connection
+	 * This is the private/internal method that actually determines the name
+	 * @param conn the JDBC connection to query
+	 * @return the current schema per SELECT DATABASE(), or the default "mdbc" on error
+	 */
+	private String getDBName(Connection conn) {
+		String dbname = "mdbc"; //default name
+		try {
+			Statement stmt = conn.createStatement();
+			ResultSet rs = stmt.executeQuery("SELECT DATABASE() AS DB");
+			if (rs.next()) {
+				dbname = rs.getString("DB");
+			}
+			stmt.close();
+		} catch (SQLException e) {
+			logger.error(EELFLoggerDelegate.errorLogger, "getDBName: problem getting database name from mysql");
+		}
+		return dbname;
+	}
+
+	/** @return the database (schema) name determined at construction time */
+	public String getDatabaseName() {
+		return this.dbName;
+	}
+	/**
+	 * Get a set of the table names in the database.
+	 * Queries INFORMATION_SCHEMA.TABLES for BASE TABLEs in the current schema.
+	 * NOTE(review): stmt is not closed if executeQuery throws — minor leak on the error path.
+	 * @return the set (sorted, since it is a TreeSet; empty on SQL error)
+	 */
+	@Override
+	public Set<String> getSQLTableSet() {
+		Set<String> set = new TreeSet<String>();
+		String sql = "SELECT TABLE_NAME FROM INFORMATION_SCHEMA.TABLES WHERE TABLE_SCHEMA=DATABASE() AND TABLE_TYPE='BASE TABLE'";
+		try {
+			Statement stmt = dbConnection.createStatement();
+			ResultSet rs = stmt.executeQuery(sql);
+			while (rs.next()) {
+				String s = rs.getString("TABLE_NAME");
+				set.add(s);
+			}
+			stmt.close();
+		} catch (SQLException e) {
+			logger.error(EELFLoggerDelegate.errorLogger,"getSQLTableSet: "+e);
+		}
+		logger.debug(EELFLoggerDelegate.applicationLogger,"getSQLTableSet returning: "+ set);
+		return set;
+	}
+/*
+mysql> describe tables;
++-----------------+---------------------+------+-----+---------+-------+
+| Field | Type | Null | Key | Default | Extra |
++-----------------+---------------------+------+-----+---------+-------+
+| TABLE_CATALOG | varchar(512) | NO | | | |
+| TABLE_SCHEMA | varchar(64) | NO | | | |
+| TABLE_NAME | varchar(64) | NO | | | |
+| TABLE_TYPE | varchar(64) | NO | | | |
+| ENGINE | varchar(64) | YES | | NULL | |
+| VERSION | bigint(21) unsigned | YES | | NULL | |
+| ROW_FORMAT | varchar(10) | YES | | NULL | |
+| TABLE_ROWS | bigint(21) unsigned | YES | | NULL | |
+| AVG_ROW_LENGTH | bigint(21) unsigned | YES | | NULL | |
+| DATA_LENGTH | bigint(21) unsigned | YES | | NULL | |
+| MAX_DATA_LENGTH | bigint(21) unsigned | YES | | NULL | |
+| INDEX_LENGTH | bigint(21) unsigned | YES | | NULL | |
+| DATA_FREE | bigint(21) unsigned | YES | | NULL | |
+| AUTO_INCREMENT | bigint(21) unsigned | YES | | NULL | |
+| CREATE_TIME | datetime | YES | | NULL | |
+| UPDATE_TIME | datetime | YES | | NULL | |
+| CHECK_TIME | datetime | YES | | NULL | |
+| TABLE_COLLATION | varchar(32) | YES | | NULL | |
+| CHECKSUM | bigint(21) unsigned | YES | | NULL | |
+| CREATE_OPTIONS | varchar(255) | YES | | NULL | |
+| TABLE_COMMENT | varchar(2048) | NO | | | |
++-----------------+---------------------+------+-----+---------+-------+
+ */
+	/**
+	 * Return a TableInfo object for the specified table.
+	 * This method first looks in a cache of previously constructed TableInfo objects for the table.
+	 * If not found, it queries the INFORMATION_SCHEMA.COLUMNS table to obtain the column names, types, and indexes of the table.
+	 * It creates a new TableInfo object with the results.
+	 * NOTE(review): tableName is concatenated directly into the SQL — injection risk if it can
+	 * ever come from untrusted input; prefer a PreparedStatement. Also, when executeSQLRead
+	 * returns null, a null TableInfo is cached in the map and returned — confirm intended.
+	 * @param tableName the table to look up
+	 * @return a TableInfo object containing the info we need, or null if the table does not exist
+	 */
+	@Override
+	public TableInfo getTableInfo(String tableName) {
+		TableInfo ti = tables.get(tableName);
+		if (ti == null) {
+			try {
+				String tbl = tableName;//.toUpperCase();
+				String sql = "SELECT COLUMN_NAME, DATA_TYPE, COLUMN_KEY FROM INFORMATION_SCHEMA.COLUMNS WHERE TABLE_SCHEMA=DATABASE() AND TABLE_NAME='"+tbl+"'";
+				ResultSet rs = executeSQLRead(sql);
+				if (rs != null) {
+					ti = new TableInfo();
+					while (rs.next()) {
+						String name = rs.getString("COLUMN_NAME");
+						String type = rs.getString("DATA_TYPE");
+						String ckey = rs.getString("COLUMN_KEY");
+						ti.columns.add(name);
+						ti.coltype.add(mapDatatypeNameToType(type));
+						ti.iskey.add(ckey != null && !ckey.equals(""));
+					}
+					rs.getStatement().close();
+				} else {
+					logger.error(EELFLoggerDelegate.errorLogger,"Cannot retrieve table info for table "+tableName+" from MySQL.");
+				}
+			} catch (SQLException e) {
+				logger.error(EELFLoggerDelegate.errorLogger,"Cannot retrieve table info for table "+tableName+" from MySQL: "+e);
+				return null;
+			}
+			tables.put(tableName, ti);
+		}
+		return ti;
+	}
+	// Map MySQL data type names to the java.sql.Types equivalent.
+	// Note: date/datetime both map to DATE, and blob types map to VARCHAR;
+	// anything unrecognized is logged and defaults to VARCHAR.
+	private int mapDatatypeNameToType(String nm) {
+		switch (nm) {
+		case "tinyint":		return Types.TINYINT;
+		case "smallint":	return Types.SMALLINT;
+		case "mediumint":
+		case "int":			return Types.INTEGER;
+		case "bigint":		return Types.BIGINT;
+		case "decimal":
+		case "numeric":		return Types.DECIMAL;
+		case "float":		return Types.FLOAT;
+		case "double":		return Types.DOUBLE;
+		case "date":
+		case "datetime":	return Types.DATE;
+		case "time":		return Types.TIME;
+		case "timestamp":	return Types.TIMESTAMP;
+		case "char":		return Types.CHAR;
+		case "text":
+		case "varchar":		return Types.VARCHAR;
+		case "mediumblob":
+		case "blob":		return Types.VARCHAR;
+		default:
+			logger.error(EELFLoggerDelegate.errorLogger,"unrecognized and/or unsupported data type "+nm);
+			return Types.VARCHAR;
+		}
+	}
+ @Override
+ public void createSQLTriggers(String tableName) {
+ // Don't create triggers for the table the triggers write into!!!
+ if (tableName.equals(TRANS_TBL))
+ return;
+ try {
+ if (!server_tbl_created) {
+ try {
+ Statement stmt = dbConnection.createStatement();
+ stmt.execute(CREATE_TBL_SQL);
+ stmt.close();
+ logger.info(EELFLoggerDelegate.applicationLogger,"createSQLTriggers: Server side dirty table created.");
+ server_tbl_created = true;
+ } catch (SQLException e) {
+ logger.error(EELFLoggerDelegate.errorLogger,"createSQLTriggers: problem creating the "+TRANS_TBL+" table!");
+ }
+ }
+
+ // Give the triggers a way to find this MSM
+ for (String name : getTriggerNames(tableName)) {
+ logger.info(EELFLoggerDelegate.applicationLogger,"ADD trigger "+name+" to msm_map");
+ //\TODO fix this is an error
+ //msm.register(name);
+ }
+ // No SELECT trigger
+ executeSQLWrite(generateTrigger(tableName, "INSERT"));
+ executeSQLWrite(generateTrigger(tableName, "UPDATE"));
+ executeSQLWrite(generateTrigger(tableName, "DELETE"));
+ } catch (SQLException e) {
+ if (e.getMessage().equals("Trigger already exists")) {
+ //only warn if trigger already exists
+ logger.warn(EELFLoggerDelegate.applicationLogger, "createSQLTriggers" + e);
+ } else {
+ logger.error(EELFLoggerDelegate.errorLogger,"createSQLTriggers: "+e);
+ }
+ }
+ }
+/*
+CREATE TRIGGER `triggername` BEFORE UPDATE ON `table`
+FOR EACH ROW BEGIN
+INSERT INTO `log_table` ( `field1`, `field2`, ...) VALUES ( NEW.`field1`, NEW.`field2`, ...) ;
+END;
+
+OLD.field refers to the old value
+NEW.field refers to the new value
+*/
+ private String generateTrigger(String tableName, String op) {
+ boolean isdelete = op.equals("DELETE");
+ boolean isinsert = op.equals("INSERT");
+ TableInfo ti = getTableInfo(tableName);
+ StringBuilder newJson = new StringBuilder("JSON_OBJECT("); // JSON_OBJECT(key, val, key, val) page 1766
+ StringBuilder keyJson = new StringBuilder("JSON_OBJECT(");
+ String pfx = "";
+ String keypfx = "";
+ for (String col : ti.columns) {
+ newJson.append(pfx)
+ .append("'").append(col).append("', ")
+ .append(isdelete ? "OLD." : "NEW.")
+ .append(col);
+ if (ti.iskey(col) || !ti.hasKey()) {
+ keyJson.append(keypfx)
+ .append("'").append(col).append("', ")
+ .append(isinsert ? "NEW." : "OLD.")
+ .append(col);
+ keypfx = ", ";
+ }
+ pfx = ", ";
+ }
+ newJson.append(")");
+ keyJson.append(")");
+ //\TODO check if using mysql driver, so instead check the exception
+ StringBuilder sb = new StringBuilder()
+ .append("CREATE TRIGGER ") // IF NOT EXISTS not supported by MySQL!
+ .append(String.format("%s_%s", op.substring(0, 1), tableName))
+ .append(" AFTER ")
+ .append(op)
+ .append(" ON ")
+ .append(tableName)
+ .append(" FOR EACH ROW INSERT INTO ")
+ .append(TRANS_TBL)
+ .append(" (TABLENAME, OP, NEWROWDATA, KEYDATA, CONNECTION_ID) VALUES('")
+ .append(tableName)
+ .append("', ")
+ .append(isdelete ? "'D'" : (op.equals("INSERT") ? "'I'" : "'U'"))
+ .append(", ")
+ .append(newJson.toString())
+ .append(", ")
+ .append(keyJson.toString())
+ .append(", ")
+ .append("CONNECTION_ID()")
+ .append(")");
+ return sb.toString();
+ }
+ private String[] getTriggerNames(String tableName) {
+ return new String[] {
+ "I_" + tableName, // INSERT trigger
+ "U_" + tableName, // UPDATE trigger
+ "D_" + tableName // DELETE trigger
+ };
+ }
+
+ @Override
+ public void dropSQLTriggers(String tableName) {
+ try {
+ for (String name : getTriggerNames(tableName)) {
+ logger.info(EELFLoggerDelegate.applicationLogger,"REMOVE trigger "+name+" from msmmap");
+ executeSQLWrite("DROP TRIGGER IF EXISTS " +name);
+ //\TODO Fix this is an error
+ //msm.unregister(name);
+ }
+ } catch (SQLException e) {
+ logger.error(EELFLoggerDelegate.errorLogger,"dropSQLTriggers: "+e);
+ }
+ }
+
+ @Override
+ public void insertRowIntoSqlDb(String tableName, Map<String, Object> map) {
+ TableInfo ti = getTableInfo(tableName);
+ String sql = "";
+ if (rowExists(tableName, ti, map)) {
+ // Update - Construct the what and where strings for the DB write
+ StringBuilder what = new StringBuilder();
+ StringBuilder where = new StringBuilder();
+ String pfx = "";
+ String pfx2 = "";
+ for (int i = 0; i < ti.columns.size(); i++) {
+ String col = ti.columns.get(i);
+ String val = Utils.getStringValue(map.get(col));
+ if (ti.iskey.get(i)) {
+ where.append(pfx).append(col).append("=").append(val);
+ pfx = " AND ";
+ } else {
+ what.append(pfx2).append(col).append("=").append(val);
+ pfx2 = ", ";
+ }
+ }
+ sql = String.format("UPDATE %s SET %s WHERE %s", tableName, what.toString(), where.toString());
+ } else {
+ // Construct the value string and column name string for the DB write
+ StringBuilder fields = new StringBuilder();
+ StringBuilder values = new StringBuilder();
+ String pfx = "";
+ for (String col : ti.columns) {
+ fields.append(pfx).append(col);
+ values.append(pfx).append(Utils.getStringValue(map.get(col)));
+ pfx = ", ";
+ }
+ sql = String.format("INSERT INTO %s (%s) VALUES (%s);", tableName, fields.toString(), values.toString());
+ }
+ try {
+ executeSQLWrite(sql);
+ } catch (SQLException e1) {
+ logger.error(EELFLoggerDelegate.errorLogger,"executeSQLWrite: "+e1);
+ }
+ // TODO - remove any entries from MDBC_TRANSLOG corresponding to this update
+ // SELECT IX, OP, KEYDATA FROM MDBC_TRANS_TBL WHERE CONNID = "+connId AND TABLENAME = tblname
+ }
+
+ private boolean rowExists(String tableName, TableInfo ti, Map<String, Object> map) {
+ StringBuilder where = new StringBuilder();
+ String pfx = "";
+ for (int i = 0; i < ti.columns.size(); i++) {
+ if (ti.iskey.get(i)) {
+ String col = ti.columns.get(i);
+ String val = Utils.getStringValue(map.get(col));
+ where.append(pfx).append(col).append("=").append(val);
+ pfx = " AND ";
+ }
+ }
+ String sql = String.format("SELECT * FROM %s WHERE %s", tableName, where.toString());
+ ResultSet rs = executeSQLRead(sql);
+ try {
+ boolean rv = rs.next();
+ rs.close();
+ return rv;
+ } catch (SQLException e) {
+ return false;
+ }
+ }
+
+
+ @Override
+ public void deleteRowFromSqlDb(String tableName, Map<String, Object> map) {
+ TableInfo ti = getTableInfo(tableName);
+ StringBuilder where = new StringBuilder();
+ String pfx = "";
+ for (int i = 0; i < ti.columns.size(); i++) {
+ if (ti.iskey.get(i)) {
+ String col = ti.columns.get(i);
+ Object val = map.get(col);
+ where.append(pfx).append(col).append("=").append(Utils.getStringValue(val));
+ pfx = " AND ";
+ }
+ }
+ try {
+ String sql = String.format("DELETE FROM %s WHERE %s", tableName, where.toString());
+ executeSQLWrite(sql);
+ } catch (SQLException e) {
+ e.printStackTrace();
+ }
+ }
+
+ /**
+ * This method executes a read query in the SQL database. Methods that call this method should be sure
+ * to call resultset.getStatement().close() when done in order to free up resources.
+ * @param sql the query to run
+ * @return a ResultSet containing the rows returned from the query
+ */
+ @Override
+ public ResultSet executeSQLRead(String sql) {
+ logger.debug(EELFLoggerDelegate.applicationLogger,"executeSQLRead");
+ logger.debug("Executing SQL read:"+ sql);
+ ResultSet rs = null;
+ try {
+ Statement stmt = dbConnection.createStatement();
+ rs = stmt.executeQuery(sql);
+ } catch (SQLException e) {
+ logger.error(EELFLoggerDelegate.errorLogger,"executeSQLRead"+e);
+ }
+ return rs;
+ }
+
+ /**
+ * This method executes a write query in the sql database.
+ * @param sql the SQL to be sent to MySQL
+ * @throws SQLException if an underlying JDBC method throws an exception
+ */
+ protected void executeSQLWrite(String sql) throws SQLException {
+ logger.debug(EELFLoggerDelegate.applicationLogger, "Executing SQL write:"+ sql);
+
+ Statement stmt = dbConnection.createStatement();
+ stmt.execute(sql);
+ stmt.close();
+ }
+
+ /**
+ * Code to be run within the DB driver before a SQL statement is executed. This is where tables
+ * can be synchronized before a SELECT, for those databases that do not support SELECT triggers.
+ * @param sql the SQL statement that is about to be executed
+ * @return list of keys that will be updated, if they can't be determined afterwards (i.e. sql table doesn't have primary key)
+ */
+ @Override
+ public void preStatementHook(final String sql) {
+ if (sql == null) {
+ return;
+ }
+ String cmd = sql.trim().toLowerCase();
+ if (cmd.startsWith("select")) {
+ String[] parts = sql.trim().split(" ");
+ Set<String> set = getSQLTableSet();
+ for (String part : parts) {
+ if (set.contains(part.toUpperCase())) {
+ // Found a candidate table name in the SELECT SQL -- update this table
+ //msm.readDirtyRowsAndUpdateDb(part);
+ }
+ }
+ }
+ }
+
+ /**
+ * Code to be run within the DB driver after a SQL statement has been executed. This is where remote
+ * statement actions can be copied back to Cassandra/MUSIC.
+ * @param sql the SQL statement that was executed
+ */
+ @Override
+ public void postStatementHook(final String sql,Map<Range,StagingTable> transactionDigest) {
+ if (sql != null) {
+ String[] parts = sql.trim().split(" ");
+ String cmd = parts[0].toLowerCase();
+ if ("delete".equals(cmd) || "insert".equals(cmd) || "update".equals(cmd)) {
+ try {
+ this.updateStagingTable(transactionDigest);
+ } catch (NoSuchFieldException e) {
+ // TODO Auto-generated catch block
+ e.printStackTrace();
+ }
+ }
+ }
+ }
+
+ private OperationType toOpEnum(String operation) throws NoSuchFieldException {
+ switch (operation.toLowerCase()) {
+ case "i":
+ return OperationType.INSERT;
+ case "d":
+ return OperationType.DELETE;
+ case "u":
+ return OperationType.UPDATE;
+ case "s":
+ return OperationType.SELECT;
+ default:
+ logger.error(EELFLoggerDelegate.errorLogger,"Invalid operation selected: ["+operation+"]");
+ throw new NoSuchFieldException("Invalid operation enum");
+ }
+
+ }
	/**
	 * Copy rows captured by the triggers into the per-range staging tables,
	 * then delete the consumed rows from the transaction-log table.
	 * Only rows written by THIS connection (matched on CONNECTION_ID) are read.
	 * @param transactionDigests map from table range to its staging table; entries are created on demand
	 * @throws NoSuchFieldException if an operation code is unrecognized or a row's primary key cannot be derived
	 */
	private void updateStagingTable(Map<Range,StagingTable> transactionDigests) throws NoSuchFieldException {
		// copy from DB.MDBC_TRANSLOG where connid == myconnid
		// then delete from MDBC_TRANSLOG
		String sql2 = "SELECT IX, TABLENAME, OP, KEYDATA, NEWROWDATA FROM "+TRANS_TBL +" WHERE CONNECTION_ID = " + this.connId;
		try {
			ResultSet rs = executeSQLRead(sql2);
			// TreeSet keeps the consumed log indexes sorted for the delete pass below.
			Set<Integer> rows = new TreeSet<Integer>();
			while (rs.next()) {
				int ix = rs.getInt("IX");
				String op = rs.getString("OP");
				OperationType opType = toOpEnum(op);
				String tbl = rs.getString("TABLENAME");
				String keydataStr = rs.getString("KEYDATA");
				String newRowStr = rs.getString("NEWROWDATA");
				JSONObject newRow = new JSONObject(new JSONTokener(newRowStr));
				String musicKey;
				TableInfo ti = getTableInfo(tbl);
				if (!ti.hasKey()) {
					// Table has no primary key: synthesize a MUSIC key and embed it in the row
					// under MUSIC's default primary-key column.
					//create music key
					//\TODO fix, this is completely broken
					//if (op.startsWith("I")) {
					//\TODO Improve the generation of primary key, it should be generated using
					// the actual columns, otherwise performance when doing range queries are going
					// to be even worse (see the else bracket down)
					//
					musicKey = msm.generateUniqueKey();
					/*} else {
						//get key from data
						musicKey = msm.getMusicKeyFromRowWithoutPrimaryIndexes(tbl,newRow);
					}*/
					newRow.put(msm.getMusicDefaultPrimaryKeyName(), musicKey);
				}
				else {
					// Derive the MUSIC key from the row's own primary-key columns.
					//Use the keys
					musicKey = msm.getMusicKeyFromRow(tbl, newRow);
					if(musicKey.isEmpty()) {
						logger.error(EELFLoggerDelegate.errorLogger,"Primary key is invalid: ["+tbl+","+op+"]");
						throw new NoSuchFieldException("Invalid operation enum");
					}
				}
				Range range = new Range(tbl);
				if(!transactionDigests.containsKey(range)) {
					transactionDigests.put(range, new StagingTable());
				}
				transactionDigests.get(range).addOperation(musicKey, opType, keydataStr, newRow.toString());
				rows.add(ix);
			}
			// Per executeSQLRead's contract, close the Statement (also closes rs).
			rs.getStatement().close();
			if (rows.size() > 0) {
				// Remove the consumed log entries one index at a time.
				sql2 = "DELETE FROM "+TRANS_TBL+" WHERE IX = ?";
				PreparedStatement ps = dbConnection.prepareStatement(sql2);
				logger.debug("Executing: "+sql2);
				logger.debug("  For ix = "+rows);
				for (int ix : rows) {
					ps.setInt(1, ix);
					ps.execute();
				}
				ps.close();
			}
		} catch (SQLException e) {
			// NOTE(review): executeSQLRead can return null on failure, which would NPE
			// at rs.next() above rather than land here — confirm and harden if needed.
			logger.warn("Exception in postStatementHook: "+e);
			e.printStackTrace();
		}
	}
+
+
+
+ /**
+ * Update music with data from MySQL table
+ *
+ * @param tableName - name of table to update in music
+ */
+ @Override
+ public void synchronizeData(String tableName) {
+ ResultSet rs = null;
+ TableInfo ti = getTableInfo(tableName);
+ String query = "SELECT * FROM "+tableName;
+
+ try {
+ rs = executeSQLRead(query);
+ if(rs==null) return;
+ while(rs.next()) {
+
+ JSONObject jo = new JSONObject();
+ if (!getTableInfo(tableName).hasKey()) {
+ String musicKey = msm.generateUniqueKey();
+ jo.put(msm.getMusicDefaultPrimaryKeyName(), musicKey);
+ }
+
+ for (String col : ti.columns) {
+ jo.put(col, rs.getString(col));
+ }
+
+ @SuppressWarnings("unused")
+ Object[] row = Utils.jsonToRow(ti,tableName, jo,msm.getMusicDefaultPrimaryKeyName());
+ //\FIXME this is wrong now, update of the dirty row and entity is now handled by the archival process
+ //msm.updateDirtyRowAndEntityTableInMusic(ti,tableName, jo);
+ }
+ } catch (Exception e) {
+ logger.error(EELFLoggerDelegate.errorLogger, "synchronizing data " + tableName +
+ " -> " + e.getMessage());
+ }
+ finally {
+ try {
+ rs.close();
+ } catch (SQLException e) {
+ //continue
+ }
+ }
+
+ }
+
+ /**
+ * Return a list of "reserved" names, that should not be used by MySQL client/MUSIC
+ * These are reserved for mdbc
+ */
+ @Override
+ public List<String> getReservedTblNames() {
+ ArrayList<String> rsvdTables = new ArrayList<String>();
+ rsvdTables.add(TRANS_TBL);
+ //Add others here as necessary
+ return rsvdTables;
+ }
	/**
	 * NOTE(review): not implemented for this mixin — always returns {@code null}.
	 * Callers must handle a null primary key.
	 */
	@Override
	public String getPrimaryKey(String sql, String tableName) {
		//
		return null;
	}
+
	/**
	 * Parse a DML statement with JSqlParser and delegate key extraction to the
	 * per-operation overload. Returns an empty list when the statement cannot
	 * be parsed or is not INSERT/UPDATE/DELETE.
	 * @deprecated key extraction is handled elsewhere; kept for reference only
	 */
	@SuppressWarnings("unused")
	@Deprecated
	private ArrayList<String> getMusicKey(String sql) {
		try {
			net.sf.jsqlparser.statement.Statement stmt = CCJSqlParserUtil.parse(sql);
			if (stmt instanceof Insert) {
				Insert s = (Insert) stmt;
				String tbl = s.getTable().getName();
				return getMusicKey(tbl, "INSERT", sql);
			} else if (stmt instanceof Update){
				Update u = (Update) stmt;
				// Assumes the UPDATE targets a single table (first entry only).
				String tbl = u.getTables().get(0).getName();
				return getMusicKey(tbl, "UPDATE", sql);
			} else if (stmt instanceof Delete) {
				Delete d = (Delete) stmt;
				//TODO: IMPLEMENT
				String tbl = d.getTable().getName();
				return getMusicKey(tbl, "DELETE", sql);
			} else {
				System.err.println("Not recognized sql type");
			}

		} catch (JSQLParserException e) {

			e.printStackTrace();
		}
		//Something went wrong here
		return new ArrayList<String>();
	}
+
	/**
	 * Returns all keys that matches the current sql statement, and not in already updated keys.
	 * NOTE(review): the entire implementation is commented out; this always
	 * returns an empty list. Kept only as a reference for the intended logic.
	 *
	 * @param tbl table targeted by the statement
	 * @param cmd operation name ("INSERT"/"UPDATE"/"DELETE")
	 * @param sql the original SQL text
	 * @deprecated superseded by the staging-table flow
	 */
	@Deprecated
	private ArrayList<String> getMusicKey(String tbl, String cmd, String sql) {
		ArrayList<String> musicKeys = new ArrayList<String>();
		/*
		if (cmd.equalsIgnoreCase("insert")) {
			//create key, return key
			musicKeys.add(msm.generatePrimaryKey());
		} else if (cmd.equalsIgnoreCase("update") || cmd.equalsIgnoreCase("delete")) {
			try {
				net.sf.jsqlparser.statement.Statement stmt = CCJSqlParserUtil.parse(sql);
				String where;
				if (stmt instanceof Update) {
					where = ((Update) stmt).getWhere().toString();
				} else if (stmt instanceof Delete) {
					where = ((Delete) stmt).getWhere().toString();
				} else {
					System.err.println("Unknown type: " +stmt.getClass());
					where = "";
				}
				ResultSet rs = executeSQLRead("SELECT * FROM " + tbl + " WHERE " + where);
				musicKeys = msm.getMusicKeysWhere(tbl, Utils.parseResults(getTableInfo(tbl), rs));
			} catch (JSQLParserException e) {

				e.printStackTrace();
			} catch (SQLException e) {
				//Not a valid sql query
				e.printStackTrace();
			}
		}
		*/
		return musicKeys;
	}
+
+
	/**
	 * Old upsert strategy: try the INSERT first and fall back to an UPDATE when
	 * it fails. Superseded by insertRowIntoSqlDb, which checks row existence
	 * up front instead of treating ANY SQLException as "row already exists".
	 * @deprecated use insertRowIntoSqlDb
	 */
	@Deprecated
	public void insertRowIntoSqlDbOLD(String tableName, Map<String, Object> map) {
		// First construct the value string and column name string for the db write
		TableInfo ti = getTableInfo(tableName);
		StringBuilder fields = new StringBuilder();
		StringBuilder values = new StringBuilder();
		String pfx = "";
		for (String col : ti.columns) {
			fields.append(pfx).append(col);
			values.append(pfx).append(Utils.getStringValue(map.get(col)));
			pfx = ", ";
		}

		try {
			String sql = String.format("INSERT INTO %s (%s) VALUES (%s);", tableName, fields.toString(), values.toString());
			executeSQLWrite(sql);
		} catch (SQLException e) {
			// NOTE(review): any failure lands here, not just duplicate-key errors.
			logger.error(EELFLoggerDelegate.errorLogger,"Insert failed because row exists, do an update");
			// Reuse "fields" as the SET clause of the fallback UPDATE.
			StringBuilder where = new StringBuilder();
			pfx = "";
			String pfx2 = "";
			fields.setLength(0);
			for (int i = 0; i < ti.columns.size(); i++) {
				String col = ti.columns.get(i);
				String val = Utils.getStringValue(map.get(col));
				if (ti.iskey.get(i)) {
					where.append(pfx).append(col).append("=").append(val);
					pfx = " AND ";
				} else {
					fields.append(pfx2).append(col).append("=").append(val);
					pfx2 = ", ";
				}
			}
			String sql = String.format("UPDATE %s SET %s WHERE %s", tableName, fields.toString(), where.toString());
			try {
				executeSQLWrite(sql);
			} catch (SQLException e1) {
				logger.error(EELFLoggerDelegate.errorLogger,"executeSQLWrite"+e1);
			}
		}
	}
+}
diff --git a/src/main/java/com/att/research/mdbc/mixins/Operation.java b/src/main/java/com/att/research/mdbc/mixins/Operation.java
new file mode 100644
index 0000000..4ca8048
--- /dev/null
+++ b/src/main/java/com/att/research/mdbc/mixins/Operation.java
@@ -0,0 +1,31 @@
+package com.att.research.mdbc.mixins;
+
+import java.io.Serializable;
+
+import org.json.JSONObject;
+import org.json.JSONTokener;
+
+public final class Operation implements Serializable{
+
+ private static final long serialVersionUID = -1215301985078183104L;
+
+ final OperationType TYPE;
+ final String OLD_VAL;
+ final String NEW_VAL;
+
+ public Operation(OperationType type, String newVal, String oldVal) {
+ TYPE = type;
+ NEW_VAL = newVal;
+ OLD_VAL = oldVal;
+ }
+
+ public JSONObject getNewVal(){
+ JSONObject newRow = new JSONObject(new JSONTokener(NEW_VAL));
+ return newRow;
+ }
+
+ public JSONObject getOldVal(){
+ JSONObject keydata = new JSONObject(new JSONTokener(OLD_VAL));
+ return keydata;
+ }
+}
diff --git a/src/main/java/com/att/research/mdbc/mixins/OperationType.java b/src/main/java/com/att/research/mdbc/mixins/OperationType.java
new file mode 100644
index 0000000..0160eb5
--- /dev/null
+++ b/src/main/java/com/att/research/mdbc/mixins/OperationType.java
@@ -0,0 +1,5 @@
+package com.att.research.mdbc.mixins;
+
/**
 * Kind of SQL operation recorded in the staging/transaction log.
 * NOTE: constant order is part of the serialized form — do not reorder.
 */
public enum OperationType{
	DELETE, UPDATE, INSERT, SELECT
}
diff --git a/src/main/java/com/att/research/mdbc/mixins/PartitionInformation.java b/src/main/java/com/att/research/mdbc/mixins/PartitionInformation.java
new file mode 100644
index 0000000..12b8e4f
--- /dev/null
+++ b/src/main/java/com/att/research/mdbc/mixins/PartitionInformation.java
@@ -0,0 +1,19 @@
+package com.att.research.mdbc.mixins;
+
+import java.util.List;
+
+public class PartitionInformation {
+ public final String partition;
+ public final TitReference tit;
+ public final List<String> tables;
+ public final int replicationFactor;
+ public final String currentOwner;
+
+ public PartitionInformation(String partition, TitReference tit, List<String> tables, int replicationFactor, String currentOwner) {
+ this.partition=partition;
+ this.tit=tit;
+ this.tables=tables;
+ this.replicationFactor=replicationFactor;
+ this.currentOwner=currentOwner;
+ }
+}
diff --git a/src/main/java/com/att/research/mdbc/mixins/RedoHistoryElement.java b/src/main/java/com/att/research/mdbc/mixins/RedoHistoryElement.java
new file mode 100644
index 0000000..9d685cc
--- /dev/null
+++ b/src/main/java/com/att/research/mdbc/mixins/RedoHistoryElement.java
@@ -0,0 +1,15 @@
+package com.att.research.mdbc.mixins;
+
+import java.util.List;
+
+public final class RedoHistoryElement {
+ public final String partition;
+ public final TitReference current;
+ public final List<TitReference> previous;
+
+ public RedoHistoryElement(String partition, TitReference current, List<TitReference> previous) {
+ this.partition = partition;
+ this.current = current;
+ this.previous = previous;
+ }
+}
diff --git a/src/main/java/com/att/research/mdbc/mixins/RedoRecordId.java b/src/main/java/com/att/research/mdbc/mixins/RedoRecordId.java
new file mode 100644
index 0000000..8a4923f
--- /dev/null
+++ b/src/main/java/com/att/research/mdbc/mixins/RedoRecordId.java
@@ -0,0 +1,15 @@
+package com.att.research.mdbc.mixins;
+
/**
 * Identifier of a redo-log record, composed of a lease id and a commit id.
 * Immutable; either component may be null or empty.
 */
public final class RedoRecordId {
	public final String leaseId;
	public final String commitId;

	public RedoRecordId(String leaseId, String commitId) {
		this.leaseId = leaseId;
		this.commitId = commitId;
	}

	/** True when neither component carries a usable (non-null, non-empty) value. */
	public boolean isEmpty() {
		boolean leaseMissing = this.leaseId == null || this.leaseId.isEmpty();
		boolean commitMissing = this.commitId == null || this.commitId.isEmpty();
		return leaseMissing && commitMissing;
	}
}
diff --git a/src/main/java/com/att/research/mdbc/mixins/StagingTable.java b/src/main/java/com/att/research/mdbc/mixins/StagingTable.java
new file mode 100644
index 0000000..7da348d
--- /dev/null
+++ b/src/main/java/com/att/research/mdbc/mixins/StagingTable.java
@@ -0,0 +1,50 @@
+package com.att.research.mdbc.mixins;
+
+import java.io.Serializable;
+import java.util.Deque;
+import java.util.HashMap;
+import java.util.LinkedList;
+import java.util.Set;
+import org.apache.commons.lang3.tuple.Pair;
+import org.json.JSONObject;
+
+import com.att.research.logging.EELFLoggerDelegate;
+
+public class StagingTable implements Serializable{
+ /**
+ *
+ */
+ private static final long serialVersionUID = 7583182634761771943L;
+ private transient static EELFLoggerDelegate logger = EELFLoggerDelegate.getLogger(StagingTable.class);
+ private HashMap<String,Deque<Operation>> operations;
+
+ public StagingTable() {
+ operations = new HashMap<>();
+ }
+
+ synchronized public void addOperation(String key, OperationType type, String oldVal, String newVal) {
+ if(!operations.containsKey(key)) {
+ operations.put(key, new LinkedList<>());
+ }
+ operations.get(key).add(new Operation(type,newVal,oldVal));
+ }
+
+ synchronized public Deque<Pair<String,Operation>> getIterableSnapshot() throws NoSuchFieldException{
+ Deque<Pair<String,Operation>> response=new LinkedList<Pair<String,Operation>>();
+ //\TODO: check if we can just return the last change to a given key
+ Set<String> keys = operations.keySet();
+ for(String key : keys) {
+ Deque<Operation> ops = operations.get(key);
+ if(ops.isEmpty()) {
+ logger.error(EELFLoggerDelegate.errorLogger, "Invalid state of the Operation data structure when creating snapshot");
+ throw new NoSuchFieldException("Invalid state of the operation data structure");
+ }
+ response.add(Pair.of(key,ops.getLast()));
+ }
+ return response;
+ }
+
+ synchronized public void clean() {
+ operations.clear();
+ }
+}
diff --git a/src/main/java/com/att/research/mdbc/mixins/TablePartitionInformation.java b/src/main/java/com/att/research/mdbc/mixins/TablePartitionInformation.java
new file mode 100644
index 0000000..a2cf5dd
--- /dev/null
+++ b/src/main/java/com/att/research/mdbc/mixins/TablePartitionInformation.java
@@ -0,0 +1,15 @@
+package com.att.research.mdbc.mixins;
+
+import java.util.List;
+
/**
 * Immutable record of which partition a table currently belongs to, plus the
 * partitions it belonged to previously.
 */
public final class TablePartitionInformation {
	public final String table;
	public final String partition;
	public final List<String> oldPartitions;

	public TablePartitionInformation(String table, String partition, List<String> oldPartitions) {
		this.table = table;
		this.partition = partition;
		this.oldPartitions = oldPartitions;
	}
}
diff --git a/src/main/java/com/att/research/mdbc/mixins/TitReference.java b/src/main/java/com/att/research/mdbc/mixins/TitReference.java
new file mode 100644
index 0000000..f27b3a0
--- /dev/null
+++ b/src/main/java/com/att/research/mdbc/mixins/TitReference.java
@@ -0,0 +1,12 @@
+package com.att.research.mdbc.mixins;
+
/**
 * Immutable reference into a transaction-information table (TIT): the table
 * name plus the row index within it.
 */
public final class TitReference {
	public final String table;
	public final String index;

	public TitReference(String table, String index) {
		this.table = table;
		this.index = index;
	}

}
diff --git a/src/main/java/com/att/research/mdbc/mixins/TransactionInformationElement.java b/src/main/java/com/att/research/mdbc/mixins/TransactionInformationElement.java
new file mode 100644
index 0000000..1c8b799
--- /dev/null
+++ b/src/main/java/com/att/research/mdbc/mixins/TransactionInformationElement.java
@@ -0,0 +1,19 @@
+package com.att.research.mdbc.mixins;
+
+import java.util.List;
+
+public final class TransactionInformationElement {
+ public final String index;
+ public final List<RedoRecordId> redoLog;
+ public final String partition;
+ public final int latestApplied;
+ public final boolean applied;
+
+ public TransactionInformationElement(String index, List<RedoRecordId> redoLog, String partition, int latestApplied, boolean applied) {
+ this.index = index;
+ this.redoLog = redoLog;
+ this.partition = partition;
+ this.latestApplied = latestApplied;
+ this.applied = applied;
+ }
+}
diff --git a/src/main/java/com/att/research/mdbc/mixins/TxCommitProgress.java b/src/main/java/com/att/research/mdbc/mixins/TxCommitProgress.java
new file mode 100644
index 0000000..c0f7089
--- /dev/null
+++ b/src/main/java/com/att/research/mdbc/mixins/TxCommitProgress.java
@@ -0,0 +1,206 @@
+package com.att.research.mdbc.mixins;
+
+import java.math.BigInteger;
+import java.util.Map;
+import java.util.concurrent.ConcurrentHashMap;
+import java.util.concurrent.atomic.AtomicLong;
+import com.att.research.logging.EELFLoggerDelegate;
+
+import java.sql.Connection;
+import java.util.concurrent.atomic.AtomicReference;
+
+
+public class TxCommitProgress{
+ private EELFLoggerDelegate logger = EELFLoggerDelegate.getLogger(TxCommitProgress.class);
+
+ private AtomicReference<BigInteger> nextCommitId;
+ private Map<String, CommitProgress> transactionInfo;
+
+ public TxCommitProgress(){
+ nextCommitId=new AtomicReference<>(BigInteger.ZERO);
+ transactionInfo = new ConcurrentHashMap<>();
+ }
+
+ public boolean containsTx(String txId) {
+ return transactionInfo.containsKey(txId);
+ }
+
+ public BigInteger getCommitId(String txId) {
+ CommitProgress prog = transactionInfo.get(txId);
+ if(prog.isCommitIdAssigned()) {
+ return prog.getCommitId();
+ }
+ BigInteger commitId = nextCommitId.getAndUpdate((a)-> a.add(BigInteger.ONE));
+ prog.setCommitId(commitId);
+ return commitId;
+ }
+
+ public void createNewTransactionTracker(String id, Connection conn) {
+ transactionInfo.put(id, new CommitProgress(id,conn));
+ }
+
+ public void commitRequested(String txId) {
+ CommitProgress prog = transactionInfo.get(txId);
+ if(prog == null){
+ logger.error(EELFLoggerDelegate.errorLogger, "Transaction doesn't exist: [%l], failure when storing commit request",txId);
+ }
+ prog.setCommitRequested();
+ }
+
+ public void setSQLDone(String txId) {
+ CommitProgress prog = transactionInfo.get(txId);
+ if(prog == null){
+ logger.error(EELFLoggerDelegate.errorLogger, "Transaction doesn't exist: [%l], failure when storing saving completion of SQL",txId);
+ }
+ prog.setSQLCompleted();
+ }
+
+ public void setMusicDone(String txId) {
+ CommitProgress prog = transactionInfo.get(txId);
+ if(prog == null){
+ logger.error(EELFLoggerDelegate.errorLogger, "Transaction doesn't exist: [%l], failure when storing saving completion of Music",txId);
+ }
+ prog.setMusicCompleted();
+ }
+
+ public Connection getConnection(String txId){
+ CommitProgress prog = transactionInfo.get(txId);
+ if(prog == null){
+ logger.error(EELFLoggerDelegate.errorLogger, "Transaction doesn't exist: [%l], failure when retrieving statement",txId);
+ }
+ return prog.getConnection();
+ }
+
+ public void setRecordId(String txId, RedoRecordId recordId){
+ CommitProgress prog = transactionInfo.get(txId);
+ if(prog == null){
+ logger.error(EELFLoggerDelegate.errorLogger, "Transaction doesn't exist: [%l], failure when setting record Id",txId);
+ }
+ prog.setRecordId(recordId);
+ }
+
+ public RedoRecordId getRecordId(String txId) {
+ CommitProgress prog = transactionInfo.get(txId);
+ if(prog == null){
+ logger.error(EELFLoggerDelegate.errorLogger, "Transaction doesn't exist: [%l], failure when getting record Id",txId);
+ }
+ return prog.getRecordId();
+ }
+
+ public boolean isRecordIdAssigned(String txId) {
+ CommitProgress prog = transactionInfo.get(txId);
+ if(prog == null){
+ logger.error(EELFLoggerDelegate.errorLogger, "Transaction doesn't exist: [%l], failure when checking record",txId);
+ }
+ return prog.isRedoRecordAssigned();
+ }
+
+ public boolean isComplete(String txId) {
+ CommitProgress prog = transactionInfo.get(txId);
+ if(prog == null){
+ logger.error(EELFLoggerDelegate.errorLogger, "Transaction doesn't exist: [%l], failure when checking completion",txId);
+ }
+ return prog.isComplete();
+ }
+
+ public void reinitializeTxProgress(String txId) {
+ CommitProgress prog = transactionInfo.get(txId);
+ if(prog == null){
+ logger.error(EELFLoggerDelegate.errorLogger, "Transaction doesn't exist: [%l], failure when reinitializing tx progress",txId);
+ }
+ prog.reinitialize();
+ }
+
+ public void deleteTxProgress(String txId){
+ transactionInfo.remove(txId);
+ }
+}
+
+final class CommitProgress{
+ private String lTxId; // local transaction id
+ private BigInteger commitId; // commit id
+ private boolean commitRequested; //indicates if the user tried to commit the request already.
+ private boolean SQLDone; // indicates if SQL was already committed
+ private boolean MusicDone; // indicates if music commit was already performed, atomic bool
+ private Connection connection;// reference to a connection object. This is used to complete a commit if it failed in the original thread.
+ private Long timestamp; // last time this data structure was updated
+ private RedoRecordId redoRecordId;// record id for each partition
+
+ public CommitProgress(String id,Connection conn){
+ redoRecordId=null;
+ lTxId = id;
+ commitRequested = false;
+ SQLDone = false;
+ MusicDone = false;
+ connection = conn;
+ commitId = null;
+ timestamp = System.currentTimeMillis();
+ }
+
+ public synchronized boolean isComplete() {
+ return commitRequested && SQLDone && MusicDone;
+ }
+
+ public synchronized void setCommitId(BigInteger commitId) {
+ this.commitId = commitId;
+ timestamp = System.currentTimeMillis();
+ }
+
+ public synchronized void reinitialize() {
+ commitId = null;
+ redoRecordId=null;
+ commitRequested = false;
+ SQLDone = false;
+ MusicDone = false;
+ timestamp = System.currentTimeMillis();
+ }
+
+ public synchronized void setCommitRequested() {
+ commitRequested = true;
+ timestamp = System.currentTimeMillis();
+ }
+
+ public synchronized void setSQLCompleted() {
+ SQLDone = true;
+ timestamp = System.currentTimeMillis();
+ }
+
+ public synchronized void setMusicCompleted() {
+ MusicDone = true;
+ timestamp = System.currentTimeMillis();
+ }
+
+ public Connection getConnection() {
+ timestamp = System.currentTimeMillis();
+ return connection;
+ }
+
+ public long getTimestamInMillis() {
+ return timestamp;
+ }
+
+ public synchronized void setRecordId(RedoRecordId id) {
+ redoRecordId = id;
+ timestamp = System.currentTimeMillis();
+ }
+
+ public synchronized boolean isRedoRecordAssigned() {
+ return this.redoRecordId!=null;
+ }
+
+ public synchronized RedoRecordId getRecordId() {
+ return redoRecordId;
+ }
+
+ public synchronized BigInteger getCommitId() {
+ return commitId;
+ }
+
+ public synchronized String getId() {
+ return this.lTxId;
+ }
+
+ public synchronized boolean isCommitIdAssigned() {
+ return this.commitId!= null;
+ }
+} \ No newline at end of file
diff --git a/src/main/java/com/att/research/mdbc/mixins/Utils.java b/src/main/java/com/att/research/mdbc/mixins/Utils.java
new file mode 100755
index 0000000..22df08f
--- /dev/null
+++ b/src/main/java/com/att/research/mdbc/mixins/Utils.java
@@ -0,0 +1,220 @@
+package com.att.research.mdbc.mixins;
+
+import java.io.IOException;
+import java.io.InputStream;
+import java.math.BigDecimal;
+import java.nio.ByteBuffer;
+import java.sql.ResultSet;
+import java.sql.SQLException;
+import java.sql.Timestamp;
+import java.sql.Types;
+import java.util.ArrayList;
+import java.util.Date;
+import java.util.List;
+import java.util.Properties;
+
+import org.json.JSONObject;
+
+import com.att.research.logging.EELFLoggerDelegate;
+import com.att.research.mdbc.TableInfo;
+import com.datastax.driver.core.utils.Bytes;
+
+/**
+ * Utility functions used by several of the mixins should go here.
+ *
+ * @author Robert P. Eby
+ */
+public class Utils {
+ private static EELFLoggerDelegate logger = EELFLoggerDelegate.getLogger(Utils.class);
+
+	/**
+	 * Transforms a JSONObject into an array of objects representing one row of a table.
+	 * @param ti information related to the table
+	 * @param tbl table that jo belongs to
+	 * @param jo object that represents a row in the table
+	 * @param musicDefaultPrimaryKeyName contains the name of key associated with the default primary key used by MUSIC, it can be null, if not required
+	 * @return array with the objects in the row
+	 */
+	public static Object[] jsonToRow(TableInfo ti, String tbl, JSONObject jo, String musicDefaultPrimaryKeyName) {
+		int columnSize = ti.columns.size();
+		ArrayList<Object> rv = new ArrayList<Object>();
+		// the MUSIC-generated primary key, when present, is prepended to the row
+		if (musicDefaultPrimaryKeyName!=null && jo.has(musicDefaultPrimaryKeyName)) {
+			rv.add(jo.getString(musicDefaultPrimaryKeyName));
+		}
+		for (int i = 0; i < columnSize; i++) {
+			String colname = ti.columns.get(i);
+			// missing columns fall back to a type-appropriate default (0/false/"")
+			switch (ti.coltype.get(i)) {
+				case Types.BIGINT:
+					rv.add(jo.optLong(colname, 0));
+					break;
+				case Types.BOOLEAN:
+					rv.add(jo.optBoolean(colname, false));
+					break;
+				case Types.BLOB:
+					// NOTE(review): BLOBs are carried as strings here — confirm consumers expect that
+					rv.add(jo.optString(colname, ""));
+					break;
+				case Types.DECIMAL:
+					rv.add(jo.optBigDecimal(colname, BigDecimal.ZERO));
+					break;
+				case Types.DOUBLE:
+					rv.add(jo.optDouble(colname, 0));
+					break;
+				case Types.INTEGER:
+					rv.add(jo.optInt(colname, 0));
+					break;
+				case Types.TIMESTAMP:
+					// timestamps are passed through as strings, not parsed into Date
+					//rv[i] = new Date(jo.optString(colname, ""));
+					rv.add(jo.optString(colname, ""));
+					break;
+				case Types.DATE:
+				case Types.VARCHAR:
+					//Fall through
+				default:
+					rv.add(jo.optString(colname, ""));
+					break;
+			}
+		}
+		return rv.toArray();
+	}
+
+ /**
+ * Return a String equivalent of an Object. Useful for writing SQL.
+ * @param val the object to String-ify
+ * @return the String value
+ */
+ public static String getStringValue(Object val) {
+ if (val == null)
+ return "NULL";
+ if (val instanceof String)
+ return "'" + val.toString().replaceAll("'", "''") + "'"; // double any quotes
+ if (val instanceof Number)
+ return ""+val;
+ if (val instanceof ByteBuffer)
+ return "'" + Bytes.toHexString((ByteBuffer)val).substring(2) + "'"; // substring(2) is to remove the "0x" at front
+ if (val instanceof Date)
+ return "'" + (new Timestamp(((Date)val).getTime())).toString() + "'";
+ // Boolean, and anything else
+ return val.toString();
+ }
+
+	/**
+	 * Parse a JDBC result set into a list of row arrays, using the column
+	 * names/types recorded in the table info.
+	 * @param ti column names and SQL types for the table being read
+	 * @param rs the result set to drain; the caller retains ownership (not closed here)
+	 * @return one Object[] per row, in result-set order
+	 * @throws SQLException if any column read fails
+	 */
+	public static ArrayList<Object[]> parseResults(TableInfo ti, ResultSet rs) throws SQLException {
+		ArrayList<Object[]> results = new ArrayList<Object[]>();
+		while (rs.next()) {
+			Object[] row = new Object[ti.columns.size()];
+			for (int i = 0; i < ti.columns.size(); i++) {
+				String colname = ti.columns.get(i);
+				switch (ti.coltype.get(i)) {
+					case Types.BIGINT:
+						row[i] = rs.getLong(colname);
+						break;
+					case Types.BOOLEAN:
+						row[i] = rs.getBoolean(colname);
+						break;
+					case Types.BLOB:
+						// NOTE(review): BLOB columns are skipped — row[i] stays null; confirm callers tolerate this
+						System.err.println("WE DO NOT SUPPORT BLOBS IN H2!! COLUMN NAME="+colname);
+						//logger.error("WE DO NOT SUPPORT BLOBS IN H2!! COLUMN NAME="+colname);
+						// throw an exception here???
+						break;
+					case Types.DOUBLE:
+						row[i] = rs.getDouble(colname);
+						break;
+					case Types.INTEGER:
+						row[i] = rs.getInt(colname);
+						break;
+					case Types.TIMESTAMP:
+						// timestamps read back as strings, mirroring jsonToRow
+						//rv[i] = new Date(jo.optString(colname, ""));
+						row[i] = rs.getString(colname);
+						break;
+					case Types.VARCHAR:
+						//Fall through
+					default:
+						row[i] = rs.getString(colname);
+						break;
+				}
+			}
+			results.add(row);
+		}
+		return results;
+	}
+
+	/**
+	 * Scan the MIXINS list in mdbc_driver.properties and return those classes
+	 * that implement the given interface.
+	 * @param implx the interface the candidate mixin classes must implement
+	 * @return the (possibly empty) list of matching classes; never null
+	 */
+	static List<Class<?>> getClassesImplementing(Class<?> implx) {
+		List<Class<?>> list = new ArrayList<Class<?>>();
+		Properties pr = new Properties();
+		// a missing resource used to make pr.load(null) throw an uncaught NPE
+		InputStream is = Utils.class.getResourceAsStream("/mdbc_driver.properties");
+		if (is == null) {
+			logger.error(EELFLoggerDelegate.errorLogger, "Could not find mdbc_driver.properties on the classpath");
+			return list;
+		}
+		try {
+			pr.load(is);
+		} catch (IOException e) {
+			logger.error(EELFLoggerDelegate.errorLogger, "Could not load property file > " + e.getMessage());
+			return list;
+		} finally {
+			try {
+				is.close();	// the stream was previously leaked
+			} catch (IOException e) {
+				logger.error(EELFLoggerDelegate.errorLogger, e.getMessage());
+			}
+		}
+		String mixins = pr.getProperty("MIXINS");
+		if (mixins == null) {
+			// a missing MIXINS property used to make mixins.split(...) throw NPE
+			return list;
+		}
+		for (String className: mixins.split("[ ,]")) {
+			try {
+				Class<?> cl = Class.forName(className.trim());
+				if (MixinFactory.impl(cl, implx)) {
+					list.add(cl);
+				}
+			} catch (ClassNotFoundException e) {
+				logger.error(EELFLoggerDelegate.errorLogger,"Mixin class "+className+" not found.");
+			}
+		}
+		return list;
+	}
+
+ public static void registerDefaultDrivers() {
+ Properties pr = null;
+ try {
+ pr = new Properties();
+ pr.load(Utils.class.getResourceAsStream("/mdbc_driver.properties"));
+ }
+ catch (IOException e) {
+ logger.error("Could not load property file > " + e.getMessage());
+ }
+
+ @SuppressWarnings("unused")
+ List<Class<?>> list = new ArrayList<Class<?>>();
+ String drivers = pr.getProperty("DEFAULT_DRIVERS");
+ for (String driver: drivers.split("[ ,]")) {
+ logger.info(EELFLoggerDelegate.applicationLogger, "Registering jdbc driver '" + driver + "'");
+ try {
+ @SuppressWarnings("unused")
+ Class<?> cl = Class.forName(driver.trim());
+ } catch (ClassNotFoundException e) {
+ logger.error(EELFLoggerDelegate.errorLogger,"Driver class "+driver+" not found.");
+ }
+ }
+ }
+
+	/**
+	 * Load the optional mdbc.properties resource from the classpath.
+	 * Falls back to an empty Properties (defaults) when the resource is missing
+	 * or unreadable.
+	 * @return the loaded properties; never null
+	 */
+	public static Properties getMdbcProperties() {
+		Properties prop = new Properties();
+		InputStream input = null;
+		try {
+			// ClassLoader resource names must NOT start with '/'; with the leading
+			// slash this lookup always returned null, so defaults were always used.
+			input = Utils.class.getClassLoader().getResourceAsStream("mdbc.properties");
+			if (input == null) {
+				logger.warn(EELFLoggerDelegate.applicationLogger,
+						"Could not load mdbc.properties. Proceeding with defaults");
+			} else {
+				prop.load(input);
+			}
+		} catch (Exception e) {
+			logger.warn(EELFLoggerDelegate.applicationLogger, "Could not load mdbc.properties. "
+					+ "Proceeding with defaults " + e.getMessage());
+		} finally {
+			if (input != null) {
+				try {
+					input.close();
+				} catch (IOException e) {
+					logger.error(EELFLoggerDelegate.errorLogger, e.getMessage());
+				}
+			}
+		}
+		return prop;
+	}
+}
diff --git a/src/main/java/com/att/research/mdbc/mixins/package-info.java b/src/main/java/com/att/research/mdbc/mixins/package-info.java
new file mode 100755
index 0000000..edad7e8
--- /dev/null
+++ b/src/main/java/com/att/research/mdbc/mixins/package-info.java
@@ -0,0 +1,47 @@
+/**
+ * <p>
+ * This package provides the "mixins" to use when constructing a MusicSqlManager. The mixins define how MusicSqlManager
+ * will interface both to the database being mirrored (via the {@link com.att.research.mdbc.mixins.DBInterface} interface),
+ * and how it will interface to the persistence layer provided by MUSIC (via the {@link com.att.research.mdbc.mixins.MusicInterface}
+ * interface).
+ * </p>
+ * <p>
+ * The choice of which mixins to use is determined by the MusicSqlManager constructor.
+ * It will decide based upon the URL and connection properties with which it is presented (from the
+ * {@link java.sql.DriverManager#getConnection(String, java.util.Properties)} call).
+ * </p>
+ * <p>
+ * The list of mixins that may be selected from is stored in the properties files <code>mdbc.properties</code>
+ * under the name MIXINS. This implementation provides the following mixins:
+ * </p>
+ * <table summary="">
+ * <tr><th>Name</th><th>Class</th><th>Description</th></tr>
+ * <tr><td>cassandra</td><td>c.a.r.m.m.CassandraMixin</td><td>A <a href="http://cassandra.apache.org/">Cassandra</a> based
+ * persistence layer (without any of the table locking that MUSIC normally provides).</td></tr>
+ * <tr><td>cassandra2</td><td>c.a.r.m.m.Cassandra2Mixin</td><td>Similar to the <i>cassandra</i> mixin, but stores all
+ * dirty row information in one table, rather than one table per real table.</td></tr>
+ * <tr><td>h2</td><td>c.a.r.m.m.H2Mixin</td><td>This mixin provides access to either an in-memory, or a local
+ * (file-based) version of the H2 database.</td></tr>
+ * <tr><td>h2server</td><td>c.a.r.m.m.H2ServerMixin</td><td>This mixin provides access to a copy of the H2 database
+ * running as a server. Because the server needs special Java classes in order to handle certain TRIGGER actions, the
+ * server must be set up in a special way (see below).</td></tr>
+ * <tr><td>mysql</td><td>c.a.r.m.m.MySQLMixin</td><td>This mixin provides access to MySQL running on a remote server.</td></tr>
+ * </table>
+ * <h2>Starting the H2 Server</h2>
+ * <p>
+ * The H2 Server, when used with MDBC, must contain the MDBC Trigger class, and supporting libraries.
+ * This can be done as follows:
+ * </p>
+ * <pre>
+ * CLASSPATH=$PWD/target/mdbc-h2server-0.0.1-SNAPSHOT.jar
+ * CLASSPATH=$CLASSPATH:$HOME/.m2/repository/com/h2database/h2/1.3.168/h2-1.3.168.jar
+ * CLASSPATH=$CLASSPATH:$HOME/.m2/repository/log4j/log4j/1.2.17/log4j-1.2.17.jar
+ * CLASSPATH=$CLASSPATH:$HOME/.m2/repository/org/json/json/20160810/json-20160810.jar
+ * export CLASSPATH
+ * java org.h2.tools.Server
+ * </pre>
+ * <p>
+ * The <code>mdbc-h2server-0.0.1-SNAPSHOT.jar</code> file is built with Maven using the <code>pom-h2server.xml</code> pom file.
+ * </p>
+ */
+package com.att.research.mdbc.mixins;
diff --git a/src/main/java/com/att/research/mdbc/package-info.java b/src/main/java/com/att/research/mdbc/package-info.java
new file mode 100755
index 0000000..5ad59c8
--- /dev/null
+++ b/src/main/java/com/att/research/mdbc/package-info.java
@@ -0,0 +1,87 @@
+/**
+ * <p>
+ * This package provides a JDBC driver that can be used to mirror the contents of a database to and from
+ * <a href="http://cassandra.apache.org/">Cassandra</a>. The mirroring occurs as a side effect of
+ * execute() statements against a JDBC connection, and triggers placed in the database to catch database modifications.
+ * The initial implementation is written to mirror an <a href="http://h2database.com/">H2</a> database.
+ * </p>
+ * <p>
+ * This JDBC driver will intercept all table creations, SELECTs, INSERTs, DELETEs, and UPDATEs made to the underlying
+ * database, and make sure they are copied to Cassandra. In addition, for every table XX that is created, another table
+ * DIRTY_XX will be created to communicate the existence of <i>dirty rows</i> to other Cassandra replicas (with the
+ * Cassandra2 Mixin, the table is called DIRTY____ and there is only one table). Dirty rows
+ * will be copied, as needed back into the database from Cassandra before any SELECT.
+ * </p>
+ * <h3>To use with JDBC</h3>
+ * <ol>
+ * <li>Add this jar, and all dependent jars to your CLASSPATH.</li>
+ * <li>Rewrite your JDBC URLs from <code>jdbc:h2:...</code> to <code>jdbc:mdbc:...</code>.</li>
+ * <li>If you supply properties to the {@link java.sql.DriverManager#getConnection(String, java.util.Properties)} call,
+ * use the following optional properties to control behavior of the proxy:
+ * <table summary="">
+ * <tr><th>Property Name</th><th>Property Value</th><th>Default Value</th></tr>
+ * <tr><td>MDBC_DB_MIXIN</td><td>The mixin name to use to select the database mixin to use for this connection.</td></tr>
+ * <tr><td>MDBC_MUSIC_MIXIN</td><td>The mixin name to use to select the MUSIC mixin to use for this connection.</td></tr>
+ * <tr><td>myid</td><td>The ID of this replica in the collection of replicas sharing the same tables.</td><td>0</td></tr>
+ * <tr><td>replicas</td><td>A comma-separated list of replica names for the collection of replicas sharing the same tables.</td><td>the value of <i>myid</i></td></tr>
+ * <tr><td>music_keyspace</td><td>The keyspace name to use in Cassandra for all tables created by this instance of MDBC.</td><td>mdbc</td></tr>
+ * <tr><td>music_address</td><td>The IP address to use to connect to Cassandra.</td><td>localhost</td></tr>
+ * <tr><td>music_rfactor</td><td>The replication factor to use for the new keyspace that is created.</td><td>2</td></tr>
+ * <tr><td>disabled</td><td>If set to <i>true</i> the mirroring is completely disabled; this is the equivalent of using the database driver directly.</td><td>false</td></tr>
+ * </table>
+ * </li>
+ * <li>Load the driver using the following call:
+ * <pre>
+ * Class.forName("com.att.research.mdbc.ProxyDriver");
+ * </pre></li>
+ * </ol>
+ * <p>Because, under the current design, the MDBC driver must be running within the same JVM as the database, MDBC
+ * will only explicitly support in-memory databases (URL of <code>jdbc:mdbc:mem:...</code>), or local file
+ * databases (URL of <code>jdbc:mdbc:/path/to/file</code>). Attempts to access a remote H2 server (URL
+ * <code>jdbc:mdbc:tcp://host/path/to/db</code>) will probably not work, although MDBC will not stop you from trying.
+ * </p>
+ *
+ * <h3>To Define a Tomcat DataSource Resource</h3>
+ * <p>The following code snippet can be used as a guide when setting up a Tomcat DataSource Resource.
+ * This snippet goes in the <i>server.xml</i> file. The items in <b>bold</b> indicate changed or new items:</p>
+ * <pre>
+ * &lt;Resource name="jdbc/ProcessEngine"
+ * auth="Container"
+ * type="javax.sql.DataSource"
+ * factory="org.apache.tomcat.jdbc.pool.DataSourceFactory"
+ * uniqueResourceName="process-engine"
+ * driverClassName="<b>com.att.research.mdbc.ProxyDriver</b>"
+ * url="jdbc:<b>mdbc</b>:./camunda-h2-dbs/process-engine;MVCC=TRUE;TRACE_LEVEL_FILE=0;DB_CLOSE_ON_EXIT=FALSE"
+ * <b>connectionProperties="myid=0;replicas=0,1,2;music_keyspace=camunda;music_address=localhost"</b>
+ * username="sa"
+ * password="sa"
+ * maxActive="20"
+ * minIdle="5" /&gt;
+ * </pre>
+ *
+ * <h3>To Define a JBoss DataSource</h3>
+ * <p>The following code snippet can be used as a guide when setting up a JBoss DataSource.
+ * This snippet goes in the <i>service.xml</i> file. The items in <b>bold</b> indicate changed or new items:</p>
+ * <pre>
+ * &lt;datasources&gt;
+ * &lt;datasource jta="true" jndi-name="java:jboss/datasources/ProcessEngine" pool-name="ProcessEngine" enabled="true" use-java-context="true" use-ccm="true"&gt;
+ * &lt;connection-url&gt;jdbc:<b>mdbc</b>:/opt/jboss-eap-6.2.4/standalone/camunda-h2-dbs/process-engine;DB_CLOSE_DELAY=-1;MVCC=TRUE;DB_CLOSE_ON_EXIT=FALSE&lt;/connection-url&gt;
+ * <b>&lt;connection-property name="music_keyspace"&gt;
+ * camunda
+ * &lt;/connection-property&gt;</b>
+ * &lt;driver&gt;mdbc&lt;/driver&gt;
+ * &lt;security&gt;
+ * &lt;user-name&gt;sa&lt;/user-name&gt;
+ * &lt;password&gt;sa&lt;/password&gt;
+ * &lt;/security&gt;
+ * &lt;/datasource&gt;
+ * &lt;drivers&gt;
+ * <b>&lt;driver name="mdbc" module="com.att.research.mdbc"&gt;
+ * &lt;driver-class&gt;com.att.research.mdbc.ProxyDriver&lt;/driver-class&gt;
+ * &lt;/driver&gt;</b>
+ * &lt;/drivers&gt;
+ * &lt;/datasources&gt;
+ * </pre>
+ * <p>Note: This assumes that you have built and installed the <b>com.att.research.mdbc</b> module within JBoss.
+ */
+package com.att.research.mdbc;
diff --git a/src/main/java/com/att/research/mdbc/tests/ConnectionTest.java b/src/main/java/com/att/research/mdbc/tests/ConnectionTest.java
new file mode 100644
index 0000000..721b389
--- /dev/null
+++ b/src/main/java/com/att/research/mdbc/tests/ConnectionTest.java
@@ -0,0 +1,419 @@
+package com.att.research.mdbc.tests;
+
+//import java.sql.Connection;
+//import java.sql.DriverManager;
+//import java.sql.PreparedStatement;
+//import java.sql.ResultSet;
+//import java.sql.SQLException;
+//import java.sql.Statement;
+//import java.util.HashSet;
+//import java.util.Properties;
+//import java.util.Set;
+//
+//import org.h2.tools.Server;
+//import org.junit.After;
+//import org.junit.AfterClass;
+//import org.junit.Before;
+//import org.junit.BeforeClass;
+//import org.junit.Test;
+//import org.slf4j.Logger;
+//import org.slf4j.LoggerFactory;
+//
+//import com.mysql.jdbc.jdbc2.optional.MysqlDataSource;
+
+
+//@FixMethodOrder(MethodSorters.NAME_ASCENDING)
+//@RunWith(ConcurrentTestRunner.class)
+public class ConnectionTest {
+//
+//// static {
+//// System.setProperty(org.slf4j.impl.SimpleLogger.DEFAULT_LOG_LEVEL_KEY, "INFO");
+//// System.setProperty(org.slf4j.impl.SimpleLogger.LOG_FILE_KEY, String.format("ComparativeAnalysisTest-%d.log", System.currentTimeMillis()));
+//// }
+// private static final Logger LOG = LoggerFactory.getLogger(ConnectionTest.class);
+//
+// Set<Thread> runningThreads = new HashSet<Thread>();
+//
+// @BeforeClass
+// public static void setUpBeforeClass() throws Exception {
+//
+// }
+//
+// @AfterClass
+// public static void tearDownAfterClass() throws Exception {
+//
+// }
+//
+// @Before
+// public void setUp() throws Exception {
+//
+// }
+//
+// @After
+// public void tearDown() throws Exception {
+//
+// }
+//
+// //@Test
+// public void test01() {
+// System.out.println("TEST 1: Getting ready for testing connection to Cassandra");
+//
+// final CassandraConnector client = new CassandraConnector();
+// final String ipAddress = "localhost";
+// final int port = 9042;
+// LOG.info("Connecting to IP Address " + ipAddress + ":" + port + "...");
+// client.connect(ipAddress, port);
+// client.close();
+// System.out.println();
+// }
+//
+// /**
+// * Tests for using jdbc as well as mdbc. In order to use, must have mysql and
+// * Cassandra services running locally. Must have a database EMP created in the
+// * mysql db. Uses "Driver.getConnection(com.mysql.jdbc.Driver)" for jdbc connection
+// *
+// */
+// //@Test
+// public void test02() {
+// System.out.println("TEST 2: Getting ready for testing connection via jdbc");
+// // JDBC driver name and database URL
+// final String JDBC_DRIVER = "com.mysql.jdbc.Driver";
+// final String DB_URL = "jdbc:mysql://localhost/EMP";
+//
+// // Database credentials
+// final String USER = "alice";
+// final String PASS = "bob";
+// Properties connectionProps = new Properties();
+// connectionProps.put("user", USER);
+// connectionProps.put("password", PASS);
+//
+// System.out.println("Connecting directly to database...");
+// connectViaDriverManager(JDBC_DRIVER, DB_URL, connectionProps);
+// System.out.println();
+// }
+//
+// /**
+// * Performs same test as @test02() except this test uses mdbc.
+// *
+// * In order to use, must have mysql and Cassandra services running locally. Must
+// * have a database EMP created in the mysql db. Uses
+// * "Driver.getConnection(com.att.research.mdbc.ProxyDriver)" for mdbc
+// * connection
+// */
+// //@Test
+// public void test03() {
+// System.out.println("TEST 3: Getting ready for testing connection via mdbc");
+// // Database credentials
+// final String USER = "alice";
+// final String PASS = "bob";
+// Properties connectionProps = new Properties();
+// connectionProps.put("user", USER);
+// connectionProps.put("password", PASS);
+//
+// final String MDBC_DRIVER = "com.att.research.mdbc.ProxyDriver";
+// final String MDBC_DB_URL = "jdbc:mdbc://localhost/TEST";
+// final String MDBC_DB_MIXIN = "mysql";
+// connectionProps.put("MDBC_DB_MIXIN", MDBC_DB_MIXIN);
+//
+// System.out.println("Connecting to database via mdbc");
+// connectViaDriverManager(MDBC_DRIVER, MDBC_DB_URL, connectionProps);
+// System.out.println();
+// }
+//
+// /**
+// * Performs same test as @test02() except this test uses mdbc.
+// *
+// * In order to use, must have mysql and Cassandra services running locally. Must
+// * have a database EMP created in the mysql db. Uses
+// * "Driver.getConnection(com.att.research.mdbc.ProxyDriver)" for mdbc
+// * connection
+// *
+// * Uses preparedStatements
+// */
+// //@Test
+// public void test03point5() {
+// System.out.println("TEST 3.5: Getting ready for testing connection via mdbc w/ PreparedStatement");
+// // Database credentials
+// final String USER = "alice";
+// final String PASS = "bob";
+// Properties connectionProps = new Properties();
+// connectionProps.put("user", USER);
+// connectionProps.put("password", PASS);
+//
+// final String MDBC_DRIVER = "com.att.research.mdbc.ProxyDriver";
+// final String MDBC_DB_URL = "jdbc:mdbc://localhost/EMP";
+// //final String MDBC_DRIVER = "org.h2.Driver";
+// //final String MDBC_DB_URL = "jdbc:h2:tcp://localhost:9092/~/test";
+// final String MDBC_DB_MIXIN = "mysql";
+// connectionProps.put("MDBC_DB_MIXIN", MDBC_DB_MIXIN);
+//
+// System.out.println("Connecting to database via mdbc");
+// Connection conn = null;
+// PreparedStatement stmt = null;
+// try {
+// //STEP 2: Register JDBC driver
+// Class.forName(MDBC_DRIVER);
+//
+// //STEP 3: Open a connection
+// conn = DriverManager.getConnection(MDBC_DB_URL, connectionProps);
+// conn.setAutoCommit(false);
+//
+// //STEP 4: Execute a query
+// System.out.println("Inserting into DB");
+// stmt = conn.prepareStatement("INSERT INTO EMPLOYEE (id, first, last, age) VALUES (?, ?, ?, ?)");
+// stmt.setString(1, null);
+// stmt.setString(2, "John");
+// stmt.setString(3, "Smith");
+// stmt.setInt(4, 20);
+// stmt.execute();
+//
+// System.out.println("Inserting again into DB");
+// stmt.setString(2, "Jane");
+// stmt.setInt(4, 30);
+// stmt.execute();
+//
+// stmt.close();
+//
+// conn.commit();
+//
+// System.out.println("Querying the DB");
+// stmt = conn.prepareStatement("SELECT id, first, last, age FROM EMPLOYEE WHERE age < ?");
+// stmt.setInt(1, 25);
+// ResultSet rs = stmt.executeQuery();
+// //STEP 5: Extract data from result set
+// while(rs.next()) {
+// //Retrieve by column name
+// int id = rs.getInt("id");
+// int age = rs.getInt("age");
+// String first = rs.getString("first");
+// String last = rs.getString("last");
+//
+// //Display values
+// //*
+// System.out.print("ID: " + id);
+// System.out.print(", Age: " + age);
+// System.out.print(", First: " + first);
+// System.out.println(", Last: " + last);
+// //*/
+// }
+//
+// System.out.println("Querying again");
+// stmt.setInt(1, 35);
+// rs = stmt.executeQuery();
+// //STEP 5: Extract data from result set
+// while(rs.next()) {
+// //Retrieve by column name
+// int id = rs.getInt("id");
+// int age = rs.getInt("age");
+// String first = rs.getString("first");
+// String last = rs.getString("last");
+//
+// //Display values
+// //*
+// System.out.print("ID: " + id);
+// System.out.print(", Age: " + age);
+// System.out.print(", First: " + first);
+// System.out.println(", Last: " + last);
+// //*/
+// }
+//
+//
+// //sql = "DELETE FROM EMPLOYEE WHERE first = \"John\" and last = \"Smith\"";
+// //stmt.execute(sql);
+//
+// //sql = "DROP TABLE IF EXISTS EMPLOYEE";
+// //stmt.execute(sql);
+//
+// //STEP 6: Clean-up environment
+// rs.close();
+// stmt.close();
+// conn.close();
+// } catch(SQLException se) {
+// //Handle errors for JDBC
+// se.printStackTrace();
+// } catch (Exception e) {
+// //Handle errors for Class.forName
+// e.printStackTrace();
+// } finally {
+// //finally block used to close resources
+// try {
+// if(stmt!=null)
+// stmt.close();
+// } catch(SQLException se2) {
+// }
+// try {
+// if(conn!=null)
+// conn.close();
+// } catch(SQLException se) {
+// se.printStackTrace();
+// }
+// }
+// System.out.println("Done");
+// }
+//
+//
+// /**
+// * Connects to a generic database. Can be used for mdbc or jdbc
+// * @param DBC_DRIVER the driver for which to register (Class.forName(DBC_DRIVER))
+// * @param DB_URL the URL for the database we are testing
+// * @param connectionProps
+// */
+// private void connectViaDriverManager(final String DBC_DRIVER, final String DB_URL, Properties connectionProps) {
+// Connection conn = null;
+// Statement stmt = null;
+// try {
+//
+// //Server server = Server.createTcpServer("-tcpAllowOthers").start();
+// //STEP 2: Register JDBC driver
+// Class.forName(DBC_DRIVER);
+//
+// //STEP 3: Open a connection
+// conn = DriverManager.getConnection(DB_URL, connectionProps);
+// conn.setAutoCommit(false);
+//
+// //STEP 4: Execute a query
+// stmt = conn.createStatement();
+// String sql;
+//
+// //sql = "DROP TABLE EMPLOYEE";
+// //stmt.execute(sql);
+//
+// sql = "CREATE TABLE IF NOT EXISTS EMPLOYEE (id INT primary key, first VARCHAR(20), last VARCHAR(20), age INT);";
+// stmt.execute(sql);
+//
+// sql = "INSERT INTO EMPLOYEE (id, first, last, age) VALUES (\"34\", \"Jane4\", \"Doe4\", \"40\")";
+// stmt.execute(sql);
+//
+// sql = "SELECT id, first, last, age FROM EMPLOYEE";
+// ResultSet rs = stmt.executeQuery(sql);
+//
+// //STEP 5: Extract data from result set
+// while(rs.next()) {
+// //Retrieve by column name
+// int id = rs.getInt("id");
+// int age = rs.getInt("age");
+// String first = rs.getString("first");
+// String last = rs.getString("last");
+//
+// //Display values
+// //*
+// System.out.print("ID: " + id);
+// System.out.print(", Age: " + age);
+// System.out.print(", First: " + first);
+// System.out.println(", Last: " + last);
+// //*/
+//
+// }
+// //sql = "DELETE FROM EMPLOYEE WHERE first = \"John\" and last = \"Smith\"";
+// //stmt.execute(sql);
+//
+// //sql = "DROP TABLE IF EXISTS EMPLOYEE";
+// //stmt.execute(sql);
+//
+// conn.commit();
+//
+// //STEP 6: Clean-up environment
+// rs.close();
+// stmt.close();
+// conn.close();
+// } catch(SQLException se) {
+// //Handle errors for JDBC
+// se.printStackTrace();
+// } catch (Exception e) {
+// //Handle errors for Class.forName
+// e.printStackTrace();
+// } finally {
+// //finally block used to close resources
+// try {
+// if(stmt!=null)
+// stmt.close();
+// } catch(SQLException se2) {
+// }
+// try {
+// if(conn!=null)
+// conn.close();
+// } catch(SQLException se) {
+// se.printStackTrace();
+// }
+// }
+// }
+//
+//
+//
+// /**
+// * Must be mysql datasource
+// * @throws Exception
+// */
+// //@Test
+// public void test04() throws Exception {
+// String dbConnectionName = "testing";
+// String dbUserId = "alice";
+// String dbPasswd = "bob";
+// String db_url = "jdbc:mysql://localhost/EMP";
+// MysqlDataSource dataSource = new MysqlDataSource();
+// dataSource.setUser(dbUserId);
+// dataSource.setPassword(dbPasswd);
+// dataSource.setURL(db_url);
+//
+//
+// Connection con = dataSource.getConnection();
+// Statement st = con.createStatement();
+// ResultSet rs = null;
+//
+// //FIXME CREATE EMPLOYEE TABLE
+//
+// if (st.execute("insert into EMPLOYEE values (\"John Doe\");")) {
+// rs = st.getResultSet();
+// }
+//
+// rs = st.executeQuery("select * from EMPLOYEE;");
+// while (rs.next()) {
+// System.out.println(rs.getString("name"));
+// }
+//
+// if (st.execute("DELETE FROM EMPLOYEE")) {
+// rs = st.getResultSet();
+// }
+// rs.close();
+// st.close();
+// con.close();
+// }
+//
+// /**
+// * Test connection to mysql datasource class
+// * @throws Exception
+// */
+// @Test
+// public void test05() throws Exception {
+// String dbConnectionName = "testing";
+// String dbUserId = "alice";
+// String dbPasswd = "bob";
+// String db_url = "jdbc:mdbc://localhost/EMP";
+// String db_type = "mysql";
+// MdbcDataSource dataSource = new MdbcDataSource();
+// dataSource.setUser(dbUserId);
+// dataSource.setPassword(dbPasswd);
+// dataSource.setURL(db_url);
+// dataSource.setDBType(db_type);
+//
+// Connection con = dataSource.getConnection();
+// Statement st = con.createStatement();
+// ResultSet rs = null;
+//
+// if (st.execute("insert into EMPLOYEE values (\"John Doe\");")) {
+// rs = st.getResultSet();
+// }
+//
+// rs = st.executeQuery("select * from EMPLOYEE;");
+// while (rs.next()) {
+// System.out.println(rs.getString("name"));
+// }
+//
+// if (st.execute("DELETE FROM EMPLOYEE")) {
+// rs = st.getResultSet();
+// }
+// rs.close();
+// st.close();
+// con.close();
+// }
+}
diff --git a/src/main/java/com/att/research/mdbc/tests/MAIN.java b/src/main/java/com/att/research/mdbc/tests/MAIN.java
new file mode 100755
index 0000000..164b088
--- /dev/null
+++ b/src/main/java/com/att/research/mdbc/tests/MAIN.java
@@ -0,0 +1,106 @@
+package com.att.research.mdbc.tests;
+
+import java.io.FileInputStream;
+import java.io.InputStream;
+import java.lang.reflect.Constructor;
+import java.util.ArrayList;
+import java.util.List;
+
+import org.apache.log4j.Logger;
+import org.json.JSONArray;
+import org.json.JSONObject;
+import org.json.JSONTokener;
+
+/**
+ * Run all the tests against all the configurations specified in /tests.json.
+ *
+ * @author Robert Eby
+ */
+public class MAIN {
+ public static final String CONFIG = "/tests.json";
+
+ /**
+ * This class runs all the tests against all the configurations specified in /tests.json.
+ * It assumes that a copy of Cassandra is running locally on port 9042, that a copy of H2
+ * server is is running locally on port 8082, and that a copy of MySQL is running locally
+ * on port 3306. These can be adjusted by editing the /tests.json file.
+ *
+ * @param args command line arguments
+ * @throws Exception if anything goes wrong
+ */
+ public static void main(String[] args) throws Exception {
+ new MAIN(args).run();
+ System.exit(0);
+ }
+
+ private JSONArray configs;
+ private List<Test> tests;
+ private int total_success, total_failure;
+
+ public MAIN(String[] args) throws Exception {
+ configs = null;
+ tests = new ArrayList<Test>();
+ total_success = total_failure = 0;
+
+ InputStream is = null;
+ if (args.length == 0) {
+ is = this.getClass().getResourceAsStream(CONFIG);
+ } else {
+ is = new FileInputStream(args[0]);
+ }
+ if (is != null) {
+ JSONObject jo = new JSONObject(new JSONTokener(is));
+ is.close();
+ configs = jo.getJSONArray("configs");
+
+ JSONArray ja = jo.getJSONArray("tests");
+ for (int i = 0; i < ja.length(); i++) {
+ Class<?> cl = Class.forName(ja.getString(i).trim());
+ if (cl != null) {
+ Constructor<?> con = cl.getConstructor();
+ tests.add((Test) con.newInstance());
+ }
+ }
+ } else {
+ String conf = (args.length == 0) ? CONFIG : args[0];
+ throw new Exception("Cannot find configuration resource: "+conf);
+ }
+ }
+ public void run() {
+ Logger logger = Logger.getLogger(this.getClass());
+ for (int ix = 0; ix < configs.length(); ix++) {
+ JSONObject config = configs.getJSONObject(ix);
+ int succ = 0, fail = 0;
+ logger.info("*** Testing with configuration: "+config.getString("description"));
+ System.out.println("Testing with configuration: "+config.getString("description"));
+ for (Test t : tests) {
+ String nm = t.getName() + " ............................................................";
+ System.out.print(" Test: "+nm.substring(0, 60));
+ try {
+ List<String> msgs = t.run(config);
+ if (msgs == null || msgs.size() == 0) {
+ succ++;
+ System.out.println(" OK!");
+ } else {
+ fail++;
+ System.out.println(" Fail!");
+ System.out.flush();
+ for (String m : msgs) {
+ System.out.println(" "+m);
+ }
+ System.out.flush();
+ }
+ } catch (Exception x) {
+ fail++;
+ System.out.println(" Fail!");
+ }
+ }
+ System.out.println();
+ total_success += succ;
+ total_failure += fail;
+ }
+ String m = "Testing completed: "+total_success+" successful tests, "+total_failure+": failures.";
+ logger.info(m);
+ System.out.println(m);
+ }
+}
diff --git a/src/main/java/com/att/research/mdbc/tests/Test.java b/src/main/java/com/att/research/mdbc/tests/Test.java
new file mode 100755
index 0000000..0b8c0ab
--- /dev/null
+++ b/src/main/java/com/att/research/mdbc/tests/Test.java
@@ -0,0 +1,105 @@
+package com.att.research.mdbc.tests;
+
+import java.sql.Connection;
+import java.sql.DriverManager;
+import java.sql.ResultSet;
+import java.sql.SQLException;
+import java.sql.Statement;
+import java.util.List;
+import java.util.Properties;
+
+import org.json.JSONArray;
+import org.json.JSONObject;
+
+/**
+ * Provides the abstract interface for a Test, as well as some common functions.
+ *
+ * @author Robert Eby
+ */
+public abstract class Test {
+	/** Fully qualified class name of the MDBC proxy JDBC driver loaded by every test. */
+	public static final String MDBC_DRIVER = "com.att.research.mdbc.ProxyDriver";
+
+	/**
+	 * Each test derived from this class must implement this method,
+	 * which runs the test and produces a list of error messages.
+	 *
+	 * @param config a JSONObject describing the configuration to use for this run of the test
+	 * @return the list of messages. If the list is empty, the test is considered to have run
+	 *         successfully.
+	 */
+	abstract public List<String> run(JSONObject config);
+
+	/**
+	 * Returns this test's name: its class name with the package prefix removed.
+	 */
+	public String getName() {
+		String s = this.getClass().getName();
+		// Use replace() (literal substitution) rather than replaceAll(): replaceAll()
+		// treats its first argument as a regex, so the unescaped dots matched ANY
+		// character, not just the literal package separators.
+		return s.replace("com.att.research.mdbc.tests.", "");
+	}
+
+	/**
+	 * Builds the Properties for connection i: every top-level key of config, plus the
+	 * keys of the i-th element of the "connections" array merged in alongside them.
+	 *
+	 * @param config the configuration object for this test run
+	 * @param i index into the "connections" array of config
+	 * @return the merged Properties
+	 */
+	public Properties buildProperties(JSONObject config, int i) {
+		Properties p = new Properties();
+		for (String key : config.keySet()) {
+			if (key.equals("connections")) {
+				JSONArray ja = config.getJSONArray("connections");
+				JSONObject connection = ja.getJSONObject(i);
+				for (String key2 : connection.keySet()) {
+					p.setProperty(key2, connection.getString(key2));
+				}
+			} else {
+				p.setProperty(key, config.getString(key));
+			}
+		}
+		return p;
+	}
+
+	/**
+	 * Opens a JDBC connection through the MDBC driver using the "url" property of pr.
+	 *
+	 * @param pr the connection properties; must contain a "url" key
+	 * @return the new Connection
+	 * @throws SQLException if the connection cannot be established
+	 * @throws ClassNotFoundException if the MDBC driver class is not on the classpath
+	 */
+	public Connection getDBConnection(Properties pr) throws SQLException, ClassNotFoundException {
+		Class.forName(MDBC_DRIVER);
+		String url = pr.getProperty("url");
+		return DriverManager.getConnection(url, pr);
+	}
+
+	/** Throws an Exception if o is null. */
+	public void assertNotNull(Object o) throws Exception {
+		if (o == null)
+			throw new Exception("Object is null");
+	}
+
+	/**
+	 * Asserts that table tbl, read via conn, contains at least one row matching the
+	 * alternating key/value pairs in kv.
+	 *
+	 * @param connid connection index, used only in the failure message
+	 * @throws Exception if no matching row exists
+	 */
+	public void assertTableContains(int connid, Connection conn, String tbl, Object... kv) throws Exception {
+		ResultSet rs = getRow(conn, tbl, kv);
+		boolean throwit = !rs.next();
+		rs.close();
+		if (throwit) {
+			throw new Exception("Conn id "+connid+" Table "+tbl+" does not have a row with "+catkeys(kv));
+		}
+	}
+
+	/**
+	 * Asserts that table tbl does NOT contain a row matching the key/value pairs in kv.
+	 * NOTE(review): ANY exception thrown by assertTableContains (including a SQLException
+	 * for a missing table) is interpreted as "row not present" -- confirm that is intended.
+	 *
+	 * @param connid connection index, used only in the failure message
+	 * @throws Exception if a matching row exists
+	 */
+	public void assertTableDoesNotContain(int connid, Connection conn, String tbl, Object... kv) throws Exception {
+		boolean throwit = true;
+		try {
+			assertTableContains(connid, conn, tbl, kv);
+		} catch (Exception x) {
+			throwit = false;
+		}
+		if (throwit) {
+			throw new Exception("Conn id "+connid+" Table "+tbl+" does have a row with "+catkeys(kv));
+		}
+	}
+
+	/**
+	 * Runs "SELECT * FROM tbl WHERE <kv pairs ANDed>" and returns the ResultSet.
+	 * The caller is responsible for closing the returned ResultSet.
+	 * NOTE(review): the underlying Statement is never closed (closing the ResultSet
+	 * does not close it) -- tolerable for short-lived test runs, but a leak in general.
+	 * The SQL is built by string concatenation; acceptable only because the inputs are
+	 * fixed test fixtures, never untrusted data.
+	 *
+	 * @param kv alternating column names and values
+	 */
+	public ResultSet getRow(Connection conn, String tbl, Object... kv) throws SQLException {
+		Statement stmt = conn.createStatement();
+		StringBuilder sql = new StringBuilder("SELECT * FROM ")
+			.append(tbl)
+			.append(" WHERE ")
+			.append(catkeys(kv));
+		return stmt.executeQuery(sql.toString());
+	}
+
+	/**
+	 * Concatenates alternating key/value pairs into a SQL conjunction, e.g.
+	 * catkeys("ID_", "1", "PREF", 5) yields "ID_='1' AND PREF=5".
+	 * String values are single-quoted; all other values use toString().
+	 *
+	 * @param kv alternating column names and values
+	 * @return the WHERE-clause fragment
+	 */
+	public String catkeys(Object... kv) {
+		StringBuilder sql = new StringBuilder();
+		String pfx = "";
+		for (int i = 0; (i+1) < kv.length; i += 2) {
+			sql.append(pfx).append(kv[i]).append("=");
+			if (kv[i+1] instanceof String) {
+				sql.append("'").append(kv[i+1]).append("'");
+			} else {
+				sql.append(kv[i+1].toString());
+			}
+			pfx = " AND ";
+		}
+		return sql.toString();
+	}
+}
diff --git a/src/main/java/com/att/research/mdbc/tests/Test_Delete.java b/src/main/java/com/att/research/mdbc/tests/Test_Delete.java
new file mode 100755
index 0000000..8017cb3
--- /dev/null
+++ b/src/main/java/com/att/research/mdbc/tests/Test_Delete.java
@@ -0,0 +1,70 @@
+package com.att.research.mdbc.tests;
+
+import java.sql.Connection;
+import java.sql.Statement;
+import java.util.ArrayList;
+import java.util.List;
+
+import org.json.JSONArray;
+import org.json.JSONObject;
+
+/**
+ * Test that DELETEs work on the original DB, and are correctly copied to replica DBs.
+ *
+ * @author Robert Eby
+ */
+public class Test_Delete extends Test {
+	private final String TBL = "DELTABLE";
+
+	/**
+	 * Creates DELTABLE on every connection, inserts three rows via connection 0,
+	 * verifies the rows are visible on every replica, deletes one row, and verifies
+	 * the delete is visible on every replica.
+	 *
+	 * @param config the test configuration; must contain a "connections" array
+	 * @return error messages; empty on success
+	 */
+	@Override
+	public List<String> run(JSONObject config) {
+		List<String> msgs = new ArrayList<String>();
+		JSONArray connections = config.getJSONArray("connections");
+		Connection[] conn = new Connection[connections.length()];
+		Statement[] stmt = new Statement[conn.length];
+		try {
+			for (int i = 0; i < conn.length; i++) {
+				conn[i] = getDBConnection(buildProperties(config, i));
+				assertNotNull(conn[i]);
+				stmt[i] = conn[i].createStatement();
+				assertNotNull(stmt[i]);
+			}
+
+			try {
+				for (int i = 0; i < conn.length; i++) {
+					conn[i].setAutoCommit(true);
+					stmt[i].execute("CREATE TABLE IF NOT EXISTS DELTABLE(ID_ varchar(255), RANDOMTXT varchar(255), primary key (ID_))");
+				}
+				stmt[0].execute("INSERT INTO DELTABLE(ID_, RANDOMTXT) VALUES('1', 'Everything''s Negotiable Except Cutting Medicaid')");
+				stmt[0].execute("INSERT INTO DELTABLE(ID_, RANDOMTXT) VALUES('2', 'Can a Sideways Elevator Help Designers Build Taller Skyscrapers?')");
+				stmt[0].execute("INSERT INTO DELTABLE(ID_, RANDOMTXT) VALUES('3', 'Can a Bernie Sanders Ally Win the Maryland Governor''s Mansion?')");
+				for (int i = 0; i < conn.length; i++) {
+					assertTableContains(i, conn[i], TBL, "ID_", "1");
+					assertTableContains(i, conn[i], TBL, "ID_", "2");
+					assertTableContains(i, conn[i], TBL, "ID_", "3");
+				}
+
+				stmt[0].execute("DELETE FROM DELTABLE WHERE ID_ = '1'");
+				for (int i = 0; i < conn.length; i++) {
+					assertTableDoesNotContain(i, conn[i], TBL, "ID_", "1");
+					assertTableContains(i, conn[i], TBL, "ID_", "2");
+					assertTableContains(i, conn[i], TBL, "ID_", "3");
+				}
+			} catch (Exception e) {
+				msgs.add(e.toString());
+			} finally {
+				// Close each resource independently: previously a throw from one close()
+				// aborted the loop and leaked the remaining statements and every connection.
+				for (int i = 0; i < stmt.length; i++) {
+					try {
+						if (stmt[i] != null)
+							stmt[i].close();
+					} catch (Exception e) {
+						msgs.add(e.toString());
+					}
+				}
+				for (int i = 0; i < conn.length; i++) {
+					try {
+						if (conn[i] != null)
+							conn[i].close();
+					} catch (Exception e) {
+						msgs.add(e.toString());
+					}
+				}
+			}
+		} catch (Exception e) {
+			msgs.add(e.toString());
+		}
+		return msgs;
+	}
+}
diff --git a/src/main/java/com/att/research/mdbc/tests/Test_Insert.java b/src/main/java/com/att/research/mdbc/tests/Test_Insert.java
new file mode 100755
index 0000000..4c19dbd
--- /dev/null
+++ b/src/main/java/com/att/research/mdbc/tests/Test_Insert.java
@@ -0,0 +1,94 @@
+package com.att.research.mdbc.tests;
+
+import java.sql.Connection;
+import java.sql.ResultSet;
+import java.sql.Statement;
+import java.util.ArrayList;
+import java.util.List;
+
+import org.json.JSONArray;
+import org.json.JSONObject;
+
+/**
+ * Test that INSERTs work to the original DB, and are correctly copied to replica DBs.
+ *
+ * @author Robert Eby
+ */
+public class Test_Insert extends Test {
+ private final String PERSON = "PERSON";
+ private final String SONG = "SONG";
+
+ @Override
+ public List<String> run(JSONObject config) {
+ List<String> msgs = new ArrayList<String>();
+ JSONArray connections = config.getJSONArray("connections");
+ Connection[] conn = new Connection[connections.length()];
+ Statement[] stmt = new Statement[conn.length];
+ try {
+ for (int i = 0; i < conn.length; i++) {
+ conn[i] = getDBConnection(buildProperties(config, i));
+ assertNotNull(conn[i]);
+ stmt[i] = conn[i].createStatement();
+ assertNotNull(stmt[i]);
+ }
+
+ try {
+ for (int i = 0; i < conn.length; i++) {
+ conn[i].setAutoCommit(true);
+ stmt[i].execute("CREATE TABLE IF NOT EXISTS PERSON(ID_ varchar(255), NAME varchar(255), SSN varchar(255), primary key (ID_))");
+ }
+ stmt[0].execute("INSERT INTO PERSON(ID_, NAME, SSN) VALUES('1', 'Zaphod', '111-22-3333')");
+ stmt[0].execute("INSERT INTO PERSON(ID_, NAME, SSN) VALUES('2', 'Ripley', '444-55-6666')");
+ stmt[0].execute("INSERT INTO PERSON(ID_, NAME, SSN) VALUES('3', 'Spock', '777-88-9999')");
+ for (int i = 0; i < conn.length; i++) {
+ assertTableContains(i, conn[i], PERSON, "ID_", "1");
+ assertTableContains(i, conn[i], PERSON, "ID_", "2");
+ assertTableContains(i, conn[i], PERSON, "ID_", "3");
+ }
+
+ stmt[0].execute("UPDATE PERSON SET NAME = 'Jabba' WHERE ID_ = '2'");
+ for (int i = 0; i < conn.length; i++) {
+ ResultSet rs = getRow(conn[i], PERSON, "ID_", "2");
+ if (rs.next()) {
+ String v = rs.getString("NAME");
+ if (!v.equals("Jabba"))
+ throw new Exception("Table PERSON, row with ID_ = '2' was not updated.");
+ } else {
+ throw new Exception("Table PERSON does not have a row with ID_ = '2'");
+ }
+ rs.close();
+ }
+
+ for (int i = 0; i < conn.length; i++) {
+ stmt[i].execute("CREATE TABLE IF NOT EXISTS SONG(ID_ varchar(255), PREF int, ARIA varchar(255), primary key (ID_, PREF))");
+ }
+ stmt[0].execute("INSERT INTO SONG(ID_, PREF, ARIA) VALUES('1', 1, 'Nessun Dorma')");
+ stmt[0].execute("INSERT INTO SONG(ID_, PREF, ARIA) VALUES('2', 5, 'O mio Bambino Caro')");
+ stmt[0].execute("INSERT INTO SONG(ID_, PREF, ARIA) VALUES('2', 2, 'Sweet Georgia Brown')");
+ stmt[0].execute("INSERT INTO SONG(ID_, PREF, ARIA) VALUES('3', 77, 'Mud Flats Blues')");
+ stmt[0].execute("INSERT INTO SONG(ID_, PREF, ARIA) VALUES('3', 69, 'Me & Mr Jones')");
+ for (int i = 0; i < conn.length; i++) {
+ assertTableContains(i, conn[i], SONG, "ID_", "1", "PREF", 1);
+ assertTableContains(i, conn[i], SONG, "ID_", "2", "PREF", 5);
+ assertTableContains(i, conn[i], SONG, "ID_", "2", "PREF", 2);
+ assertTableContains(i, conn[i], SONG, "ID_", "3", "PREF", 77);
+ assertTableContains(i, conn[i], SONG, "ID_", "3", "PREF", 69);
+ }
+ } catch (Exception e) {
+ msgs.add(e.toString());
+ } finally {
+ for (int i = 0; i < stmt.length; i++) {
+ if (stmt[i] != null)
+ stmt[i].close();
+ }
+ for (int i = 0; i < conn.length; i++) {
+ if (conn[i] != null)
+ conn[i].close();
+ }
+ }
+ } catch (Exception e) {
+ msgs.add(e.toString());
+ }
+ return msgs;
+ }
+}
diff --git a/src/main/java/com/att/research/mdbc/tests/Test_Transactions.java b/src/main/java/com/att/research/mdbc/tests/Test_Transactions.java
new file mode 100755
index 0000000..1153c9b
--- /dev/null
+++ b/src/main/java/com/att/research/mdbc/tests/Test_Transactions.java
@@ -0,0 +1,74 @@
+package com.att.research.mdbc.tests;
+
+import java.sql.Connection;
+import java.sql.Statement;
+import java.util.ArrayList;
+import java.util.List;
+
+import org.json.JSONArray;
+import org.json.JSONObject;
+
+/**
+ * Test that transactions work between the original DB, and replica DBs.
+ *
+ * @author Robert Eby
+ */
+public class Test_Transactions extends Test {
+	private final String TBL = "TRANSTEST";
+
+	/**
+	 * With autocommit off, inserts rows via connection 0 and verifies they are
+	 * visible only on connection 0 until commit; after commit they must be visible
+	 * on every replica.
+	 *
+	 * @param config the test configuration; must contain a "connections" array
+	 * @return error messages; empty on success
+	 */
+	@Override
+	public List<String> run(JSONObject config) {
+		List<String> msgs = new ArrayList<String>();
+		JSONArray connections = config.getJSONArray("connections");
+		Connection[] conn = new Connection[connections.length()];
+		Statement[] stmt = new Statement[conn.length];
+		try {
+			for (int i = 0; i < conn.length; i++) {
+				conn[i] = getDBConnection(buildProperties(config, i));
+				assertNotNull(conn[i]);
+				stmt[i] = conn[i].createStatement();
+				assertNotNull(stmt[i]);
+			}
+
+			try {
+				for (int i = 0; i < conn.length; i++) {
+					// DDL runs with autocommit on; the transactional part below needs it off
+					conn[i].setAutoCommit(true);
+					stmt[i].execute("CREATE TABLE IF NOT EXISTS TRANSTEST(ID_ varchar(12), STUFF varchar(255), primary key (ID_))");
+					conn[i].setAutoCommit(false);
+				}
+				stmt[0].execute("INSERT INTO TRANSTEST(ID_, STUFF) VALUES('1', 'CenturyLink Now Under Fire on All Sides For Fraudulent Billing')");
+				stmt[0].execute("INSERT INTO TRANSTEST(ID_, STUFF) VALUES('2', 'Netflix Now in Half of All Broadband Households, Study Says')");
+				stmt[0].execute("INSERT INTO TRANSTEST(ID_, STUFF) VALUES('3', 'Private Data Of 6 Million Verizon Customers Exposed')");
+				assertTableContains(0, conn[0], TBL, "ID_", "1");
+				assertTableContains(0, conn[0], TBL, "ID_", "2");
+				assertTableContains(0, conn[0], TBL, "ID_", "3");
+				for (int i = 1; i < conn.length; i++) {
+					assertTableDoesNotContain(i, conn[i], TBL, "ID_", "1");
+					assertTableDoesNotContain(i, conn[i], TBL, "ID_", "2");
+					assertTableDoesNotContain(i, conn[i], TBL, "ID_", "3");
+				}
+				conn[0].commit();
+				for (int i = 0; i < conn.length; i++) {
+					assertTableContains(i, conn[i], TBL, "ID_", "1");
+					assertTableContains(i, conn[i], TBL, "ID_", "2");
+					assertTableContains(i, conn[i], TBL, "ID_", "3");
+				}
+
+			} catch (Exception e) {
+				msgs.add(e.toString());
+			} finally {
+				// Close each resource independently: previously a throw from one close()
+				// aborted the loop and leaked the remaining statements and every connection.
+				for (int i = 0; i < stmt.length; i++) {
+					try {
+						if (stmt[i] != null)
+							stmt[i].close();
+					} catch (Exception e) {
+						msgs.add(e.toString());
+					}
+				}
+				for (int i = 0; i < conn.length; i++) {
+					try {
+						if (conn[i] != null)
+							conn[i].close();
+					} catch (Exception e) {
+						msgs.add(e.toString());
+					}
+				}
+			}
+		} catch (Exception e) {
+			msgs.add(e.toString());
+		}
+		return msgs;
+	}
+}
diff --git a/src/main/java/com/att/research/mdbc/tests/package-info.java b/src/main/java/com/att/research/mdbc/tests/package-info.java
new file mode 100755
index 0000000..ee993db
--- /dev/null
+++ b/src/main/java/com/att/research/mdbc/tests/package-info.java
@@ -0,0 +1,165 @@
+/**
+ * <p>
+ * This package provides a testing harness to test the various features of MDBC against
+ * multiple combinations of database and MUSIC mixins. The configurations (consisting of
+ * database information and mixin combinations) to test, as well as the specific tests to
+ * run are all defined in the configuration file <code>tests.json</code>.
+ * </p>
+ * <p>
+ * To run the tests against all the configurations specified in /tests.json, do the following:
+ * </p>
+ * <pre>
+ * java com.att.research.mdbc.tests.MAIN [ configfile ]
+ * </pre>
+ * <p>
+ * It is assumed that a copy of Cassandra is running locally on port 9042,
+ * that a copy of H2 server is is running locally on port 8082,
+ * and that a copy of MySQL (or MariaDB) is running locally on port 3306.
+ * These can be adjusted by editing the /tests.json file.
+ * </p>
+ * <p>
+ * When building a copy of MDBC for production use, this package can be safely removed.
+ * </p>
+ * <p>
+ * The initial copy of <i>tests.json</i> is as follows:
+ * </p>
+ * <pre>
+ * {
+ * "tests": [
+ * "com.att.research.mdbc.tests.Test_Insert",
+ * "com.att.research.mdbc.tests.Test_Delete",
+ * "com.att.research.mdbc.tests.Test_Transactions"
+ * ],
+ * "configs": [
+ * {
+ * "description": "H2 with Cassandra with two connections",
+ * "MDBC_DB_MIXIN": "h2",
+ * "MDBC_MUSIC_MIXIN": "cassandra",
+ * "replicas": "0,1",
+ * "music_keyspace": "mdbctest1",
+ * "music_address": "localhost",
+ * "music_rfactor": "1",
+ * "connections": [
+ * {
+ * "name": "Connection 0",
+ * "url": "jdbc:mdbc:mem:db0",
+ * "user": "",
+ * "password": "",
+ * "myid": "0"
+ * },
+ * {
+ * "name": "Connection 1",
+ * "url": "jdbc:mdbc:mem:db1",
+ * "user": "",
+ * "password": "",
+ * "myid": "1"
+ * }
+ * ]
+ * },
+ * {
+ * "description": "H2 with Cassandra2 with three connections",
+ * "MDBC_DB_MIXIN": "h2",
+ * "MDBC_MUSIC_MIXIN": "cassandra2",
+ * "replicas": "0,1,2",
+ * "music_keyspace": "mdbctest2",
+ * "music_address": "localhost",
+ * "music_rfactor": "1",
+ * "user": "",
+ * "password": "",
+ * "connections": [
+ * {
+ * "name": "Connection 0",
+ * "url": "jdbc:mdbc:mem:db0",
+ * "myid": "0"
+ * },
+ * {
+ * "name": "Connection 1",
+ * "url": "jdbc:mdbc:mem:db1",
+ * "myid": "1"
+ * },
+ * {
+ * "name": "Connection 2",
+ * "url": "jdbc:mdbc:mem:db2",
+ * "myid": "2"
+ * }
+ * ]
+ * },
+ * {
+ * "description": "H2 Server with Cassandra2 with two connections",
+ * "MDBC_DB_MIXIN": "h2server",
+ * "MDBC_MUSIC_MIXIN": "cassandra2",
+ * "replicas": "0,1",
+ * "music_keyspace": "mdbctest3",
+ * "music_address": "localhost",
+ * "music_rfactor": "1",
+ * "connections": [
+ * {
+ * "name": "Connection 0",
+ * "url": "jdbc:mdbc:tcp://localhost/mdbc0",
+ * "user": "",
+ * "password": "",
+ * "myid": "0"
+ * },
+ * {
+ * "name": "Connection 1",
+ * "url": "jdbc:mdbc:tcp://localhost/mdbc1",
+ * "user": "",
+ * "password": "",
+ * "myid": "1"
+ * }
+ * ]
+ * },
+ * {
+ * "description": "MySQL with Cassandra2 with two connections",
+ * "MDBC_DB_MIXIN": "mysql",
+ * "MDBC_MUSIC_MIXIN": "cassandra2",
+ * "replicas": "0,1,2",
+ * "music_keyspace": "mdbctest4",
+ * "music_address": "localhost",
+ * "music_rfactor": "1",
+ * "user": "root",
+ * "password": "abc123",
+ * "connections": [
+ * {
+ * "name": "Connection 0",
+ * "url": "jdbc:mdbc://127.0.0.1:3306/mdbc",
+ * "myid": "0"
+ * },
+ * {
+ * "name": "Connection 1",
+ * "url": "jdbc:mdbc://127.0.0.1:3306/mdbc2",
+ * "myid": "1"
+ * }
+ * ]
+ * },
+ * {
+ * "description": "H2 (DB #1) and MySQL (DB #2) with Cassandra2",
+ * "MDBC_MUSIC_MIXIN": "cassandra2",
+ * "replicas": "0,1",
+ * "music_keyspace": "mdbctest5",
+ * "music_address": "localhost",
+ * "music_rfactor": "1",
+ * "connections": [
+ * {
+ * "name": "Connection 0",
+ * "MDBC_DB_MIXIN": "h2",
+ * "url": "jdbc:mdbc:mem:db9",
+ * "user": "",
+ * "password": "",
+ * "myid": "0"
+ * },
+ * {
+ * "name": "Connection 1",
+ * "MDBC_DB_MIXIN": "mysql",
+ * "url": "jdbc:mdbc://127.0.0.1:3306/mdbc3",
+ * "user": "root",
+ * "password": "abc123",
+ * "myid": "1"
+ * }
+ * ]
+ * }
+ * ]
+ * }
+ * </pre>
+ */
+package com.att.research.mdbc.tests;
diff --git a/src/main/java/com/att/research/mdbc/tools/CreateNodeConfigurations.java b/src/main/java/com/att/research/mdbc/tools/CreateNodeConfigurations.java
new file mode 100644
index 0000000..555b863
--- /dev/null
+++ b/src/main/java/com/att/research/mdbc/tools/CreateNodeConfigurations.java
@@ -0,0 +1,71 @@
+package com.att.research.mdbc.tools;
+
+import com.att.research.exceptions.MDBCServiceException;
+import com.att.research.logging.EELFLoggerDelegate;
+import com.att.research.mdbc.configurations.NodeConfiguration;
+import com.att.research.mdbc.configurations.TablesConfiguration;
+import com.beust.jcommander.JCommander;
+import com.beust.jcommander.Parameter;
+import org.onap.music.main.MusicPureCassaCore;
+
+import java.io.FileNotFoundException;
+import java.util.List;
+
+public class CreateNodeConfigurations {
+ public static final EELFLoggerDelegate LOG = EELFLoggerDelegate.getLogger(CreateNodeConfigurations.class);
+
+ private String tables;
+ @Parameter(names = { "-t", "--table-configurations" }, required = true,
+ description = "This is the input file that is going to have the configuration for all the tables and partitions")
+ private String tableConfigurationsFile;
+ @Parameter(names = { "-b", "--basename" }, required = true,
+ description = "This base name for all the outputs files that are going to be created")
+ private String basename;
+ @Parameter(names = { "-o", "--output-dir" }, required = true,
+ description = "This is the output directory that is going to contain all the configuration file to be generated")
+ private String outputDirectory;
+ @Parameter(names = { "-h", "-help", "--help" }, help = true,
+ description = "Print the help message")
+ private boolean help = false;
+
+ private TablesConfiguration inputConfig;
+
+ public CreateNodeConfigurations(){}
+
+
+ public void readInput(){
+ try {
+ inputConfig = TablesConfiguration.readJsonFromFile(tableConfigurationsFile);
+ } catch (FileNotFoundException e) {
+ LOG.error("Input file is invalid or not found");
+ System.exit(1);
+ }
+ }
+
+ public void createAndSaveNodeConfigurations(){
+ List<NodeConfiguration> nodes = null;
+ try {
+ nodes = inputConfig.initializeAndCreateNodeConfigurations();
+ } catch (MDBCServiceException e) {
+ e.printStackTrace();
+ }
+ int counter = 0;
+ for(NodeConfiguration nodeConfig : nodes){
+ String name = (nodeConfig.nodeName==null||nodeConfig.nodeName.isEmpty())?Integer.toString(counter++): nodeConfig.nodeName;
+ nodeConfig.saveToFile(outputDirectory+"/"+basename+"-"+name+".json");
+ }
+ }
+
+ public static void main(String[] args) {
+ CreateNodeConfigurations configs = new CreateNodeConfigurations();
+ @SuppressWarnings("deprecation")
+ JCommander jc = new JCommander(configs, args);
+ if (configs.help) {
+ jc.usage();
+ System.exit(1);
+ return;
+ }
+ configs.readInput();
+ configs.createAndSaveNodeConfigurations();
+ }
+}
diff --git a/src/main/java/com/att/research/mdbc/tools/CreatePartition.java b/src/main/java/com/att/research/mdbc/tools/CreatePartition.java
new file mode 100644
index 0000000..09524cb
--- /dev/null
+++ b/src/main/java/com/att/research/mdbc/tools/CreatePartition.java
@@ -0,0 +1,66 @@
+package com.att.research.mdbc.tools;
+
+import com.att.research.logging.EELFLoggerDelegate;
+import com.att.research.mdbc.DatabasePartition;
+import com.att.research.mdbc.MDBCUtils;
+import com.att.research.mdbc.Range;
+import com.att.research.mdbc.configurations.NodeConfiguration;
+import com.beust.jcommander.JCommander;
+import com.beust.jcommander.Parameter;
+
+import java.io.IOException;
+import java.util.HashSet;
+import java.util.Set;
+
+public class CreatePartition {
+	public static final EELFLoggerDelegate LOG = EELFLoggerDelegate.getLogger(CreatePartition.class);
+
+	@Parameter(names = { "-t", "--tables" }, required = true,
+			description = "This is the tables that are assigned to this ")
+	private String tables;
+	@Parameter(names = { "-f", "--file" }, required = true,
+			description = "This is the output file that is going to have the configuration for the ranges")
+	private String file;
+	@Parameter(names = { "-i", "--tit-index" }, required = true,
+			description = "Index in the TiT Table")
+	private String titIndex;
+	@Parameter(names = { "-n", "--tit-table-name" }, required = true,
+			description = "Tit Table name")
+	private String titTable;
+	@Parameter(names = { "-r", "--redorecords-table-name" }, required = true,
+			description = "Redo Records Table name")
+	private String rrTable;
+	@Parameter(names = { "-h", "-help", "--help" }, help = true,
+			description = "Print the help message")
+	private boolean help = false;
+
+	// Node configuration assembled from the parsed command-line fields
+	NodeConfiguration config;
+
+	/** Default constructor; the fields above are populated by JCommander. */
+	public CreatePartition(){
+	}
+
+	/** Builds the NodeConfiguration from the parsed command-line arguments. */
+	public void convert(){
+		config = new NodeConfiguration(tables,titIndex,titTable,partitionId,"test","",rrTable);
+	}
+
+	/** Writes the configuration to the file named by -f/--file. */
+	public void saveToFile(){
+		config.saveToFile(file);
+	}
+
+	public static void main(String[] args) {
+		final CreatePartition app = new CreatePartition();
+		@SuppressWarnings("deprecation")
+		final JCommander commander = new JCommander(app, args);
+		if (app.help) {
+			commander.usage();
+			System.exit(1);
+			return;
+		}
+		app.convert();
+		app.saveToFile();
+	}
+}