diff --git a/.gitignore b/.gitignore index acb9b8d91..fad40fcb3 100644 --- a/.gitignore +++ b/.gitignore @@ -16,7 +16,6 @@ local.properties .classpath .vscode/ .settings/ -.gradle/ .loadpath # External tool builders diff --git a/build.gradle b/build.gradle index 7a402dfbc..55792f0a3 100644 --- a/build.gradle +++ b/build.gradle @@ -68,7 +68,7 @@ repositories { dependencies { compile 'com.microsoft.azure:azure-keyvault:0.9.7', 'com.microsoft.azure:adal4j:1.1.3' - + testCompile 'junit:junit:4.12', 'org.junit.platform:junit-platform-console:1.0.0-M3', 'org.junit.platform:junit-platform-commons:1.0.0-M3', diff --git a/pom.xml b/pom.xml index 62b790dd7..753104ef2 100644 --- a/pom.xml +++ b/pom.xml @@ -118,7 +118,7 @@ com.zaxxer HikariCP - 2.6.1 + 2.6.0 test diff --git a/src/main/java/com/microsoft/sqlserver/jdbc/ParsedSQLMetadata.java b/src/main/java/com/microsoft/sqlserver/jdbc/ParsedSQLMetadata.java deleted file mode 100644 index 19c34ebec..000000000 --- a/src/main/java/com/microsoft/sqlserver/jdbc/ParsedSQLMetadata.java +++ /dev/null @@ -1,28 +0,0 @@ -/* - * Microsoft JDBC Driver for SQL Server - * - * Copyright(c) Microsoft Corporation All rights reserved. - * - * This program is made available under the terms of the MIT License. See the LICENSE file in the project root for more information. - */ - -package com.microsoft.sqlserver.jdbc; - -/** - * Used for caching of meta data from parsed SQL text. - */ -final class ParsedSQLCacheItem { - /** The SQL text AFTER processing. */ - String processedSQL; - int parameterCount; - String procedureName; - boolean bReturnValueSyntax; - - ParsedSQLCacheItem(String processedSQL, int parameterCount, String procedureName, boolean bReturnValueSyntax) { - this.processedSQL = processedSQL; - this.parameterCount = parameterCount; - this.procedureName = procedureName; - this.bReturnValueSyntax = bReturnValueSyntax; - } -} - diff --git a/src/main/java/com/microsoft/sqlserver/jdbc/SQLServerConnection.java b/src/main/java/com/microsoft/sqlserver/jdbc/SQLServerConnection.java index e550cb68e..566c42b75 100644 --- a/src/main/java/com/microsoft/sqlserver/jdbc/SQLServerConnection.java +++ b/src/main/java/com/microsoft/sqlserver/jdbc/SQLServerConnection.java @@ -56,10 +56,6 @@ import org.ietf.jgss.GSSCredential; import org.ietf.jgss.GSSException; -import mssql.googlecode.concurrentlinkedhashmap.ConcurrentLinkedHashMap; -import mssql.googlecode.concurrentlinkedhashmap.ConcurrentLinkedHashMap.Builder; -import mssql.googlecode.concurrentlinkedhashmap.EvictionListener; - /** * SQLServerConnection implements a JDBC connection to SQL Server. SQLServerConnections support JDBC connection pooling and may be either physical * JDBC connections or logical JDBC connections. @@ -89,20 +85,22 @@ public class SQLServerConnection implements ISQLServerConnection { // Threasholds related to when prepared statement handles are cleaned-up. 1 == immediately. /** - * The default for the prepared statement clean-up action threshold (i.e. when sp_unprepare is called). + * The initial default on application start-up for the prepared statement clean-up action threshold (i.e. when sp_unprepare is called). */ - static final int DEFAULT_SERVER_PREPARED_STATEMENT_DISCARD_THRESHOLD = 10; // Used to set the initial default, can be changed later. + static final private int INITIAL_DEFAULT_SERVER_PREPARED_STATEMENT_DISCARD_THRESHOLD = 10; // Used to set the initial default, can be changed later. 
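To make the intent of these process-wide defaults concrete, here is a minimal usage sketch (illustrative only, not part of the patch). It relies solely on the SQLServerConnection methods introduced or kept by this change (setDefaultServerPreparedStatementDiscardThreshold, setDefaultEnablePrepareOnFirstPreparedStatementCall, setServerPreparedStatementDiscardThreshold); the example class name, connection URL, credentials and numeric values are placeholders.

    // Illustrative only: class name, URL and numeric values are placeholders.
    import java.sql.Connection;
    import java.sql.DriverManager;
    import java.sql.PreparedStatement;
    import java.sql.SQLException;

    import com.microsoft.sqlserver.jdbc.SQLServerConnection;

    public class PreparedStatementTuningExample {
        public static void main(String[] args) throws SQLException {
            // Process-wide defaults; every connection opened afterwards starts from these.
            // A threshold <= 1 issues sp_unprepare immediately on statement close,
            // larger values batch the un-prepare calls together.
            SQLServerConnection.setDefaultServerPreparedStatementDiscardThreshold(25);
            // true skips the initial sp_executesql call and prepares on the first execution.
            SQLServerConnection.setDefaultEnablePrepareOnFirstPreparedStatementCall(true);

            String url = "jdbc:sqlserver://localhost:1433;databaseName=master;"
                    + "user=sa;password=<placeholder>";
            try (Connection con = DriverManager.getConnection(url)) {
                // The driver hands back a SQLServerConnection, so a cast suffices here;
                // con.unwrap(SQLServerConnection.class) is the more defensive route.
                SQLServerConnection sqlCon = (SQLServerConnection) con;
                // Per-connection override: un-prepare immediately on this connection only.
                sqlCon.setServerPreparedStatementDiscardThreshold(1);

                try (PreparedStatement ps = con.prepareStatement("SELECT ?")) {
                    ps.setInt(1, 1);
                    ps.executeQuery().close();
                }
            }
        }
    }
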
+ static private int defaultServerPreparedStatementDiscardThreshold = -1; // Current default for new connections private int serverPreparedStatementDiscardThreshold = -1; // Current limit for this particular connection. /** - * The default for if prepared statements should execute sp_executesql before following the prepare, unprepare pattern. + * The initial default on application start-up for if prepared statements should execute sp_executesql before following the prepare, unprepare pattern. */ - static final boolean DEFAULT_ENABLE_PREPARE_ON_FIRST_PREPARED_STATEMENT_CALL = false; // Used to set the initial default, can be changed later. false == use sp_executesql -> sp_prepexec -> sp_execute -> batched -> sp_unprepare pattern, true == skip sp_executesql part of pattern. + static final private boolean INITIAL_DEFAULT_ENABLE_PREPARE_ON_FIRST_PREPARED_STATEMENT_CALL = false; // Used to set the initial default, can be changed later. false == use sp_executesql -> sp_prepexec -> sp_execute -> batched -> sp_unprepare pattern, true == skip sp_executesql part of pattern. + static private Boolean defaultEnablePrepareOnFirstPreparedStatementCall = null; // Current default for new connections private Boolean enablePrepareOnFirstPreparedStatementCall = null; // Current limit for this particular connection. // Handle the actual queue of discarded prepared statements. - private ConcurrentLinkedQueue discardedPreparedStatementHandles = new ConcurrentLinkedQueue(); - private AtomicInteger discardedPreparedStatementHandleCount = new AtomicInteger(0); + private ConcurrentLinkedQueue discardedPreparedStatementHandles = new ConcurrentLinkedQueue(); + private AtomicInteger discardedPreparedStatementHandleQueueCount = new AtomicInteger(0); private boolean fedAuthRequiredByUser = false; private boolean fedAuthRequiredPreLoginResponse = false; @@ -117,189 +115,6 @@ public class SQLServerConnection implements ISQLServerConnection { private SqlFedAuthToken fedAuthToken = null; - static class Sha1HashKey { - private byte[] bytes; - - Sha1HashKey(String sql, String parametersDefinition) { - this(String.format("%s%s", sql, parametersDefinition)); - } - - Sha1HashKey(String s) { - bytes = getSha1Digest().digest(s.getBytes()); - } - - public boolean equals(Object obj) { - if (!(obj instanceof Sha1HashKey)) - return false; - - return java.util.Arrays.equals(bytes, ((Sha1HashKey)obj).bytes); - } - - public int hashCode() { - return java.util.Arrays.hashCode(bytes); - } - - private java.security.MessageDigest getSha1Digest() { - try { - return java.security.MessageDigest.getInstance("SHA-1"); - } - catch (final java.security.NoSuchAlgorithmException e) { - // This is not theoretically possible, but we're forced to catch it anyway - throw new RuntimeException(e); - } - } - } - - /** - * Used to keep track of an individual prepared statement handle. - */ - class PreparedStatementHandle { - private int handle = 0; - private final AtomicInteger handleRefCount = new AtomicInteger(); - private boolean isDirectSql; - private volatile boolean evictedFromCache; - private volatile boolean explicitlyDiscarded; - private Sha1HashKey key; - - PreparedStatementHandle(Sha1HashKey key, int handle, boolean isDirectSql, boolean isEvictedFromCache) { - this.key = key; - this.handle = handle; - this.isDirectSql = isDirectSql; - this.setIsEvictedFromCache(isEvictedFromCache); - handleRefCount.set(1); - } - - /** Has the statement been evicted from the statement handle cache. 
*/ - private boolean isEvictedFromCache() { - return evictedFromCache; - } - - /** Specify whether the statement been evicted from the statement handle cache. */ - private void setIsEvictedFromCache(boolean isEvictedFromCache) { - this.evictedFromCache = isEvictedFromCache; - } - - /** Specify that this statement has been explicitly discarded from being used by the cache. */ - void setIsExplicitlyDiscarded() { - this.explicitlyDiscarded = true; - - evictCachedPreparedStatementHandle(this); - } - - /** Has the statement been explicitly discarded. */ - private boolean isExplicitlyDiscarded() { - return explicitlyDiscarded; - } - - /** Get the actual handle. */ - int getHandle() { - return handle; - } - - /** Get the cache key. */ - Sha1HashKey getKey() { - return key; - } - - boolean isDirectSql() { - return isDirectSql; - } - - /** Make sure handle cannot be re-used. - * - * @return - * false: Handle could not be discarded, it is in use. - * true: Handle was successfully put on path for discarding. - */ - private boolean tryDiscardHandle() { - return handleRefCount.compareAndSet(0, -999); - } - - /** Returns whether this statement has been discarded and can no longer be re-used. */ - private boolean isDiscarded() { - return 0 > handleRefCount.intValue(); - } - - /** Adds a new reference to this handle, i.e. re-using it. - * - * @return - * false: Reference could not be added, statement has been discarded or does not have a handle associated with it. - * true: Reference was successfully added. - */ - boolean tryAddReference() { - if (isDiscarded() || isExplicitlyDiscarded()) - return false; - else { - int refCount = handleRefCount.incrementAndGet(); - return refCount > 0; - } - } - - /** Remove a reference from this handle*/ - void removeReference() { - handleRefCount.decrementAndGet(); - } - } - - /** Size of the parsed SQL-text metadata cache */ - static final private int PARSED_SQL_CACHE_SIZE = 100; - - /** Cache of parsed SQL meta data */ - static private ConcurrentLinkedHashMap parsedSQLCache; - - static { - parsedSQLCache = new Builder() - .maximumWeightedCapacity(PARSED_SQL_CACHE_SIZE) - .build(); - } - - /** Get prepared statement cache entry if exists, if not parse and create a new one */ - static ParsedSQLCacheItem getCachedParsedSQL(Sha1HashKey key) { - return parsedSQLCache.get(key); - } - - /** Parse and create a information about parsed SQL text */ - static ParsedSQLCacheItem parseAndCacheSQL(Sha1HashKey key, String sql) throws SQLServerException { - JDBCSyntaxTranslator translator = new JDBCSyntaxTranslator(); - - String parsedSql = translator.translate(sql); - String procName = translator.getProcedureName(); // may return null - boolean returnValueSyntax = translator.hasReturnValueSyntax(); - int paramCount = countParams(parsedSql); - - ParsedSQLCacheItem cacheItem = new ParsedSQLCacheItem (parsedSql, paramCount, procName, returnValueSyntax); - parsedSQLCache.putIfAbsent(key, cacheItem); - return cacheItem; - } - - /** Size of the prepared statement handle cache */ - private int statementPoolingCacheSize = 10; - - /** Default size for prepared statement caches */ - static final int DEFAULT_STATEMENT_POOLING_CACHE_SIZE = 10; - /** Cache of prepared statement handles */ - private ConcurrentLinkedHashMap preparedStatementHandleCache; - /** Cache of prepared statement parameter metadata */ - private ConcurrentLinkedHashMap parameterMetadataCache; - - /** - * Find statement parameters. - * - * @param sql - * SQL text to parse for number of parameters to intialize. 
- */ - private static int countParams(String sql) { - int nParams = 0; - - // Figure out the expected number of parameters by counting the - // parameter placeholders in the SQL string. - int offset = -1; - while ((offset = ParameterUtils.scanSQLForChar('?', sql, ++offset)) < sql.length()) - ++nParams; - - return nParams; - } - SqlFedAuthToken getAuthenticationResult() { return fedAuthToken; } @@ -907,18 +722,6 @@ final boolean attachConnId() { connectionlogger.severe(message); throw new UnsupportedOperationException(message); } - - // Caching turned on? - if (0 < this.getStatementPoolingCacheSize()) { - preparedStatementHandleCache = new Builder() - .maximumWeightedCapacity(getStatementPoolingCacheSize()) - .listener(new PreparedStatementCacheEvictionListener()) - .build(); - - parameterMetadataCache = new Builder() - .maximumWeightedCapacity(getStatementPoolingCacheSize()) - .build(); - } } void setFailoverPartnerServerProvided(String partner) { @@ -1395,28 +1198,14 @@ Connection connectInternal(Properties propsIn, sendTimeAsDatetime = booleanPropertyOn(sPropKey, sPropValue); - // Must be set before DISABLE_STATEMENT_POOLING - sPropKey = SQLServerDriverIntProperty.STATEMENT_POOLING_CACHE_SIZE.toString(); - if (activeConnectionProperties.getProperty(sPropKey) != null && activeConnectionProperties.getProperty(sPropKey).length() > 0) { - try { - int n = (new Integer(activeConnectionProperties.getProperty(sPropKey))).intValue(); - this.setStatementPoolingCacheSize(n); - } - catch (NumberFormatException e) { - MessageFormat form = new MessageFormat(SQLServerException.getErrString("R_statementPoolingCacheSize")); - Object[] msgArgs = {activeConnectionProperties.getProperty(sPropKey)}; - SQLServerException.makeFromDriverError(this, this, form.format(msgArgs), null, false); - } - } - - // Must be set after STATEMENT_POOLING_CACHE_SIZE sPropKey = SQLServerDriverBooleanProperty.DISABLE_STATEMENT_POOLING.toString(); sPropValue = activeConnectionProperties.getProperty(sPropKey); - if (null != sPropValue) { - // If disabled set cache size to 0 if disabled. - if(booleanPropertyOn(sPropKey, sPropValue)) - this.setStatementPoolingCacheSize(0); - } + if (sPropValue != null) // if the user does not set it, it is ok but if set the value can only be true + if (false == booleanPropertyOn(sPropKey, sPropValue)) { + MessageFormat form = new MessageFormat(SQLServerException.getErrString("R_invaliddisableStatementPooling")); + Object[] msgArgs = {new String(sPropValue)}; + SQLServerException.makeFromDriverError(this, this, form.format(msgArgs), null, false); + } sPropKey = SQLServerDriverBooleanProperty.INTEGRATED_SECURITY.toString(); sPropValue = activeConnectionProperties.getProperty(sPropKey); @@ -2971,13 +2760,6 @@ public void close() throws SQLServerException { tdsChannel.close(); } - // Invalidate statement caches. - if(null != preparedStatementHandleCache) - preparedStatementHandleCache.clear(); - - if(null != parameterMetadataCache) - parameterMetadataCache.clear(); - // Clean-up queue etc. related to batching of prepared statement discard actions (sp_unprepare). cleanupPreparedStatementDiscardActions(); @@ -5519,24 +5301,38 @@ public static synchronized void setColumnEncryptionKeyCacheTtl(int columnEncrypt static synchronized long getColumnEncryptionKeyCacheTtl() { return columnEncryptionKeyCacheTtl; } + + /** + * Used to keep track of an individual handle ready for un-prepare. 
+ */ + private final class PreparedStatementDiscardItem { + + int handle; + boolean directSql; + + PreparedStatementDiscardItem(int handle, boolean directSql) { + this.handle = handle; + this.directSql = directSql; + } + } + /** * Enqueue a discarded prepared statement handle to be clean-up on the server. * - * @param statementHandle - * The prepared statement handle that should be scheduled for unprepare. + * @param handle + * The prepared statement handle + * @param directSql + * Whether the statement handle is direct SQL (true) or a cursor (false) */ - final void enqueueUnprepareStatementHandle(PreparedStatementHandle statementHandle) { - if(null == statementHandle) - return; - - if (loggerExternal.isLoggable(java.util.logging.Level.FINER)) - loggerExternal.finer(this + ": Adding PreparedHandle to queue for un-prepare:" + statementHandle.getHandle()); + final void enqueuePreparedStatementDiscardItem(int handle, boolean directSql) { + if (this.getConnectionLogger().isLoggable(java.util.logging.Level.FINER)) + this.getConnectionLogger().finer(this + ": Adding PreparedHandle to queue for un-prepare:" + handle); // Add the new handle to the discarding queue and find out current # enqueued. - this.discardedPreparedStatementHandles.add(statementHandle); - this.discardedPreparedStatementHandleCount.incrementAndGet(); + this.discardedPreparedStatementHandles.add(new PreparedStatementDiscardItem(handle, directSql)); + this.discardedPreparedStatementHandleQueueCount.incrementAndGet(); } @@ -5546,22 +5342,59 @@ final void enqueueUnprepareStatementHandle(PreparedStatementHandle statementHand * @return Returns the current value per the description. */ public int getDiscardedServerPreparedStatementCount() { - return this.discardedPreparedStatementHandleCount.get(); + return this.discardedPreparedStatementHandleQueueCount.get(); } /** * Forces the un-prepare requests for any outstanding discarded prepared statements to be executed. */ - public void closeUnreferencedPreparedStatementHandles() { - this.unprepareUnreferencedPreparedStatementHandles(true); + public void closeDiscardedServerPreparedStatements() { + this.handlePreparedStatementDiscardActions(true); } /** * Remove references to outstanding un-prepare requests. Should be run when connection is closed. */ private final void cleanupPreparedStatementDiscardActions() { - discardedPreparedStatementHandles.clear(); - discardedPreparedStatementHandleCount.set(0); + this.discardedPreparedStatementHandles.clear(); + this.discardedPreparedStatementHandleQueueCount.set(0); + } + + /** + * The initial default on application start-up for if prepared statements should execute sp_executesql before following the prepare, unprepare pattern. + * + * @return Returns the current setting per the description. + */ + static public boolean getInitialDefaultEnablePrepareOnFirstPreparedStatementCall() { + return INITIAL_DEFAULT_ENABLE_PREPARE_ON_FIRST_PREPARED_STATEMENT_CALL; + } + + /** + * Returns the default behavior for new connection instances. If false the first execution will call sp_executesql and not prepare + * a statement, once the second execution happens it will call sp_prepexec and actually setup a prepared statement handle. Following + * executions will call sp_execute. This relieves the need for sp_unprepare on prepared statement close if the statement is only + * executed once. Initial setting for this option is available in INITIAL_DEFAULT_ENABLE_PREPARE_ON_FIRST_PREPARED_STATEMENT_CALL. 
+ * + * @return Returns the current setting per the description. + */ + static public boolean getDefaultEnablePrepareOnFirstPreparedStatementCall() { + if(null == defaultEnablePrepareOnFirstPreparedStatementCall) + return getInitialDefaultEnablePrepareOnFirstPreparedStatementCall(); + else + return defaultEnablePrepareOnFirstPreparedStatementCall; + } + + /** + * Specifies the default behavior for new connection instances. If value is false the first execution will call sp_executesql and not prepare + * a statement, once the second execution happens it will call sp_prepexec and actually setup a prepared statement handle. Following + * executions will call sp_execute. This relieves the need for sp_unprepare on prepared statement close if the statement is only + * executed once. Initial setting for this option is available in INITIAL_DEFAULT_ENABLE_PREPARE_ON_FIRST_PREPARED_STATEMENT_CALL. + * + * @param value + * Changes the setting per the description. + */ + static public void setDefaultEnablePrepareOnFirstPreparedStatementCall(boolean value) { + defaultEnablePrepareOnFirstPreparedStatementCall = value; } /** @@ -5574,7 +5407,7 @@ private final void cleanupPreparedStatementDiscardActions() { */ public boolean getEnablePrepareOnFirstPreparedStatementCall() { if(null == this.enablePrepareOnFirstPreparedStatementCall) - return DEFAULT_ENABLE_PREPARE_ON_FIRST_PREPARED_STATEMENT_CALL; + return getDefaultEnablePrepareOnFirstPreparedStatementCall(); else return this.enablePrepareOnFirstPreparedStatementCall; } @@ -5593,201 +5426,130 @@ public void setEnablePrepareOnFirstPreparedStatementCall(boolean value) { } /** - * Returns the behavior for a specific connection instance. This setting controls how many outstanding prepared statement discard actions - * (sp_unprepare) can be outstanding per connection before a call to clean-up the outstanding handles on the server is executed. If the setting is - * {@literal <=} 1, unprepare actions will be executed immedietely on prepared statement close. If it is set to {@literal >} 1, these calls - * will be batched together to avoid overhead of calling sp_unprepare too often. The default for this option can be changed by calling - * getDefaultServerPreparedStatementDiscardThreshold(). + * The initial default on application start-up for the prepared statement clean-up action threshold (i.e. when sp_unprepare is called). * * @return Returns the current setting per the description. */ - public int getServerPreparedStatementDiscardThreshold() { - if (0 > this.serverPreparedStatementDiscardThreshold) - return DEFAULT_SERVER_PREPARED_STATEMENT_DISCARD_THRESHOLD; - else - return this.serverPreparedStatementDiscardThreshold; + static public int getInitialDefaultServerPreparedStatementDiscardThreshold() { + return INITIAL_DEFAULT_SERVER_PREPARED_STATEMENT_DISCARD_THRESHOLD; } /** - * Specifies the behavior for a specific connection instance. This setting controls how many outstanding prepared statement discard actions + * Returns the default behavior for new connection instances. This setting controls how many outstanding prepared statement discard actions * (sp_unprepare) can be outstanding per connection before a call to clean-up the outstanding handles on the server is executed. If the setting is * {@literal <=} 1 unprepare actions will be executed immedietely on prepared statement close. If it is set to {@literal >} 1 these calls will be - * batched together to avoid overhead of calling sp_unprepare too often. 
+ * batched together to avoid overhead of calling sp_unprepare too often. Initial setting for this option is available in + * INITIAL_DEFAULT_SERVER_PREPARED_STATEMENT_DISCARD_THRESHOLD. * - * @param value - * Changes the setting per the description. + * @return Returns the current setting per the description. */ - public void setServerPreparedStatementDiscardThreshold(int value) { - this.serverPreparedStatementDiscardThreshold = Math.max(0, value); - } - - final boolean isPreparedStatementUnprepareBatchingEnabled() { - return 1 < getServerPreparedStatementDiscardThreshold(); + static public int getDefaultServerPreparedStatementDiscardThreshold() { + if(0 > defaultServerPreparedStatementDiscardThreshold) + return getInitialDefaultServerPreparedStatementDiscardThreshold(); + else + return defaultServerPreparedStatementDiscardThreshold; } /** - * Cleans-up discarded prepared statement handles on the server using batched un-prepare actions if the batching threshold has been reached. + * Specifies the default behavior for new connection instances. This setting controls how many outstanding prepared statement discard actions + * (sp_unprepare) can be outstanding per connection before a call to clean-up the outstanding handles on the server is executed. If the setting is + * {@literal <=} 1 unprepare actions will be executed immedietely on prepared statement close. If it is set to {@literal >} 1 these calls will be + * batched together to avoid overhead of calling sp_unprepare too often. Initial setting for this option is available in + * INITIAL_DEFAULT_SERVER_PREPARED_STATEMENT_DISCARD_THRESHOLD. * - * @param force - * When force is set to true we ignore the current threshold for if the discard actions should run and run them anyway. - */ - final void unprepareUnreferencedPreparedStatementHandles(boolean force) { - // Skip out if session is unavailable to adhere to previous non-batched behavior. - if (isSessionUnAvailable()) - return; - - final int threshold = getServerPreparedStatementDiscardThreshold(); - - // Met threshold to clean-up? - if (force || threshold < getDiscardedServerPreparedStatementCount()) { - - // Create batch of sp_unprepare statements. - StringBuilder sql = new StringBuilder(threshold * 32/*EXEC sp_cursorunprepare++;*/); - - // Build the string containing no more than the # of handles to remove. - // Note that sp_unprepare can fail if the statement is already removed. - // However, the server will only abort that statement and continue with - // the remaining clean-up. - int handlesRemoved = 0; - PreparedStatementHandle statementHandle = null; - - while (null != (statementHandle = discardedPreparedStatementHandles.poll())){ - ++handlesRemoved; - - sql.append(statementHandle.isDirectSql() ? "EXEC sp_unprepare " : "EXEC sp_cursorunprepare ") - .append(statementHandle.getHandle()) - .append(';'); - } - - try { - // Execute the batched set. - try(Statement stmt = this.createStatement()) { - stmt.execute(sql.toString()); - } - - if (loggerExternal.isLoggable(java.util.logging.Level.FINER)) - loggerExternal.finer(this + ": Finished un-preparing handle count:" + handlesRemoved); - } - catch(SQLException e) { - if (loggerExternal.isLoggable(java.util.logging.Level.FINER)) - loggerExternal.log(Level.FINER, this + ": Error batch-closing at least one prepared handle", e); - } - - // Decrement threshold counter - discardedPreparedStatementHandleCount.addAndGet(-handlesRemoved); - } - } - - - /** - * Returns the size of the prepared statement cache for this connection. 
A value less than 1 means no cache. - * @return Returns the current setting per the description. + * @param value + * Changes the setting per the description. */ - public int getStatementPoolingCacheSize() { - return statementPoolingCacheSize; + static public void setDefaultServerPreparedStatementDiscardThreshold(int value) { + defaultServerPreparedStatementDiscardThreshold = value; } /** - * Returns the current number of pooled prepared statement handles. + * Returns the behavior for a specific connection instance. This setting controls how many outstanding prepared statement discard actions + * (sp_unprepare) can be outstanding per connection before a call to clean-up the outstanding handles on the server is executed. If the setting is + * {@literal <=} 1 unprepare actions will be executed immedietely on prepared statement close. If it is set to {@literal >} 1 these calls will be + * batched together to avoid overhead of calling sp_unprepare too often. The default for this option can be changed by calling + * getDefaultServerPreparedStatementDiscardThreshold(). + * * @return Returns the current setting per the description. */ - public int getStatementHandleCacheEntryCount() { - if(!isStatementPoolingEnabled()) - return 0; + public int getServerPreparedStatementDiscardThreshold() { + if(0 > this.serverPreparedStatementDiscardThreshold) + return getDefaultServerPreparedStatementDiscardThreshold(); else - return this.preparedStatementHandleCache.size(); + return this.serverPreparedStatementDiscardThreshold; } /** - * Whether statement pooling is enabled or not for this connection. - * @return Returns the current setting per the description. + * Specifies the behavior for a specific connection instance. This setting controls how many outstanding prepared statement discard actions + * (sp_unprepare) can be outstanding per connection before a call to clean-up the outstanding handles on the server is executed. If the setting is + * {@literal <=} 1 unprepare actions will be executed immedietely on prepared statement close. If it is set to {@literal >} 1 these calls will be + * batched together to avoid overhead of calling sp_unprepare too often. + * + * @param value + * Changes the setting per the description. */ - public boolean isStatementPoolingEnabled() { - return null != preparedStatementHandleCache && 0 < this.getStatementPoolingCacheSize(); + public void setServerPreparedStatementDiscardThreshold(int value) { + this.serverPreparedStatementDiscardThreshold = value; } /** - * Specifies the size of the prepared statement cache for this conection. A value less than 1 means no cache. - * @param value The new cache size. + * Cleans-up discarded prepared statement handles on the server using batched un-prepare actions if the batching threshold has been reached. * + * @param force + * When force is set to true we ignore the current threshold for if the discard actions should run and run them anyway. 
*/ - public void setStatementPoolingCacheSize(int value) { - if (value != this.statementPoolingCacheSize) { - value = Math.max(0, value); - statementPoolingCacheSize = value; - - if (null != preparedStatementHandleCache) - preparedStatementHandleCache.setCapacity(value); - - if (null != parameterMetadataCache) - parameterMetadataCache.setCapacity(value); - } - } - - /** Get a parameter metadata cache entry if statement pooling is enabled */ - final SQLServerParameterMetaData getCachedParameterMetadata(Sha1HashKey key) { - if(!isStatementPoolingEnabled()) - return null; - - return parameterMetadataCache.get(key); - } - - /** Register a parameter metadata cache entry if statement pooling is enabled */ - final void registerCachedParameterMetadata(Sha1HashKey key, SQLServerParameterMetaData pmd) { - if(!isStatementPoolingEnabled() || null == pmd) + final void handlePreparedStatementDiscardActions(boolean force) { + // Skip out if session is unavailable to adhere to previous non-batched behavior. + if (this.isSessionUnAvailable()) return; - - parameterMetadataCache.put(key, pmd); - } - - /** Get or create prepared statement handle cache entry if statement pooling is enabled */ - final PreparedStatementHandle getCachedPreparedStatementHandle(Sha1HashKey key) { - if(!isStatementPoolingEnabled()) - return null; - - return preparedStatementHandleCache.get(key); - } - /** Get or create prepared statement handle cache entry if statement pooling is enabled */ - final PreparedStatementHandle registerCachedPreparedStatementHandle(Sha1HashKey key, int handle, boolean isDirectSql) { - if(!isStatementPoolingEnabled() || null == key) - return null; - - PreparedStatementHandle cacheItem = new PreparedStatementHandle(key, handle, isDirectSql, false); - preparedStatementHandleCache.putIfAbsent(key, cacheItem); - return cacheItem; - } + final int threshold = this.getServerPreparedStatementDiscardThreshold(); - /** Return prepared statement handle cache entry so it can be un-prepared. */ - final void returnCachedPreparedStatementHandle(PreparedStatementHandle handle) { - handle.removeReference(); + // Find out current # enqueued, if force, make sure it always exceeds threshold. + int count = force ? threshold + 1 : this.getDiscardedServerPreparedStatementCount(); - if (handle.isEvictedFromCache() && handle.tryDiscardHandle()) - enqueueUnprepareStatementHandle(handle); - } - - /** Force eviction of prepared statement handle cache entry. */ - final void evictCachedPreparedStatementHandle(PreparedStatementHandle handle) { - if(null == handle || null == handle.getKey()) - return; - - preparedStatementHandleCache.remove(handle.getKey()); - } + // Met threshold to clean-up? + if(threshold < count) { + + PreparedStatementDiscardItem prepStmtDiscardAction = this.discardedPreparedStatementHandles.poll(); + if(null != prepStmtDiscardAction) { + int handlesRemoved = 0; + + // Create batch of sp_unprepare statements. + StringBuilder sql = new StringBuilder(count * 32/*EXEC sp_cursorunprepare++;*/); + + // Build the string containing no more than the # of handles to remove. + // Note that sp_unprepare can fail if the statement is already removed. + // However, the server will only abort that statement and continue with + // the remaining clean-up. + do { + ++handlesRemoved; + + sql.append(prepStmtDiscardAction.directSql ? 
"EXEC sp_unprepare " : "EXEC sp_cursorunprepare ") + .append(prepStmtDiscardAction.handle) + .append(';'); + } while (null != (prepStmtDiscardAction = this.discardedPreparedStatementHandles.poll())); - // Handle closing handles when removed from cache. - final class PreparedStatementCacheEvictionListener implements EvictionListener { - public void onEviction(Sha1HashKey key, PreparedStatementHandle handle) { - if(null != handle) { - handle.setIsEvictedFromCache(true); // Mark as evicted from cache. + try { + // Execute the batched set. + try(Statement stmt = this.createStatement()) { + stmt.execute(sql.toString()); + } - // Only discard if not referenced. - if(handle.tryDiscardHandle()) { - enqueueUnprepareStatementHandle(handle); - // Do not run discard actions here! Can interfere with executing statement. - } + if (this.getConnectionLogger().isLoggable(java.util.logging.Level.FINER)) + this.getConnectionLogger().finer(this + ": Finished un-preparing handle count:" + handlesRemoved); + } + catch(SQLException e) { + if (this.getConnectionLogger().isLoggable(java.util.logging.Level.FINER)) + this.getConnectionLogger().log(Level.FINER, this + ": Error batch-closing at least one prepared handle", e); + } + + // Decrement threshold counter + this.discardedPreparedStatementHandleQueueCount.addAndGet(-handlesRemoved); } } - } + } } // Helper class for security manager functions used by SQLServerConnection class. diff --git a/src/main/java/com/microsoft/sqlserver/jdbc/SQLServerDataSource.java b/src/main/java/com/microsoft/sqlserver/jdbc/SQLServerDataSource.java index b94bb4d6f..414e065e7 100644 --- a/src/main/java/com/microsoft/sqlserver/jdbc/SQLServerDataSource.java +++ b/src/main/java/com/microsoft/sqlserver/jdbc/SQLServerDataSource.java @@ -694,8 +694,8 @@ public void setEnablePrepareOnFirstPreparedStatementCall(boolean enablePrepareOn * @return Returns the current setting per the description. */ public boolean getEnablePrepareOnFirstPreparedStatementCall() { - boolean defaultValue = SQLServerDriverBooleanProperty.ENABLE_PREPARE_ON_FIRST_PREPARED_STATEMENT.getDefaultValue(); - return getBooleanProperty(connectionProps, SQLServerDriverBooleanProperty.ENABLE_PREPARE_ON_FIRST_PREPARED_STATEMENT.toString(), defaultValue); + return getBooleanProperty(connectionProps, SQLServerDriverBooleanProperty.ENABLE_PREPARE_ON_FIRST_PREPARED_STATEMENT.toString(), + SQLServerConnection.getDefaultEnablePrepareOnFirstPreparedStatementCall()); } /** @@ -720,28 +720,8 @@ public void setServerPreparedStatementDiscardThreshold(int serverPreparedStateme * @return Returns the current setting per the description. */ public int getServerPreparedStatementDiscardThreshold() { - int defaultSize = SQLServerDriverIntProperty.SERVER_PREPARED_STATEMENT_DISCARD_THRESHOLD.getDefaultValue(); - return getIntProperty(connectionProps, SQLServerDriverIntProperty.SERVER_PREPARED_STATEMENT_DISCARD_THRESHOLD.toString(), defaultSize); - } - - /** - * Specifies the size of the prepared statement cache for this conection. A value less than 1 means no cache. - * - * @param statementPoolingCacheSize - * Changes the setting per the description. - */ - public void setStatementPoolingCacheSize(int statementPoolingCacheSize) { - setIntProperty(connectionProps, SQLServerDriverIntProperty.STATEMENT_POOLING_CACHE_SIZE.toString(), statementPoolingCacheSize); - } - - /** - * Returns the size of the prepared statement cache for this conection. A value less than 1 means no cache. - * - * @return Returns the current setting per the description. 
- */ - public int getStatementPoolingCacheSize() { - int defaultSize = SQLServerDriverIntProperty.STATEMENT_POOLING_CACHE_SIZE.getDefaultValue(); - return getIntProperty(connectionProps, SQLServerDriverIntProperty.STATEMENT_POOLING_CACHE_SIZE.toString(), defaultSize); + return getIntProperty(connectionProps, SQLServerDriverIntProperty.SERVER_PREPARED_STATEMENT_DISCARD_THRESHOLD.toString(), + SQLServerConnection.getDefaultServerPreparedStatementDiscardThreshold()); } /** diff --git a/src/main/java/com/microsoft/sqlserver/jdbc/SQLServerDriver.java b/src/main/java/com/microsoft/sqlserver/jdbc/SQLServerDriver.java index 1ea9f3125..646af582b 100644 --- a/src/main/java/com/microsoft/sqlserver/jdbc/SQLServerDriver.java +++ b/src/main/java/com/microsoft/sqlserver/jdbc/SQLServerDriver.java @@ -268,15 +268,13 @@ public String toString() { } enum SQLServerDriverIntProperty { - PACKET_SIZE ("packetSize", TDS.DEFAULT_PACKET_SIZE), - LOCK_TIMEOUT ("lockTimeout", -1), - LOGIN_TIMEOUT ("loginTimeout", 15), - QUERY_TIMEOUT ("queryTimeout", -1), - PORT_NUMBER ("portNumber", 1433), - SOCKET_TIMEOUT ("socketTimeout", 0), - SERVER_PREPARED_STATEMENT_DISCARD_THRESHOLD("serverPreparedStatementDiscardThreshold", SQLServerConnection.DEFAULT_SERVER_PREPARED_STATEMENT_DISCARD_THRESHOLD), - STATEMENT_POOLING_CACHE_SIZE ("statementPoolingCacheSize", SQLServerConnection.DEFAULT_STATEMENT_POOLING_CACHE_SIZE), - ; + PACKET_SIZE ("packetSize", TDS.DEFAULT_PACKET_SIZE), + LOCK_TIMEOUT ("lockTimeout", -1), + LOGIN_TIMEOUT ("loginTimeout", 15), + QUERY_TIMEOUT ("queryTimeout", -1), + PORT_NUMBER ("portNumber", 1433), + SOCKET_TIMEOUT ("socketTimeout", 0), + SERVER_PREPARED_STATEMENT_DISCARD_THRESHOLD("serverPreparedStatementDiscardThreshold", -1/*This is not the default, default handled in SQLServerConnection and is not final/const*/); private String name; private int defaultValue; @@ -296,9 +294,9 @@ public String toString() { } } -enum SQLServerDriverBooleanProperty +enum SQLServerDriverBooleanProperty { - DISABLE_STATEMENT_POOLING ("disableStatementPooling", false), + DISABLE_STATEMENT_POOLING ("disableStatementPooling", true), ENCRYPT ("encrypt", false), INTEGRATED_SECURITY ("integratedSecurity", false), LAST_UPDATE_COUNT ("lastUpdateCount", true), @@ -310,7 +308,7 @@ enum SQLServerDriverBooleanProperty TRUST_SERVER_CERTIFICATE ("trustServerCertificate", false), XOPEN_STATES ("xopenStates", false), FIPS ("fips", false), - ENABLE_PREPARE_ON_FIRST_PREPARED_STATEMENT("enablePrepareOnFirstPreparedStatementCall", SQLServerConnection.DEFAULT_ENABLE_PREPARE_ON_FIRST_PREPARED_STATEMENT_CALL); + ENABLE_PREPARE_ON_FIRST_PREPARED_STATEMENT("enablePrepareOnFirstPreparedStatementCall", false/*This is not the default, default handled in SQLServerConnection and is not final/const*/); private String name; private boolean defaultValue; @@ -339,10 +337,10 @@ public final class SQLServerDriver implements java.sql.Driver { { // default required available choices // property name value property (if appropriate) - new SQLServerDriverPropertyInfo(SQLServerDriverStringProperty.APPLICATION_INTENT.toString(), SQLServerDriverStringProperty.APPLICATION_INTENT.getDefaultValue(), false, new String[]{ApplicationIntent.READ_ONLY.toString(), ApplicationIntent.READ_WRITE.toString()}), - new SQLServerDriverPropertyInfo(SQLServerDriverStringProperty.APPLICATION_NAME.toString(), SQLServerDriverStringProperty.APPLICATION_NAME.getDefaultValue(), false, null), + new SQLServerDriverPropertyInfo(SQLServerDriverStringProperty.APPLICATION_INTENT.toString(), 
SQLServerDriverStringProperty.APPLICATION_INTENT.getDefaultValue(), false, new String[]{ApplicationIntent.READ_ONLY.toString(), ApplicationIntent.READ_WRITE.toString()}), + new SQLServerDriverPropertyInfo(SQLServerDriverStringProperty.APPLICATION_NAME.toString(), SQLServerDriverStringProperty.APPLICATION_NAME.getDefaultValue(), false, null), new SQLServerDriverPropertyInfo(SQLServerDriverStringProperty.COLUMN_ENCRYPTION.toString(), SQLServerDriverStringProperty.COLUMN_ENCRYPTION.getDefaultValue(), false, new String[] {ColumnEncryptionSetting.Disabled.toString(), ColumnEncryptionSetting.Enabled.toString()}), - new SQLServerDriverPropertyInfo(SQLServerDriverStringProperty.DATABASE_NAME.toString(), SQLServerDriverStringProperty.DATABASE_NAME.getDefaultValue(), false, null), + new SQLServerDriverPropertyInfo(SQLServerDriverStringProperty.DATABASE_NAME.toString(), SQLServerDriverStringProperty.DATABASE_NAME.getDefaultValue(), false, null), new SQLServerDriverPropertyInfo(SQLServerDriverBooleanProperty.DISABLE_STATEMENT_POOLING.toString(), Boolean.toString(SQLServerDriverBooleanProperty.DISABLE_STATEMENT_POOLING.getDefaultValue()), false, new String[] {"true"}), new SQLServerDriverPropertyInfo(SQLServerDriverBooleanProperty.ENCRYPT.toString(), Boolean.toString(SQLServerDriverBooleanProperty.ENCRYPT.getDefaultValue()), false, TRUE_FALSE), new SQLServerDriverPropertyInfo(SQLServerDriverStringProperty.FAILOVER_PARTNER.toString(), SQLServerDriverStringProperty.FAILOVER_PARTNER.getDefaultValue(), false, null), @@ -356,7 +354,7 @@ public final class SQLServerDriver implements java.sql.Driver { new SQLServerDriverPropertyInfo(SQLServerDriverIntProperty.LOCK_TIMEOUT.toString(), Integer.toString(SQLServerDriverIntProperty.LOCK_TIMEOUT.getDefaultValue()), false, null), new SQLServerDriverPropertyInfo(SQLServerDriverIntProperty.LOGIN_TIMEOUT.toString(), Integer.toString(SQLServerDriverIntProperty.LOGIN_TIMEOUT.getDefaultValue()), false, null), new SQLServerDriverPropertyInfo(SQLServerDriverBooleanProperty.MULTI_SUBNET_FAILOVER.toString(), Boolean.toString(SQLServerDriverBooleanProperty.MULTI_SUBNET_FAILOVER.getDefaultValue()), false, TRUE_FALSE), - new SQLServerDriverPropertyInfo(SQLServerDriverIntProperty.PACKET_SIZE.toString(), Integer.toString(SQLServerDriverIntProperty.PACKET_SIZE.getDefaultValue()), false, null), + new SQLServerDriverPropertyInfo(SQLServerDriverIntProperty.PACKET_SIZE.toString(), Integer.toString(SQLServerDriverIntProperty.PACKET_SIZE.getDefaultValue()), false, null), new SQLServerDriverPropertyInfo(SQLServerDriverStringProperty.PASSWORD.toString(), SQLServerDriverStringProperty.PASSWORD.getDefaultValue(), true, null), new SQLServerDriverPropertyInfo(SQLServerDriverIntProperty.PORT_NUMBER.toString(), Integer.toString(SQLServerDriverIntProperty.PORT_NUMBER.getDefaultValue()), false, null), new SQLServerDriverPropertyInfo(SQLServerDriverIntProperty.QUERY_TIMEOUT.toString(), Integer.toString(SQLServerDriverIntProperty.QUERY_TIMEOUT.getDefaultValue()), false, null), @@ -373,16 +371,15 @@ public final class SQLServerDriver implements java.sql.Driver { new SQLServerDriverPropertyInfo(SQLServerDriverStringProperty.TRUST_STORE_PASSWORD.toString(), SQLServerDriverStringProperty.TRUST_STORE_PASSWORD.getDefaultValue(), false, null), new SQLServerDriverPropertyInfo(SQLServerDriverBooleanProperty.SEND_TIME_AS_DATETIME.toString(), Boolean.toString(SQLServerDriverBooleanProperty.SEND_TIME_AS_DATETIME.getDefaultValue()), false, TRUE_FALSE), new 
SQLServerDriverPropertyInfo(SQLServerDriverStringProperty.USER.toString(), SQLServerDriverStringProperty.USER.getDefaultValue(), true, null), - new SQLServerDriverPropertyInfo(SQLServerDriverStringProperty.WORKSTATION_ID.toString(), SQLServerDriverStringProperty.WORKSTATION_ID.getDefaultValue(), false, null), + new SQLServerDriverPropertyInfo(SQLServerDriverStringProperty.WORKSTATION_ID.toString(), SQLServerDriverStringProperty.WORKSTATION_ID.getDefaultValue(), false, null), new SQLServerDriverPropertyInfo(SQLServerDriverBooleanProperty.XOPEN_STATES.toString(), Boolean.toString(SQLServerDriverBooleanProperty.XOPEN_STATES.getDefaultValue()), false, TRUE_FALSE), new SQLServerDriverPropertyInfo(SQLServerDriverStringProperty.AUTHENTICATION_SCHEME.toString(), SQLServerDriverStringProperty.AUTHENTICATION_SCHEME.getDefaultValue(), false, new String[] {AuthenticationScheme.javaKerberos.toString(),AuthenticationScheme.nativeAuthentication.toString()}), new SQLServerDriverPropertyInfo(SQLServerDriverStringProperty.AUTHENTICATION.toString(), SQLServerDriverStringProperty.AUTHENTICATION.getDefaultValue(), false, new String[] {SqlAuthentication.NotSpecified.toString(),SqlAuthentication.SqlPassword.toString(),SqlAuthentication.ActiveDirectoryPassword.toString(),SqlAuthentication.ActiveDirectoryIntegrated.toString()}), - new SQLServerDriverPropertyInfo(SQLServerDriverStringProperty.FIPS_PROVIDER.toString(), SQLServerDriverStringProperty.FIPS_PROVIDER.getDefaultValue(), false, null), + new SQLServerDriverPropertyInfo(SQLServerDriverStringProperty.FIPS_PROVIDER.toString(), SQLServerDriverStringProperty.FIPS_PROVIDER.getDefaultValue(), false, null), new SQLServerDriverPropertyInfo(SQLServerDriverIntProperty.SOCKET_TIMEOUT.toString(), Integer.toString(SQLServerDriverIntProperty.SOCKET_TIMEOUT.getDefaultValue()), false, null), new SQLServerDriverPropertyInfo(SQLServerDriverBooleanProperty.FIPS.toString(), Boolean.toString(SQLServerDriverBooleanProperty.FIPS.getDefaultValue()), false, TRUE_FALSE), - new SQLServerDriverPropertyInfo(SQLServerDriverBooleanProperty.ENABLE_PREPARE_ON_FIRST_PREPARED_STATEMENT.toString(), Boolean.toString(SQLServerDriverBooleanProperty.ENABLE_PREPARE_ON_FIRST_PREPARED_STATEMENT.getDefaultValue()), false,TRUE_FALSE), - new SQLServerDriverPropertyInfo(SQLServerDriverIntProperty.SERVER_PREPARED_STATEMENT_DISCARD_THRESHOLD.toString(), Integer.toString(SQLServerDriverIntProperty.SERVER_PREPARED_STATEMENT_DISCARD_THRESHOLD.getDefaultValue()), false, null), - new SQLServerDriverPropertyInfo(SQLServerDriverIntProperty.STATEMENT_POOLING_CACHE_SIZE.toString(), Integer.toString(SQLServerDriverIntProperty.STATEMENT_POOLING_CACHE_SIZE.getDefaultValue()), false, null), + new SQLServerDriverPropertyInfo(SQLServerDriverBooleanProperty.ENABLE_PREPARE_ON_FIRST_PREPARED_STATEMENT.toString(), Boolean.toString(SQLServerConnection.getDefaultEnablePrepareOnFirstPreparedStatementCall()), false, TRUE_FALSE), + new SQLServerDriverPropertyInfo(SQLServerDriverIntProperty.SERVER_PREPARED_STATEMENT_DISCARD_THRESHOLD.toString(), Integer.toString(SQLServerConnection.getDefaultServerPreparedStatementDiscardThreshold()), false, null), new SQLServerDriverPropertyInfo(SQLServerDriverStringProperty.JAAS_CONFIG_NAME.toString(), SQLServerDriverStringProperty.JAAS_CONFIG_NAME.getDefaultValue(), false, null), }; diff --git a/src/main/java/com/microsoft/sqlserver/jdbc/SQLServerParameterMetaData.java b/src/main/java/com/microsoft/sqlserver/jdbc/SQLServerParameterMetaData.java index 5d8fc5110..a309b8dad 100644 --- 
a/src/main/java/com/microsoft/sqlserver/jdbc/SQLServerParameterMetaData.java +++ b/src/main/java/com/microsoft/sqlserver/jdbc/SQLServerParameterMetaData.java @@ -505,9 +505,7 @@ String parseProcIdentifier(String procIdentifier) throws SQLServerException { } private void checkClosed() throws SQLServerException { - // stmtParent does not seem to be re-used, should just verify connection is not closed. - // stmtParent.checkClosed(); - con.checkClosed(); + stmtParent.checkClosed(); } /** diff --git a/src/main/java/com/microsoft/sqlserver/jdbc/SQLServerPreparedStatement.java b/src/main/java/com/microsoft/sqlserver/jdbc/SQLServerPreparedStatement.java index 4bfcb58e2..48b6926e4 100644 --- a/src/main/java/com/microsoft/sqlserver/jdbc/SQLServerPreparedStatement.java +++ b/src/main/java/com/microsoft/sqlserver/jdbc/SQLServerPreparedStatement.java @@ -6,10 +6,7 @@ * This program is made available under the terms of the MIT License. See the LICENSE file in the project root for more information. */ -package com.microsoft.sqlserver.jdbc; - -import static com.microsoft.sqlserver.jdbc.SQLServerConnection.getCachedParsedSQL; -import static com.microsoft.sqlserver.jdbc.SQLServerConnection.parseAndCacheSQL; +package com.microsoft.sqlserver.jdbc; import java.io.InputStream; import java.io.Reader; @@ -31,9 +28,6 @@ import java.util.Vector; import java.util.logging.Level; -import com.microsoft.sqlserver.jdbc.SQLServerConnection.PreparedStatementHandle; -import com.microsoft.sqlserver.jdbc.SQLServerConnection.Sha1HashKey; - /** * SQLServerPreparedStatement provides JDBC prepared statement functionality. SQLServerPreparedStatement provides methods for the user to supply * parameters as any native Java type and many Java object types. @@ -57,10 +51,13 @@ public class SQLServerPreparedStatement extends SQLServerStatement implements IS private static final int BATCH_STATEMENT_DELIMITER_TDS_72 = 0xFF; final int nBatchStatementDelimiter = BATCH_STATEMENT_DELIMITER_TDS_72; + /** the user's prepared sql syntax */ + private String sqlCommand; + /** The prepared type definitions */ private String preparedTypeDefinitions; - /** Processed SQL statement text, may not be same as what user initially passed. */ + /** The users SQL statement text */ final String userSQL; /** SQL statement with expanded parameter tokens */ @@ -69,12 +66,6 @@ public class SQLServerPreparedStatement extends SQLServerStatement implements IS /** True if this execute has been called for this statement at least once */ private boolean isExecutedAtLeastOnce = false; - /** Reference to cache item for statement handle pooling. Only used to decrement ref count on statement close. */ - private PreparedStatementHandle cachedPreparedStatementHandle; - - /** Hash of user supplied SQL statement used for various cache lookups */ - private Sha1HashKey sqlTextCacheKey; - /** * Array with parameter names generated in buildParamTypeDefinitions For mapping encryption information to parameters, as the second result set * returned by sp_describe_parameter_encryption doesn't depend on order of input parameter @@ -99,36 +90,6 @@ public class SQLServerPreparedStatement extends SQLServerStatement implements IS /** The prepared statement handle returned by the server */ private int prepStmtHandle = 0; - private void setPreparedStatementHandle(int handle) { - this.prepStmtHandle = handle; - } - - /** The server handle for this prepared statement. If a value {@literal <} 1 is returned no handle has been created. - * - * @return - * Per the description. 
- * @throws SQLServerException when an error occurs - */ - public int getPreparedStatementHandle() throws SQLServerException { - checkClosed(); - return prepStmtHandle; - } - - /** Returns true if this statement has a server handle. - * - * @return - * Per the description. - */ - private boolean hasPreparedStatementHandle() { - return 0 < prepStmtHandle; - } - - /** Resets the server handle for this prepared statement to no handle. - */ - private void resetPrepStmtHandle() { - prepStmtHandle = 0; - } - /** Flag set to true when statement execution is expected to return the prepared statement handle */ private boolean expectPrepStmtHandle = false; @@ -164,65 +125,47 @@ String getClassNameInternal() { int nRSConcur, SQLServerStatementColumnEncryptionSetting stmtColEncSetting) throws SQLServerException { super(conn, nRSType, nRSConcur, stmtColEncSetting); - - if (null == sql) { - MessageFormat form = new MessageFormat(SQLServerException.getErrString("R_NullValue")); - Object[] msgArgs1 = {"Statement SQL"}; - throw new SQLServerException(form.format(msgArgs1), null); - } - stmtPoolable = true; + sqlCommand = sql; - // Create a cache key for this statement. - sqlTextCacheKey = new Sha1HashKey(sql); - - // Parse or fetch SQL metadata from cache. - ParsedSQLCacheItem parsedSQL = getCachedParsedSQL(sqlTextCacheKey); - if(null != parsedSQL) { - isExecutedAtLeastOnce = true; - } - else { - parsedSQL = parseAndCacheSQL(sqlTextCacheKey, sql); - } + JDBCSyntaxTranslator translator = new JDBCSyntaxTranslator(); + sql = translator.translate(sql); + procedureName = translator.getProcedureName(); // may return null + bReturnValueSyntax = translator.hasReturnValueSyntax(); - // Retrieve meta data from cache item. - procedureName = parsedSQL.procedureName; - bReturnValueSyntax = parsedSQL.bReturnValueSyntax; - userSQL = parsedSQL.processedSQL; - initParams(parsedSQL.parameterCount); + userSQL = sql; + initParams(userSQL); } /** * Close the prepared statement's prepared handle. */ private void closePreparedHandle() { - if (!hasPreparedStatementHandle()) + if (0 == prepStmtHandle) return; // If the connection is already closed, don't bother trying to close // the prepared handle. We won't be able to, and it's already closed // on the server anyway. if (connection.isSessionUnAvailable()) { - if (loggerExternal.isLoggable(java.util.logging.Level.FINER)) - loggerExternal.finer(this + ": Not closing PreparedHandle:" + prepStmtHandle + "; connection is already closed."); + if (getStatementLogger().isLoggable(java.util.logging.Level.FINER)) + getStatementLogger().finer(this + ": Not closing PreparedHandle:" + prepStmtHandle + "; connection is already closed."); } else { isExecutedAtLeastOnce = false; final int handleToClose = prepStmtHandle; - resetPrepStmtHandle(); + prepStmtHandle = 0; - // Handle unprepare actions through statement pooling. - if (null != cachedPreparedStatementHandle) { - connection.returnCachedPreparedStatementHandle(cachedPreparedStatementHandle); - } - // If no reference to a statement pool cache item is found handle unprepare actions through batching @ connection level. - else if(connection.isPreparedStatementUnprepareBatchingEnabled()) { - connection.enqueueUnprepareStatementHandle(connection.new PreparedStatementHandle(null, handleToClose, executedSqlDirectly, true)); + // Using batched clean-up? If not, use old method of calling sp_unprepare. + if(1 < connection.getServerPreparedStatementDiscardThreshold()) { + // Handle unprepare actions through batching @ connection level. 
+ connection.enqueuePreparedStatementDiscardItem(handleToClose, executedSqlDirectly); + connection.handlePreparedStatementDiscardActions(false); } else { - // Non batched behavior (same as pre batch clean-up implementation) - if (loggerExternal.isLoggable(java.util.logging.Level.FINER)) - loggerExternal.finer(this + ": Closing PreparedHandle:" + handleToClose); + // Non batched behavior (same as pre batch impl.) + if (getStatementLogger().isLoggable(java.util.logging.Level.FINER)) + getStatementLogger().finer(this + ": Closing PreparedHandle:" + handleToClose); final class PreparedHandleClose extends UninterruptableTDSCommand { PreparedHandleClose() { @@ -246,16 +189,13 @@ final boolean doExecute() throws SQLServerException { executeCommand(new PreparedHandleClose()); } catch (SQLServerException e) { - if (loggerExternal.isLoggable(java.util.logging.Level.FINER)) - loggerExternal.log(Level.FINER, this + ": Error (ignored) closing PreparedHandle:" + handleToClose, e); + if (getStatementLogger().isLoggable(java.util.logging.Level.FINER)) + getStatementLogger().log(Level.FINER, this + ": Error (ignored) closing PreparedHandle:" + handleToClose, e); } - if (loggerExternal.isLoggable(java.util.logging.Level.FINER)) - loggerExternal.finer(this + ": Closed PreparedHandle:" + handleToClose); + if (getStatementLogger().isLoggable(java.util.logging.Level.FINER)) + getStatementLogger().finer(this + ": Closed PreparedHandle:" + handleToClose); } - - // Always run any outstanding discard actions as statement pooling always uses batched sp_unprepare. - connection.unprepareUnreferencedPreparedStatementHandles(false); } } @@ -276,13 +216,21 @@ final void closeInternal() { batchParamValues = null; } - /** + /** * Intialize the statement parameters. * - * @param nParams - * Number of parameters to Intialize. + * @param sql */ - /* L0 */ final void initParams(int nParams) { + /* L0 */ final void initParams(String sql) { + encryptionMetadataIsRetrieved = false; + int nParams = 0; + + // Figure out the expected number of parameters by counting the + // parameter placeholders in the SQL string. + int offset = -1; + while ((offset = ParameterUtils.scanSQLForChar('?', sql, ++offset)) < sql.length()) + ++nParams; + inOutParam = new Parameter[nParams]; for (int i = 0; i < nParams; i++) { inOutParam[i] = new Parameter(Util.shouldHonorAEForParameters(stmtColumnEncriptionSetting, connection)); @@ -487,7 +435,6 @@ final void doExecutePreparedStatement(PrepStmtExecCmd command) throws SQLServerE loggerExternal.finer(toString() + " ActivityId: " + ActivityCorrelator.getNext().toString()); } - boolean hasExistingTypeDefinitions = preparedTypeDefinitions != null; boolean hasNewTypeDefinitions = true; if (!encryptionMetadataIsRetrieved) { hasNewTypeDefinitions = buildPreparedStrings(inOutParam, false); @@ -509,32 +456,15 @@ final void doExecutePreparedStatement(PrepStmtExecCmd command) throws SQLServerE hasNewTypeDefinitions = buildPreparedStrings(inOutParam, true); } - // Retry execution if existing handle could not be re-used. - for(int attempt = 1; attempt <= 2; ++attempt) { - try { - // Re-use handle if available, requires parameter definitions which are not available until here. - if (reuseCachedHandle(hasNewTypeDefinitions, 1 < attempt)) { - hasNewTypeDefinitions = false; - } - - // Start the request and detach the response reader so that we can - // continue using it after we return. 
- TDSWriter tdsWriter = command.startRequest(TDS.PKT_RPC); - - doPrepExec(tdsWriter, inOutParam, hasNewTypeDefinitions, hasExistingTypeDefinitions); - - ensureExecuteResultsReader(command.startResponse(getIsResponseBufferingAdaptive())); - startResults(); - getNextResult(); - } - catch(SQLException e) { - if (retryBasedOnFailedReuseOfCachedHandle(e, attempt)) - continue; - else - throw e; - } - break; - } + // Start the request and detach the response reader so that we can + // continue using it after we return. + TDSWriter tdsWriter = command.startRequest(TDS.PKT_RPC); + + doPrepExec(tdsWriter, inOutParam, hasNewTypeDefinitions); + + ensureExecuteResultsReader(command.startResponse(getIsResponseBufferingAdaptive())); + startResults(); + getNextResult(); if (EXECUTE_QUERY == executeMethod && null == resultSet) { SQLServerException.makeFromDriverError(connection, this, SQLServerException.getErrString("R_noResultset"), null, true); @@ -544,15 +474,6 @@ else if (EXECUTE_UPDATE == executeMethod && null != resultSet) { } } - /** Should the execution be retried because the re-used cached handle could not be re-used due to server side state changes? */ - private boolean retryBasedOnFailedReuseOfCachedHandle(SQLException e, int attempt) { - // Only retry based on these error codes: - // 586: The prepared statement handle %d is not valid in this context. Please verify that current database, user default schema, and ANSI_NULLS and QUOTED_IDENTIFIER set options are not changed since the handle is prepared. - // 8179: Could not find prepared statement with handle %d. - // 99586: Error used for testing. - return 1 == attempt && (586 == e.getErrorCode() || 8179 == e.getErrorCode() || 99586 == e.getErrorCode()); - } - /** * Consume the OUT parameter for the statement object itself. * @@ -573,14 +494,7 @@ boolean onRetValue(TDSReader tdsReader) throws SQLServerException { expectPrepStmtHandle = false; Parameter param = new Parameter(Util.shouldHonorAEForParameters(stmtColumnEncriptionSetting, connection)); param.skipRetValStatus(tdsReader); - - setPreparedStatementHandle(param.getInt(tdsReader)); - - // Cache the reference to the newly created handle, NOT for cursorable handles. 
- if (null == cachedPreparedStatementHandle && !isCursorable(executeMethod)) { - cachedPreparedStatementHandle = connection.registerCachedPreparedStatementHandle(new Sha1HashKey(preparedSQL, preparedTypeDefinitions), prepStmtHandle, executedSqlDirectly); - } - + prepStmtHandle = param.getInt(tdsReader); param.skipValue(tdsReader, true); if (getStatementLogger().isLoggable(java.util.logging.Level.FINER)) getStatementLogger().finer(toString() + ": Setting PreparedHandle:" + prepStmtHandle); @@ -616,7 +530,7 @@ void sendParamsByRPC(TDSWriter tdsWriter, private void buildServerCursorPrepExecParams(TDSWriter tdsWriter) throws SQLServerException { if (getStatementLogger().isLoggable(java.util.logging.Level.FINE)) - getStatementLogger().fine(toString() + ": calling sp_cursorprepexec: PreparedHandle:" + getPreparedStatementHandle() + ", SQL:" + preparedSQL); + getStatementLogger().fine(toString() + ": calling sp_cursorprepexec: PreparedHandle:" + prepStmtHandle + ", SQL:" + preparedSQL); expectPrepStmtHandle = true; executedSqlDirectly = false; @@ -631,8 +545,8 @@ private void buildServerCursorPrepExecParams(TDSWriter tdsWriter) throws SQLServ // // IN (reprepare): Old handle to unprepare before repreparing // OUT: The newly prepared handle - tdsWriter.writeRPCInt(null, new Integer(getPreparedStatementHandle()), true); - resetPrepStmtHandle(); + tdsWriter.writeRPCInt(null, new Integer(prepStmtHandle), true); + prepStmtHandle = 0; // OUT tdsWriter.writeRPCInt(null, new Integer(0), true); // cursor ID (OUTPUT) @@ -658,7 +572,7 @@ private void buildServerCursorPrepExecParams(TDSWriter tdsWriter) throws SQLServ private void buildPrepExecParams(TDSWriter tdsWriter) throws SQLServerException { if (getStatementLogger().isLoggable(java.util.logging.Level.FINE)) - getStatementLogger().fine(toString() + ": calling sp_prepexec: PreparedHandle:" + getPreparedStatementHandle() + ", SQL:" + preparedSQL); + getStatementLogger().fine(toString() + ": calling sp_prepexec: PreparedHandle:" + prepStmtHandle + ", SQL:" + preparedSQL); expectPrepStmtHandle = true; executedSqlDirectly = true; @@ -673,8 +587,8 @@ private void buildPrepExecParams(TDSWriter tdsWriter) throws SQLServerException // // IN (reprepare): Old handle to unprepare before repreparing // OUT: The newly prepared handle - tdsWriter.writeRPCInt(null, new Integer(getPreparedStatementHandle()), true); - resetPrepStmtHandle(); + tdsWriter.writeRPCInt(null, new Integer(prepStmtHandle), true); + prepStmtHandle = 0; // IN tdsWriter.writeRPCStringUnicode((preparedTypeDefinitions.length() > 0) ? preparedTypeDefinitions : null); @@ -698,7 +612,7 @@ private void buildExecSQLParams(TDSWriter tdsWriter) throws SQLServerException { tdsWriter.writeByte((byte) 0); // RPC procedure option 2 // No handle used. 
- resetPrepStmtHandle(); + prepStmtHandle = 0; // IN tdsWriter.writeRPCStringUnicode(preparedSQL); @@ -709,7 +623,7 @@ private void buildExecSQLParams(TDSWriter tdsWriter) throws SQLServerException { private void buildServerCursorExecParams(TDSWriter tdsWriter) throws SQLServerException { if (getStatementLogger().isLoggable(java.util.logging.Level.FINE)) - getStatementLogger().fine(toString() + ": calling sp_cursorexecute: PreparedHandle:" + getPreparedStatementHandle() + ", SQL:" + preparedSQL); + getStatementLogger().fine(toString() + ": calling sp_cursorexecute: PreparedHandle:" + prepStmtHandle + ", SQL:" + preparedSQL); expectPrepStmtHandle = false; executedSqlDirectly = false; @@ -722,8 +636,8 @@ private void buildServerCursorExecParams(TDSWriter tdsWriter) throws SQLServerEx tdsWriter.writeByte((byte) 0); // RPC procedure option 2 */ // IN - assert hasPreparedStatementHandle(); - tdsWriter.writeRPCInt(null, new Integer(getPreparedStatementHandle()), false); + assert 0 != prepStmtHandle; + tdsWriter.writeRPCInt(null, new Integer(prepStmtHandle), false); // OUT tdsWriter.writeRPCInt(null, new Integer(0), true); @@ -740,7 +654,7 @@ private void buildServerCursorExecParams(TDSWriter tdsWriter) throws SQLServerEx private void buildExecParams(TDSWriter tdsWriter) throws SQLServerException { if (getStatementLogger().isLoggable(java.util.logging.Level.FINE)) - getStatementLogger().fine(toString() + ": calling sp_execute: PreparedHandle:" + getPreparedStatementHandle() + ", SQL:" + preparedSQL); + getStatementLogger().fine(toString() + ": calling sp_execute: PreparedHandle:" + prepStmtHandle + ", SQL:" + preparedSQL); expectPrepStmtHandle = false; executedSqlDirectly = true; @@ -753,8 +667,8 @@ private void buildExecParams(TDSWriter tdsWriter) throws SQLServerException { tdsWriter.writeByte((byte) 0); // RPC procedure option 2 */ // IN - assert hasPreparedStatementHandle(); - tdsWriter.writeRPCInt(null, new Integer(getPreparedStatementHandle()), false); + assert 0 != prepStmtHandle; + tdsWriter.writeRPCInt(null, new Integer(prepStmtHandle), false); } private void getParameterEncryptionMetadata(Parameter[] params) throws SQLServerException { @@ -898,63 +812,14 @@ private void getParameterEncryptionMetadata(Parameter[] params) throws SQLServer connection.resetCurrentCommand(); } - /** Manage re-using cached handles */ - private boolean reuseCachedHandle(boolean hasNewTypeDefinitions, boolean discardCurrentCacheItem) { - - // No re-use of caching for cursorable statements (statements that WILL use sp_cursor*) - if (isCursorable(executeMethod)) - return false; - - // If current cache item should be discarded make sure it is not used again. - if (discardCurrentCacheItem && null != cachedPreparedStatementHandle) { - - cachedPreparedStatementHandle.removeReference(); - - // Make sure the cached handle does not get re-used more. - resetPrepStmtHandle(); - cachedPreparedStatementHandle.setIsExplicitlyDiscarded(); - cachedPreparedStatementHandle = null; - - return false; - } - - // New type definitions and existing cached handle reference then deregister cached handle. - if(hasNewTypeDefinitions) { - if (null != cachedPreparedStatementHandle && hasPreparedStatementHandle() && prepStmtHandle == cachedPreparedStatementHandle.getHandle()) { - cachedPreparedStatementHandle.removeReference(); - } - cachedPreparedStatementHandle = null; - } - - // Check for new cache reference. 
- if (null == cachedPreparedStatementHandle) { - PreparedStatementHandle cachedHandle = connection.getCachedPreparedStatementHandle(new Sha1HashKey(preparedSQL, preparedTypeDefinitions)); - - // If handle was found then re-use, only if AE is not on, or if it is on, make sure encryptionMetadataIsRetrieved is retrieved. - if (null != cachedHandle) { - if (!connection.isColumnEncryptionSettingEnabled() - || (connection.isColumnEncryptionSettingEnabled() && encryptionMetadataIsRetrieved)) { - if (cachedHandle.tryAddReference()) { - setPreparedStatementHandle(cachedHandle.getHandle()); - cachedPreparedStatementHandle = cachedHandle; - return true; - } - } - } - } - return false; - } - private boolean doPrepExec(TDSWriter tdsWriter, Parameter[] params, - boolean hasNewTypeDefinitions, - boolean hasExistingTypeDefinitions) throws SQLServerException { - - boolean needsPrepare = (hasNewTypeDefinitions && hasExistingTypeDefinitions) || !hasPreparedStatementHandle(); + boolean hasNewTypeDefinitions) throws SQLServerException { + + boolean needsPrepare = hasNewTypeDefinitions || 0 == prepStmtHandle; - // Cursors don't use statement pooling. + // Cursors never go the non-prepared statement route. if (isCursorable(executeMethod)) { - if (needsPrepare) buildServerCursorPrepExecParams(tdsWriter); else @@ -963,10 +828,7 @@ private boolean doPrepExec(TDSWriter tdsWriter, else { // Move overhead of needing to do prepare & unprepare to only use cases that need more than one execution. // First execution, use sp_executesql, optimizing for asumption we will not re-use statement. - if (needsPrepare - && !connection.getEnablePrepareOnFirstPreparedStatementCall() - && !isExecutedAtLeastOnce - ) { + if (!connection.getEnablePrepareOnFirstPreparedStatementCall() && !isExecutedAtLeastOnce) { buildExecSQLParams(tdsWriter); isExecutedAtLeastOnce = true; } @@ -1015,7 +877,10 @@ else if (resultSet != null) { * @return the result set containing the meta data */ /* L0 */ private ResultSet buildExecuteMetaData() throws SQLServerException { - String fmtSQL = userSQL; + String fmtSQL = sqlCommand; + if (fmtSQL.indexOf(LEFT_CURLY_BRACKET) >= 0) { + fmtSQL = (new JDBCSyntaxTranslator()).translate(fmtSQL); + } ResultSet emptyResultSet = null; try { @@ -2557,10 +2422,8 @@ final void doExecutePreparedStatementBatch(PrepStmtBatchExecCmd batchCommand) th assert paramValues.length == batchParam.length; for (int i = 0; i < paramValues.length; i++) batchParam[i] = paramValues[i]; - - boolean hasExistingTypeDefinitions = preparedTypeDefinitions != null; - boolean hasNewTypeDefinitions = buildPreparedStrings(batchParam, false); + boolean hasNewTypeDefinitions = buildPreparedStrings(batchParam, false); // Get the encryption metadata for the first batch only. if ((0 == numBatchesExecuted) && (Util.shouldHonorAEForParameters(stmtColumnEncriptionSetting, connection)) && (0 < batchParam.length) && !isInternalEncryptionQuery && !encryptionMetadataIsRetrieved) { @@ -2583,108 +2446,73 @@ final void doExecutePreparedStatementBatch(PrepStmtBatchExecCmd batchCommand) th } } - // Retry execution if existing handle could not be re-used. - for(int attempt = 1; attempt <= 2; ++attempt) { - - try { + if (numBatchesExecuted < numBatchesPrepared) { + // assert null != tdsWriter; + tdsWriter.writeByte((byte) nBatchStatementDelimiter); + } + else { + resetForReexecute(); + tdsWriter = batchCommand.startRequest(TDS.PKT_RPC); + } - // Re-use handle if available, requires parameter definitions which are not available until here. 
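// [Editorial aside, not part of the patch] With the handle cache and retry logic removed, the
// simplified doPrepExec above reduces to the following decision, sketched here with the names
// used in this diff (a hedged summary, not driver code):
//
//     if (isCursorable(executeMethod)) {
//         // server-cursor path: sp_cursorprepexec to (re)prepare, sp_cursorexecute otherwise
//     } else if (!connection.getEnablePrepareOnFirstPreparedStatementCall() && !isExecutedAtLeastOnce) {
//         // first execution: plain sp_executesql, no server handle is created yet
//     } else if (hasNewTypeDefinitions || 0 == prepStmtHandle) {
//         // (re)prepare with sp_prepexec and keep the returned handle for later executions
//     } else {
//         // re-execute the existing handle with sp_execute
//     }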
- if (reuseCachedHandle(hasNewTypeDefinitions, 1 < attempt)) { - hasNewTypeDefinitions = false; - } - - if (numBatchesExecuted < numBatchesPrepared) { - // assert null != tdsWriter; - tdsWriter.writeByte((byte) nBatchStatementDelimiter); + // If we have to (re)prepare the statement then we must execute it so + // that we get back a (new) prepared statement handle to use to + // execute additional batches. + // + // We must always prepare the statement the first time through. + // But we may also need to reprepare the statement if, for example, + // the size of a batch's string parameter values changes such + // that repreparation is necessary. + ++numBatchesPrepared; + if (doPrepExec(tdsWriter, batchParam, hasNewTypeDefinitions) || numBatchesPrepared == numBatches) { + ensureExecuteResultsReader(batchCommand.startResponse(getIsResponseBufferingAdaptive())); + + while (numBatchesExecuted < numBatchesPrepared) { + // NOTE: + // When making changes to anything below, consider whether similar changes need + // to be made to Statement batch execution. + + startResults(); + + try { + // Get the first result from the batch. If there is no result for this batch + // then bail, leaving EXECUTE_FAILED in the current and remaining slots of + // the update count array. + if (!getNextResult()) + return; + + // If the result is a ResultSet (rather than an update count) then throw an + // exception for this result. The exception gets caught immediately below and + // translated into (or added to) a BatchUpdateException. + if (null != resultSet) { + SQLServerException.makeFromDriverError(connection, this, SQLServerException.getErrString("R_resultsetGeneratedForUpdate"), + null, false); + } } - else { - resetForReexecute(); - tdsWriter = batchCommand.startRequest(TDS.PKT_RPC); + catch (SQLServerException e) { + // If the failure was severe enough to close the connection or roll back a + // manual transaction, then propagate the error up as a SQLServerException + // now, rather than continue with the batch. + if (connection.isSessionUnAvailable() || connection.rolledBackTransaction()) + throw e; + + // Otherwise, the connection is OK and the transaction is still intact, + // so just record the failure for the particular batch item. + updateCount = Statement.EXECUTE_FAILED; + if (null == batchCommand.batchException) + batchCommand.batchException = e; } - // If we have to (re)prepare the statement then we must execute it so - // that we get back a (new) prepared statement handle to use to - // execute additional batches. - // - // We must always prepare the statement the first time through. - // But we may also need to reprepare the statement if, for example, - // the size of a batch's string parameter values changes such - // that repreparation is necessary. - ++numBatchesPrepared; - - if (doPrepExec(tdsWriter, batchParam, hasNewTypeDefinitions, hasExistingTypeDefinitions) || numBatchesPrepared == numBatches) { - ensureExecuteResultsReader(batchCommand.startResponse(getIsResponseBufferingAdaptive())); - - boolean retry = false; - while (numBatchesExecuted < numBatchesPrepared) { - // NOTE: - // When making changes to anything below, consider whether similar changes need - // to be made to Statement batch execution. - - startResults(); - - try { - // Get the first result from the batch. If there is no result for this batch - // then bail, leaving EXECUTE_FAILED in the current and remaining slots of - // the update count array. 
- if (!getNextResult()) - return; - - // If the result is a ResultSet (rather than an update count) then throw an - // exception for this result. The exception gets caught immediately below and - // translated into (or added to) a BatchUpdateException. - if (null != resultSet) { - SQLServerException.makeFromDriverError(connection, this, SQLServerException.getErrString("R_resultsetGeneratedForUpdate"), - null, false); - } - } - catch (SQLServerException e) { - // If the failure was severe enough to close the connection or roll back a - // manual transaction, then propagate the error up as a SQLServerException - // now, rather than continue with the batch. - if (connection.isSessionUnAvailable() || connection.rolledBackTransaction()) - throw e; - - // Retry if invalid handle exception. - if (retryBasedOnFailedReuseOfCachedHandle(e, attempt)) { - // Reset number of batches prepared. - numBatchesPrepared = numBatchesExecuted; - retry = true; - break; - } - - // Otherwise, the connection is OK and the transaction is still intact, - // so just record the failure for the particular batch item. - updateCount = Statement.EXECUTE_FAILED; - if (null == batchCommand.batchException) - batchCommand.batchException = e; - } - - // In batch execution, we have a special update count - // to indicate that no information was returned - batchCommand.updateCounts[numBatchesExecuted] = (-1 == updateCount) ? Statement.SUCCESS_NO_INFO : updateCount; - processBatch(); - - numBatchesExecuted++; - } - if(retry) - continue; + // In batch execution, we have a special update count + // to indicate that no information was returned + batchCommand.updateCounts[numBatchesExecuted++] = (-1 == updateCount) ? Statement.SUCCESS_NO_INFO : updateCount; - // Only way to proceed with preparing the next set of batches is if - // we successfully executed the previously prepared set. - assert numBatchesExecuted == numBatchesPrepared; - } + processBatch(); } - catch(SQLException e) { - if (retryBasedOnFailedReuseOfCachedHandle(e, attempt)) { - // Reset number of batches prepared. - numBatchesPrepared = numBatchesExecuted; - continue; - } - else - throw e; - } - break; + + // Only way to proceed with preparing the next set of batches is if + // we successfully executed the previously prepared set. + assert numBatchesExecuted == numBatchesPrepared; } } } @@ -2962,41 +2790,14 @@ public final void setNull(int paramIndex, loggerExternal.exiting(getClassNameLogging(), "setNull"); } - /** - * Returns parameter metadata for the prepared statement. - * - * @param forceRefresh: - * If true the cache will not be used to retrieve the metadata. - * - * @return - * Per the description. 
- * - * @throws SQLServerException when an error occurs - */ - public final ParameterMetaData getParameterMetaData(boolean forceRefresh) throws SQLServerException { - - SQLServerParameterMetaData pmd = this.connection.getCachedParameterMetadata(sqlTextCacheKey); - - if (!forceRefresh && null != pmd) { - return pmd; - } - else { - loggerExternal.entering(getClassNameLogging(), "getParameterMetaData"); - checkClosed(); - pmd = new SQLServerParameterMetaData(this, userSQL); - - connection.registerCachedParameterMetadata(sqlTextCacheKey, pmd); - - loggerExternal.exiting(getClassNameLogging(), "getParameterMetaData", pmd); - - return pmd; - } - } - /* JDBC 3.0 */ /* L3 */ public final ParameterMetaData getParameterMetaData() throws SQLServerException { - return getParameterMetaData(false); + loggerExternal.entering(getClassNameLogging(), "getParameterMetaData"); + checkClosed(); + SQLServerParameterMetaData pmd = new SQLServerParameterMetaData(this, userSQL); + loggerExternal.exiting(getClassNameLogging(), "getParameterMetaData", pmd); + return pmd; } /* L3 */ public final void setURL(int parameterIndex, diff --git a/src/main/java/com/microsoft/sqlserver/jdbc/SQLServerResource.java b/src/main/java/com/microsoft/sqlserver/jdbc/SQLServerResource.java index 4386a6c5c..d4cc1bb7d 100644 --- a/src/main/java/com/microsoft/sqlserver/jdbc/SQLServerResource.java +++ b/src/main/java/com/microsoft/sqlserver/jdbc/SQLServerResource.java @@ -190,7 +190,6 @@ protected Object[][] getContents() { {"R_socketTimeoutPropertyDescription", "The number of milliseconds to wait before the java.net.SocketTimeoutException is raised."}, {"R_serverPreparedStatementDiscardThresholdPropertyDescription", "The threshold for when to close discarded prepare statements on the server (calling a batch of sp_unprepares). A value of 1 or less will cause sp_unprepare to be called immediately on PreparedStatment close."}, {"R_enablePrepareOnFirstPreparedStatementCallPropertyDescription", "This setting specifies whether a prepared statement is prepared (sp_prepexec) on first use (property=true) or on second after first calling sp_executesql (property=false)."}, - {"R_statementPoolingCacheSizePropertyDescription", "This setting specifies the size of the prepared statement cache for a conection. A value less than 1 means no cache."}, {"R_gsscredentialPropertyDescription", "Impersonated GSS Credential to access SQL Server."}, {"R_noParserSupport", "An error occurred while instantiating the required parser. Error: \"{0}\""}, {"R_writeOnlyXML", "Cannot read from this SQLXML instance. This instance is for writing data only."}, @@ -380,7 +379,6 @@ protected Object[][] getContents() { {"R_invalidFipsEncryptConfig", "Could not enable FIPS due to either encrypt is not true or using trusted certificate settings."}, {"R_invalidFipsProviderConfig", "Could not enable FIPS due to invalid FIPSProvider or TrustStoreType."}, {"R_serverPreparedStatementDiscardThreshold", "The serverPreparedStatementDiscardThreshold {0} is not valid."}, - {"R_statementPoolingCacheSize", "The statementPoolingCacheSize {0} is not valid."}, {"R_kerberosLoginFailedForUsername", "Cannot login with Kerberos principal {0}, check your credentials. 
{1}"}, {"R_kerberosLoginFailed", "Kerberos Login failed: {0} due to {1} ({2})"}, {"R_StoredProcedureNotFound", "Could not find stored procedure ''{0}''."}, diff --git a/src/main/java/com/microsoft/sqlserver/jdbc/SQLServerStatement.java b/src/main/java/com/microsoft/sqlserver/jdbc/SQLServerStatement.java index a6a4e93ec..a49a8832d 100644 --- a/src/main/java/com/microsoft/sqlserver/jdbc/SQLServerStatement.java +++ b/src/main/java/com/microsoft/sqlserver/jdbc/SQLServerStatement.java @@ -8,9 +8,6 @@ package com.microsoft.sqlserver.jdbc; -import static com.microsoft.sqlserver.jdbc.SQLServerConnection.getCachedParsedSQL; -import static com.microsoft.sqlserver.jdbc.SQLServerConnection.parseAndCacheSQL; - import java.sql.BatchUpdateException; import java.sql.ResultSet; import java.sql.SQLException; @@ -27,8 +24,6 @@ import java.util.regex.Matcher; import java.util.regex.Pattern; -import com.microsoft.sqlserver.jdbc.SQLServerConnection.Sha1HashKey; - /** * SQLServerStatment provides the basic implementation of JDBC statement functionality. It also provides a number of base class implementation methods * for the JDBC prepared statement and callable Statements. SQLServerStatement's basic role is to execute SQL statements and return update counts and @@ -766,17 +761,10 @@ final void processResponse(TDSReader tdsReader) throws SQLServerException { private String ensureSQLSyntax(String sql) throws SQLServerException { if (sql.indexOf(LEFT_CURLY_BRACKET) >= 0) { - - Sha1HashKey cacheKey = new Sha1HashKey(sql); - - // Check for cached SQL metadata. - ParsedSQLCacheItem cacheItem = getCachedParsedSQL(cacheKey); - if (null == cacheItem) - cacheItem = parseAndCacheSQL(cacheKey, sql); - - // Retrieve from cache item. - procedureName = cacheItem.procedureName; - return cacheItem.processedSQL; + JDBCSyntaxTranslator translator = new JDBCSyntaxTranslator(); + String execSyntax = translator.translate(sql); + procedureName = translator.getProcedureName(); + return execSyntax; } return sql; diff --git a/src/main/java/mssql/googlecode/concurrentlinkedhashmap/ConcurrentLinkedHashMap.java b/src/main/java/mssql/googlecode/concurrentlinkedhashmap/ConcurrentLinkedHashMap.java deleted file mode 100644 index a52c70e7b..000000000 --- a/src/main/java/mssql/googlecode/concurrentlinkedhashmap/ConcurrentLinkedHashMap.java +++ /dev/null @@ -1,1582 +0,0 @@ -/* - * Copyright 2010 Google Inc. All Rights Reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package mssql.googlecode.concurrentlinkedhashmap; - -import static mssql.googlecode.concurrentlinkedhashmap.ConcurrentLinkedHashMap.DrainStatus.IDLE; -import static mssql.googlecode.concurrentlinkedhashmap.ConcurrentLinkedHashMap.DrainStatus.PROCESSING; -import static mssql.googlecode.concurrentlinkedhashmap.ConcurrentLinkedHashMap.DrainStatus.REQUIRED; -import static java.util.Collections.emptyList; -import static java.util.Collections.unmodifiableMap; -import static java.util.Collections.unmodifiableSet; - -import java.io.InvalidObjectException; -import java.io.ObjectInputStream; -import java.io.Serializable; -import java.util.AbstractCollection; -import java.util.AbstractMap; -import java.util.AbstractQueue; -import java.util.AbstractSet; -import java.util.Collection; -import java.util.HashMap; -import java.util.Iterator; -import java.util.LinkedHashMap; -import java.util.LinkedHashSet; -import java.util.Map; -import java.util.Queue; -import java.util.Set; -import java.util.concurrent.ConcurrentHashMap; -import java.util.concurrent.ConcurrentLinkedQueue; -import java.util.concurrent.ConcurrentMap; -import java.util.concurrent.atomic.AtomicLong; -import java.util.concurrent.atomic.AtomicReference; -import java.util.concurrent.locks.Lock; -import java.util.concurrent.locks.ReentrantLock; - -/** - * A hash table supporting full concurrency of retrievals, adjustable expected - * concurrency for updates, and a maximum capacity to bound the map by. This - * implementation differs from {@link ConcurrentHashMap} in that it maintains a - * page replacement algorithm that is used to evict an entry when the map has - * exceeded its capacity. Unlike the Java Collections Framework, this - * map does not have a publicly visible constructor and instances are created - * through a {@link Builder}. - *
- * An entry is evicted from the map when the weighted capacity exceeds - * its maximum weighted capacity threshold. A {@link EntryWeigher} - * determines how many units of capacity that an entry consumes. The default - * weigher assigns each value a weight of 1 to bound the map by the - * total number of key-value pairs. A map that holds collections may choose to - * weigh values by the number of elements in the collection and bound the map - * by the total number of elements that it contains. A change to a value that - * modifies its weight requires that an update operation is performed on the - * map. - *
- * An {@link EvictionListener} may be supplied for notification when an entry - * is evicted from the map. This listener is invoked on a caller's thread and - * will not block other threads from operating on the map. An implementation - * should be aware that the caller's thread will not expect long execution - * times or failures as a side effect of the listener being notified. Execution - * safety and a fast turn around time can be achieved by performing the - * operation asynchronously, such as by submitting a task to an - * {@link java.util.concurrent.ExecutorService}. - *
- * The concurrency level determines the number of threads that can - * concurrently modify the table. Using a significantly higher or lower value - * than needed can waste space or lead to thread contention, but an estimate - * within an order of magnitude of the ideal value does not usually have a - * noticeable impact. Because placement in hash tables is essentially random, - * the actual concurrency will vary. - *
- * This class and its views and iterators implement all of the - * optional methods of the {@link Map} and {@link Iterator} - * interfaces. - *
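// [Editorial aside, not part of the patch] For context on the class being removed in this hunk:
// callers obtained instances through the nested Builder rather than a constructor, optionally
// registering an EvictionListener as described in this Javadoc. A hypothetical usage sketch
// (capacity value and listener body invented for illustration):
//
//     ConcurrentLinkedHashMap<String, Integer> cache =
//             new ConcurrentLinkedHashMap.Builder<String, Integer>()
//                     .maximumWeightedCapacity(100)       // default weigher: bound by entry count
//                     .listener((key, value) -> System.out.println("evicted " + key))
//                     .build();
//     cache.put("a", 1);                                  // LRU entries are evicted once over capacity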
- * Like {@link java.util.Hashtable} but unlike {@link HashMap}, this class - * does not allow null to be used as a key or value. Unlike - * {@link java.util.LinkedHashMap}, this class does not provide - * predictable iteration order. A snapshot of the keys and entries may be - * obtained in ascending and descending order of retention. - * - * @author ben.manes@gmail.com (Ben Manes) - * @param the type of keys maintained by this map - * @param the type of mapped values - * @see - * http://code.google.com/p/concurrentlinkedhashmap/ - */ -public final class ConcurrentLinkedHashMap extends AbstractMap - implements ConcurrentMap, Serializable { - - /* - * This class performs a best-effort bounding of a ConcurrentHashMap using a - * page-replacement algorithm to determine which entries to evict when the - * capacity is exceeded. - * - * The page replacement algorithm's data structures are kept eventually - * consistent with the map. An update to the map and recording of reads may - * not be immediately reflected on the algorithm's data structures. These - * structures are guarded by a lock and operations are applied in batches to - * avoid lock contention. The penalty of applying the batches is spread across - * threads so that the amortized cost is slightly higher than performing just - * the ConcurrentHashMap operation. - * - * A memento of the reads and writes that were performed on the map are - * recorded in buffers. These buffers are drained at the first opportunity - * after a write or when the read buffer exceeds a threshold size. The reads - * are recorded in a lossy buffer, allowing the reordering operations to be - * discarded if the draining process cannot keep up. Due to the concurrent - * nature of the read and write operations a strict policy ordering is not - * possible, but is observably strict when single threaded. - * - * Due to a lack of a strict ordering guarantee, a task can be executed - * out-of-order, such as a removal followed by its addition. The state of the - * entry is encoded within the value's weight. - * - * Alive: The entry is in both the hash-table and the page replacement policy. - * This is represented by a positive weight. - * - * Retired: The entry is not in the hash-table and is pending removal from the - * page replacement policy. This is represented by a negative weight. - * - * Dead: The entry is not in the hash-table and is not in the page replacement - * policy. This is represented by a weight of zero. - * - * The Least Recently Used page replacement algorithm was chosen due to its - * simplicity, high hit rate, and ability to be implemented with O(1) time - * complexity. - */ - - /** The number of CPUs */ - static final int NCPU = Runtime.getRuntime().availableProcessors(); - - /** The maximum weighted capacity of the map. */ - static final long MAXIMUM_CAPACITY = Long.MAX_VALUE - Integer.MAX_VALUE; - - /** The number of read buffers to use. */ - static final int NUMBER_OF_READ_BUFFERS = ceilingNextPowerOfTwo(NCPU); - - /** Mask value for indexing into the read buffers. */ - static final int READ_BUFFERS_MASK = NUMBER_OF_READ_BUFFERS - 1; - - /** The number of pending read operations before attempting to drain. */ - static final int READ_BUFFER_THRESHOLD = 32; - - /** The maximum number of read operations to perform per amortized drain. */ - static final int READ_BUFFER_DRAIN_THRESHOLD = 2 * READ_BUFFER_THRESHOLD; - - /** The maximum number of pending reads per buffer. 
*/ - static final int READ_BUFFER_SIZE = 2 * READ_BUFFER_DRAIN_THRESHOLD; - - /** Mask value for indexing into the read buffer. */ - static final int READ_BUFFER_INDEX_MASK = READ_BUFFER_SIZE - 1; - - /** The maximum number of write operations to perform per amortized drain. */ - static final int WRITE_BUFFER_DRAIN_THRESHOLD = 16; - - /** A queue that discards all entries. */ - static final Queue DISCARDING_QUEUE = new DiscardingQueue(); - - static int ceilingNextPowerOfTwo(int x) { - // From Hacker's Delight, Chapter 3, Harry S. Warren Jr. - return 1 << (Integer.SIZE - Integer.numberOfLeadingZeros(x - 1)); - } - - // The backing data store holding the key-value associations - final ConcurrentMap> data; - final int concurrencyLevel; - - // These fields provide support to bound the map by a maximum capacity - final long[] readBufferReadCount; - final LinkedDeque> evictionDeque; - - final AtomicLong weightedSize; - final AtomicLong capacity; - - final Lock evictionLock; - final Queue writeBuffer; - final AtomicLong[] readBufferWriteCount; - final AtomicLong[] readBufferDrainAtWriteCount; - final AtomicReference>[][] readBuffers; - - final AtomicReference drainStatus; - final EntryWeigher weigher; - - // These fields provide support for notifying a listener. - final Queue> pendingNotifications; - final EvictionListener listener; - - transient Set keySet; - transient Collection values; - transient Set> entrySet; - - /** - * Creates an instance based on the builder's configuration. - */ - @SuppressWarnings({"unchecked", "cast"}) - private ConcurrentLinkedHashMap(Builder builder) { - // The data store and its maximum capacity - concurrencyLevel = builder.concurrencyLevel; - capacity = new AtomicLong(Math.min(builder.capacity, MAXIMUM_CAPACITY)); - data = new ConcurrentHashMap>(builder.initialCapacity, 0.75f, concurrencyLevel); - - // The eviction support - weigher = builder.weigher; - evictionLock = new ReentrantLock(); - weightedSize = new AtomicLong(); - evictionDeque = new LinkedDeque>(); - writeBuffer = new ConcurrentLinkedQueue(); - drainStatus = new AtomicReference(IDLE); - - readBufferReadCount = new long[NUMBER_OF_READ_BUFFERS]; - readBufferWriteCount = new AtomicLong[NUMBER_OF_READ_BUFFERS]; - readBufferDrainAtWriteCount = new AtomicLong[NUMBER_OF_READ_BUFFERS]; - readBuffers = new AtomicReference[NUMBER_OF_READ_BUFFERS][READ_BUFFER_SIZE]; - for (int i = 0; i < NUMBER_OF_READ_BUFFERS; i++) { - readBufferWriteCount[i] = new AtomicLong(); - readBufferDrainAtWriteCount[i] = new AtomicLong(); - readBuffers[i] = new AtomicReference[READ_BUFFER_SIZE]; - for (int j = 0; j < READ_BUFFER_SIZE; j++) { - readBuffers[i][j] = new AtomicReference>(); - } - } - - // The notification queue and listener - listener = builder.listener; - pendingNotifications = (listener == DiscardingListener.INSTANCE) - ? (Queue>) DISCARDING_QUEUE - : new ConcurrentLinkedQueue>(); - } - - /** Ensures that the object is not null. */ - static void checkNotNull(Object o) { - if (o == null) { - throw new NullPointerException(); - } - } - - /** Ensures that the argument expression is true. */ - static void checkArgument(boolean expression) { - if (!expression) { - throw new IllegalArgumentException(); - } - } - - /** Ensures that the state expression is true. */ - static void checkState(boolean expression) { - if (!expression) { - throw new IllegalStateException(); - } - } - - /* ---------------- Eviction Support -------------- */ - - /** - * Retrieves the maximum weighted capacity of the map. 
- * - * @return the maximum weighted capacity - */ - public long capacity() { - return capacity.get(); - } - - /** - * Sets the maximum weighted capacity of the map and eagerly evicts entries - * until it shrinks to the appropriate size. - * - * @param capacity the maximum weighted capacity of the map - * @throws IllegalArgumentException if the capacity is negative - */ - public void setCapacity(long capacity) { - checkArgument(capacity >= 0); - evictionLock.lock(); - try { - this.capacity.lazySet(Math.min(capacity, MAXIMUM_CAPACITY)); - drainBuffers(); - evict(); - } finally { - evictionLock.unlock(); - } - notifyListener(); - } - - /** Determines whether the map has exceeded its capacity. */ - boolean hasOverflowed() { - return weightedSize.get() > capacity.get(); - } - - /** - * Evicts entries from the map while it exceeds the capacity and appends - * evicted entries to the notification queue for processing. - */ - void evict() { - // Attempts to evict entries from the map if it exceeds the maximum - // capacity. If the eviction fails due to a concurrent removal of the - // victim, that removal may cancel out the addition that triggered this - // eviction. The victim is eagerly unlinked before the removal task so - // that if an eviction is still required then a new victim will be chosen - // for removal. - while (hasOverflowed()) { - final Node node = evictionDeque.poll(); - - // If weighted values are used, then the pending operations will adjust - // the size to reflect the correct weight - if (node == null) { - return; - } - - // Notify the listener only if the entry was evicted - if (data.remove(node.key, node)) { - pendingNotifications.add(node); - } - - makeDead(node); - } - } - - /** - * Performs the post-processing work required after a read. - * - * @param node the entry in the page replacement policy - */ - void afterRead(Node node) { - final int bufferIndex = readBufferIndex(); - final long writeCount = recordRead(bufferIndex, node); - drainOnReadIfNeeded(bufferIndex, writeCount); - notifyListener(); - } - - /** Returns the index to the read buffer to record into. */ - static int readBufferIndex() { - // A buffer is chosen by the thread's id so that tasks are distributed in a - // pseudo evenly manner. This helps avoid hot entries causing contention - // due to other threads trying to append to the same buffer. - return ((int) Thread.currentThread().getId()) & READ_BUFFERS_MASK; - } - - /** - * Records a read in the buffer and return its write count. - * - * @param bufferIndex the index to the chosen read buffer - * @param node the entry in the page replacement policy - * @return the number of writes on the chosen read buffer - */ - long recordRead(int bufferIndex, Node node) { - // The location in the buffer is chosen in a racy fashion as the increment - // is not atomic with the insertion. This means that concurrent reads can - // overlap and overwrite one another, resulting in a lossy buffer. - final AtomicLong counter = readBufferWriteCount[bufferIndex]; - final long writeCount = counter.get(); - counter.lazySet(writeCount + 1); - - final int index = (int) (writeCount & READ_BUFFER_INDEX_MASK); - readBuffers[bufferIndex][index].lazySet(node); - - return writeCount; - } - - /** - * Attempts to drain the buffers if it is determined to be needed when - * post-processing a read. 
- * - * @param bufferIndex the index to the chosen read buffer - * @param writeCount the number of writes on the chosen read buffer - */ - void drainOnReadIfNeeded(int bufferIndex, long writeCount) { - final long pending = (writeCount - readBufferDrainAtWriteCount[bufferIndex].get()); - final boolean delayable = (pending < READ_BUFFER_THRESHOLD); - final DrainStatus status = drainStatus.get(); - if (status.shouldDrainBuffers(delayable)) { - tryToDrainBuffers(); - } - } - - /** - * Performs the post-processing work required after a write. - * - * @param task the pending operation to be applied - */ - void afterWrite(Runnable task) { - writeBuffer.add(task); - drainStatus.lazySet(REQUIRED); - tryToDrainBuffers(); - notifyListener(); - } - - /** - * Attempts to acquire the eviction lock and apply the pending operations, up - * to the amortized threshold, to the page replacement policy. - */ - void tryToDrainBuffers() { - if (evictionLock.tryLock()) { - try { - drainStatus.lazySet(PROCESSING); - drainBuffers(); - } finally { - drainStatus.compareAndSet(PROCESSING, IDLE); - evictionLock.unlock(); - } - } - } - - /** Drains the read and write buffers up to an amortized threshold. */ - void drainBuffers() { - drainReadBuffers(); - drainWriteBuffer(); - } - - /** Drains the read buffers, each up to an amortized threshold. */ - void drainReadBuffers() { - final int start = (int) Thread.currentThread().getId(); - final int end = start + NUMBER_OF_READ_BUFFERS; - for (int i = start; i < end; i++) { - drainReadBuffer(i & READ_BUFFERS_MASK); - } - } - - /** Drains the read buffer up to an amortized threshold. */ - void drainReadBuffer(int bufferIndex) { - final long writeCount = readBufferWriteCount[bufferIndex].get(); - for (int i = 0; i < READ_BUFFER_DRAIN_THRESHOLD; i++) { - final int index = (int) (readBufferReadCount[bufferIndex] & READ_BUFFER_INDEX_MASK); - final AtomicReference> slot = readBuffers[bufferIndex][index]; - final Node node = slot.get(); - if (node == null) { - break; - } - - slot.lazySet(null); - applyRead(node); - readBufferReadCount[bufferIndex]++; - } - readBufferDrainAtWriteCount[bufferIndex].lazySet(writeCount); - } - - /** Updates the node's location in the page replacement policy. */ - void applyRead(Node node) { - // An entry may be scheduled for reordering despite having been removed. - // This can occur when the entry was concurrently read while a writer was - // removing it. If the entry is no longer linked then it does not need to - // be processed. - if (evictionDeque.contains(node)) { - evictionDeque.moveToBack(node); - } - } - - /** Drains the read buffer up to an amortized threshold. */ - void drainWriteBuffer() { - for (int i = 0; i < WRITE_BUFFER_DRAIN_THRESHOLD; i++) { - final Runnable task = writeBuffer.poll(); - if (task == null) { - break; - } - task.run(); - } - } - - /** - * Attempts to transition the node from the alive state to the - * retired state. - * - * @param node the entry in the page replacement policy - * @param expect the expected weighted value - * @return if successful - */ - boolean tryToRetire(Node node, WeightedValue expect) { - if (expect.isAlive()) { - final WeightedValue retired = new WeightedValue(expect.value, -expect.weight); - return node.compareAndSet(expect, retired); - } - return false; - } - - /** - * Atomically transitions the node from the alive state to the - * retired state, if a valid transition. 
- * - * @param node the entry in the page replacement policy - */ - void makeRetired(Node node) { - for (;;) { - final WeightedValue current = node.get(); - if (!current.isAlive()) { - return; - } - final WeightedValue retired = new WeightedValue(current.value, -current.weight); - if (node.compareAndSet(current, retired)) { - return; - } - } - } - - /** - * Atomically transitions the node to the dead state and decrements - * the weightedSize. - * - * @param node the entry in the page replacement policy - */ - void makeDead(Node node) { - for (;;) { - WeightedValue current = node.get(); - WeightedValue dead = new WeightedValue(current.value, 0); - if (node.compareAndSet(current, dead)) { - weightedSize.lazySet(weightedSize.get() - Math.abs(current.weight)); - return; - } - } - } - - /** Notifies the listener of entries that were evicted. */ - void notifyListener() { - Node node; - while ((node = pendingNotifications.poll()) != null) { - listener.onEviction(node.key, node.getValue()); - } - } - - /** Adds the node to the page replacement policy. */ - final class AddTask implements Runnable { - final Node node; - final int weight; - - AddTask(Node node, int weight) { - this.weight = weight; - this.node = node; - } - - @Override - public void run() { - weightedSize.lazySet(weightedSize.get() + weight); - - // ignore out-of-order write operations - if (node.get().isAlive()) { - evictionDeque.add(node); - evict(); - } - } - } - - /** Removes a node from the page replacement policy. */ - final class RemovalTask implements Runnable { - final Node node; - - RemovalTask(Node node) { - this.node = node; - } - - @Override - public void run() { - // add may not have been processed yet - evictionDeque.remove(node); - makeDead(node); - } - } - - /** Updates the weighted size and evicts an entry on overflow. */ - final class UpdateTask implements Runnable { - final int weightDifference; - final Node node; - - public UpdateTask(Node node, int weightDifference) { - this.weightDifference = weightDifference; - this.node = node; - } - - @Override - public void run() { - weightedSize.lazySet(weightedSize.get() + weightDifference); - applyRead(node); - evict(); - } - } - - /* ---------------- Concurrent Map Support -------------- */ - - @Override - public boolean isEmpty() { - return data.isEmpty(); - } - - @Override - public int size() { - return data.size(); - } - - /** - * Returns the weighted size of this map. 
- * - * @return the combined weight of the values in this map - */ - public long weightedSize() { - return Math.max(0, weightedSize.get()); - } - - @Override - public void clear() { - evictionLock.lock(); - try { - // Discard all entries - Node node; - while ((node = evictionDeque.poll()) != null) { - data.remove(node.key, node); - makeDead(node); - } - - // Discard all pending reads - for (AtomicReference>[] buffer : readBuffers) { - for (AtomicReference> slot : buffer) { - slot.lazySet(null); - } - } - - // Apply all pending writes - Runnable task; - while ((task = writeBuffer.poll()) != null) { - task.run(); - } - } finally { - evictionLock.unlock(); - } - } - - @Override - public boolean containsKey(Object key) { - return data.containsKey(key); - } - - @Override - public boolean containsValue(Object value) { - checkNotNull(value); - - for (Node node : data.values()) { - if (node.getValue().equals(value)) { - return true; - } - } - return false; - } - - @Override - public V get(Object key) { - final Node node = data.get(key); - if (node == null) { - return null; - } - afterRead(node); - return node.getValue(); - } - - /** - * Returns the value to which the specified key is mapped, or {@code null} - * if this map contains no mapping for the key. This method differs from - * {@link #get(Object)} in that it does not record the operation with the - * page replacement policy. - * - * @param key the key whose associated value is to be returned - * @return the value to which the specified key is mapped, or - * {@code null} if this map contains no mapping for the key - * @throws NullPointerException if the specified key is null - */ - public V getQuietly(Object key) { - final Node node = data.get(key); - return (node == null) ? null : node.getValue(); - } - - @Override - public V put(K key, V value) { - return put(key, value, false); - } - - @Override - public V putIfAbsent(K key, V value) { - return put(key, value, true); - } - - /** - * Adds a node to the list and the data store. If an existing node is found, - * then its value is updated if allowed. 
- * - * @param key key with which the specified value is to be associated - * @param value value to be associated with the specified key - * @param onlyIfAbsent a write is performed only if the key is not already - * associated with a value - * @return the prior value in the data store or null if no mapping was found - */ - V put(K key, V value, boolean onlyIfAbsent) { - checkNotNull(key); - checkNotNull(value); - - final int weight = weigher.weightOf(key, value); - final WeightedValue weightedValue = new WeightedValue(value, weight); - final Node node = new Node(key, weightedValue); - - for (;;) { - final Node prior = data.putIfAbsent(node.key, node); - if (prior == null) { - afterWrite(new AddTask(node, weight)); - return null; - } else if (onlyIfAbsent) { - afterRead(prior); - return prior.getValue(); - } - for (;;) { - final WeightedValue oldWeightedValue = prior.get(); - if (!oldWeightedValue.isAlive()) { - break; - } - - if (prior.compareAndSet(oldWeightedValue, weightedValue)) { - final int weightedDifference = weight - oldWeightedValue.weight; - if (weightedDifference == 0) { - afterRead(prior); - } else { - afterWrite(new UpdateTask(prior, weightedDifference)); - } - return oldWeightedValue.value; - } - } - } - } - - @Override - public V remove(Object key) { - final Node node = data.remove(key); - if (node == null) { - return null; - } - - makeRetired(node); - afterWrite(new RemovalTask(node)); - return node.getValue(); - } - - @Override - public boolean remove(Object key, Object value) { - final Node node = data.get(key); - if ((node == null) || (value == null)) { - return false; - } - - WeightedValue weightedValue = node.get(); - for (;;) { - if (weightedValue.contains(value)) { - if (tryToRetire(node, weightedValue)) { - if (data.remove(key, node)) { - afterWrite(new RemovalTask(node)); - return true; - } - } else { - weightedValue = node.get(); - if (weightedValue.isAlive()) { - // retry as an intermediate update may have replaced the value with - // an equal instance that has a different reference identity - continue; - } - } - } - return false; - } - } - - @Override - public V replace(K key, V value) { - checkNotNull(key); - checkNotNull(value); - - final int weight = weigher.weightOf(key, value); - final WeightedValue weightedValue = new WeightedValue(value, weight); - - final Node node = data.get(key); - if (node == null) { - return null; - } - for (;;) { - final WeightedValue oldWeightedValue = node.get(); - if (!oldWeightedValue.isAlive()) { - return null; - } - if (node.compareAndSet(oldWeightedValue, weightedValue)) { - final int weightedDifference = weight - oldWeightedValue.weight; - if (weightedDifference == 0) { - afterRead(node); - } else { - afterWrite(new UpdateTask(node, weightedDifference)); - } - return oldWeightedValue.value; - } - } - } - - @Override - public boolean replace(K key, V oldValue, V newValue) { - checkNotNull(key); - checkNotNull(oldValue); - checkNotNull(newValue); - - final int weight = weigher.weightOf(key, newValue); - final WeightedValue newWeightedValue = new WeightedValue(newValue, weight); - - final Node node = data.get(key); - if (node == null) { - return false; - } - for (;;) { - final WeightedValue weightedValue = node.get(); - if (!weightedValue.isAlive() || !weightedValue.contains(oldValue)) { - return false; - } - if (node.compareAndSet(weightedValue, newWeightedValue)) { - final int weightedDifference = weight - weightedValue.weight; - if (weightedDifference == 0) { - afterRead(node); - } else { - afterWrite(new UpdateTask(node, 
weightedDifference)); - } - return true; - } - } - } - - @Override - public Set keySet() { - final Set ks = keySet; - return (ks == null) ? (keySet = new KeySet()) : ks; - } - - /** - * Returns a unmodifiable snapshot {@link Set} view of the keys contained in - * this map. The set's iterator returns the keys whose order of iteration is - * the ascending order in which its entries are considered eligible for - * retention, from the least-likely to be retained to the most-likely. - *
- * Beware that, unlike in {@link #keySet()}, obtaining the set is NOT - * a constant-time operation. Because of the asynchronous nature of the page - * replacement policy, determining the retention ordering requires a traversal - * of the keys. - * - * @return an ascending snapshot view of the keys in this map - */ - public Set ascendingKeySet() { - return ascendingKeySetWithLimit(Integer.MAX_VALUE); - } - - /** - * Returns an unmodifiable snapshot {@link Set} view of the keys contained in - * this map. The set's iterator returns the keys whose order of iteration is - * the ascending order in which its entries are considered eligible for - * retention, from the least-likely to be retained to the most-likely. - *
- * Beware that, unlike in {@link #keySet()}, obtaining the set is NOT - * a constant-time operation. Because of the asynchronous nature of the page - * replacement policy, determining the retention ordering requires a traversal - * of the keys. - * - * @param limit the maximum size of the returned set - * @return a ascending snapshot view of the keys in this map - * @throws IllegalArgumentException if the limit is negative - */ - public Set ascendingKeySetWithLimit(int limit) { - return orderedKeySet(true, limit); - } - - /** - * Returns an unmodifiable snapshot {@link Set} view of the keys contained in - * this map. The set's iterator returns the keys whose order of iteration is - * the descending order in which its entries are considered eligible for - * retention, from the most-likely to be retained to the least-likely. - *
- * Beware that, unlike in {@link #keySet()}, obtaining the set is NOT - * a constant-time operation. Because of the asynchronous nature of the page - * replacement policy, determining the retention ordering requires a traversal - * of the keys. - * - * @return a descending snapshot view of the keys in this map - */ - public Set descendingKeySet() { - return descendingKeySetWithLimit(Integer.MAX_VALUE); - } - - /** - * Returns an unmodifiable snapshot {@link Set} view of the keys contained in - * this map. The set's iterator returns the keys whose order of iteration is - * the descending order in which its entries are considered eligible for - * retention, from the most-likely to be retained to the least-likely. - *
- * Beware that, unlike in {@link #keySet()}, obtaining the set is NOT - * a constant-time operation. Because of the asynchronous nature of the page - * replacement policy, determining the retention ordering requires a traversal - * of the keys. - * - * @param limit the maximum size of the returned set - * @return a descending snapshot view of the keys in this map - * @throws IllegalArgumentException if the limit is negative - */ - public Set descendingKeySetWithLimit(int limit) { - return orderedKeySet(false, limit); - } - - Set orderedKeySet(boolean ascending, int limit) { - checkArgument(limit >= 0); - evictionLock.lock(); - try { - drainBuffers(); - - final int initialCapacity = (weigher == Weighers.entrySingleton()) - ? Math.min(limit, (int) weightedSize()) - : 16; - final Set keys = new LinkedHashSet(initialCapacity); - final Iterator> iterator = ascending - ? evictionDeque.iterator() - : evictionDeque.descendingIterator(); - while (iterator.hasNext() && (limit > keys.size())) { - keys.add(iterator.next().key); - } - return unmodifiableSet(keys); - } finally { - evictionLock.unlock(); - } - } - - @Override - public Collection values() { - final Collection vs = values; - return (vs == null) ? (values = new Values()) : vs; - } - - @Override - public Set> entrySet() { - final Set> es = entrySet; - return (es == null) ? (entrySet = new EntrySet()) : es; - } - - /** - * Returns an unmodifiable snapshot {@link Map} view of the mappings contained - * in this map. The map's collections return the mappings whose order of - * iteration is the ascending order in which its entries are considered - * eligible for retention, from the least-likely to be retained to the - * most-likely. - *
- * Beware that obtaining the mappings is NOT a constant-time - * operation. Because of the asynchronous nature of the page replacement - * policy, determining the retention ordering requires a traversal of the - * entries. - * - * @return a ascending snapshot view of this map - */ - public Map ascendingMap() { - return ascendingMapWithLimit(Integer.MAX_VALUE); - } - - /** - * Returns an unmodifiable snapshot {@link Map} view of the mappings contained - * in this map. The map's collections return the mappings whose order of - * iteration is the ascending order in which its entries are considered - * eligible for retention, from the least-likely to be retained to the - * most-likely. - *
- * Beware that obtaining the mappings is NOT a constant-time - * operation. Because of the asynchronous nature of the page replacement - * policy, determining the retention ordering requires a traversal of the - * entries. - * - * @param limit the maximum size of the returned map - * @return a ascending snapshot view of this map - * @throws IllegalArgumentException if the limit is negative - */ - public Map ascendingMapWithLimit(int limit) { - return orderedMap(true, limit); - } - - /** - * Returns an unmodifiable snapshot {@link Map} view of the mappings contained - * in this map. The map's collections return the mappings whose order of - * iteration is the descending order in which its entries are considered - * eligible for retention, from the most-likely to be retained to the - * least-likely. - *
- * Beware that obtaining the mappings is NOT a constant-time - * operation. Because of the asynchronous nature of the page replacement - * policy, determining the retention ordering requires a traversal of the - * entries. - * - * @return a descending snapshot view of this map - */ - public Map descendingMap() { - return descendingMapWithLimit(Integer.MAX_VALUE); - } - - /** - * Returns an unmodifiable snapshot {@link Map} view of the mappings contained - * in this map. The map's collections return the mappings whose order of - * iteration is the descending order in which its entries are considered - * eligible for retention, from the most-likely to be retained to the - * least-likely. - *
- * Beware that obtaining the mappings is NOT a constant-time - * operation. Because of the asynchronous nature of the page replacement - * policy, determining the retention ordering requires a traversal of the - * entries. - * - * @param limit the maximum size of the returned map - * @return a descending snapshot view of this map - * @throws IllegalArgumentException if the limit is negative - */ - public Map descendingMapWithLimit(int limit) { - return orderedMap(false, limit); - } - - Map orderedMap(boolean ascending, int limit) { - checkArgument(limit >= 0); - evictionLock.lock(); - try { - drainBuffers(); - - final int initialCapacity = (weigher == Weighers.entrySingleton()) - ? Math.min(limit, (int) weightedSize()) - : 16; - final Map map = new LinkedHashMap(initialCapacity); - final Iterator> iterator = ascending - ? evictionDeque.iterator() - : evictionDeque.descendingIterator(); - while (iterator.hasNext() && (limit > map.size())) { - Node node = iterator.next(); - map.put(node.key, node.getValue()); - } - return unmodifiableMap(map); - } finally { - evictionLock.unlock(); - } - } - - /** The draining status of the buffers. */ - enum DrainStatus { - - /** A drain is not taking place. */ - IDLE { - @Override boolean shouldDrainBuffers(boolean delayable) { - return !delayable; - } - }, - - /** A drain is required due to a pending write modification. */ - REQUIRED { - @Override boolean shouldDrainBuffers(boolean delayable) { - return true; - } - }, - - /** A drain is in progress. */ - PROCESSING { - @Override boolean shouldDrainBuffers(boolean delayable) { - return false; - } - }; - - /** - * Determines whether the buffers should be drained. - * - * @param delayable if a drain should be delayed until required - * @return if a drain should be attempted - */ - abstract boolean shouldDrainBuffers(boolean delayable); - } - - /** A value, its weight, and the entry's status. */ - static final class WeightedValue { - final int weight; - final V value; - - WeightedValue(V value, int weight) { - this.weight = weight; - this.value = value; - } - - boolean contains(Object o) { - return (o == value) || value.equals(o); - } - - /** - * If the entry is available in the hash-table and page replacement policy. - */ - boolean isAlive() { - return weight > 0; - } - - /** - * If the entry was removed from the hash-table and is awaiting removal from - * the page replacement policy. - */ - boolean isRetired() { - return weight < 0; - } - - /** - * If the entry was removed from the hash-table and the page replacement - * policy. - */ - boolean isDead() { - return weight == 0; - } - } - - /** - * A node contains the key, the weighted value, and the linkage pointers on - * the page-replacement algorithm's data structures. - */ - @SuppressWarnings("serial") - static final class Node extends AtomicReference> - implements Linked> { - final K key; - Node prev; - Node next; - - /** Creates a new, unlinked node. */ - Node(K key, WeightedValue weightedValue) { - super(weightedValue); - this.key = key; - } - - @Override - public Node getPrevious() { - return prev; - } - - @Override - public void setPrevious(Node prev) { - this.prev = prev; - } - - @Override - public Node getNext() { - return next; - } - - @Override - public void setNext(Node next) { - this.next = next; - } - - /** Retrieves the value held by the current WeightedValue. */ - V getValue() { - return get().value; - } - } - - /** An adapter to safely externalize the keys. 
*/ - final class KeySet extends AbstractSet { - final ConcurrentLinkedHashMap map = ConcurrentLinkedHashMap.this; - - @Override - public int size() { - return map.size(); - } - - @Override - public void clear() { - map.clear(); - } - - @Override - public Iterator iterator() { - return new KeyIterator(); - } - - @Override - public boolean contains(Object obj) { - return containsKey(obj); - } - - @Override - public boolean remove(Object obj) { - return (map.remove(obj) != null); - } - - @Override - public Object[] toArray() { - return map.data.keySet().toArray(); - } - - @Override - public T[] toArray(T[] array) { - return map.data.keySet().toArray(array); - } - } - - /** An adapter to safely externalize the key iterator. */ - final class KeyIterator implements Iterator { - final Iterator iterator = data.keySet().iterator(); - K current; - - @Override - public boolean hasNext() { - return iterator.hasNext(); - } - - @Override - public K next() { - current = iterator.next(); - return current; - } - - @Override - public void remove() { - checkState(current != null); - ConcurrentLinkedHashMap.this.remove(current); - current = null; - } - } - - /** An adapter to safely externalize the values. */ - final class Values extends AbstractCollection { - - @Override - public int size() { - return ConcurrentLinkedHashMap.this.size(); - } - - @Override - public void clear() { - ConcurrentLinkedHashMap.this.clear(); - } - - @Override - public Iterator iterator() { - return new ValueIterator(); - } - - @Override - public boolean contains(Object o) { - return containsValue(o); - } - } - - /** An adapter to safely externalize the value iterator. */ - final class ValueIterator implements Iterator { - final Iterator> iterator = data.values().iterator(); - Node current; - - @Override - public boolean hasNext() { - return iterator.hasNext(); - } - - @Override - public V next() { - current = iterator.next(); - return current.getValue(); - } - - @Override - public void remove() { - checkState(current != null); - ConcurrentLinkedHashMap.this.remove(current.key); - current = null; - } - } - - /** An adapter to safely externalize the entries. */ - final class EntrySet extends AbstractSet> { - final ConcurrentLinkedHashMap map = ConcurrentLinkedHashMap.this; - - @Override - public int size() { - return map.size(); - } - - @Override - public void clear() { - map.clear(); - } - - @Override - public Iterator> iterator() { - return new EntryIterator(); - } - - @Override - public boolean contains(Object obj) { - if (!(obj instanceof Entry)) { - return false; - } - Entry entry = (Entry) obj; - Node node = map.data.get(entry.getKey()); - return (node != null) && (node.getValue().equals(entry.getValue())); - } - - @Override - public boolean add(Entry entry) { - return (map.putIfAbsent(entry.getKey(), entry.getValue()) == null); - } - - @Override - public boolean remove(Object obj) { - if (!(obj instanceof Entry)) { - return false; - } - Entry entry = (Entry) obj; - return map.remove(entry.getKey(), entry.getValue()); - } - } - - /** An adapter to safely externalize the entry iterator. 
*/ - final class EntryIterator implements Iterator> { - final Iterator> iterator = data.values().iterator(); - Node current; - - @Override - public boolean hasNext() { - return iterator.hasNext(); - } - - @Override - public Entry next() { - current = iterator.next(); - return new WriteThroughEntry(current); - } - - @Override - public void remove() { - checkState(current != null); - ConcurrentLinkedHashMap.this.remove(current.key); - current = null; - } - } - - /** An entry that allows updates to write through to the map. */ - final class WriteThroughEntry extends SimpleEntry { - static final long serialVersionUID = 1; - - WriteThroughEntry(Node node) { - super(node.key, node.getValue()); - } - - @Override - public V setValue(V value) { - put(getKey(), value); - return super.setValue(value); - } - - Object writeReplace() { - return new SimpleEntry(this); - } - } - - /** A weigher that enforces that the weight falls within a valid range. */ - static final class BoundedEntryWeigher implements EntryWeigher, Serializable { - static final long serialVersionUID = 1; - final EntryWeigher weigher; - - BoundedEntryWeigher(EntryWeigher weigher) { - checkNotNull(weigher); - this.weigher = weigher; - } - - @Override - public int weightOf(K key, V value) { - int weight = weigher.weightOf(key, value); - checkArgument(weight >= 1); - return weight; - } - - Object writeReplace() { - return weigher; - } - } - - /** A queue that discards all additions and is always empty. */ - static final class DiscardingQueue extends AbstractQueue { - @Override public boolean add(Object e) { return true; } - @Override public boolean offer(Object e) { return true; } - @Override public Object poll() { return null; } - @Override public Object peek() { return null; } - @Override public int size() { return 0; } - @Override public Iterator iterator() { return emptyList().iterator(); } - } - - /** A listener that ignores all notifications. */ - enum DiscardingListener implements EvictionListener { - INSTANCE; - - @Override public void onEviction(Object key, Object value) {} - } - - /* ---------------- Serialization Support -------------- */ - - static final long serialVersionUID = 1; - - Object writeReplace() { - return new SerializationProxy(this); - } - - private void readObject(ObjectInputStream stream) throws InvalidObjectException { - throw new InvalidObjectException("Proxy required"); - } - - /** - * A proxy that is serialized instead of the map. The page-replacement - * algorithm's data structures are not serialized so the deserialized - * instance contains only the entries. This is acceptable as caches hold - * transient data that is recomputable and serialization would tend to be - * used as a fast warm-up process. 
- */ - static final class SerializationProxy implements Serializable { - final EntryWeigher weigher; - final EvictionListener listener; - final int concurrencyLevel; - final Map data; - final long capacity; - - SerializationProxy(ConcurrentLinkedHashMap map) { - concurrencyLevel = map.concurrencyLevel; - data = new HashMap(map); - capacity = map.capacity.get(); - listener = map.listener; - weigher = map.weigher; - } - - Object readResolve() { - ConcurrentLinkedHashMap map = new Builder() - .concurrencyLevel(concurrencyLevel) - .maximumWeightedCapacity(capacity) - .listener(listener) - .weigher(weigher) - .build(); - map.putAll(data); - return map; - } - - static final long serialVersionUID = 1; - } - - /* ---------------- Builder -------------- */ - - /** - * A builder that creates {@link ConcurrentLinkedHashMap} instances. It - * provides a flexible approach for constructing customized instances with - * a named parameter syntax. It can be used in the following manner: - *
* <pre>{@code
-   * ConcurrentMap<Vertex, Set<Edge>> graph = new Builder<Vertex, Set<Edge>>()
-   *     .maximumWeightedCapacity(5000)
-   *     .weigher(Weighers.<Edge>set())
-   *     .build();
-   * }</pre>
- */ - public static final class Builder { - static final int DEFAULT_CONCURRENCY_LEVEL = 16; - static final int DEFAULT_INITIAL_CAPACITY = 16; - - EvictionListener listener; - EntryWeigher weigher; - - int concurrencyLevel; - int initialCapacity; - long capacity; - - @SuppressWarnings("unchecked") - public Builder() { - capacity = -1; - weigher = Weighers.entrySingleton(); - initialCapacity = DEFAULT_INITIAL_CAPACITY; - concurrencyLevel = DEFAULT_CONCURRENCY_LEVEL; - listener = (EvictionListener) DiscardingListener.INSTANCE; - } - - /** - * Specifies the initial capacity of the hash table (default 16). - * This is the number of key-value pairs that the hash table can hold - * before a resize operation is required. - * - * @param initialCapacity the initial capacity used to size the hash table - * to accommodate this many entries. - * - * @return Builder - * @throws IllegalArgumentException if the initialCapacity is negative - */ - public Builder initialCapacity(int initialCapacity) { - checkArgument(initialCapacity >= 0); - this.initialCapacity = initialCapacity; - return this; - } - - /** - * Specifies the maximum weighted capacity to coerce the map to and may - * exceed it temporarily. - * - * @param capacity the weighted threshold to bound the map by - * @return Builder - * @throws IllegalArgumentException if the maximumWeightedCapacity is - * negative - */ - public Builder maximumWeightedCapacity(long capacity) { - checkArgument(capacity >= 0); - this.capacity = capacity; - return this; - } - - /** - * Specifies the estimated number of concurrently updating threads. The - * implementation performs internal sizing to try to accommodate this many - * threads (default 16). - * - * @param concurrencyLevel the estimated number of concurrently updating - * threads - * @return Builder - * @throws IllegalArgumentException if the concurrencyLevel is less than or - * equal to zero - */ - public Builder concurrencyLevel(int concurrencyLevel) { - checkArgument(concurrencyLevel > 0); - this.concurrencyLevel = concurrencyLevel; - return this; - } - - /** - * Specifies an optional listener that is registered for notification when - * an entry is evicted. - * - * @param listener the object to forward evicted entries to - * @return Builder - * @throws NullPointerException if the listener is null - */ - public Builder listener(EvictionListener listener) { - checkNotNull(listener); - this.listener = listener; - return this; - } - - /** - * Specifies an algorithm to determine how many the units of capacity a - * value consumes. The default algorithm bounds the map by the number of - * key-value pairs by giving each entry a weight of 1. - * - * @param weigher the algorithm to determine a value's weight - * @return Builder - * @throws NullPointerException if the weigher is null - */ - public Builder weigher(Weigher weigher) { - this.weigher = (weigher == Weighers.singleton()) - ? Weighers.entrySingleton() - : new BoundedEntryWeigher(Weighers.asEntryWeigher(weigher)); - return this; - } - - /** - * Specifies an algorithm to determine how many the units of capacity an - * entry consumes. The default algorithm bounds the map by the number of - * key-value pairs by giving each entry a weight of 1. - * - * @param weigher the algorithm to determine a entry's weight - * @return Builder - * @throws NullPointerException if the weigher is null - */ - public Builder weigher(EntryWeigher weigher) { - this.weigher = (weigher == Weighers.entrySingleton()) - ? 
Weighers.entrySingleton() - : new BoundedEntryWeigher(weigher); - return this; - } - - /** - * Creates a new {@link ConcurrentLinkedHashMap} instance. - * - * @return ConcurrentLinkedHashMap - * @throws IllegalStateException if the maximum weighted capacity was - * not set - */ - public ConcurrentLinkedHashMap build() { - checkState(capacity >= 0); - return new ConcurrentLinkedHashMap(this); - } - } -} diff --git a/src/main/java/mssql/googlecode/concurrentlinkedhashmap/EntryWeigher.java b/src/main/java/mssql/googlecode/concurrentlinkedhashmap/EntryWeigher.java deleted file mode 100644 index 9bf2a22b0..000000000 --- a/src/main/java/mssql/googlecode/concurrentlinkedhashmap/EntryWeigher.java +++ /dev/null @@ -1,37 +0,0 @@ -/* - * Copyright 2012 Google Inc. All Rights Reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package mssql.googlecode.concurrentlinkedhashmap; - -/** - * A class that can determine the weight of an entry. The total weight threshold - * is used to determine when an eviction is required. - * - * @author ben.manes@gmail.com (Ben Manes) - * @see - * http://code.google.com/p/concurrentlinkedhashmap/ - */ -public interface EntryWeigher { - - /** - * Measures an entry's weight to determine how many units of capacity that - * the key and value consumes. An entry must consume a minimum of one unit. - * - * @param key the key to weigh - * @param value the value to weigh - * @return the entry's weight - */ - int weightOf(K key, V value); -} diff --git a/src/main/java/mssql/googlecode/concurrentlinkedhashmap/EvictionListener.java b/src/main/java/mssql/googlecode/concurrentlinkedhashmap/EvictionListener.java deleted file mode 100644 index 65488587c..000000000 --- a/src/main/java/mssql/googlecode/concurrentlinkedhashmap/EvictionListener.java +++ /dev/null @@ -1,45 +0,0 @@ -/* - * Copyright 2010 Google Inc. All Rights Reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package mssql.googlecode.concurrentlinkedhashmap; - -/** - * A listener registered for notification when an entry is evicted. An instance - * may be called concurrently by multiple threads to process entries. An - * implementation should avoid performing blocking calls or synchronizing on - * shared resources. - *

- * The listener is invoked by {@link ConcurrentLinkedHashMap} on a caller's - * thread and will not block other threads from operating on the map. An - * implementation should be aware that the caller's thread will not expect - * long execution times or failures as a side effect of the listener being - * notified. Execution safety and a fast turn around time can be achieved by - * performing the operation asynchronously, such as by submitting a task to an - * {@link java.util.concurrent.ExecutorService}. - * - * @author ben.manes@gmail.com (Ben Manes) - * @see - * http://code.google.com/p/concurrentlinkedhashmap/ - */ -public interface EvictionListener { - - /** - * A call-back notification that the entry was evicted. - * - * @param key the entry's key - * @param value the entry's value - */ - void onEviction(K key, V value); -} diff --git a/src/main/java/mssql/googlecode/concurrentlinkedhashmap/LICENSE b/src/main/java/mssql/googlecode/concurrentlinkedhashmap/LICENSE deleted file mode 100644 index 261eeb9e9..000000000 --- a/src/main/java/mssql/googlecode/concurrentlinkedhashmap/LICENSE +++ /dev/null @@ -1,201 +0,0 @@ - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. 
- - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. 
You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. 
In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. diff --git a/src/main/java/mssql/googlecode/concurrentlinkedhashmap/LinkedDeque.java b/src/main/java/mssql/googlecode/concurrentlinkedhashmap/LinkedDeque.java deleted file mode 100644 index 2bb23ea78..000000000 --- a/src/main/java/mssql/googlecode/concurrentlinkedhashmap/LinkedDeque.java +++ /dev/null @@ -1,460 +0,0 @@ -/* - * Copyright 2011 Google Inc. All Rights Reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package mssql.googlecode.concurrentlinkedhashmap; - -import java.util.AbstractCollection; -import java.util.Collection; -import java.util.Deque; -import java.util.Iterator; -import java.util.NoSuchElementException; - -/** - * Linked list implementation of the {@link Deque} interface where the link - * pointers are tightly integrated with the element. Linked deques have no - * capacity restrictions; they grow as necessary to support usage. They are not - * thread-safe; in the absence of external synchronization, they do not support - * concurrent access by multiple threads. Null elements are prohibited. - *

- * Most LinkedDeque operations run in constant time by assuming that - * the {@link Linked} parameter is associated with the deque instance. Any usage - * that violates this assumption will result in non-deterministic behavior. - *

- * The iterators returned by this class are not fail-fast: If - * the deque is modified at any time after the iterator is created, the iterator - * will be in an unknown state. Thus, in the face of concurrent modification, - * the iterator risks arbitrary, non-deterministic behavior at an undetermined - * time in the future. - * - * @author ben.manes@gmail.com (Ben Manes) - * @param the type of elements held in this collection - * @see - * http://code.google.com/p/concurrentlinkedhashmap/ - */ -final class LinkedDeque> extends AbstractCollection implements Deque { - - // This class provides a doubly-linked list that is optimized for the virtual - // machine. The first and last elements are manipulated instead of a slightly - // more convenient sentinel element to avoid the insertion of null checks with - // NullPointerException throws in the byte code. The links to a removed - // element are cleared to help a generational garbage collector if the - // discarded elements inhabit more than one generation. - - /** - * Pointer to first node. - * Invariant: (first == null && last == null) || - * (first.prev == null) - */ - E first; - - /** - * Pointer to last node. - * Invariant: (first == null && last == null) || - * (last.next == null) - */ - E last; - - /** - * Links the element to the front of the deque so that it becomes the first - * element. - * - * @param e the unlinked element - */ - void linkFirst(final E e) { - final E f = first; - first = e; - - if (f == null) { - last = e; - } else { - f.setPrevious(e); - e.setNext(f); - } - } - - /** - * Links the element to the back of the deque so that it becomes the last - * element. - * - * @param e the unlinked element - */ - void linkLast(final E e) { - final E l = last; - last = e; - - if (l == null) { - first = e; - } else { - l.setNext(e); - e.setPrevious(l); - } - } - - /** Unlinks the non-null first element. */ - E unlinkFirst() { - final E f = first; - final E next = f.getNext(); - f.setNext(null); - - first = next; - if (next == null) { - last = null; - } else { - next.setPrevious(null); - } - return f; - } - - /** Unlinks the non-null last element. */ - E unlinkLast() { - final E l = last; - final E prev = l.getPrevious(); - l.setPrevious(null); - last = prev; - if (prev == null) { - first = null; - } else { - prev.setNext(null); - } - return l; - } - - /** Unlinks the non-null element. */ - void unlink(E e) { - final E prev = e.getPrevious(); - final E next = e.getNext(); - - if (prev == null) { - first = next; - } else { - prev.setNext(next); - e.setPrevious(null); - } - - if (next == null) { - last = prev; - } else { - next.setPrevious(prev); - e.setNext(null); - } - } - - @Override - public boolean isEmpty() { - return (first == null); - } - - void checkNotEmpty() { - if (isEmpty()) { - throw new NoSuchElementException(); - } - } - - /** - * {@inheritDoc} - *

- * Beware that, unlike in most collections, this method is NOT a - * constant-time operation. - */ - @Override - public int size() { - int size = 0; - for (E e = first; e != null; e = e.getNext()) { - size++; - } - return size; - } - - @Override - public void clear() { - for (E e = first; e != null;) { - E next = e.getNext(); - e.setPrevious(null); - e.setNext(null); - e = next; - } - first = last = null; - } - - @Override - public boolean contains(Object o) { - return (o instanceof Linked) && contains((Linked) o); - } - - // A fast-path containment check - boolean contains(Linked e) { - return (e.getPrevious() != null) - || (e.getNext() != null) - || (e == first); - } - - /** - * Moves the element to the front of the deque so that it becomes the first - * element. - * - * @param e the linked element - */ - public void moveToFront(E e) { - if (e != first) { - unlink(e); - linkFirst(e); - } - } - - /** - * Moves the element to the back of the deque so that it becomes the last - * element. - * - * @param e the linked element - */ - public void moveToBack(E e) { - if (e != last) { - unlink(e); - linkLast(e); - } - } - - @Override - public E peek() { - return peekFirst(); - } - - @Override - public E peekFirst() { - return first; - } - - @Override - public E peekLast() { - return last; - } - - @Override - public E getFirst() { - checkNotEmpty(); - return peekFirst(); - } - - @Override - public E getLast() { - checkNotEmpty(); - return peekLast(); - } - - @Override - public E element() { - return getFirst(); - } - - @Override - public boolean offer(E e) { - return offerLast(e); - } - - @Override - public boolean offerFirst(E e) { - if (contains(e)) { - return false; - } - linkFirst(e); - return true; - } - - @Override - public boolean offerLast(E e) { - if (contains(e)) { - return false; - } - linkLast(e); - return true; - } - - @Override - public boolean add(E e) { - return offerLast(e); - } - - - @Override - public void addFirst(E e) { - if (!offerFirst(e)) { - throw new IllegalArgumentException(); - } - } - - @Override - public void addLast(E e) { - if (!offerLast(e)) { - throw new IllegalArgumentException(); - } - } - - @Override - public E poll() { - return pollFirst(); - } - - @Override - public E pollFirst() { - return isEmpty() ? null : unlinkFirst(); - } - - @Override - public E pollLast() { - return isEmpty() ? 
null : unlinkLast(); - } - - @Override - public E remove() { - return removeFirst(); - } - - @Override - @SuppressWarnings("unchecked") - public boolean remove(Object o) { - return (o instanceof Linked) && remove((E) o); - } - - // A fast-path removal - boolean remove(E e) { - if (contains(e)) { - unlink(e); - return true; - } - return false; - } - - @Override - public E removeFirst() { - checkNotEmpty(); - return pollFirst(); - } - - @Override - public boolean removeFirstOccurrence(Object o) { - return remove(o); - } - - @Override - public E removeLast() { - checkNotEmpty(); - return pollLast(); - } - - @Override - public boolean removeLastOccurrence(Object o) { - return remove(o); - } - - @Override - public boolean removeAll(Collection c) { - boolean modified = false; - for (Object o : c) { - modified |= remove(o); - } - return modified; - } - - @Override - public void push(E e) { - addFirst(e); - } - - @Override - public E pop() { - return removeFirst(); - } - - @Override - public Iterator iterator() { - return new AbstractLinkedIterator(first) { - @Override E computeNext() { - return cursor.getNext(); - } - }; - } - - @Override - public Iterator descendingIterator() { - return new AbstractLinkedIterator(last) { - @Override E computeNext() { - return cursor.getPrevious(); - } - }; - } - - abstract class AbstractLinkedIterator implements Iterator { - E cursor; - - /** - * Creates an iterator that can can traverse the deque. - * - * @param start the initial element to begin traversal from - */ - AbstractLinkedIterator(E start) { - cursor = start; - } - - @Override - public boolean hasNext() { - return (cursor != null); - } - - @Override - public E next() { - if (!hasNext()) { - throw new NoSuchElementException(); - } - E e = cursor; - cursor = computeNext(); - return e; - } - - @Override - public void remove() { - throw new UnsupportedOperationException(); - } - - /** - * Retrieves the next element to traverse to or null if there are - * no more elements. - */ - abstract E computeNext(); - } -} - -/** - * An element that is linked on the {@link Deque}. - */ -interface Linked> { - - /** - * Retrieves the previous element or null if either the element is - * unlinked or the first element on the deque. - */ - T getPrevious(); - - /** Sets the previous element or null if there is no link. */ - void setPrevious(T prev); - - /** - * Retrieves the next element or null if either the element is - * unlinked or the last element on the deque. - */ - T getNext(); - - /** Sets the next element or null if there is no link. */ - void setNext(T next); -} diff --git a/src/main/java/mssql/googlecode/concurrentlinkedhashmap/NOTICE b/src/main/java/mssql/googlecode/concurrentlinkedhashmap/NOTICE deleted file mode 100644 index e1cedae49..000000000 --- a/src/main/java/mssql/googlecode/concurrentlinkedhashmap/NOTICE +++ /dev/null @@ -1,7 +0,0 @@ -ConcurrentLinkedHashMap -Copyright 2008, Ben Manes -Copyright 2010, Google Inc. - -Some alternate data structures provided by JSR-166e -from http://gee.cs.oswego.edu/dl/concurrency-interest/. -Written by Doug Lea and released as Public Domain. diff --git a/src/main/java/mssql/googlecode/concurrentlinkedhashmap/Weigher.java b/src/main/java/mssql/googlecode/concurrentlinkedhashmap/Weigher.java deleted file mode 100644 index 529622c8e..000000000 --- a/src/main/java/mssql/googlecode/concurrentlinkedhashmap/Weigher.java +++ /dev/null @@ -1,36 +0,0 @@ -/* - * Copyright 2010 Google Inc. All Rights Reserved. 
- * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package mssql.googlecode.concurrentlinkedhashmap; - -/** - * A class that can determine the weight of a value. The total weight threshold - * is used to determine when an eviction is required. - * - * @author ben.manes@gmail.com (Ben Manes) - * @see - * http://code.google.com/p/concurrentlinkedhashmap/ - */ -public interface Weigher { - - /** - * Measures an object's weight to determine how many units of capacity that - * the value consumes. A value must consume a minimum of one unit. - * - * @param value the object to weigh - * @return the object's weight - */ - int weightOf(V value); -} diff --git a/src/main/java/mssql/googlecode/concurrentlinkedhashmap/Weighers.java b/src/main/java/mssql/googlecode/concurrentlinkedhashmap/Weighers.java deleted file mode 100644 index 29194547d..000000000 --- a/src/main/java/mssql/googlecode/concurrentlinkedhashmap/Weighers.java +++ /dev/null @@ -1,293 +0,0 @@ -/* - * Copyright 2010 Google Inc. All Rights Reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package mssql.googlecode.concurrentlinkedhashmap; - -import static mssql.googlecode.concurrentlinkedhashmap.ConcurrentLinkedHashMap.checkNotNull; - -import java.io.Serializable; -import java.util.Collection; -import java.util.Iterator; -import java.util.List; -import java.util.Map; -import java.util.Set; - -/** - * A common set of {@link Weigher} and {@link EntryWeigher} implementations. - * - * @author ben.manes@gmail.com (Ben Manes) - * @see - * http://code.google.com/p/concurrentlinkedhashmap/ - */ -public final class Weighers { - - private Weighers() { - throw new AssertionError(); - } - - /** - * A entry weigher backed by the specified weigher. The weight of the value - * determines the weight of the entry. - * - * @param K - * @param V - * @param weigher the weigher to be "wrapped" in a entry weigher. - * @return A entry weigher view of the specified weigher. - */ - public static EntryWeigher asEntryWeigher( - final Weigher weigher) { - return (weigher == singleton()) - ? Weighers.entrySingleton() - : new EntryWeigherView(weigher); - } - - /** - * A weigher where an entry has a weight of 1. A map bounded with - * this weigher will evict when the number of key-value pairs exceeds the - * capacity. - * - * @param K - * @param V - * @return A weigher where a value takes one unit of capacity. 
- */ - @SuppressWarnings({"cast", "unchecked"}) - public static EntryWeigher entrySingleton() { - return (EntryWeigher) SingletonEntryWeigher.INSTANCE; - } - - /** - * A weigher where a value has a weight of 1. A map bounded with - * this weigher will evict when the number of key-value pairs exceeds the - * capacity. - * - * @param V - * @return A weigher where a value takes one unit of capacity. - */ - @SuppressWarnings({"cast", "unchecked"}) - public static Weigher singleton() { - return (Weigher) SingletonWeigher.INSTANCE; - } - - /** - * A weigher where the value is a byte array and its weight is the number of - * bytes. A map bounded with this weigher will evict when the number of bytes - * exceeds the capacity rather than the number of key-value pairs in the map. - * This allows for restricting the capacity based on the memory-consumption - * and is primarily for usage by dedicated caching servers that hold the - * serialized data. - *

- * A value with a weight of 0 will be rejected by the map. If a value - * with this weight can occur then the caller should eagerly evaluate the - * value and treat it as a removal operation. Alternatively, a custom weigher - * may be specified on the map to assign an empty value a positive weight. - * - * @return A weigher where each byte takes one unit of capacity. - */ - public static Weigher byteArray() { - return ByteArrayWeigher.INSTANCE; - } - - /** - * A weigher where the value is a {@link Iterable} and its weight is the - * number of elements. This weigher only should be used when the alternative - * {@link #collection()} weigher cannot be, as evaluation takes O(n) time. A - * map bounded with this weigher will evict when the total number of elements - * exceeds the capacity rather than the number of key-value pairs in the map. - *

- * A value with a weight of 0 will be rejected by the map. If a value - * with this weight can occur then the caller should eagerly evaluate the - * value and treat it as a removal operation. Alternatively, a custom weigher - * may be specified on the map to assign an empty value a positive weight. - * - * @param E - * @return A weigher where each element takes one unit of capacity. - */ - @SuppressWarnings({"cast", "unchecked"}) - public static Weigher> iterable() { - return (Weigher>) (Weigher) IterableWeigher.INSTANCE; - } - - /** - * A weigher where the value is a {@link Collection} and its weight is the - * number of elements. A map bounded with this weigher will evict when the - * total number of elements exceeds the capacity rather than the number of - * key-value pairs in the map. - *

- * A value with a weight of 0 will be rejected by the map. If a value - * with this weight can occur then the caller should eagerly evaluate the - * value and treat it as a removal operation. Alternatively, a custom weigher - * may be specified on the map to assign an empty value a positive weight. - * - * @param E - * @return A weigher where each element takes one unit of capacity. - */ - @SuppressWarnings({"cast", "unchecked"}) - public static Weigher> collection() { - return (Weigher>) (Weigher) CollectionWeigher.INSTANCE; - } - - /** - * A weigher where the value is a {@link List} and its weight is the number - * of elements. A map bounded with this weigher will evict when the total - * number of elements exceeds the capacity rather than the number of - * key-value pairs in the map. - *

- * A value with a weight of 0 will be rejected by the map. If a value - * with this weight can occur then the caller should eagerly evaluate the - * value and treat it as a removal operation. Alternatively, a custom weigher - * may be specified on the map to assign an empty value a positive weight. - * - * @param E - * @return A weigher where each element takes one unit of capacity. - */ - @SuppressWarnings({"cast", "unchecked"}) - public static Weigher> list() { - return (Weigher>) (Weigher) ListWeigher.INSTANCE; - } - - /** - * A weigher where the value is a {@link Set} and its weight is the number - * of elements. A map bounded with this weigher will evict when the total - * number of elements exceeds the capacity rather than the number of - * key-value pairs in the map. - *

- * A value with a weight of 0 will be rejected by the map. If a value - * with this weight can occur then the caller should eagerly evaluate the - * value and treat it as a removal operation. Alternatively, a custom weigher - * may be specified on the map to assign an empty value a positive weight. - * - * @param E - * @return A weigher where each element takes one unit of capacity. - */ - @SuppressWarnings({"cast", "unchecked"}) - public static Weigher> set() { - return (Weigher>) (Weigher) SetWeigher.INSTANCE; - } - - /** - * A weigher where the value is a {@link Map} and its weight is the number of - * entries. A map bounded with this weigher will evict when the total number of - * entries across all values exceeds the capacity rather than the number of - * key-value pairs in the map. - *

- * A value with a weight of 0 will be rejected by the map. If a value - * with this weight can occur then the caller should eagerly evaluate the - * value and treat it as a removal operation. Alternatively, a custom weigher - * may be specified on the map to assign an empty value a positive weight. - * - * @param A - * @param B - * @return A weigher where each entry takes one unit of capacity. - */ - @SuppressWarnings({"cast", "unchecked"}) - public static Weigher> map() { - return (Weigher>) (Weigher) MapWeigher.INSTANCE; - } - - static final class EntryWeigherView implements EntryWeigher, Serializable { - static final long serialVersionUID = 1; - final Weigher weigher; - - EntryWeigherView(Weigher weigher) { - checkNotNull(weigher); - this.weigher = weigher; - } - - @Override - public int weightOf(K key, V value) { - return weigher.weightOf(value); - } - } - - enum SingletonEntryWeigher implements EntryWeigher { - INSTANCE; - - @Override - public int weightOf(Object key, Object value) { - return 1; - } - } - - enum SingletonWeigher implements Weigher { - INSTANCE; - - @Override - public int weightOf(Object value) { - return 1; - } - } - - enum ByteArrayWeigher implements Weigher { - INSTANCE; - - @Override - public int weightOf(byte[] value) { - return value.length; - } - } - - enum IterableWeigher implements Weigher> { - INSTANCE; - - @Override - public int weightOf(Iterable values) { - if (values instanceof Collection) { - return ((Collection) values).size(); - } - int size = 0; - for (Iterator i = values.iterator(); i.hasNext();) { - i.next(); - size++; - } - return size; - } - } - - enum CollectionWeigher implements Weigher> { - INSTANCE; - - @Override - public int weightOf(Collection values) { - return values.size(); - } - } - - enum ListWeigher implements Weigher> { - INSTANCE; - - @Override - public int weightOf(List values) { - return values.size(); - } - } - - enum SetWeigher implements Weigher> { - INSTANCE; - - @Override - public int weightOf(Set values) { - return values.size(); - } - } - - enum MapWeigher implements Weigher> { - INSTANCE; - - @Override - public int weightOf(Map values) { - return values.size(); - } - } -} diff --git a/src/main/java/mssql/googlecode/concurrentlinkedhashmap/package-info.java b/src/main/java/mssql/googlecode/concurrentlinkedhashmap/package-info.java deleted file mode 100644 index ad0fd0026..000000000 --- a/src/main/java/mssql/googlecode/concurrentlinkedhashmap/package-info.java +++ /dev/null @@ -1,29 +0,0 @@ -/* - * Copyright 2011 Google Inc. All Rights Reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a - * copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations - * under the License. - */ - -/** - * This package contains an implementation of a bounded {@link java.util.concurrent.ConcurrentMap} data structure. - *

- * {@link Weigher} is a simple interface for determining how many units of capacity an entry consumes. Depending on which concrete Weigher class is - * used, an entry may consume a different amount of space within the cache. The {@link Weighers} class provides utility methods for obtaining the most - * common kinds of implementations. - *

- * {@link EvictionListener} provides the ability to be notified when an entry is evicted from the map. An eviction occurs when the entry was - * automatically removed due to the map exceeding a capacity threshold. It is not called when an entry was explicitly removed. - *

- * The {@link ConcurrentLinkedHashMap} class supplies an efficient, scalable, thread-safe, bounded map. As with the - * Java Collections Framework the "Concurrent" prefix is used to indicate that the map is not governed by a single exclusion lock. - * - * @see http://code.google.com/p/concurrentlinkedhashmap/ - */ -package mssql.googlecode.concurrentlinkedhashmap; diff --git a/src/test/java/com/microsoft/sqlserver/jdbc/unit/statement/PreparedStatementTest.java b/src/test/java/com/microsoft/sqlserver/jdbc/unit/statement/PreparedStatementTest.java index c274e3611..e6b94bf7c 100644 --- a/src/test/java/com/microsoft/sqlserver/jdbc/unit/statement/PreparedStatementTest.java +++ b/src/test/java/com/microsoft/sqlserver/jdbc/unit/statement/PreparedStatementTest.java @@ -7,22 +7,16 @@ */ package com.microsoft.sqlserver.jdbc.unit.statement; -import static java.util.concurrent.TimeUnit.SECONDS; import static org.junit.jupiter.api.Assertions.assertSame; import static org.junit.jupiter.api.Assertions.assertNotSame; import static org.junit.jupiter.api.Assertions.assertTrue; -import static org.junit.jupiter.api.Assertions.assertNull; import static org.junit.jupiter.api.Assertions.fail; import java.sql.DriverManager; import java.sql.ResultSet; import java.sql.SQLException; import java.sql.Statement; -import java.util.Random; import java.util.UUID; -import java.util.concurrent.ExecutorService; -import java.util.concurrent.Executors; -import java.util.concurrent.atomic.AtomicReference; import org.junit.jupiter.api.Test; import org.junit.platform.runner.JUnitPlatform; @@ -32,10 +26,9 @@ import com.microsoft.sqlserver.jdbc.SQLServerDataSource; import com.microsoft.sqlserver.jdbc.SQLServerPreparedStatement; import com.microsoft.sqlserver.testframework.AbstractTest; -import com.microsoft.sqlserver.testframework.util.RandomUtil; @RunWith(JUnitPlatform.class) -public class PreparedStatementTest extends AbstractTest { +public class PreparedStatementTest extends AbstractTest { private void executeSQL(SQLServerConnection conn, String sql) throws SQLException { Statement stmt = conn.createStatement(); stmt.execute(sql); @@ -62,12 +55,13 @@ private int executeSQLReturnFirstInt(SQLServerConnection conn, String sql) throw public void testBatchedUnprepare() throws SQLException { SQLServerConnection conOuter = null; + // Make sure correct settings are used. + SQLServerConnection.setDefaultEnablePrepareOnFirstPreparedStatementCall(SQLServerConnection.getInitialDefaultEnablePrepareOnFirstPreparedStatementCall()); + SQLServerConnection.setDefaultServerPreparedStatementDiscardThreshold(SQLServerConnection.getInitialDefaultServerPreparedStatementDiscardThreshold()); + try (SQLServerConnection con = (SQLServerConnection)DriverManager.getConnection(connectionString)) { conOuter = con; - // Turn off use of prepared statement cache. - con.setStatementPoolingCacheSize(0); - // Clean-up proc cache this.executeSQL(con, "DBCC FREEPROCCACHE;"); @@ -83,6 +77,17 @@ public void testBatchedUnprepare() throws SQLException { int iterations = 25; + // Verify no prepares for 1 time only uses. + for(int i = 0; i < iterations; ++i) { + try (SQLServerPreparedStatement pstmt = (SQLServerPreparedStatement)con.prepareStatement(query)) { + pstmt.execute(); + } + assertSame(0, con.getDiscardedServerPreparedStatementCount()); + } + + // Verify total cache use. 
+ assertSame(iterations, executeSQLReturnFirstInt(con, verifyTotalCacheUsesQuery)); + query = String.format("/*unpreparetest_%s, sp_executesql->sp_prepexec->sp_execute- batched sp_unprepare*/SELECT * FROM sys.tables;", lookupUniqueifier); int prevDiscardActionCount = 0; @@ -92,7 +97,7 @@ public void testBatchedUnprepare() throws SQLException { // Verify current queue depth is expected. assertSame(prevDiscardActionCount, con.getDiscardedServerPreparedStatementCount()); - try (SQLServerPreparedStatement pstmt = (SQLServerPreparedStatement)con.prepareStatement(String.format("%s--%s", query, i))) { + try (SQLServerPreparedStatement pstmt = (SQLServerPreparedStatement)con.prepareStatement(query)) { pstmt.execute(); // sp_executesql pstmt.execute(); // sp_prepexec @@ -124,316 +129,40 @@ public void testBatchedUnprepare() throws SQLException { assertSame(0, conOuter.getDiscardedServerPreparedStatementCount()); } - /** - * Test handling of statement pooling for prepared statements. - * - * @throws SQLException - */ - @Test - public void testStatementPooling() throws SQLException { - // Test % handle re-use - try (SQLServerConnection con = (SQLServerConnection)DriverManager.getConnection(connectionString)) { - String query = String.format("/*statementpoolingtest_re-use_%s*/SELECT TOP(1) * FROM sys.tables;", UUID.randomUUID().toString()); - - con.setStatementPoolingCacheSize(10); - - boolean[] prepOnFirstCalls = {false, true}; - - for(boolean prepOnFirstCall : prepOnFirstCalls) { - - con.setEnablePrepareOnFirstPreparedStatementCall(prepOnFirstCall); - - int[] queryCounts = {10, 20, 30, 40}; - for(int queryCount : queryCounts) { - String[] queries = new String[queryCount]; - for(int i = 0; i < queries.length; ++i) { - queries[i] = String.format("%s--%s--%s--%s", query, i, queryCount, prepOnFirstCall); - } - - int testsWithHandleReuse = 0; - final int testCount = 500; - for(int i = 0; i < testCount; ++i) { - Random random = new Random(); - int queryNumber = random.nextInt(queries.length); - try (SQLServerPreparedStatement pstmt = (SQLServerPreparedStatement) con.prepareStatement(queries[queryNumber])) { - pstmt.execute(); - - // Grab handle-reuse before it would be populated if initially created. - if(0 < pstmt.getPreparedStatementHandle()) - testsWithHandleReuse++; - - pstmt.getMoreResults(); // Make sure handle is updated. - } - } - System.out.println(String.format("Prep on first call: %s Query count:%s: %s of %s (%s)", prepOnFirstCall, queryCount, testsWithHandleReuse, testCount, (double)testsWithHandleReuse/(double)testCount)); - } - } - } - - try (SQLServerConnection con = (SQLServerConnection)DriverManager.getConnection(connectionString)) { - - // Test behvaior with statement pooling. - con.setStatementPoolingCacheSize(10); - - // Test with missing handle failures (fake). - this.executeSQL(con, "CREATE TABLE #update1 (col INT);INSERT #update1 VALUES (1);"); - this.executeSQL(con, "CREATE PROC #updateProc1 AS UPDATE #update1 SET col += 1; IF EXISTS (SELECT * FROM #update1 WHERE col % 5 = 0) THROW 99586, 'Prepared handle GAH!', 1;"); - try (SQLServerPreparedStatement pstmt = (SQLServerPreparedStatement) con.prepareStatement("#updateProc1")) { - for (int i = 0; i < 100; ++i) { - assertSame(1, pstmt.executeUpdate()); - } - } - - // Test batching with missing handle failures (fake). 
- this.executeSQL(con, "CREATE TABLE #update2 (col INT);INSERT #update2 VALUES (1);"); - this.executeSQL(con, "CREATE PROC #updateProc2 AS UPDATE #update2 SET col += 1; IF EXISTS (SELECT * FROM #update2 WHERE col % 5 = 0) THROW 99586, 'Prepared handle GAH!', 1;"); - try (SQLServerPreparedStatement pstmt = (SQLServerPreparedStatement) con.prepareStatement("#updateProc2")) { - for (int i = 0; i < 100; ++i) - pstmt.addBatch(); - - int[] updateCounts = pstmt.executeBatch(); - - // Verify update counts are correct - for (int i : updateCounts) { - assertSame(1, i); - } - } - } - - try (SQLServerConnection con = (SQLServerConnection)DriverManager.getConnection(connectionString)) { - // Test behvaior with statement pooling. - con.setStatementPoolingCacheSize(10); - - String lookupUniqueifier = UUID.randomUUID().toString(); - String query = String.format("/*statementpoolingtest_%s*/SELECT * FROM sys.tables;", lookupUniqueifier); - - // Execute statement first, should create cache entry WITHOUT handle (since sp_executesql was used). - try (SQLServerPreparedStatement pstmt = (SQLServerPreparedStatement)con.prepareStatement(query)) { - pstmt.execute(); // sp_executesql - pstmt.getMoreResults(); // Make sure handle is updated. - - assertSame(0, pstmt.getPreparedStatementHandle()); - } - - // Execute statement again, should now create handle. - int handle = 0; - try (SQLServerPreparedStatement pstmt = (SQLServerPreparedStatement)con.prepareStatement(query)) { - pstmt.execute(); // sp_prepexec - pstmt.getMoreResults(); // Make sure handle is updated. - - handle = pstmt.getPreparedStatementHandle(); - assertNotSame(0, handle); - } - - // Execute statement again and verify same handle was used. - try (SQLServerPreparedStatement pstmt = (SQLServerPreparedStatement)con.prepareStatement(query)) { - pstmt.execute(); // sp_execute - pstmt.getMoreResults(); // Make sure handle is updated. - - assertNotSame(0, pstmt.getPreparedStatementHandle()); - assertSame(handle, pstmt.getPreparedStatementHandle()); - } - - // Execute new statement with different SQL text and verify it does NOT get same handle (should now fall back to using sp_executesql). - SQLServerPreparedStatement outer = null; - try (SQLServerPreparedStatement pstmt = (SQLServerPreparedStatement)con.prepareStatement(query + ";")) { - outer = pstmt; - pstmt.execute(); // sp_executesql - pstmt.getMoreResults(); // Make sure handle is updated. - - assertSame(0, pstmt.getPreparedStatementHandle()); - assertNotSame(handle, pstmt.getPreparedStatementHandle()); - } - try { - System.out.println(outer.getPreparedStatementHandle()); - fail("Error for invalid use of getPreparedStatementHandle() after statement close expected."); - } - catch(Exception e) { - // Good! - } - } - } - - /** - * Test handling of eviction from statement pooling for prepared statements. - * - * @throws SQLException - */ - @Test - public void testStatementPoolingEviction() throws SQLException { - - for (int testNo = 0; testNo < 2; ++testNo) { - try (SQLServerConnection con = (SQLServerConnection)DriverManager.getConnection(connectionString)) { - - int cacheSize = 10; - int discardedStatementCount = testNo == 0 ? 
5 /*batched unprepares*/ : 0 /*regular unprepares*/; - - con.setStatementPoolingCacheSize(cacheSize); - con.setServerPreparedStatementDiscardThreshold(discardedStatementCount); - - String lookupUniqueifier = UUID.randomUUID().toString(); - String query = String.format("/*statementpoolingevictiontest_%s*/SELECT * FROM sys.tables; -- ", lookupUniqueifier); - - // Add new statements to fill up the statement pool. - for (int i = 0; i < cacheSize; ++i) { - try (SQLServerPreparedStatement pstmt = (SQLServerPreparedStatement)con.prepareStatement(query + new Integer(i).toString())) { - pstmt.execute(); // sp_executesql - pstmt.execute(); // sp_prepexec, actual handle created and cached. - } - // Make sure no handles in discard queue (still only in statement pool). - assertSame(0, con.getDiscardedServerPreparedStatementCount()); - } - - // No discarded handles yet, all in statement pool. - assertSame(0, con.getDiscardedServerPreparedStatementCount()); - - // Add new statements to fill up the statement discard action queue - // (new statement pushes existing statement from pool into discard - // action queue). - for (int i = cacheSize; i < cacheSize + 5; ++i) { - try (SQLServerPreparedStatement pstmt = (SQLServerPreparedStatement)con.prepareStatement(query + new Integer(i).toString())) { - pstmt.execute(); // sp_executesql - pstmt.execute(); // sp_prepexec, actual handle created and cached. - } - // If we use discard queue handles should start going into discard queue. - if(0 == testNo) - assertNotSame(0, con.getDiscardedServerPreparedStatementCount()); - else - assertSame(0, con.getDiscardedServerPreparedStatementCount()); - } - - // If we use it, now discard queue should be "full". - if (0 == testNo) - assertSame(discardedStatementCount, con.getDiscardedServerPreparedStatementCount()); - else - assertSame(0, con.getDiscardedServerPreparedStatementCount()); - - // Adding one more statement should cause one more pooled statement to be invalidated and - // discarding actions should be executed (i.e. sp_unprepare batch), clearing out the discard - // action queue. - try (SQLServerPreparedStatement pstmt = (SQLServerPreparedStatement)con.prepareStatement(query)) { - pstmt.execute(); // sp_executesql - pstmt.execute(); // sp_prepexec, actual handle created and cached. - } - - // Discard queue should now be empty. - assertSame(0, con.getDiscardedServerPreparedStatementCount()); - - // Set statement pool size to 0 and verify statements get discarded. - int statementsInCache = con.getStatementHandleCacheEntryCount(); - con.setStatementPoolingCacheSize(0); - assertSame(0, con.getStatementHandleCacheEntryCount()); - - if(0 == testNo) - // Verify statements moved over to discard action queue. - assertSame(statementsInCache, con.getDiscardedServerPreparedStatementCount()); - - // Run discard actions (otherwise run on pstmt.close) - con.closeUnreferencedPreparedStatementHandles(); - - assertSame(0, con.getDiscardedServerPreparedStatementCount()); - - // Verify new statement does not go into cache (since cache is now off) - try (SQLServerPreparedStatement pstmt = (SQLServerPreparedStatement)con.prepareStatement(query)) { - pstmt.execute(); // sp_executesql - pstmt.execute(); // sp_prepexec, actual handle created and cached. 
- - assertSame(0, con.getStatementHandleCacheEntryCount()); - } - } - } - } - - final class TestPrepareRace implements Runnable { - - SQLServerConnection con; - String[] queries; - AtomicReference exception; - - TestPrepareRace(SQLServerConnection con, String[] queries, AtomicReference exception) { - this.con = con; - this.queries = queries; - this.exception = exception; - } - - @Override - public void run() - { - for (int j = 0; j < 500000; j++) { - try (SQLServerPreparedStatement pstmt = (SQLServerPreparedStatement) con.prepareStatement(queries[j % 3])) { - pstmt.execute(); - } - catch (SQLException e) { - exception.set(e); - break; - } - } - } - } - - @Test - public void testPrepareRace() throws Exception { - - String[] queries = new String[3]; - queries[0] = String.format("SELECT * FROM sys.tables -- %s", UUID.randomUUID()); - queries[1] = String.format("SELECT * FROM sys.tables -- %s", UUID.randomUUID()); - queries[2] = String.format("SELECT * FROM sys.tables -- %s", UUID.randomUUID()); - - ExecutorService threadPool = Executors.newFixedThreadPool(4); - AtomicReference exception = new AtomicReference<>(); - try (SQLServerConnection con = (SQLServerConnection)DriverManager.getConnection(connectionString)) { - - for (int i = 0; i < 4; i++) { - threadPool.execute(new TestPrepareRace(con, queries, exception)); - } - - threadPool.shutdown(); - threadPool.awaitTermination(10, SECONDS); - - assertNull(exception.get()); - - // Force un-prepares. - con.closeUnreferencedPreparedStatementHandles(); - - // Verify that queue is now empty. - assertSame(0, con.getDiscardedServerPreparedStatementCount()); - } - } - /** * Test handling of the two configuration knobs related to prepared statement handling. * * @throws SQLException */ @Test - public void testStatementPoolingPreparedStatementExecAndUnprepareConfig() throws SQLException { + public void testPreparedStatementExecAndUnprepareConfig() throws SQLException { + + // Verify initial defaults are correct: + assertTrue(SQLServerConnection.getInitialDefaultServerPreparedStatementDiscardThreshold() > 1); + assertTrue(false == SQLServerConnection.getInitialDefaultEnablePrepareOnFirstPreparedStatementCall()); + assertSame(SQLServerConnection.getInitialDefaultServerPreparedStatementDiscardThreshold(), SQLServerConnection.getDefaultServerPreparedStatementDiscardThreshold()); + assertSame(SQLServerConnection.getInitialDefaultEnablePrepareOnFirstPreparedStatementCall(), SQLServerConnection.getDefaultEnablePrepareOnFirstPreparedStatementCall()); // Test Data Source properties SQLServerDataSource dataSource = new SQLServerDataSource(); dataSource.setURL(connectionString); // Verify defaults. 
- assertTrue(0 < dataSource.getStatementPoolingCacheSize()); + assertSame(SQLServerConnection.getDefaultEnablePrepareOnFirstPreparedStatementCall(), dataSource.getEnablePrepareOnFirstPreparedStatementCall()); + assertSame(SQLServerConnection.getDefaultServerPreparedStatementDiscardThreshold(), dataSource.getServerPreparedStatementDiscardThreshold()); // Verify change - dataSource.setStatementPoolingCacheSize(0); - assertSame(0, dataSource.getStatementPoolingCacheSize()); dataSource.setEnablePrepareOnFirstPreparedStatementCall(!dataSource.getEnablePrepareOnFirstPreparedStatementCall()); + assertNotSame(SQLServerConnection.getDefaultEnablePrepareOnFirstPreparedStatementCall(), dataSource.getEnablePrepareOnFirstPreparedStatementCall()); dataSource.setServerPreparedStatementDiscardThreshold(dataSource.getServerPreparedStatementDiscardThreshold() + 1); + assertNotSame(SQLServerConnection.getDefaultServerPreparedStatementDiscardThreshold(), dataSource.getServerPreparedStatementDiscardThreshold()); // Verify connection from data source has same parameters. SQLServerConnection connDataSource = (SQLServerConnection)dataSource.getConnection(); - assertSame(dataSource.getStatementPoolingCacheSize(), connDataSource.getStatementPoolingCacheSize()); assertSame(dataSource.getEnablePrepareOnFirstPreparedStatementCall(), connDataSource.getEnablePrepareOnFirstPreparedStatementCall()); assertSame(dataSource.getServerPreparedStatementDiscardThreshold(), connDataSource.getServerPreparedStatementDiscardThreshold()); // Test connection string properties. - - // Test disableStatementPooling - String connectionStringDisableStatementPooling = connectionString + ";disableStatementPooling=true;"; - SQLServerConnection connectionDisableStatementPooling = (SQLServerConnection)DriverManager.getConnection(connectionStringDisableStatementPooling); - assertSame(0, connectionDisableStatementPooling.getStatementPoolingCacheSize()); - assertTrue(!connectionDisableStatementPooling.isStatementPoolingEnabled()); - String connectionStringEnableStatementPooling = connectionString + ";disableStatementPooling=false;"; - SQLServerConnection connectionEnableStatementPooling = (SQLServerConnection)DriverManager.getConnection(connectionStringEnableStatementPooling); - assertTrue(0 < connectionEnableStatementPooling.getStatementPoolingCacheSize()); + // Make sure default is not same as test. + assertNotSame(true, SQLServerConnection.getDefaultEnablePrepareOnFirstPreparedStatementCall()); + assertNotSame(3, SQLServerConnection.getDefaultServerPreparedStatementDiscardThreshold()); // Test EnablePrepareOnFirstPreparedStatementCall String connectionStringNoExecuteSQL = connectionString + ";enablePrepareOnFirstPreparedStatementCall=true;"; @@ -469,28 +198,48 @@ public void testStatementPoolingPreparedStatementExecAndUnprepareConfig() throws // Good! } + // Change the defaults and verify change stuck. 
+ SQLServerConnection.setDefaultEnablePrepareOnFirstPreparedStatementCall(!SQLServerConnection.getInitialDefaultEnablePrepareOnFirstPreparedStatementCall()); + SQLServerConnection.setDefaultServerPreparedStatementDiscardThreshold(SQLServerConnection.getInitialDefaultServerPreparedStatementDiscardThreshold() - 1); + assertNotSame(SQLServerConnection.getInitialDefaultServerPreparedStatementDiscardThreshold(), SQLServerConnection.getDefaultServerPreparedStatementDiscardThreshold()); + assertNotSame(SQLServerConnection.getInitialDefaultEnablePrepareOnFirstPreparedStatementCall(), SQLServerConnection.getDefaultEnablePrepareOnFirstPreparedStatementCall()); + + // Verify invalid (negative) change does not stick for threshold. + SQLServerConnection.setDefaultServerPreparedStatementDiscardThreshold(-1); + assertTrue(0 < SQLServerConnection.getDefaultServerPreparedStatementDiscardThreshold()); + + // Verify instance settings. + SQLServerConnection conn1 = (SQLServerConnection)DriverManager.getConnection(connectionString); + assertSame(SQLServerConnection.getDefaultServerPreparedStatementDiscardThreshold(), conn1.getServerPreparedStatementDiscardThreshold()); + assertSame(SQLServerConnection.getDefaultEnablePrepareOnFirstPreparedStatementCall(), conn1.getEnablePrepareOnFirstPreparedStatementCall()); + conn1.setServerPreparedStatementDiscardThreshold(SQLServerConnection.getDefaultServerPreparedStatementDiscardThreshold() + 1); + conn1.setEnablePrepareOnFirstPreparedStatementCall(!SQLServerConnection.getDefaultEnablePrepareOnFirstPreparedStatementCall()); + assertNotSame(SQLServerConnection.getDefaultServerPreparedStatementDiscardThreshold(), conn1.getServerPreparedStatementDiscardThreshold()); + assertNotSame(SQLServerConnection.getDefaultEnablePrepareOnFirstPreparedStatementCall(), conn1.getEnablePrepareOnFirstPreparedStatementCall()); + + // Verify new instance not same as changed instance. + SQLServerConnection conn2 = (SQLServerConnection)DriverManager.getConnection(connectionString); + assertNotSame(conn1.getServerPreparedStatementDiscardThreshold(), conn2.getServerPreparedStatementDiscardThreshold()); + assertNotSame(conn1.getEnablePrepareOnFirstPreparedStatementCall(), conn2.getEnablePrepareOnFirstPreparedStatementCall()); + // Verify instance setting is followed. + SQLServerConnection.setDefaultServerPreparedStatementDiscardThreshold(SQLServerConnection.getInitialDefaultServerPreparedStatementDiscardThreshold()); try (SQLServerConnection con = (SQLServerConnection)DriverManager.getConnection(connectionString)) { - // Turn off use of prepared statement cache. - con.setStatementPoolingCacheSize(0); - String query = "/*unprepSettingsTest*/SELECT * FROM sys.objects;"; // Verify initial default is not serial: - assertTrue(1 < con.getServerPreparedStatementDiscardThreshold()); + assertTrue(1 < SQLServerConnection.getDefaultServerPreparedStatementDiscardThreshold()); // Verify first use is batched. try (SQLServerPreparedStatement pstmt = (SQLServerPreparedStatement)con.prepareStatement(query)) { - pstmt.execute(); // sp_executesql - pstmt.execute(); // sp_prepexec + pstmt.execute(); } - // Verify that the un-prepare action was not handled immediately. assertSame(1, con.getDiscardedServerPreparedStatementCount()); // Force un-prepares. - con.closeUnreferencedPreparedStatementHandles(); + con.closeDiscardedServerPreparedStatements(); // Verify that queue is now empty. 
assertSame(0, con.getDiscardedServerPreparedStatementCount()); diff --git a/src/test/java/com/microsoft/sqlserver/jdbc/unit/statement/RegressionTest.java b/src/test/java/com/microsoft/sqlserver/jdbc/unit/statement/RegressionTest.java index 7748e998b..659141759 100644 --- a/src/test/java/com/microsoft/sqlserver/jdbc/unit/statement/RegressionTest.java +++ b/src/test/java/com/microsoft/sqlserver/jdbc/unit/statement/RegressionTest.java @@ -8,17 +8,12 @@ package com.microsoft.sqlserver.jdbc.unit.statement; import static org.junit.jupiter.api.Assertions.assertEquals; -import static org.junit.jupiter.api.Assumptions.assumeTrue; import java.sql.DriverManager; -import java.sql.JDBCType; import java.sql.PreparedStatement; import java.sql.ResultSet; -import java.sql.Connection; -import java.sql.Statement; import java.sql.SQLException; import java.sql.Statement; -import java.sql.Types; import org.junit.jupiter.api.AfterAll; import org.junit.jupiter.api.Test; @@ -26,7 +21,6 @@ import org.junit.runner.RunWith; import com.microsoft.sqlserver.jdbc.SQLServerConnection; -import com.microsoft.sqlserver.jdbc.SQLServerPreparedStatement; import com.microsoft.sqlserver.testframework.AbstractTest; import com.microsoft.sqlserver.testframework.DBConnection; import com.microsoft.sqlserver.testframework.Utils; @@ -128,112 +122,6 @@ public void testSelectIntoUpdateCount() throws SQLException { if (null != con) con.close(); } - - /** - * Tests update query - * - * @throws SQLException - */ - @Test - public void testUpdateQuery() throws SQLException { - assumeTrue("JDBC41".equals(Utils.getConfiguredProperty("JDBC_Version")), "Aborting test case as JDBC version is not compatible. "); - - SQLServerConnection con = (SQLServerConnection) DriverManager.getConnection(connectionString); - String sql; - SQLServerPreparedStatement pstmt = null; - JDBCType[] targets = {JDBCType.INTEGER, JDBCType.SMALLINT}; - int rows = 3; - final String tableName = "[updateQuery]"; - - Statement stmt = con.createStatement(); - Utils.dropTableIfExists(tableName, stmt); - stmt.executeUpdate("CREATE TABLE " + tableName + " (" + "c1 int null," + "PK int NOT NULL PRIMARY KEY" + ")"); - - /* - * populate table - */ - sql = "insert into " + tableName + " values(" + "?,?" + ")"; - pstmt = (SQLServerPreparedStatement)con.prepareStatement(sql, ResultSet.TYPE_FORWARD_ONLY, - ResultSet.CONCUR_READ_ONLY, connection.getHoldability()); - - for (int i = 1; i <= rows; i++) { - pstmt.setObject(1, i, JDBCType.INTEGER); - pstmt.setObject(2, i, JDBCType.INTEGER); - pstmt.executeUpdate(); - } - - /* - * Update table - */ - sql = "update " + tableName + " SET c1= ? where PK =1"; - for (int i = 1; i <= rows; i++) { - pstmt = (SQLServerPreparedStatement)con.prepareStatement(sql, ResultSet.TYPE_FORWARD_ONLY, ResultSet.CONCUR_READ_ONLY); - for (int t = 0; t < targets.length; t++) { - pstmt.setObject(1, 5 + i, targets[t]); - pstmt.executeUpdate(); - } - } - - /* - * Verify - */ - ResultSet rs = stmt.executeQuery("select * from " + tableName); - rs.next(); - assertEquals(rs.getInt(1), 8, "Value mismatch"); - - - if (null != stmt) - stmt.close(); - if (null != con) - con.close(); - } - - private String xmlTableName = "try_SQLXML_Table"; - - /** - * Tests XML query - * - * @throws SQLException - */ - @Test - public void testXmlQuery() throws SQLException { - assumeTrue("JDBC41".equals(Utils.getConfiguredProperty("JDBC_Version")), "Aborting test case as JDBC version is not compatible. 
"); - - Connection connection = DriverManager.getConnection(connectionString); - - Statement stmt = connection.createStatement(); - - dropTables(stmt); - createTable(stmt); - - String sql = "UPDATE " + xmlTableName + " SET [c2] = ?, [c3] = ?"; - SQLServerPreparedStatement pstmt = (SQLServerPreparedStatement) connection.prepareStatement(sql); - - pstmt.setObject(1, null); - pstmt.setObject(2, null, Types.SQLXML); - pstmt.executeUpdate(); - - pstmt = (SQLServerPreparedStatement) connection.prepareStatement(sql); - pstmt.setObject(1, null, Types.SQLXML); - pstmt.setObject(2, null); - pstmt.executeUpdate(); - - pstmt = (SQLServerPreparedStatement) connection.prepareStatement(sql); - pstmt.setObject(1, null); - pstmt.setObject(2, null, Types.SQLXML); - pstmt.executeUpdate(); - } - - private void dropTables(Statement stmt) throws SQLException { - stmt.executeUpdate("if object_id('" + xmlTableName + "','U') is not null" + " drop table " + xmlTableName); - } - - private void createTable(Statement stmt) throws SQLException { - - String sql = "CREATE TABLE " + xmlTableName + " ([c1] int, [c2] xml, [c3] xml)"; - - stmt.execute(sql); - } @AfterAll public static void terminate() throws SQLException { diff --git a/src/test/java/com/microsoft/sqlserver/jdbc/unit/statement/RegressionTestAlwaysEncrypted.java b/src/test/java/com/microsoft/sqlserver/jdbc/unit/statement/RegressionTestAlwaysEncrypted.java deleted file mode 100644 index 8fe6d0f9a..000000000 --- a/src/test/java/com/microsoft/sqlserver/jdbc/unit/statement/RegressionTestAlwaysEncrypted.java +++ /dev/null @@ -1,313 +0,0 @@ -/* - * Microsoft JDBC Driver for SQL Server - * - * Copyright(c) Microsoft Corporation All rights reserved. - * - * This program is made available under the terms of the MIT License. See the LICENSE file in the project root for more information. 
- */ -/* TODO: Make possible to run automated (including certs, only works on Windows now etc.)*/ -/* -package com.microsoft.sqlserver.jdbc.unit.statement; - -import static org.junit.jupiter.api.Assertions.assertTrue; - -import java.sql.Connection; -import java.sql.Date; -import java.sql.DriverManager; -import java.sql.JDBCType; -import java.sql.ResultSet; -import java.sql.SQLException; -import java.sql.Statement; - -import org.junit.jupiter.api.Test; -import org.junit.platform.runner.JUnitPlatform; -import org.junit.runner.RunWith; - -import com.microsoft.sqlserver.jdbc.SQLServerConnection; -import com.microsoft.sqlserver.jdbc.SQLServerPreparedStatement; -import com.microsoft.sqlserver.jdbc.SQLServerResultSet; -import com.microsoft.sqlserver.testframework.AbstractTest; - -@RunWith(JUnitPlatform.class) -public class RegressionTestAlwaysEncrypted extends AbstractTest { - String dateTable = "DateTable"; - String charTable = "CharTable"; - String numericTable = "NumericTable"; - Statement stmt = null; - Connection connection = null; - Date date; - String cekName = "CEK_Auto1"; // you need to change this to your CEK - long dateValue = 212921879801519L; - - @Test - public void alwaysEncrypted1() throws Exception { - - Class.forName("com.microsoft.sqlserver.jdbc.SQLServerDriver"); - connection = DriverManager.getConnection(connectionString + ";trustservercertificate=true;columnEncryptionSetting=enabled;database=Tobias;"); - assertTrue(null != connection); - - stmt = ((SQLServerConnection) connection).createStatement(); - - date = new Date(dateValue); - - dropTable(); - createNumericTable(); - populateNumericTable(); - printNumericTable(); - - dropTable(); - createDateTable(); - populateDateTable(); - printDateTable(); - - dropTable(); - createNumericTable(); - populateNumericTableWithNull(); - printNumericTable(); - } - - @Test - public void alwaysEncrypted2() throws Exception { - - Class.forName("com.microsoft.sqlserver.jdbc.SQLServerDriver"); - connection = DriverManager.getConnection(connectionString + ";trustservercertificate=true;columnEncryptionSetting=enabled;database=Tobias;"); - assertTrue(null != connection); - - stmt = ((SQLServerConnection) connection).createStatement(); - - date = new Date(dateValue); - - dropTable(); - createCharTable(); - populateCharTable(); - printCharTable(); - - dropTable(); - createDateTable(); - populateDateTable(); - printDateTable(); - - dropTable(); - createNumericTable(); - populateNumericTableSpecificSetter(); - printNumericTable(); - - } - - private void populateDateTable() { - - try { - String sql = "insert into " + dateTable + " values( " + "?" + ")"; - SQLServerPreparedStatement sqlPstmt = (SQLServerPreparedStatement) ((SQLServerConnection) connection).prepareStatement(sql, - ResultSet.TYPE_FORWARD_ONLY, ResultSet.CONCUR_READ_ONLY, connection.getHoldability()); - sqlPstmt.setObject(1, date); - sqlPstmt.executeUpdate(); - } - catch (Exception e) { - e.printStackTrace(); - } - } - - private void populateCharTable() { - - try { - String sql = "insert into " + charTable + " values( " + "?,?,?,?,?,?" 
+ ")"; - SQLServerPreparedStatement sqlPstmt = (SQLServerPreparedStatement) ((SQLServerConnection) connection).prepareStatement(sql, - ResultSet.TYPE_FORWARD_ONLY, ResultSet.CONCUR_READ_ONLY, connection.getHoldability()); - sqlPstmt.setObject(1, "hi"); - sqlPstmt.setObject(2, "sample"); - sqlPstmt.setObject(3, "hey"); - sqlPstmt.setObject(4, "test"); - sqlPstmt.setObject(5, "hello"); - sqlPstmt.setObject(6, "caching"); - sqlPstmt.executeUpdate(); - } - catch (Exception e) { - e.printStackTrace(); - } - } - - private void populateNumericTable() throws Exception { - String sql = "insert into " + numericTable + " values( " + "?,?,?,?,?,?,?,?,?" + ")"; - SQLServerPreparedStatement sqlPstmt = (SQLServerPreparedStatement) ((SQLServerConnection) connection).prepareStatement(sql, - ResultSet.TYPE_FORWARD_ONLY, ResultSet.CONCUR_READ_ONLY, connection.getHoldability()); - sqlPstmt.setObject(1, true); - sqlPstmt.setObject(2, false); - sqlPstmt.setObject(3, true); - - Integer value = 255; - sqlPstmt.setObject(4, value.shortValue(), JDBCType.TINYINT); - sqlPstmt.setObject(5, value.shortValue(), JDBCType.TINYINT); - sqlPstmt.setObject(6, value.shortValue(), JDBCType.TINYINT); - - sqlPstmt.setObject(7, Short.valueOf("1"), JDBCType.SMALLINT); - sqlPstmt.setObject(8, Short.valueOf("2"), JDBCType.SMALLINT); - sqlPstmt.setObject(9, Short.valueOf("3"), JDBCType.SMALLINT); - - sqlPstmt.executeUpdate(); - } - - private void populateNumericTableSpecificSetter() { - - try { - String sql = "insert into " + numericTable + " values( " + "?,?,?,?,?,?,?,?,?" + ")"; - SQLServerPreparedStatement sqlPstmt = (SQLServerPreparedStatement) ((SQLServerConnection) connection).prepareStatement(sql, - ResultSet.TYPE_FORWARD_ONLY, ResultSet.CONCUR_READ_ONLY, connection.getHoldability()); - sqlPstmt.setBoolean(1, true); - sqlPstmt.setBoolean(2, false); - sqlPstmt.setBoolean(3, true); - - Integer value = 255; - sqlPstmt.setShort(4, value.shortValue()); - sqlPstmt.setShort(5, value.shortValue()); - sqlPstmt.setShort(6, value.shortValue()); - - sqlPstmt.setByte(7, Byte.valueOf("127")); - sqlPstmt.setByte(8, Byte.valueOf("127")); - sqlPstmt.setByte(9, Byte.valueOf("127")); - - sqlPstmt.executeUpdate(); - } - catch (Exception e) { - e.printStackTrace(); - } - } - - private void populateNumericTableWithNull() { - - try { - String sql = "insert into " + numericTable + " values( " + "?,?,?" + ",?,?,?" + ",?,?,?" 
+ ")"; - SQLServerPreparedStatement sqlPstmt = (SQLServerPreparedStatement) ((SQLServerConnection) connection).prepareStatement(sql, - ResultSet.TYPE_FORWARD_ONLY, ResultSet.CONCUR_READ_ONLY, connection.getHoldability()); - sqlPstmt.setObject(1, null, java.sql.Types.BIT); - sqlPstmt.setObject(2, null, java.sql.Types.BIT); - sqlPstmt.setObject(3, null, java.sql.Types.BIT); - - sqlPstmt.setObject(4, null, java.sql.Types.TINYINT); - sqlPstmt.setObject(5, null, java.sql.Types.TINYINT); - sqlPstmt.setObject(6, null, java.sql.Types.TINYINT); - - sqlPstmt.setObject(7, null, java.sql.Types.SMALLINT); - sqlPstmt.setObject(8, null, java.sql.Types.SMALLINT); - sqlPstmt.setObject(9, null, java.sql.Types.SMALLINT); - - sqlPstmt.executeUpdate(); - } - catch (Exception e) { - e.printStackTrace(); - } - } - - private void printDateTable() throws SQLException { - - stmt = connection.createStatement(ResultSet.TYPE_SCROLL_SENSITIVE, ResultSet.CONCUR_UPDATABLE); - SQLServerResultSet rs = (SQLServerResultSet) stmt.executeQuery("select * from " + dateTable); - - while (rs.next()) { - System.out.println(rs.getObject(1)); - } - } - - private void printCharTable() throws SQLException { - stmt = connection.createStatement(ResultSet.TYPE_SCROLL_SENSITIVE, ResultSet.CONCUR_UPDATABLE); - SQLServerResultSet rs = (SQLServerResultSet) stmt.executeQuery("select * from " + charTable); - - while (rs.next()) { - System.out.println(rs.getObject(1)); - System.out.println(rs.getObject(2)); - System.out.println(rs.getObject(3)); - System.out.println(rs.getObject(4)); - System.out.println(rs.getObject(5)); - System.out.println(rs.getObject(6)); - } - - } - - private void printNumericTable() throws SQLException { - stmt = connection.createStatement(ResultSet.TYPE_SCROLL_SENSITIVE, ResultSet.CONCUR_UPDATABLE); - SQLServerResultSet rs = (SQLServerResultSet) stmt.executeQuery("select * from " + numericTable); - - while (rs.next()) { - System.out.println(rs.getObject(1)); - System.out.println(rs.getObject(2)); - System.out.println(rs.getObject(3)); - System.out.println(rs.getObject(4)); - System.out.println(rs.getObject(5)); - System.out.println(rs.getObject(6)); - } - - } - - private void createDateTable() throws SQLException { - - String sql = "create table " + dateTable + " (" - + "RandomizedDate date ENCRYPTED WITH (ENCRYPTION_TYPE = RANDOMIZED, ALGORITHM = 'AEAD_AES_256_CBC_HMAC_SHA_256', COLUMN_ENCRYPTION_KEY = " - + cekName + ") NULL," + ");"; - - try { - stmt.execute(sql); - } - catch (SQLException e) { - System.out.println(e); - } - } - - private void createCharTable() throws SQLException { - String sql = "create table " + charTable + " (" + "PlainChar char(20) null," - + "RandomizedChar char(20) COLLATE Latin1_General_BIN2 ENCRYPTED WITH (ENCRYPTION_TYPE = RANDOMIZED, ALGORITHM = 'AEAD_AES_256_CBC_HMAC_SHA_256', COLUMN_ENCRYPTION_KEY = " - + cekName + ") NULL," - + "DeterministicChar char(20) COLLATE Latin1_General_BIN2 ENCRYPTED WITH (ENCRYPTION_TYPE = DETERMINISTIC, ALGORITHM = 'AEAD_AES_256_CBC_HMAC_SHA_256', COLUMN_ENCRYPTION_KEY = " - + cekName + ") NULL," - - + "PlainVarchar varchar(50) null," - + "RandomizedVarchar varchar(50) COLLATE Latin1_General_BIN2 ENCRYPTED WITH (ENCRYPTION_TYPE = RANDOMIZED, ALGORITHM = 'AEAD_AES_256_CBC_HMAC_SHA_256', COLUMN_ENCRYPTION_KEY = " - + cekName + ") NULL," - + "DeterministicVarchar varchar(50) COLLATE Latin1_General_BIN2 ENCRYPTED WITH (ENCRYPTION_TYPE = DETERMINISTIC, ALGORITHM = 'AEAD_AES_256_CBC_HMAC_SHA_256', COLUMN_ENCRYPTION_KEY = " - + cekName + ") NULL," - - + ");"; - - try 
{ - stmt.execute(sql); - } - catch (SQLException e) { - System.out.println(e.getMessage()); - } - } - - private void createNumericTable() throws SQLException { - String sql = "create table " + numericTable + " (" + "PlainBit bit null," - + "RandomizedBit bit ENCRYPTED WITH (ENCRYPTION_TYPE = RANDOMIZED, ALGORITHM = 'AEAD_AES_256_CBC_HMAC_SHA_256', COLUMN_ENCRYPTION_KEY = " - + cekName + ") NULL," - + "DeterministicBit bit ENCRYPTED WITH (ENCRYPTION_TYPE = DETERMINISTIC, ALGORITHM = 'AEAD_AES_256_CBC_HMAC_SHA_256', COLUMN_ENCRYPTION_KEY = " - + cekName + ") NULL," - - + "PlainTinyint tinyint null," - + "RandomizedTinyint tinyint ENCRYPTED WITH (ENCRYPTION_TYPE = RANDOMIZED, ALGORITHM = 'AEAD_AES_256_CBC_HMAC_SHA_256', COLUMN_ENCRYPTION_KEY = " - + cekName + ") NULL," - + "DeterministicTinyint tinyint ENCRYPTED WITH (ENCRYPTION_TYPE = DETERMINISTIC, ALGORITHM = 'AEAD_AES_256_CBC_HMAC_SHA_256', COLUMN_ENCRYPTION_KEY = " - + cekName + ") NULL," - - + "PlainSmallint smallint null," - + "RandomizedSmallint smallint ENCRYPTED WITH (ENCRYPTION_TYPE = RANDOMIZED, ALGORITHM = 'AEAD_AES_256_CBC_HMAC_SHA_256', COLUMN_ENCRYPTION_KEY = " - + cekName + ") NULL," - + "DeterministicSmallint smallint ENCRYPTED WITH (ENCRYPTION_TYPE = DETERMINISTIC, ALGORITHM = 'AEAD_AES_256_CBC_HMAC_SHA_256', COLUMN_ENCRYPTION_KEY = " - + cekName + ") NULL," - - + ");"; - - try { - stmt.execute(sql); - } - catch (SQLException e) { - System.out.println(e.getMessage()); - } - } - - private void dropTable() throws SQLException { - stmt.executeUpdate("if object_id('" + dateTable + "','U') is not null" + " drop table " + dateTable); - stmt.executeUpdate("if object_id('" + charTable + "','U') is not null" + " drop table " + charTable); - stmt.executeUpdate("if object_id('" + numericTable + "','U') is not null" + " drop table " + numericTable); - } -} -*/ \ No newline at end of file
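For reference, a minimal sketch of how an application might use the prepared-statement configuration knobs these tests exercise (process-wide defaults, per-connection overrides, and the discarded-handle queue), assuming a driver build containing this change is on the classpath. The connection string, class name, and query below are placeholders, not part of this change; the API names are taken from the tests above and may differ in released driver versions.

import java.sql.DriverManager;
import java.sql.SQLException;

import com.microsoft.sqlserver.jdbc.SQLServerConnection;
import com.microsoft.sqlserver.jdbc.SQLServerPreparedStatement;

public class PreparedStatementConfigSketch {
    public static void main(String[] args) throws SQLException {
        // Placeholder connection string; substitute a real server and credentials.
        String connectionString = "jdbc:sqlserver://localhost;databaseName=tempdb;integratedSecurity=true;";

        // Process-wide defaults picked up by every new connection
        // (exercised by testPreparedStatementExecAndUnprepareConfig above).
        SQLServerConnection.setDefaultEnablePrepareOnFirstPreparedStatementCall(false); // keep the sp_executesql-first pattern
        SQLServerConnection.setDefaultServerPreparedStatementDiscardThreshold(10);      // batch sp_unprepare calls

        try (SQLServerConnection con = (SQLServerConnection) DriverManager.getConnection(connectionString)) {
            // Per-connection overrides of the process-wide defaults.
            con.setEnablePrepareOnFirstPreparedStatementCall(true); // prepare on first execute for this connection only
            con.setServerPreparedStatementDiscardThreshold(5);

            try (SQLServerPreparedStatement pstmt = (SQLServerPreparedStatement) con
                    .prepareStatement("SELECT name FROM sys.tables WHERE name = ?")) {
                pstmt.setString(1, "spt_monitor");
                pstmt.execute();
            }

            // Closed handles queue up until the discard threshold is reached;
            // the queue can also be flushed explicitly.
            System.out.println("Discarded handles queued: " + con.getDiscardedServerPreparedStatementCount());
            con.closeDiscardedServerPreparedStatements();
        }
    }
}

The batched behavior verified in testBatchedUnprepare is the motivation for the discard threshold: rather than issuing one sp_unprepare round trip per closed statement, handles are queued and unprepared together once the threshold is crossed.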