diff --git a/.gitignore b/.gitignore
index acb9b8d91..fad40fcb3 100644
--- a/.gitignore
+++ b/.gitignore
@@ -16,7 +16,6 @@ local.properties
.classpath
.vscode/
.settings/
-.gradle/
.loadpath
# External tool builders
diff --git a/build.gradle b/build.gradle
index 7a402dfbc..55792f0a3 100644
--- a/build.gradle
+++ b/build.gradle
@@ -68,7 +68,7 @@ repositories {
dependencies {
compile 'com.microsoft.azure:azure-keyvault:0.9.7',
'com.microsoft.azure:adal4j:1.1.3'
-
+
testCompile 'junit:junit:4.12',
'org.junit.platform:junit-platform-console:1.0.0-M3',
'org.junit.platform:junit-platform-commons:1.0.0-M3',
diff --git a/pom.xml b/pom.xml
index 62b790dd7..753104ef2 100644
--- a/pom.xml
+++ b/pom.xml
@@ -118,7 +118,7 @@
com.zaxxerHikariCP
- 2.6.1
+ 2.6.0test
diff --git a/src/main/java/com/microsoft/sqlserver/jdbc/ParsedSQLMetadata.java b/src/main/java/com/microsoft/sqlserver/jdbc/ParsedSQLMetadata.java
deleted file mode 100644
index 19c34ebec..000000000
--- a/src/main/java/com/microsoft/sqlserver/jdbc/ParsedSQLMetadata.java
+++ /dev/null
@@ -1,28 +0,0 @@
-/*
- * Microsoft JDBC Driver for SQL Server
- *
- * Copyright(c) Microsoft Corporation All rights reserved.
- *
- * This program is made available under the terms of the MIT License. See the LICENSE file in the project root for more information.
- */
-
-package com.microsoft.sqlserver.jdbc;
-
-/**
- * Used for caching of meta data from parsed SQL text.
- */
-final class ParsedSQLCacheItem {
- /** The SQL text AFTER processing. */
- String processedSQL;
- int parameterCount;
- String procedureName;
- boolean bReturnValueSyntax;
-
- ParsedSQLCacheItem(String processedSQL, int parameterCount, String procedureName, boolean bReturnValueSyntax) {
- this.processedSQL = processedSQL;
- this.parameterCount = parameterCount;
- this.procedureName = procedureName;
- this.bReturnValueSyntax = bReturnValueSyntax;
- }
-}
-
diff --git a/src/main/java/com/microsoft/sqlserver/jdbc/SQLServerConnection.java b/src/main/java/com/microsoft/sqlserver/jdbc/SQLServerConnection.java
index e550cb68e..566c42b75 100644
--- a/src/main/java/com/microsoft/sqlserver/jdbc/SQLServerConnection.java
+++ b/src/main/java/com/microsoft/sqlserver/jdbc/SQLServerConnection.java
@@ -56,10 +56,6 @@
import org.ietf.jgss.GSSCredential;
import org.ietf.jgss.GSSException;
-import mssql.googlecode.concurrentlinkedhashmap.ConcurrentLinkedHashMap;
-import mssql.googlecode.concurrentlinkedhashmap.ConcurrentLinkedHashMap.Builder;
-import mssql.googlecode.concurrentlinkedhashmap.EvictionListener;
-
/**
* SQLServerConnection implements a JDBC connection to SQL Server. SQLServerConnections support JDBC connection pooling and may be either physical
* JDBC connections or logical JDBC connections.
@@ -89,20 +85,22 @@ public class SQLServerConnection implements ISQLServerConnection {
// Threasholds related to when prepared statement handles are cleaned-up. 1 == immediately.
/**
- * The default for the prepared statement clean-up action threshold (i.e. when sp_unprepare is called).
+ * The initial default on application start-up for the prepared statement clean-up action threshold (i.e. when sp_unprepare is called).
*/
- static final int DEFAULT_SERVER_PREPARED_STATEMENT_DISCARD_THRESHOLD = 10; // Used to set the initial default, can be changed later.
+ static final private int INITIAL_DEFAULT_SERVER_PREPARED_STATEMENT_DISCARD_THRESHOLD = 10; // Used to set the initial default, can be changed later.
+ static private int defaultServerPreparedStatementDiscardThreshold = -1; // Current default for new connections
private int serverPreparedStatementDiscardThreshold = -1; // Current limit for this particular connection.
/**
- * The default for if prepared statements should execute sp_executesql before following the prepare, unprepare pattern.
+ * The initial default on application start-up for whether prepared statements should execute sp_executesql before following the prepare, unprepare pattern.
*/
- static final boolean DEFAULT_ENABLE_PREPARE_ON_FIRST_PREPARED_STATEMENT_CALL = false; // Used to set the initial default, can be changed later. false == use sp_executesql -> sp_prepexec -> sp_execute -> batched -> sp_unprepare pattern, true == skip sp_executesql part of pattern.
+ static final private boolean INITIAL_DEFAULT_ENABLE_PREPARE_ON_FIRST_PREPARED_STATEMENT_CALL = false; // Used to set the initial default, can be changed later. false == use sp_executesql -> sp_prepexec -> sp_execute -> batched -> sp_unprepare pattern, true == skip sp_executesql part of pattern.
+ static private Boolean defaultEnablePrepareOnFirstPreparedStatementCall = null; // Current default for new connections
private Boolean enablePrepareOnFirstPreparedStatementCall = null; // Current limit for this particular connection.
// Handle the actual queue of discarded prepared statements.
- private ConcurrentLinkedQueue discardedPreparedStatementHandles = new ConcurrentLinkedQueue();
- private AtomicInteger discardedPreparedStatementHandleCount = new AtomicInteger(0);
+ private ConcurrentLinkedQueue discardedPreparedStatementHandles = new ConcurrentLinkedQueue();
+ private AtomicInteger discardedPreparedStatementHandleQueueCount = new AtomicInteger(0);
private boolean fedAuthRequiredByUser = false;
private boolean fedAuthRequiredPreLoginResponse = false;
@@ -117,189 +115,6 @@ public class SQLServerConnection implements ISQLServerConnection {
private SqlFedAuthToken fedAuthToken = null;
- static class Sha1HashKey {
- private byte[] bytes;
-
- Sha1HashKey(String sql, String parametersDefinition) {
- this(String.format("%s%s", sql, parametersDefinition));
- }
-
- Sha1HashKey(String s) {
- bytes = getSha1Digest().digest(s.getBytes());
- }
-
- public boolean equals(Object obj) {
- if (!(obj instanceof Sha1HashKey))
- return false;
-
- return java.util.Arrays.equals(bytes, ((Sha1HashKey)obj).bytes);
- }
-
- public int hashCode() {
- return java.util.Arrays.hashCode(bytes);
- }
-
- private java.security.MessageDigest getSha1Digest() {
- try {
- return java.security.MessageDigest.getInstance("SHA-1");
- }
- catch (final java.security.NoSuchAlgorithmException e) {
- // This is not theoretically possible, but we're forced to catch it anyway
- throw new RuntimeException(e);
- }
- }
- }
-
- /**
- * Used to keep track of an individual prepared statement handle.
- */
- class PreparedStatementHandle {
- private int handle = 0;
- private final AtomicInteger handleRefCount = new AtomicInteger();
- private boolean isDirectSql;
- private volatile boolean evictedFromCache;
- private volatile boolean explicitlyDiscarded;
- private Sha1HashKey key;
-
- PreparedStatementHandle(Sha1HashKey key, int handle, boolean isDirectSql, boolean isEvictedFromCache) {
- this.key = key;
- this.handle = handle;
- this.isDirectSql = isDirectSql;
- this.setIsEvictedFromCache(isEvictedFromCache);
- handleRefCount.set(1);
- }
-
- /** Has the statement been evicted from the statement handle cache. */
- private boolean isEvictedFromCache() {
- return evictedFromCache;
- }
-
- /** Specify whether the statement been evicted from the statement handle cache. */
- private void setIsEvictedFromCache(boolean isEvictedFromCache) {
- this.evictedFromCache = isEvictedFromCache;
- }
-
- /** Specify that this statement has been explicitly discarded from being used by the cache. */
- void setIsExplicitlyDiscarded() {
- this.explicitlyDiscarded = true;
-
- evictCachedPreparedStatementHandle(this);
- }
-
- /** Has the statement been explicitly discarded. */
- private boolean isExplicitlyDiscarded() {
- return explicitlyDiscarded;
- }
-
- /** Get the actual handle. */
- int getHandle() {
- return handle;
- }
-
- /** Get the cache key. */
- Sha1HashKey getKey() {
- return key;
- }
-
- boolean isDirectSql() {
- return isDirectSql;
- }
-
- /** Make sure handle cannot be re-used.
- *
- * @return
- * false: Handle could not be discarded, it is in use.
- * true: Handle was successfully put on path for discarding.
- */
- private boolean tryDiscardHandle() {
- return handleRefCount.compareAndSet(0, -999);
- }
-
- /** Returns whether this statement has been discarded and can no longer be re-used. */
- private boolean isDiscarded() {
- return 0 > handleRefCount.intValue();
- }
-
- /** Adds a new reference to this handle, i.e. re-using it.
- *
- * @return
- * false: Reference could not be added, statement has been discarded or does not have a handle associated with it.
- * true: Reference was successfully added.
- */
- boolean tryAddReference() {
- if (isDiscarded() || isExplicitlyDiscarded())
- return false;
- else {
- int refCount = handleRefCount.incrementAndGet();
- return refCount > 0;
- }
- }
-
- /** Remove a reference from this handle*/
- void removeReference() {
- handleRefCount.decrementAndGet();
- }
- }
-
- /** Size of the parsed SQL-text metadata cache */
- static final private int PARSED_SQL_CACHE_SIZE = 100;
-
- /** Cache of parsed SQL meta data */
- static private ConcurrentLinkedHashMap parsedSQLCache;
-
- static {
- parsedSQLCache = new Builder()
- .maximumWeightedCapacity(PARSED_SQL_CACHE_SIZE)
- .build();
- }
-
- /** Get prepared statement cache entry if exists, if not parse and create a new one */
- static ParsedSQLCacheItem getCachedParsedSQL(Sha1HashKey key) {
- return parsedSQLCache.get(key);
- }
-
- /** Parse and create a information about parsed SQL text */
- static ParsedSQLCacheItem parseAndCacheSQL(Sha1HashKey key, String sql) throws SQLServerException {
- JDBCSyntaxTranslator translator = new JDBCSyntaxTranslator();
-
- String parsedSql = translator.translate(sql);
- String procName = translator.getProcedureName(); // may return null
- boolean returnValueSyntax = translator.hasReturnValueSyntax();
- int paramCount = countParams(parsedSql);
-
- ParsedSQLCacheItem cacheItem = new ParsedSQLCacheItem (parsedSql, paramCount, procName, returnValueSyntax);
- parsedSQLCache.putIfAbsent(key, cacheItem);
- return cacheItem;
- }
-
- /** Size of the prepared statement handle cache */
- private int statementPoolingCacheSize = 10;
-
- /** Default size for prepared statement caches */
- static final int DEFAULT_STATEMENT_POOLING_CACHE_SIZE = 10;
- /** Cache of prepared statement handles */
- private ConcurrentLinkedHashMap preparedStatementHandleCache;
- /** Cache of prepared statement parameter metadata */
- private ConcurrentLinkedHashMap parameterMetadataCache;
-
- /**
- * Find statement parameters.
- *
- * @param sql
- * SQL text to parse for number of parameters to intialize.
- */
- private static int countParams(String sql) {
- int nParams = 0;
-
- // Figure out the expected number of parameters by counting the
- // parameter placeholders in the SQL string.
- int offset = -1;
- while ((offset = ParameterUtils.scanSQLForChar('?', sql, ++offset)) < sql.length())
- ++nParams;
-
- return nParams;
- }
-
SqlFedAuthToken getAuthenticationResult() {
return fedAuthToken;
}
@@ -907,18 +722,6 @@ final boolean attachConnId() {
connectionlogger.severe(message);
throw new UnsupportedOperationException(message);
}
-
- // Caching turned on?
- if (0 < this.getStatementPoolingCacheSize()) {
- preparedStatementHandleCache = new Builder()
- .maximumWeightedCapacity(getStatementPoolingCacheSize())
- .listener(new PreparedStatementCacheEvictionListener())
- .build();
-
- parameterMetadataCache = new Builder()
- .maximumWeightedCapacity(getStatementPoolingCacheSize())
- .build();
- }
}
void setFailoverPartnerServerProvided(String partner) {
@@ -1395,28 +1198,14 @@ Connection connectInternal(Properties propsIn,
sendTimeAsDatetime = booleanPropertyOn(sPropKey, sPropValue);
- // Must be set before DISABLE_STATEMENT_POOLING
- sPropKey = SQLServerDriverIntProperty.STATEMENT_POOLING_CACHE_SIZE.toString();
- if (activeConnectionProperties.getProperty(sPropKey) != null && activeConnectionProperties.getProperty(sPropKey).length() > 0) {
- try {
- int n = (new Integer(activeConnectionProperties.getProperty(sPropKey))).intValue();
- this.setStatementPoolingCacheSize(n);
- }
- catch (NumberFormatException e) {
- MessageFormat form = new MessageFormat(SQLServerException.getErrString("R_statementPoolingCacheSize"));
- Object[] msgArgs = {activeConnectionProperties.getProperty(sPropKey)};
- SQLServerException.makeFromDriverError(this, this, form.format(msgArgs), null, false);
- }
- }
-
- // Must be set after STATEMENT_POOLING_CACHE_SIZE
sPropKey = SQLServerDriverBooleanProperty.DISABLE_STATEMENT_POOLING.toString();
sPropValue = activeConnectionProperties.getProperty(sPropKey);
- if (null != sPropValue) {
- // If disabled set cache size to 0 if disabled.
- if(booleanPropertyOn(sPropKey, sPropValue))
- this.setStatementPoolingCacheSize(0);
- }
+ if (sPropValue != null) // property is optional; when present, the only accepted value is "true"
+ if (false == booleanPropertyOn(sPropKey, sPropValue)) {
+ MessageFormat form = new MessageFormat(SQLServerException.getErrString("R_invaliddisableStatementPooling"));
+ Object[] msgArgs = {new String(sPropValue)};
+ SQLServerException.makeFromDriverError(this, this, form.format(msgArgs), null, false);
+ }
sPropKey = SQLServerDriverBooleanProperty.INTEGRATED_SECURITY.toString();
sPropValue = activeConnectionProperties.getProperty(sPropKey);
@@ -2971,13 +2760,6 @@ public void close() throws SQLServerException {
tdsChannel.close();
}
- // Invalidate statement caches.
- if(null != preparedStatementHandleCache)
- preparedStatementHandleCache.clear();
-
- if(null != parameterMetadataCache)
- parameterMetadataCache.clear();
-
// Clean-up queue etc. related to batching of prepared statement discard actions (sp_unprepare).
cleanupPreparedStatementDiscardActions();
@@ -5519,24 +5301,38 @@ public static synchronized void setColumnEncryptionKeyCacheTtl(int columnEncrypt
static synchronized long getColumnEncryptionKeyCacheTtl() {
return columnEncryptionKeyCacheTtl;
}
+
+ /**
+ * Used to keep track of an individual handle ready for un-prepare.
+ */
+ private final class PreparedStatementDiscardItem {
+
+ int handle;
+ boolean directSql;
+
+ PreparedStatementDiscardItem(int handle, boolean directSql) {
+ this.handle = handle;
+ this.directSql = directSql;
+ }
+ }
+
/**
* Enqueue a discarded prepared statement handle to be clean-up on the server.
*
- * @param statementHandle
- * The prepared statement handle that should be scheduled for unprepare.
+ * @param handle
+ * The prepared statement handle
+ * @param directSql
+ * Whether the statement handle is direct SQL (true) or a cursor (false)
*/
- final void enqueueUnprepareStatementHandle(PreparedStatementHandle statementHandle) {
- if(null == statementHandle)
- return;
-
- if (loggerExternal.isLoggable(java.util.logging.Level.FINER))
- loggerExternal.finer(this + ": Adding PreparedHandle to queue for un-prepare:" + statementHandle.getHandle());
+ final void enqueuePreparedStatementDiscardItem(int handle, boolean directSql) {
+ if (this.getConnectionLogger().isLoggable(java.util.logging.Level.FINER))
+ this.getConnectionLogger().finer(this + ": Adding PreparedHandle to queue for un-prepare:" + handle);
// Add the new handle to the discarding queue and find out current # enqueued.
- this.discardedPreparedStatementHandles.add(statementHandle);
- this.discardedPreparedStatementHandleCount.incrementAndGet();
+ this.discardedPreparedStatementHandles.add(new PreparedStatementDiscardItem(handle, directSql));
+ this.discardedPreparedStatementHandleQueueCount.incrementAndGet();
}
@@ -5546,22 +5342,59 @@ final void enqueueUnprepareStatementHandle(PreparedStatementHandle statementHand
* @return Returns the current value per the description.
*/
public int getDiscardedServerPreparedStatementCount() {
- return this.discardedPreparedStatementHandleCount.get();
+ return this.discardedPreparedStatementHandleQueueCount.get();
}
/**
* Forces the un-prepare requests for any outstanding discarded prepared statements to be executed.
*/
- public void closeUnreferencedPreparedStatementHandles() {
- this.unprepareUnreferencedPreparedStatementHandles(true);
+ public void closeDiscardedServerPreparedStatements() {
+ this.handlePreparedStatementDiscardActions(true);
}
/**
* Remove references to outstanding un-prepare requests. Should be run when connection is closed.
*/
private final void cleanupPreparedStatementDiscardActions() {
- discardedPreparedStatementHandles.clear();
- discardedPreparedStatementHandleCount.set(0);
+ this.discardedPreparedStatementHandles.clear();
+ this.discardedPreparedStatementHandleQueueCount.set(0);
+ }
+
+ /**
+ * The initial default on application start-up for whether prepared statements should execute sp_executesql before following the prepare, unprepare pattern.
+ *
+ * @return Returns the current setting per the description.
+ */
+ static public boolean getInitialDefaultEnablePrepareOnFirstPreparedStatementCall() {
+ return INITIAL_DEFAULT_ENABLE_PREPARE_ON_FIRST_PREPARED_STATEMENT_CALL;
+ }
+
+ /**
+ * Returns the default behavior for new connection instances. If false the first execution will call sp_executesql and not prepare
+ * a statement, once the second execution happens it will call sp_prepexec and actually setup a prepared statement handle. Following
+ * executions will call sp_execute. This relieves the need for sp_unprepare on prepared statement close if the statement is only
+ * executed once. Initial setting for this option is available in INITIAL_DEFAULT_ENABLE_PREPARE_ON_FIRST_PREPARED_STATEMENT_CALL.
+ *
+ * @return Returns the current setting per the description.
+ */
+ static public boolean getDefaultEnablePrepareOnFirstPreparedStatementCall() {
+ if(null == defaultEnablePrepareOnFirstPreparedStatementCall)
+ return getInitialDefaultEnablePrepareOnFirstPreparedStatementCall();
+ else
+ return defaultEnablePrepareOnFirstPreparedStatementCall;
+ }
+
+ /**
+ * Specifies the default behavior for new connection instances. If value is false the first execution will call sp_executesql and not prepare
+ * a statement, once the second execution happens it will call sp_prepexec and actually setup a prepared statement handle. Following
+ * executions will call sp_execute. This relieves the need for sp_unprepare on prepared statement close if the statement is only
+ * executed once. Initial setting for this option is available in INITIAL_DEFAULT_ENABLE_PREPARE_ON_FIRST_PREPARED_STATEMENT_CALL.
+ *
+ * @param value
+ * Changes the setting per the description.
+ */
+ static public void setDefaultEnablePrepareOnFirstPreparedStatementCall(boolean value) {
+ defaultEnablePrepareOnFirstPreparedStatementCall = value;
}
/**
@@ -5574,7 +5407,7 @@ private final void cleanupPreparedStatementDiscardActions() {
*/
public boolean getEnablePrepareOnFirstPreparedStatementCall() {
if(null == this.enablePrepareOnFirstPreparedStatementCall)
- return DEFAULT_ENABLE_PREPARE_ON_FIRST_PREPARED_STATEMENT_CALL;
+ return getDefaultEnablePrepareOnFirstPreparedStatementCall();
else
return this.enablePrepareOnFirstPreparedStatementCall;
}
@@ -5593,201 +5426,130 @@ public void setEnablePrepareOnFirstPreparedStatementCall(boolean value) {
}
/**
- * Returns the behavior for a specific connection instance. This setting controls how many outstanding prepared statement discard actions
- * (sp_unprepare) can be outstanding per connection before a call to clean-up the outstanding handles on the server is executed. If the setting is
- * {@literal <=} 1, unprepare actions will be executed immedietely on prepared statement close. If it is set to {@literal >} 1, these calls
- * will be batched together to avoid overhead of calling sp_unprepare too often. The default for this option can be changed by calling
- * getDefaultServerPreparedStatementDiscardThreshold().
+ * The initial default on application start-up for the prepared statement clean-up action threshold (i.e. when sp_unprepare is called).
*
* @return Returns the current setting per the description.
*/
- public int getServerPreparedStatementDiscardThreshold() {
- if (0 > this.serverPreparedStatementDiscardThreshold)
- return DEFAULT_SERVER_PREPARED_STATEMENT_DISCARD_THRESHOLD;
- else
- return this.serverPreparedStatementDiscardThreshold;
+ static public int getInitialDefaultServerPreparedStatementDiscardThreshold() {
+ return INITIAL_DEFAULT_SERVER_PREPARED_STATEMENT_DISCARD_THRESHOLD;
}
/**
- * Specifies the behavior for a specific connection instance. This setting controls how many outstanding prepared statement discard actions
+ * Returns the default behavior for new connection instances. This setting controls how many outstanding prepared statement discard actions
* (sp_unprepare) can be outstanding per connection before a call to clean-up the outstanding handles on the server is executed. If the setting is
* {@literal <=} 1 unprepare actions will be executed immedietely on prepared statement close. If it is set to {@literal >} 1 these calls will be
- * batched together to avoid overhead of calling sp_unprepare too often.
+ * batched together to avoid overhead of calling sp_unprepare too often. Initial setting for this option is available in
+ * INITIAL_DEFAULT_SERVER_PREPARED_STATEMENT_DISCARD_THRESHOLD.
*
- * @param value
- * Changes the setting per the description.
+ * @return Returns the current setting per the description.
*/
- public void setServerPreparedStatementDiscardThreshold(int value) {
- this.serverPreparedStatementDiscardThreshold = Math.max(0, value);
- }
-
- final boolean isPreparedStatementUnprepareBatchingEnabled() {
- return 1 < getServerPreparedStatementDiscardThreshold();
+ static public int getDefaultServerPreparedStatementDiscardThreshold() {
+ if(0 > defaultServerPreparedStatementDiscardThreshold)
+ return getInitialDefaultServerPreparedStatementDiscardThreshold();
+ else
+ return defaultServerPreparedStatementDiscardThreshold;
}
/**
- * Cleans-up discarded prepared statement handles on the server using batched un-prepare actions if the batching threshold has been reached.
+ * Specifies the default behavior for new connection instances. This setting controls how many outstanding prepared statement discard actions
+ * (sp_unprepare) can be outstanding per connection before a call to clean-up the outstanding handles on the server is executed. If the setting is
+ * {@literal <=} 1 unprepare actions will be executed immedietely on prepared statement close. If it is set to {@literal >} 1 these calls will be
+ * batched together to avoid overhead of calling sp_unprepare too often. Initial setting for this option is available in
+ * INITIAL_DEFAULT_SERVER_PREPARED_STATEMENT_DISCARD_THRESHOLD.
*
- * @param force
- * When force is set to true we ignore the current threshold for if the discard actions should run and run them anyway.
- */
- final void unprepareUnreferencedPreparedStatementHandles(boolean force) {
- // Skip out if session is unavailable to adhere to previous non-batched behavior.
- if (isSessionUnAvailable())
- return;
-
- final int threshold = getServerPreparedStatementDiscardThreshold();
-
- // Met threshold to clean-up?
- if (force || threshold < getDiscardedServerPreparedStatementCount()) {
-
- // Create batch of sp_unprepare statements.
- StringBuilder sql = new StringBuilder(threshold * 32/*EXEC sp_cursorunprepare++;*/);
-
- // Build the string containing no more than the # of handles to remove.
- // Note that sp_unprepare can fail if the statement is already removed.
- // However, the server will only abort that statement and continue with
- // the remaining clean-up.
- int handlesRemoved = 0;
- PreparedStatementHandle statementHandle = null;
-
- while (null != (statementHandle = discardedPreparedStatementHandles.poll())){
- ++handlesRemoved;
-
- sql.append(statementHandle.isDirectSql() ? "EXEC sp_unprepare " : "EXEC sp_cursorunprepare ")
- .append(statementHandle.getHandle())
- .append(';');
- }
-
- try {
- // Execute the batched set.
- try(Statement stmt = this.createStatement()) {
- stmt.execute(sql.toString());
- }
-
- if (loggerExternal.isLoggable(java.util.logging.Level.FINER))
- loggerExternal.finer(this + ": Finished un-preparing handle count:" + handlesRemoved);
- }
- catch(SQLException e) {
- if (loggerExternal.isLoggable(java.util.logging.Level.FINER))
- loggerExternal.log(Level.FINER, this + ": Error batch-closing at least one prepared handle", e);
- }
-
- // Decrement threshold counter
- discardedPreparedStatementHandleCount.addAndGet(-handlesRemoved);
- }
- }
-
-
- /**
- * Returns the size of the prepared statement cache for this connection. A value less than 1 means no cache.
- * @return Returns the current setting per the description.
+ * @param value
+ * Changes the setting per the description.
*/
- public int getStatementPoolingCacheSize() {
- return statementPoolingCacheSize;
+ static public void setDefaultServerPreparedStatementDiscardThreshold(int value) {
+ defaultServerPreparedStatementDiscardThreshold = value;
}
/**
- * Returns the current number of pooled prepared statement handles.
+ * Returns the behavior for a specific connection instance. This setting controls how many outstanding prepared statement discard actions
+ * (sp_unprepare) can be outstanding per connection before a call to clean-up the outstanding handles on the server is executed. If the setting is
+ * {@literal <=} 1 unprepare actions will be executed immediately on prepared statement close. If it is set to {@literal >} 1 these calls will be
+ * batched together to avoid overhead of calling sp_unprepare too often. The default for this option can be changed by calling
+ * setDefaultServerPreparedStatementDiscardThreshold().
+ *
* @return Returns the current setting per the description.
*/
- public int getStatementHandleCacheEntryCount() {
- if(!isStatementPoolingEnabled())
- return 0;
+ public int getServerPreparedStatementDiscardThreshold() {
+ if(0 > this.serverPreparedStatementDiscardThreshold)
+ return getDefaultServerPreparedStatementDiscardThreshold();
else
- return this.preparedStatementHandleCache.size();
+ return this.serverPreparedStatementDiscardThreshold;
}
/**
- * Whether statement pooling is enabled or not for this connection.
- * @return Returns the current setting per the description.
+ * Specifies the behavior for a specific connection instance. This setting controls how many outstanding prepared statement discard actions
+ * (sp_unprepare) can be outstanding per connection before a call to clean-up the outstanding handles on the server is executed. If the setting is
+ * {@literal <=} 1 unprepare actions will be executed immediately on prepared statement close. If it is set to {@literal >} 1 these calls will be
+ * batched together to avoid overhead of calling sp_unprepare too often.
+ *
+ * @param value
+ * Changes the setting per the description.
*/
- public boolean isStatementPoolingEnabled() {
- return null != preparedStatementHandleCache && 0 < this.getStatementPoolingCacheSize();
+ public void setServerPreparedStatementDiscardThreshold(int value) {
+ this.serverPreparedStatementDiscardThreshold = value;
}
/**
- * Specifies the size of the prepared statement cache for this conection. A value less than 1 means no cache.
- * @param value The new cache size.
+ * Cleans-up discarded prepared statement handles on the server using batched un-prepare actions if the batching threshold has been reached.
*
+ * @param force
+ * When force is set to true we ignore the current threshold for if the discard actions should run and run them anyway.
*/
- public void setStatementPoolingCacheSize(int value) {
- if (value != this.statementPoolingCacheSize) {
- value = Math.max(0, value);
- statementPoolingCacheSize = value;
-
- if (null != preparedStatementHandleCache)
- preparedStatementHandleCache.setCapacity(value);
-
- if (null != parameterMetadataCache)
- parameterMetadataCache.setCapacity(value);
- }
- }
-
- /** Get a parameter metadata cache entry if statement pooling is enabled */
- final SQLServerParameterMetaData getCachedParameterMetadata(Sha1HashKey key) {
- if(!isStatementPoolingEnabled())
- return null;
-
- return parameterMetadataCache.get(key);
- }
-
- /** Register a parameter metadata cache entry if statement pooling is enabled */
- final void registerCachedParameterMetadata(Sha1HashKey key, SQLServerParameterMetaData pmd) {
- if(!isStatementPoolingEnabled() || null == pmd)
+ final void handlePreparedStatementDiscardActions(boolean force) {
+ // Skip out if session is unavailable to adhere to previous non-batched behavior.
+ if (this.isSessionUnAvailable())
return;
-
- parameterMetadataCache.put(key, pmd);
- }
-
- /** Get or create prepared statement handle cache entry if statement pooling is enabled */
- final PreparedStatementHandle getCachedPreparedStatementHandle(Sha1HashKey key) {
- if(!isStatementPoolingEnabled())
- return null;
-
- return preparedStatementHandleCache.get(key);
- }
- /** Get or create prepared statement handle cache entry if statement pooling is enabled */
- final PreparedStatementHandle registerCachedPreparedStatementHandle(Sha1HashKey key, int handle, boolean isDirectSql) {
- if(!isStatementPoolingEnabled() || null == key)
- return null;
-
- PreparedStatementHandle cacheItem = new PreparedStatementHandle(key, handle, isDirectSql, false);
- preparedStatementHandleCache.putIfAbsent(key, cacheItem);
- return cacheItem;
- }
+ final int threshold = this.getServerPreparedStatementDiscardThreshold();
- /** Return prepared statement handle cache entry so it can be un-prepared. */
- final void returnCachedPreparedStatementHandle(PreparedStatementHandle handle) {
- handle.removeReference();
+ // Find out current # enqueued, if force, make sure it always exceeds threshold.
+ int count = force ? threshold + 1 : this.getDiscardedServerPreparedStatementCount();
- if (handle.isEvictedFromCache() && handle.tryDiscardHandle())
- enqueueUnprepareStatementHandle(handle);
- }
-
- /** Force eviction of prepared statement handle cache entry. */
- final void evictCachedPreparedStatementHandle(PreparedStatementHandle handle) {
- if(null == handle || null == handle.getKey())
- return;
-
- preparedStatementHandleCache.remove(handle.getKey());
- }
+ // Met threshold to clean-up?
+ if(threshold < count) {
+
+ PreparedStatementDiscardItem prepStmtDiscardAction = this.discardedPreparedStatementHandles.poll();
+ if(null != prepStmtDiscardAction) {
+ int handlesRemoved = 0;
+
+ // Create batch of sp_unprepare statements.
+ StringBuilder sql = new StringBuilder(count * 32/*EXEC sp_cursorunprepare++;*/);
+
+ // Build the string containing no more than the # of handles to remove.
+ // Note that sp_unprepare can fail if the statement is already removed.
+ // However, the server will only abort that statement and continue with
+ // the remaining clean-up.
+ do {
+ ++handlesRemoved;
+
+ sql.append(prepStmtDiscardAction.directSql ? "EXEC sp_unprepare " : "EXEC sp_cursorunprepare ")
+ .append(prepStmtDiscardAction.handle)
+ .append(';');
+ } while (null != (prepStmtDiscardAction = this.discardedPreparedStatementHandles.poll()));
- // Handle closing handles when removed from cache.
- final class PreparedStatementCacheEvictionListener implements EvictionListener {
- public void onEviction(Sha1HashKey key, PreparedStatementHandle handle) {
- if(null != handle) {
- handle.setIsEvictedFromCache(true); // Mark as evicted from cache.
+ try {
+ // Execute the batched set.
+ try(Statement stmt = this.createStatement()) {
+ stmt.execute(sql.toString());
+ }
- // Only discard if not referenced.
- if(handle.tryDiscardHandle()) {
- enqueueUnprepareStatementHandle(handle);
- // Do not run discard actions here! Can interfere with executing statement.
- }
+ if (this.getConnectionLogger().isLoggable(java.util.logging.Level.FINER))
+ this.getConnectionLogger().finer(this + ": Finished un-preparing handle count:" + handlesRemoved);
+ }
+ catch(SQLException e) {
+ if (this.getConnectionLogger().isLoggable(java.util.logging.Level.FINER))
+ this.getConnectionLogger().log(Level.FINER, this + ": Error batch-closing at least one prepared handle", e);
+ }
+
+ // Decrement threshold counter
+ this.discardedPreparedStatementHandleQueueCount.addAndGet(-handlesRemoved);
}
}
- }
+ }
}
// Helper class for security manager functions used by SQLServerConnection class.
diff --git a/src/main/java/com/microsoft/sqlserver/jdbc/SQLServerDataSource.java b/src/main/java/com/microsoft/sqlserver/jdbc/SQLServerDataSource.java
index b94bb4d6f..414e065e7 100644
--- a/src/main/java/com/microsoft/sqlserver/jdbc/SQLServerDataSource.java
+++ b/src/main/java/com/microsoft/sqlserver/jdbc/SQLServerDataSource.java
@@ -694,8 +694,8 @@ public void setEnablePrepareOnFirstPreparedStatementCall(boolean enablePrepareOn
* @return Returns the current setting per the description.
*/
public boolean getEnablePrepareOnFirstPreparedStatementCall() {
- boolean defaultValue = SQLServerDriverBooleanProperty.ENABLE_PREPARE_ON_FIRST_PREPARED_STATEMENT.getDefaultValue();
- return getBooleanProperty(connectionProps, SQLServerDriverBooleanProperty.ENABLE_PREPARE_ON_FIRST_PREPARED_STATEMENT.toString(), defaultValue);
+ return getBooleanProperty(connectionProps, SQLServerDriverBooleanProperty.ENABLE_PREPARE_ON_FIRST_PREPARED_STATEMENT.toString(),
+ SQLServerConnection.getDefaultEnablePrepareOnFirstPreparedStatementCall());
}
/**
@@ -720,28 +720,8 @@ public void setServerPreparedStatementDiscardThreshold(int serverPreparedStateme
* @return Returns the current setting per the description.
*/
public int getServerPreparedStatementDiscardThreshold() {
- int defaultSize = SQLServerDriverIntProperty.SERVER_PREPARED_STATEMENT_DISCARD_THRESHOLD.getDefaultValue();
- return getIntProperty(connectionProps, SQLServerDriverIntProperty.SERVER_PREPARED_STATEMENT_DISCARD_THRESHOLD.toString(), defaultSize);
- }
-
- /**
- * Specifies the size of the prepared statement cache for this conection. A value less than 1 means no cache.
- *
- * @param statementPoolingCacheSize
- * Changes the setting per the description.
- */
- public void setStatementPoolingCacheSize(int statementPoolingCacheSize) {
- setIntProperty(connectionProps, SQLServerDriverIntProperty.STATEMENT_POOLING_CACHE_SIZE.toString(), statementPoolingCacheSize);
- }
-
- /**
- * Returns the size of the prepared statement cache for this conection. A value less than 1 means no cache.
- *
- * @return Returns the current setting per the description.
- */
- public int getStatementPoolingCacheSize() {
- int defaultSize = SQLServerDriverIntProperty.STATEMENT_POOLING_CACHE_SIZE.getDefaultValue();
- return getIntProperty(connectionProps, SQLServerDriverIntProperty.STATEMENT_POOLING_CACHE_SIZE.toString(), defaultSize);
+ return getIntProperty(connectionProps, SQLServerDriverIntProperty.SERVER_PREPARED_STATEMENT_DISCARD_THRESHOLD.toString(),
+ SQLServerConnection.getDefaultServerPreparedStatementDiscardThreshold());
}
/**
diff --git a/src/main/java/com/microsoft/sqlserver/jdbc/SQLServerDriver.java b/src/main/java/com/microsoft/sqlserver/jdbc/SQLServerDriver.java
index 1ea9f3125..646af582b 100644
--- a/src/main/java/com/microsoft/sqlserver/jdbc/SQLServerDriver.java
+++ b/src/main/java/com/microsoft/sqlserver/jdbc/SQLServerDriver.java
@@ -268,15 +268,13 @@ public String toString() {
}
enum SQLServerDriverIntProperty {
- PACKET_SIZE ("packetSize", TDS.DEFAULT_PACKET_SIZE),
- LOCK_TIMEOUT ("lockTimeout", -1),
- LOGIN_TIMEOUT ("loginTimeout", 15),
- QUERY_TIMEOUT ("queryTimeout", -1),
- PORT_NUMBER ("portNumber", 1433),
- SOCKET_TIMEOUT ("socketTimeout", 0),
- SERVER_PREPARED_STATEMENT_DISCARD_THRESHOLD("serverPreparedStatementDiscardThreshold", SQLServerConnection.DEFAULT_SERVER_PREPARED_STATEMENT_DISCARD_THRESHOLD),
- STATEMENT_POOLING_CACHE_SIZE ("statementPoolingCacheSize", SQLServerConnection.DEFAULT_STATEMENT_POOLING_CACHE_SIZE),
- ;
+ PACKET_SIZE ("packetSize", TDS.DEFAULT_PACKET_SIZE),
+ LOCK_TIMEOUT ("lockTimeout", -1),
+ LOGIN_TIMEOUT ("loginTimeout", 15),
+ QUERY_TIMEOUT ("queryTimeout", -1),
+ PORT_NUMBER ("portNumber", 1433),
+ SOCKET_TIMEOUT ("socketTimeout", 0),
+ SERVER_PREPARED_STATEMENT_DISCARD_THRESHOLD("serverPreparedStatementDiscardThreshold", -1/*This is not the default, default handled in SQLServerConnection and is not final/const*/);
private String name;
private int defaultValue;
@@ -296,9 +294,9 @@ public String toString() {
}
}
-enum SQLServerDriverBooleanProperty
+enum SQLServerDriverBooleanProperty
{
- DISABLE_STATEMENT_POOLING ("disableStatementPooling", false),
+ DISABLE_STATEMENT_POOLING ("disableStatementPooling", true),
ENCRYPT ("encrypt", false),
INTEGRATED_SECURITY ("integratedSecurity", false),
LAST_UPDATE_COUNT ("lastUpdateCount", true),
@@ -310,7 +308,7 @@ enum SQLServerDriverBooleanProperty
TRUST_SERVER_CERTIFICATE ("trustServerCertificate", false),
XOPEN_STATES ("xopenStates", false),
FIPS ("fips", false),
- ENABLE_PREPARE_ON_FIRST_PREPARED_STATEMENT("enablePrepareOnFirstPreparedStatementCall", SQLServerConnection.DEFAULT_ENABLE_PREPARE_ON_FIRST_PREPARED_STATEMENT_CALL);
+ ENABLE_PREPARE_ON_FIRST_PREPARED_STATEMENT("enablePrepareOnFirstPreparedStatementCall", false/*This is not the default, default handled in SQLServerConnection and is not final/const*/);
private String name;
private boolean defaultValue;
@@ -339,10 +337,10 @@ public final class SQLServerDriver implements java.sql.Driver {
{
// default required available choices
// property name value property (if appropriate)
- new SQLServerDriverPropertyInfo(SQLServerDriverStringProperty.APPLICATION_INTENT.toString(), SQLServerDriverStringProperty.APPLICATION_INTENT.getDefaultValue(), false, new String[]{ApplicationIntent.READ_ONLY.toString(), ApplicationIntent.READ_WRITE.toString()}),
- new SQLServerDriverPropertyInfo(SQLServerDriverStringProperty.APPLICATION_NAME.toString(), SQLServerDriverStringProperty.APPLICATION_NAME.getDefaultValue(), false, null),
+ new SQLServerDriverPropertyInfo(SQLServerDriverStringProperty.APPLICATION_INTENT.toString(), SQLServerDriverStringProperty.APPLICATION_INTENT.getDefaultValue(), false, new String[]{ApplicationIntent.READ_ONLY.toString(), ApplicationIntent.READ_WRITE.toString()}),
+ new SQLServerDriverPropertyInfo(SQLServerDriverStringProperty.APPLICATION_NAME.toString(), SQLServerDriverStringProperty.APPLICATION_NAME.getDefaultValue(), false, null),
new SQLServerDriverPropertyInfo(SQLServerDriverStringProperty.COLUMN_ENCRYPTION.toString(), SQLServerDriverStringProperty.COLUMN_ENCRYPTION.getDefaultValue(), false, new String[] {ColumnEncryptionSetting.Disabled.toString(), ColumnEncryptionSetting.Enabled.toString()}),
- new SQLServerDriverPropertyInfo(SQLServerDriverStringProperty.DATABASE_NAME.toString(), SQLServerDriverStringProperty.DATABASE_NAME.getDefaultValue(), false, null),
+ new SQLServerDriverPropertyInfo(SQLServerDriverStringProperty.DATABASE_NAME.toString(), SQLServerDriverStringProperty.DATABASE_NAME.getDefaultValue(), false, null),
new SQLServerDriverPropertyInfo(SQLServerDriverBooleanProperty.DISABLE_STATEMENT_POOLING.toString(), Boolean.toString(SQLServerDriverBooleanProperty.DISABLE_STATEMENT_POOLING.getDefaultValue()), false, new String[] {"true"}),
new SQLServerDriverPropertyInfo(SQLServerDriverBooleanProperty.ENCRYPT.toString(), Boolean.toString(SQLServerDriverBooleanProperty.ENCRYPT.getDefaultValue()), false, TRUE_FALSE),
new SQLServerDriverPropertyInfo(SQLServerDriverStringProperty.FAILOVER_PARTNER.toString(), SQLServerDriverStringProperty.FAILOVER_PARTNER.getDefaultValue(), false, null),
@@ -356,7 +354,7 @@ public final class SQLServerDriver implements java.sql.Driver {
new SQLServerDriverPropertyInfo(SQLServerDriverIntProperty.LOCK_TIMEOUT.toString(), Integer.toString(SQLServerDriverIntProperty.LOCK_TIMEOUT.getDefaultValue()), false, null),
new SQLServerDriverPropertyInfo(SQLServerDriverIntProperty.LOGIN_TIMEOUT.toString(), Integer.toString(SQLServerDriverIntProperty.LOGIN_TIMEOUT.getDefaultValue()), false, null),
new SQLServerDriverPropertyInfo(SQLServerDriverBooleanProperty.MULTI_SUBNET_FAILOVER.toString(), Boolean.toString(SQLServerDriverBooleanProperty.MULTI_SUBNET_FAILOVER.getDefaultValue()), false, TRUE_FALSE),
- new SQLServerDriverPropertyInfo(SQLServerDriverIntProperty.PACKET_SIZE.toString(), Integer.toString(SQLServerDriverIntProperty.PACKET_SIZE.getDefaultValue()), false, null),
+ new SQLServerDriverPropertyInfo(SQLServerDriverIntProperty.PACKET_SIZE.toString(), Integer.toString(SQLServerDriverIntProperty.PACKET_SIZE.getDefaultValue()), false, null),
new SQLServerDriverPropertyInfo(SQLServerDriverStringProperty.PASSWORD.toString(), SQLServerDriverStringProperty.PASSWORD.getDefaultValue(), true, null),
new SQLServerDriverPropertyInfo(SQLServerDriverIntProperty.PORT_NUMBER.toString(), Integer.toString(SQLServerDriverIntProperty.PORT_NUMBER.getDefaultValue()), false, null),
new SQLServerDriverPropertyInfo(SQLServerDriverIntProperty.QUERY_TIMEOUT.toString(), Integer.toString(SQLServerDriverIntProperty.QUERY_TIMEOUT.getDefaultValue()), false, null),
@@ -373,16 +371,15 @@ public final class SQLServerDriver implements java.sql.Driver {
new SQLServerDriverPropertyInfo(SQLServerDriverStringProperty.TRUST_STORE_PASSWORD.toString(), SQLServerDriverStringProperty.TRUST_STORE_PASSWORD.getDefaultValue(), false, null),
new SQLServerDriverPropertyInfo(SQLServerDriverBooleanProperty.SEND_TIME_AS_DATETIME.toString(), Boolean.toString(SQLServerDriverBooleanProperty.SEND_TIME_AS_DATETIME.getDefaultValue()), false, TRUE_FALSE),
new SQLServerDriverPropertyInfo(SQLServerDriverStringProperty.USER.toString(), SQLServerDriverStringProperty.USER.getDefaultValue(), true, null),
- new SQLServerDriverPropertyInfo(SQLServerDriverStringProperty.WORKSTATION_ID.toString(), SQLServerDriverStringProperty.WORKSTATION_ID.getDefaultValue(), false, null),
+ new SQLServerDriverPropertyInfo(SQLServerDriverStringProperty.WORKSTATION_ID.toString(), SQLServerDriverStringProperty.WORKSTATION_ID.getDefaultValue(), false, null),
new SQLServerDriverPropertyInfo(SQLServerDriverBooleanProperty.XOPEN_STATES.toString(), Boolean.toString(SQLServerDriverBooleanProperty.XOPEN_STATES.getDefaultValue()), false, TRUE_FALSE),
new SQLServerDriverPropertyInfo(SQLServerDriverStringProperty.AUTHENTICATION_SCHEME.toString(), SQLServerDriverStringProperty.AUTHENTICATION_SCHEME.getDefaultValue(), false, new String[] {AuthenticationScheme.javaKerberos.toString(),AuthenticationScheme.nativeAuthentication.toString()}),
new SQLServerDriverPropertyInfo(SQLServerDriverStringProperty.AUTHENTICATION.toString(), SQLServerDriverStringProperty.AUTHENTICATION.getDefaultValue(), false, new String[] {SqlAuthentication.NotSpecified.toString(),SqlAuthentication.SqlPassword.toString(),SqlAuthentication.ActiveDirectoryPassword.toString(),SqlAuthentication.ActiveDirectoryIntegrated.toString()}),
- new SQLServerDriverPropertyInfo(SQLServerDriverStringProperty.FIPS_PROVIDER.toString(), SQLServerDriverStringProperty.FIPS_PROVIDER.getDefaultValue(), false, null),
+ new SQLServerDriverPropertyInfo(SQLServerDriverStringProperty.FIPS_PROVIDER.toString(), SQLServerDriverStringProperty.FIPS_PROVIDER.getDefaultValue(), false, null),
new SQLServerDriverPropertyInfo(SQLServerDriverIntProperty.SOCKET_TIMEOUT.toString(), Integer.toString(SQLServerDriverIntProperty.SOCKET_TIMEOUT.getDefaultValue()), false, null),
new SQLServerDriverPropertyInfo(SQLServerDriverBooleanProperty.FIPS.toString(), Boolean.toString(SQLServerDriverBooleanProperty.FIPS.getDefaultValue()), false, TRUE_FALSE),
- new SQLServerDriverPropertyInfo(SQLServerDriverBooleanProperty.ENABLE_PREPARE_ON_FIRST_PREPARED_STATEMENT.toString(), Boolean.toString(SQLServerDriverBooleanProperty.ENABLE_PREPARE_ON_FIRST_PREPARED_STATEMENT.getDefaultValue()), false,TRUE_FALSE),
- new SQLServerDriverPropertyInfo(SQLServerDriverIntProperty.SERVER_PREPARED_STATEMENT_DISCARD_THRESHOLD.toString(), Integer.toString(SQLServerDriverIntProperty.SERVER_PREPARED_STATEMENT_DISCARD_THRESHOLD.getDefaultValue()), false, null),
- new SQLServerDriverPropertyInfo(SQLServerDriverIntProperty.STATEMENT_POOLING_CACHE_SIZE.toString(), Integer.toString(SQLServerDriverIntProperty.STATEMENT_POOLING_CACHE_SIZE.getDefaultValue()), false, null),
+ new SQLServerDriverPropertyInfo(SQLServerDriverBooleanProperty.ENABLE_PREPARE_ON_FIRST_PREPARED_STATEMENT.toString(), Boolean.toString(SQLServerConnection.getDefaultEnablePrepareOnFirstPreparedStatementCall()), false, TRUE_FALSE),
+ new SQLServerDriverPropertyInfo(SQLServerDriverIntProperty.SERVER_PREPARED_STATEMENT_DISCARD_THRESHOLD.toString(), Integer.toString(SQLServerConnection.getDefaultServerPreparedStatementDiscardThreshold()), false, null),
new SQLServerDriverPropertyInfo(SQLServerDriverStringProperty.JAAS_CONFIG_NAME.toString(), SQLServerDriverStringProperty.JAAS_CONFIG_NAME.getDefaultValue(), false, null),
};
diff --git a/src/main/java/com/microsoft/sqlserver/jdbc/SQLServerParameterMetaData.java b/src/main/java/com/microsoft/sqlserver/jdbc/SQLServerParameterMetaData.java
index 5d8fc5110..a309b8dad 100644
--- a/src/main/java/com/microsoft/sqlserver/jdbc/SQLServerParameterMetaData.java
+++ b/src/main/java/com/microsoft/sqlserver/jdbc/SQLServerParameterMetaData.java
@@ -505,9 +505,7 @@ String parseProcIdentifier(String procIdentifier) throws SQLServerException {
}
private void checkClosed() throws SQLServerException {
- // stmtParent does not seem to be re-used, should just verify connection is not closed.
- // stmtParent.checkClosed();
- con.checkClosed();
+ stmtParent.checkClosed();
}
/**
diff --git a/src/main/java/com/microsoft/sqlserver/jdbc/SQLServerPreparedStatement.java b/src/main/java/com/microsoft/sqlserver/jdbc/SQLServerPreparedStatement.java
index 4bfcb58e2..48b6926e4 100644
--- a/src/main/java/com/microsoft/sqlserver/jdbc/SQLServerPreparedStatement.java
+++ b/src/main/java/com/microsoft/sqlserver/jdbc/SQLServerPreparedStatement.java
@@ -6,10 +6,7 @@
* This program is made available under the terms of the MIT License. See the LICENSE file in the project root for more information.
*/
-package com.microsoft.sqlserver.jdbc;
-
-import static com.microsoft.sqlserver.jdbc.SQLServerConnection.getCachedParsedSQL;
-import static com.microsoft.sqlserver.jdbc.SQLServerConnection.parseAndCacheSQL;
+package com.microsoft.sqlserver.jdbc;
import java.io.InputStream;
import java.io.Reader;
@@ -31,9 +28,6 @@
import java.util.Vector;
import java.util.logging.Level;
-import com.microsoft.sqlserver.jdbc.SQLServerConnection.PreparedStatementHandle;
-import com.microsoft.sqlserver.jdbc.SQLServerConnection.Sha1HashKey;
-
/**
* SQLServerPreparedStatement provides JDBC prepared statement functionality. SQLServerPreparedStatement provides methods for the user to supply
* parameters as any native Java type and many Java object types.
@@ -57,10 +51,13 @@ public class SQLServerPreparedStatement extends SQLServerStatement implements IS
private static final int BATCH_STATEMENT_DELIMITER_TDS_72 = 0xFF;
final int nBatchStatementDelimiter = BATCH_STATEMENT_DELIMITER_TDS_72;
+ /** the user's prepared sql syntax */
+ private String sqlCommand;
+
/** The prepared type definitions */
private String preparedTypeDefinitions;
- /** Processed SQL statement text, may not be same as what user initially passed. */
+ /** The users SQL statement text */
final String userSQL;
/** SQL statement with expanded parameter tokens */
@@ -69,12 +66,6 @@ public class SQLServerPreparedStatement extends SQLServerStatement implements IS
/** True if this execute has been called for this statement at least once */
private boolean isExecutedAtLeastOnce = false;
- /** Reference to cache item for statement handle pooling. Only used to decrement ref count on statement close. */
- private PreparedStatementHandle cachedPreparedStatementHandle;
-
- /** Hash of user supplied SQL statement used for various cache lookups */
- private Sha1HashKey sqlTextCacheKey;
-
/**
* Array with parameter names generated in buildParamTypeDefinitions For mapping encryption information to parameters, as the second result set
* returned by sp_describe_parameter_encryption doesn't depend on order of input parameter
@@ -99,36 +90,6 @@ public class SQLServerPreparedStatement extends SQLServerStatement implements IS
/** The prepared statement handle returned by the server */
private int prepStmtHandle = 0;
- private void setPreparedStatementHandle(int handle) {
- this.prepStmtHandle = handle;
- }
-
- /** The server handle for this prepared statement. If a value {@literal <} 1 is returned no handle has been created.
- *
- * @return
- * Per the description.
- * @throws SQLServerException when an error occurs
- */
- public int getPreparedStatementHandle() throws SQLServerException {
- checkClosed();
- return prepStmtHandle;
- }
-
- /** Returns true if this statement has a server handle.
- *
- * @return
- * Per the description.
- */
- private boolean hasPreparedStatementHandle() {
- return 0 < prepStmtHandle;
- }
-
- /** Resets the server handle for this prepared statement to no handle.
- */
- private void resetPrepStmtHandle() {
- prepStmtHandle = 0;
- }
-
/** Flag set to true when statement execution is expected to return the prepared statement handle */
private boolean expectPrepStmtHandle = false;
@@ -164,65 +125,47 @@ String getClassNameInternal() {
int nRSConcur,
SQLServerStatementColumnEncryptionSetting stmtColEncSetting) throws SQLServerException {
super(conn, nRSType, nRSConcur, stmtColEncSetting);
-
- if (null == sql) {
- MessageFormat form = new MessageFormat(SQLServerException.getErrString("R_NullValue"));
- Object[] msgArgs1 = {"Statement SQL"};
- throw new SQLServerException(form.format(msgArgs1), null);
- }
-
stmtPoolable = true;
+ sqlCommand = sql;
- // Create a cache key for this statement.
- sqlTextCacheKey = new Sha1HashKey(sql);
-
- // Parse or fetch SQL metadata from cache.
- ParsedSQLCacheItem parsedSQL = getCachedParsedSQL(sqlTextCacheKey);
- if(null != parsedSQL) {
- isExecutedAtLeastOnce = true;
- }
- else {
- parsedSQL = parseAndCacheSQL(sqlTextCacheKey, sql);
- }
+ JDBCSyntaxTranslator translator = new JDBCSyntaxTranslator();
+ sql = translator.translate(sql);
+ procedureName = translator.getProcedureName(); // may return null
+ bReturnValueSyntax = translator.hasReturnValueSyntax();
- // Retrieve meta data from cache item.
- procedureName = parsedSQL.procedureName;
- bReturnValueSyntax = parsedSQL.bReturnValueSyntax;
- userSQL = parsedSQL.processedSQL;
- initParams(parsedSQL.parameterCount);
+ userSQL = sql;
+ initParams(userSQL);
}
/**
* Close the prepared statement's prepared handle.
*/
private void closePreparedHandle() {
- if (!hasPreparedStatementHandle())
+ if (0 == prepStmtHandle)
return;
// If the connection is already closed, don't bother trying to close
// the prepared handle. We won't be able to, and it's already closed
// on the server anyway.
if (connection.isSessionUnAvailable()) {
- if (loggerExternal.isLoggable(java.util.logging.Level.FINER))
- loggerExternal.finer(this + ": Not closing PreparedHandle:" + prepStmtHandle + "; connection is already closed.");
+ if (getStatementLogger().isLoggable(java.util.logging.Level.FINER))
+ getStatementLogger().finer(this + ": Not closing PreparedHandle:" + prepStmtHandle + "; connection is already closed.");
}
else {
isExecutedAtLeastOnce = false;
final int handleToClose = prepStmtHandle;
- resetPrepStmtHandle();
+ prepStmtHandle = 0;
- // Handle unprepare actions through statement pooling.
- if (null != cachedPreparedStatementHandle) {
- connection.returnCachedPreparedStatementHandle(cachedPreparedStatementHandle);
- }
- // If no reference to a statement pool cache item is found handle unprepare actions through batching @ connection level.
- else if(connection.isPreparedStatementUnprepareBatchingEnabled()) {
- connection.enqueueUnprepareStatementHandle(connection.new PreparedStatementHandle(null, handleToClose, executedSqlDirectly, true));
+ // Using batched clean-up? If not, use old method of calling sp_unprepare.
+ if(1 < connection.getServerPreparedStatementDiscardThreshold()) {
+ // Handle unprepare actions through batching @ connection level.
+ connection.enqueuePreparedStatementDiscardItem(handleToClose, executedSqlDirectly);
+ connection.handlePreparedStatementDiscardActions(false);
}
else {
- // Non batched behavior (same as pre batch clean-up implementation)
- if (loggerExternal.isLoggable(java.util.logging.Level.FINER))
- loggerExternal.finer(this + ": Closing PreparedHandle:" + handleToClose);
+ // Non batched behavior (same as pre batch impl.)
+ if (getStatementLogger().isLoggable(java.util.logging.Level.FINER))
+ getStatementLogger().finer(this + ": Closing PreparedHandle:" + handleToClose);
final class PreparedHandleClose extends UninterruptableTDSCommand {
PreparedHandleClose() {
@@ -246,16 +189,13 @@ final boolean doExecute() throws SQLServerException {
executeCommand(new PreparedHandleClose());
}
catch (SQLServerException e) {
- if (loggerExternal.isLoggable(java.util.logging.Level.FINER))
- loggerExternal.log(Level.FINER, this + ": Error (ignored) closing PreparedHandle:" + handleToClose, e);
+ if (getStatementLogger().isLoggable(java.util.logging.Level.FINER))
+ getStatementLogger().log(Level.FINER, this + ": Error (ignored) closing PreparedHandle:" + handleToClose, e);
}
- if (loggerExternal.isLoggable(java.util.logging.Level.FINER))
- loggerExternal.finer(this + ": Closed PreparedHandle:" + handleToClose);
+ if (getStatementLogger().isLoggable(java.util.logging.Level.FINER))
+ getStatementLogger().finer(this + ": Closed PreparedHandle:" + handleToClose);
}
-
- // Always run any outstanding discard actions as statement pooling always uses batched sp_unprepare.
- connection.unprepareUnreferencedPreparedStatementHandles(false);
}
}
@@ -276,13 +216,21 @@ final void closeInternal() {
batchParamValues = null;
}
- /**
+ /**
* Intialize the statement parameters.
*
- * @param nParams
- * Number of parameters to Intialize.
+ * @param sql
*/
- /* L0 */ final void initParams(int nParams) {
+ /* L0 */ final void initParams(String sql) {
+ encryptionMetadataIsRetrieved = false;
+ int nParams = 0;
+
+ // Figure out the expected number of parameters by counting the
+ // parameter placeholders in the SQL string.
+ int offset = -1;
+ while ((offset = ParameterUtils.scanSQLForChar('?', sql, ++offset)) < sql.length())
+ ++nParams;
+
inOutParam = new Parameter[nParams];
for (int i = 0; i < nParams; i++) {
inOutParam[i] = new Parameter(Util.shouldHonorAEForParameters(stmtColumnEncriptionSetting, connection));
@@ -487,7 +435,6 @@ final void doExecutePreparedStatement(PrepStmtExecCmd command) throws SQLServerE
loggerExternal.finer(toString() + " ActivityId: " + ActivityCorrelator.getNext().toString());
}
- boolean hasExistingTypeDefinitions = preparedTypeDefinitions != null;
boolean hasNewTypeDefinitions = true;
if (!encryptionMetadataIsRetrieved) {
hasNewTypeDefinitions = buildPreparedStrings(inOutParam, false);
@@ -509,32 +456,15 @@ final void doExecutePreparedStatement(PrepStmtExecCmd command) throws SQLServerE
hasNewTypeDefinitions = buildPreparedStrings(inOutParam, true);
}
- // Retry execution if existing handle could not be re-used.
- for(int attempt = 1; attempt <= 2; ++attempt) {
- try {
- // Re-use handle if available, requires parameter definitions which are not available until here.
- if (reuseCachedHandle(hasNewTypeDefinitions, 1 < attempt)) {
- hasNewTypeDefinitions = false;
- }
-
- // Start the request and detach the response reader so that we can
- // continue using it after we return.
- TDSWriter tdsWriter = command.startRequest(TDS.PKT_RPC);
-
- doPrepExec(tdsWriter, inOutParam, hasNewTypeDefinitions, hasExistingTypeDefinitions);
-
- ensureExecuteResultsReader(command.startResponse(getIsResponseBufferingAdaptive()));
- startResults();
- getNextResult();
- }
- catch(SQLException e) {
- if (retryBasedOnFailedReuseOfCachedHandle(e, attempt))
- continue;
- else
- throw e;
- }
- break;
- }
+ // Start the request and detach the response reader so that we can
+ // continue using it after we return.
+ TDSWriter tdsWriter = command.startRequest(TDS.PKT_RPC);
+
+ doPrepExec(tdsWriter, inOutParam, hasNewTypeDefinitions);
+
+ ensureExecuteResultsReader(command.startResponse(getIsResponseBufferingAdaptive()));
+ startResults();
+ getNextResult();
if (EXECUTE_QUERY == executeMethod && null == resultSet) {
SQLServerException.makeFromDriverError(connection, this, SQLServerException.getErrString("R_noResultset"), null, true);
@@ -544,15 +474,6 @@ else if (EXECUTE_UPDATE == executeMethod && null != resultSet) {
}
}
- /** Should the execution be retried because the re-used cached handle could not be re-used due to server side state changes? */
- private boolean retryBasedOnFailedReuseOfCachedHandle(SQLException e, int attempt) {
- // Only retry based on these error codes:
- // 586: The prepared statement handle %d is not valid in this context. Please verify that current database, user default schema, and ANSI_NULLS and QUOTED_IDENTIFIER set options are not changed since the handle is prepared.
- // 8179: Could not find prepared statement with handle %d.
- // 99586: Error used for testing.
- return 1 == attempt && (586 == e.getErrorCode() || 8179 == e.getErrorCode() || 99586 == e.getErrorCode());
- }
-
/**
* Consume the OUT parameter for the statement object itself.
*
@@ -573,14 +494,7 @@ boolean onRetValue(TDSReader tdsReader) throws SQLServerException {
expectPrepStmtHandle = false;
Parameter param = new Parameter(Util.shouldHonorAEForParameters(stmtColumnEncriptionSetting, connection));
param.skipRetValStatus(tdsReader);
-
- setPreparedStatementHandle(param.getInt(tdsReader));
-
- // Cache the reference to the newly created handle, NOT for cursorable handles.
- if (null == cachedPreparedStatementHandle && !isCursorable(executeMethod)) {
- cachedPreparedStatementHandle = connection.registerCachedPreparedStatementHandle(new Sha1HashKey(preparedSQL, preparedTypeDefinitions), prepStmtHandle, executedSqlDirectly);
- }
-
+ prepStmtHandle = param.getInt(tdsReader);
param.skipValue(tdsReader, true);
if (getStatementLogger().isLoggable(java.util.logging.Level.FINER))
getStatementLogger().finer(toString() + ": Setting PreparedHandle:" + prepStmtHandle);
@@ -616,7 +530,7 @@ void sendParamsByRPC(TDSWriter tdsWriter,
private void buildServerCursorPrepExecParams(TDSWriter tdsWriter) throws SQLServerException {
if (getStatementLogger().isLoggable(java.util.logging.Level.FINE))
- getStatementLogger().fine(toString() + ": calling sp_cursorprepexec: PreparedHandle:" + getPreparedStatementHandle() + ", SQL:" + preparedSQL);
+ getStatementLogger().fine(toString() + ": calling sp_cursorprepexec: PreparedHandle:" + prepStmtHandle + ", SQL:" + preparedSQL);
expectPrepStmtHandle = true;
executedSqlDirectly = false;
@@ -631,8 +545,8 @@ private void buildServerCursorPrepExecParams(TDSWriter tdsWriter) throws SQLServ
//
// IN (reprepare): Old handle to unprepare before repreparing
// OUT: The newly prepared handle
- tdsWriter.writeRPCInt(null, new Integer(getPreparedStatementHandle()), true);
- resetPrepStmtHandle();
+ tdsWriter.writeRPCInt(null, new Integer(prepStmtHandle), true);
+ prepStmtHandle = 0;
// OUT
tdsWriter.writeRPCInt(null, new Integer(0), true); // cursor ID (OUTPUT)
@@ -658,7 +572,7 @@ private void buildServerCursorPrepExecParams(TDSWriter tdsWriter) throws SQLServ
private void buildPrepExecParams(TDSWriter tdsWriter) throws SQLServerException {
if (getStatementLogger().isLoggable(java.util.logging.Level.FINE))
- getStatementLogger().fine(toString() + ": calling sp_prepexec: PreparedHandle:" + getPreparedStatementHandle() + ", SQL:" + preparedSQL);
+ getStatementLogger().fine(toString() + ": calling sp_prepexec: PreparedHandle:" + prepStmtHandle + ", SQL:" + preparedSQL);
expectPrepStmtHandle = true;
executedSqlDirectly = true;
@@ -673,8 +587,8 @@ private void buildPrepExecParams(TDSWriter tdsWriter) throws SQLServerException
//
// IN (reprepare): Old handle to unprepare before repreparing
// OUT: The newly prepared handle
- tdsWriter.writeRPCInt(null, new Integer(getPreparedStatementHandle()), true);
- resetPrepStmtHandle();
+ tdsWriter.writeRPCInt(null, new Integer(prepStmtHandle), true);
+ prepStmtHandle = 0;
// IN
tdsWriter.writeRPCStringUnicode((preparedTypeDefinitions.length() > 0) ? preparedTypeDefinitions : null);
@@ -698,7 +612,7 @@ private void buildExecSQLParams(TDSWriter tdsWriter) throws SQLServerException {
tdsWriter.writeByte((byte) 0); // RPC procedure option 2
// No handle used.
- resetPrepStmtHandle();
+ prepStmtHandle = 0;
// IN
tdsWriter.writeRPCStringUnicode(preparedSQL);
@@ -709,7 +623,7 @@ private void buildExecSQLParams(TDSWriter tdsWriter) throws SQLServerException {
private void buildServerCursorExecParams(TDSWriter tdsWriter) throws SQLServerException {
if (getStatementLogger().isLoggable(java.util.logging.Level.FINE))
- getStatementLogger().fine(toString() + ": calling sp_cursorexecute: PreparedHandle:" + getPreparedStatementHandle() + ", SQL:" + preparedSQL);
+ getStatementLogger().fine(toString() + ": calling sp_cursorexecute: PreparedHandle:" + prepStmtHandle + ", SQL:" + preparedSQL);
expectPrepStmtHandle = false;
executedSqlDirectly = false;
@@ -722,8 +636,8 @@ private void buildServerCursorExecParams(TDSWriter tdsWriter) throws SQLServerEx
tdsWriter.writeByte((byte) 0); // RPC procedure option 2 */
// IN
- assert hasPreparedStatementHandle();
- tdsWriter.writeRPCInt(null, new Integer(getPreparedStatementHandle()), false);
+ assert 0 != prepStmtHandle;
+ tdsWriter.writeRPCInt(null, new Integer(prepStmtHandle), false);
// OUT
tdsWriter.writeRPCInt(null, new Integer(0), true);
@@ -740,7 +654,7 @@ private void buildServerCursorExecParams(TDSWriter tdsWriter) throws SQLServerEx
private void buildExecParams(TDSWriter tdsWriter) throws SQLServerException {
if (getStatementLogger().isLoggable(java.util.logging.Level.FINE))
- getStatementLogger().fine(toString() + ": calling sp_execute: PreparedHandle:" + getPreparedStatementHandle() + ", SQL:" + preparedSQL);
+ getStatementLogger().fine(toString() + ": calling sp_execute: PreparedHandle:" + prepStmtHandle + ", SQL:" + preparedSQL);
expectPrepStmtHandle = false;
executedSqlDirectly = true;
@@ -753,8 +667,8 @@ private void buildExecParams(TDSWriter tdsWriter) throws SQLServerException {
tdsWriter.writeByte((byte) 0); // RPC procedure option 2 */
// IN
- assert hasPreparedStatementHandle();
- tdsWriter.writeRPCInt(null, new Integer(getPreparedStatementHandle()), false);
+ assert 0 != prepStmtHandle;
+ tdsWriter.writeRPCInt(null, new Integer(prepStmtHandle), false);
}
private void getParameterEncryptionMetadata(Parameter[] params) throws SQLServerException {
@@ -898,63 +812,14 @@ private void getParameterEncryptionMetadata(Parameter[] params) throws SQLServer
connection.resetCurrentCommand();
}
- /** Manage re-using cached handles */
- private boolean reuseCachedHandle(boolean hasNewTypeDefinitions, boolean discardCurrentCacheItem) {
-
- // No re-use of caching for cursorable statements (statements that WILL use sp_cursor*)
- if (isCursorable(executeMethod))
- return false;
-
- // If current cache item should be discarded make sure it is not used again.
- if (discardCurrentCacheItem && null != cachedPreparedStatementHandle) {
-
- cachedPreparedStatementHandle.removeReference();
-
- // Make sure the cached handle does not get re-used more.
- resetPrepStmtHandle();
- cachedPreparedStatementHandle.setIsExplicitlyDiscarded();
- cachedPreparedStatementHandle = null;
-
- return false;
- }
-
- // New type definitions and existing cached handle reference then deregister cached handle.
- if(hasNewTypeDefinitions) {
- if (null != cachedPreparedStatementHandle && hasPreparedStatementHandle() && prepStmtHandle == cachedPreparedStatementHandle.getHandle()) {
- cachedPreparedStatementHandle.removeReference();
- }
- cachedPreparedStatementHandle = null;
- }
-
- // Check for new cache reference.
- if (null == cachedPreparedStatementHandle) {
- PreparedStatementHandle cachedHandle = connection.getCachedPreparedStatementHandle(new Sha1HashKey(preparedSQL, preparedTypeDefinitions));
-
- // If handle was found then re-use, only if AE is not on, or if it is on, make sure encryptionMetadataIsRetrieved is retrieved.
- if (null != cachedHandle) {
- if (!connection.isColumnEncryptionSettingEnabled()
- || (connection.isColumnEncryptionSettingEnabled() && encryptionMetadataIsRetrieved)) {
- if (cachedHandle.tryAddReference()) {
- setPreparedStatementHandle(cachedHandle.getHandle());
- cachedPreparedStatementHandle = cachedHandle;
- return true;
- }
- }
- }
- }
- return false;
- }
-
private boolean doPrepExec(TDSWriter tdsWriter,
Parameter[] params,
- boolean hasNewTypeDefinitions,
- boolean hasExistingTypeDefinitions) throws SQLServerException {
-
- boolean needsPrepare = (hasNewTypeDefinitions && hasExistingTypeDefinitions) || !hasPreparedStatementHandle();
+ boolean hasNewTypeDefinitions) throws SQLServerException {
+
+ boolean needsPrepare = hasNewTypeDefinitions || 0 == prepStmtHandle;
- // Cursors don't use statement pooling.
+ // Cursors never go the non-prepared statement route.
if (isCursorable(executeMethod)) {
-
if (needsPrepare)
buildServerCursorPrepExecParams(tdsWriter);
else
@@ -963,10 +828,7 @@ private boolean doPrepExec(TDSWriter tdsWriter,
else {
// Move overhead of needing to do prepare & unprepare to only use cases that need more than one execution.
// First execution, use sp_executesql, optimizing for asumption we will not re-use statement.
- if (needsPrepare
- && !connection.getEnablePrepareOnFirstPreparedStatementCall()
- && !isExecutedAtLeastOnce
- ) {
+ if (!connection.getEnablePrepareOnFirstPreparedStatementCall() && !isExecutedAtLeastOnce) {
buildExecSQLParams(tdsWriter);
isExecutedAtLeastOnce = true;
}
@@ -1015,7 +877,10 @@ else if (resultSet != null) {
* @return the result set containing the meta data
*/
/* L0 */ private ResultSet buildExecuteMetaData() throws SQLServerException {
- String fmtSQL = userSQL;
+ String fmtSQL = sqlCommand;
+ if (fmtSQL.indexOf(LEFT_CURLY_BRACKET) >= 0) {
+ fmtSQL = (new JDBCSyntaxTranslator()).translate(fmtSQL);
+ }
ResultSet emptyResultSet = null;
try {
@@ -2557,10 +2422,8 @@ final void doExecutePreparedStatementBatch(PrepStmtBatchExecCmd batchCommand) th
assert paramValues.length == batchParam.length;
for (int i = 0; i < paramValues.length; i++)
batchParam[i] = paramValues[i];
-
- boolean hasExistingTypeDefinitions = preparedTypeDefinitions != null;
- boolean hasNewTypeDefinitions = buildPreparedStrings(batchParam, false);
+ boolean hasNewTypeDefinitions = buildPreparedStrings(batchParam, false);
// Get the encryption metadata for the first batch only.
if ((0 == numBatchesExecuted) && (Util.shouldHonorAEForParameters(stmtColumnEncriptionSetting, connection)) && (0 < batchParam.length)
&& !isInternalEncryptionQuery && !encryptionMetadataIsRetrieved) {
@@ -2583,108 +2446,73 @@ final void doExecutePreparedStatementBatch(PrepStmtBatchExecCmd batchCommand) th
}
}
- // Retry execution if existing handle could not be re-used.
- for(int attempt = 1; attempt <= 2; ++attempt) {
-
- try {
+ if (numBatchesExecuted < numBatchesPrepared) {
+ // assert null != tdsWriter;
+ tdsWriter.writeByte((byte) nBatchStatementDelimiter);
+ }
+ else {
+ resetForReexecute();
+ tdsWriter = batchCommand.startRequest(TDS.PKT_RPC);
+ }
- // Re-use handle if available, requires parameter definitions which are not available until here.
- if (reuseCachedHandle(hasNewTypeDefinitions, 1 < attempt)) {
- hasNewTypeDefinitions = false;
- }
-
- if (numBatchesExecuted < numBatchesPrepared) {
- // assert null != tdsWriter;
- tdsWriter.writeByte((byte) nBatchStatementDelimiter);
+ // If we have to (re)prepare the statement then we must execute it so
+ // that we get back a (new) prepared statement handle to use to
+ // execute additional batches.
+ //
+ // We must always prepare the statement the first time through.
+ // But we may also need to reprepare the statement if, for example,
+ // the size of a batch's string parameter values changes such
+ // that repreparation is necessary.
+ ++numBatchesPrepared;
+ if (doPrepExec(tdsWriter, batchParam, hasNewTypeDefinitions) || numBatchesPrepared == numBatches) {
+ ensureExecuteResultsReader(batchCommand.startResponse(getIsResponseBufferingAdaptive()));
+
+ while (numBatchesExecuted < numBatchesPrepared) {
+ // NOTE:
+ // When making changes to anything below, consider whether similar changes need
+ // to be made to Statement batch execution.
+
+ startResults();
+
+ try {
+ // Get the first result from the batch. If there is no result for this batch
+ // then bail, leaving EXECUTE_FAILED in the current and remaining slots of
+ // the update count array.
+ if (!getNextResult())
+ return;
+
+ // If the result is a ResultSet (rather than an update count) then throw an
+ // exception for this result. The exception gets caught immediately below and
+ // translated into (or added to) a BatchUpdateException.
+ if (null != resultSet) {
+ SQLServerException.makeFromDriverError(connection, this, SQLServerException.getErrString("R_resultsetGeneratedForUpdate"),
+ null, false);
+ }
}
- else {
- resetForReexecute();
- tdsWriter = batchCommand.startRequest(TDS.PKT_RPC);
+ catch (SQLServerException e) {
+ // If the failure was severe enough to close the connection or roll back a
+ // manual transaction, then propagate the error up as a SQLServerException
+ // now, rather than continue with the batch.
+ if (connection.isSessionUnAvailable() || connection.rolledBackTransaction())
+ throw e;
+
+ // Otherwise, the connection is OK and the transaction is still intact,
+ // so just record the failure for the particular batch item.
+ updateCount = Statement.EXECUTE_FAILED;
+ if (null == batchCommand.batchException)
+ batchCommand.batchException = e;
}
- // If we have to (re)prepare the statement then we must execute it so
- // that we get back a (new) prepared statement handle to use to
- // execute additional batches.
- //
- // We must always prepare the statement the first time through.
- // But we may also need to reprepare the statement if, for example,
- // the size of a batch's string parameter values changes such
- // that repreparation is necessary.
- ++numBatchesPrepared;
-
- if (doPrepExec(tdsWriter, batchParam, hasNewTypeDefinitions, hasExistingTypeDefinitions) || numBatchesPrepared == numBatches) {
- ensureExecuteResultsReader(batchCommand.startResponse(getIsResponseBufferingAdaptive()));
-
- boolean retry = false;
- while (numBatchesExecuted < numBatchesPrepared) {
- // NOTE:
- // When making changes to anything below, consider whether similar changes need
- // to be made to Statement batch execution.
-
- startResults();
-
- try {
- // Get the first result from the batch. If there is no result for this batch
- // then bail, leaving EXECUTE_FAILED in the current and remaining slots of
- // the update count array.
- if (!getNextResult())
- return;
-
- // If the result is a ResultSet (rather than an update count) then throw an
- // exception for this result. The exception gets caught immediately below and
- // translated into (or added to) a BatchUpdateException.
- if (null != resultSet) {
- SQLServerException.makeFromDriverError(connection, this, SQLServerException.getErrString("R_resultsetGeneratedForUpdate"),
- null, false);
- }
- }
- catch (SQLServerException e) {
- // If the failure was severe enough to close the connection or roll back a
- // manual transaction, then propagate the error up as a SQLServerException
- // now, rather than continue with the batch.
- if (connection.isSessionUnAvailable() || connection.rolledBackTransaction())
- throw e;
-
- // Retry if invalid handle exception.
- if (retryBasedOnFailedReuseOfCachedHandle(e, attempt)) {
- // Reset number of batches prepared.
- numBatchesPrepared = numBatchesExecuted;
- retry = true;
- break;
- }
-
- // Otherwise, the connection is OK and the transaction is still intact,
- // so just record the failure for the particular batch item.
- updateCount = Statement.EXECUTE_FAILED;
- if (null == batchCommand.batchException)
- batchCommand.batchException = e;
- }
-
- // In batch execution, we have a special update count
- // to indicate that no information was returned
- batchCommand.updateCounts[numBatchesExecuted] = (-1 == updateCount) ? Statement.SUCCESS_NO_INFO : updateCount;
- processBatch();
-
- numBatchesExecuted++;
- }
- if(retry)
- continue;
+ // In batch execution, we have a special update count
+ // to indicate that no information was returned
+ batchCommand.updateCounts[numBatchesExecuted++] = (-1 == updateCount) ? Statement.SUCCESS_NO_INFO : updateCount;
- // Only way to proceed with preparing the next set of batches is if
- // we successfully executed the previously prepared set.
- assert numBatchesExecuted == numBatchesPrepared;
- }
+ processBatch();
}
- catch(SQLException e) {
- if (retryBasedOnFailedReuseOfCachedHandle(e, attempt)) {
- // Reset number of batches prepared.
- numBatchesPrepared = numBatchesExecuted;
- continue;
- }
- else
- throw e;
- }
- break;
+
+ // Only way to proceed with preparing the next set of batches is if
+ // we successfully executed the previously prepared set.
+ assert numBatchesExecuted == numBatchesPrepared;
}
}
}
@@ -2962,41 +2790,14 @@ public final void setNull(int paramIndex,
loggerExternal.exiting(getClassNameLogging(), "setNull");
}
- /**
- * Returns parameter metadata for the prepared statement.
- *
- * @param forceRefresh:
- * If true the cache will not be used to retrieve the metadata.
- *
- * @return
- * Per the description.
- *
- * @throws SQLServerException when an error occurs
- */
- public final ParameterMetaData getParameterMetaData(boolean forceRefresh) throws SQLServerException {
-
- SQLServerParameterMetaData pmd = this.connection.getCachedParameterMetadata(sqlTextCacheKey);
-
- if (!forceRefresh && null != pmd) {
- return pmd;
- }
- else {
- loggerExternal.entering(getClassNameLogging(), "getParameterMetaData");
- checkClosed();
- pmd = new SQLServerParameterMetaData(this, userSQL);
-
- connection.registerCachedParameterMetadata(sqlTextCacheKey, pmd);
-
- loggerExternal.exiting(getClassNameLogging(), "getParameterMetaData", pmd);
-
- return pmd;
- }
- }
-
/* JDBC 3.0 */
/* L3 */ public final ParameterMetaData getParameterMetaData() throws SQLServerException {
- return getParameterMetaData(false);
+ loggerExternal.entering(getClassNameLogging(), "getParameterMetaData");
+ checkClosed();
+ SQLServerParameterMetaData pmd = new SQLServerParameterMetaData(this, userSQL);
+ loggerExternal.exiting(getClassNameLogging(), "getParameterMetaData", pmd);
+ return pmd;
}
/* L3 */ public final void setURL(int parameterIndex,
diff --git a/src/main/java/com/microsoft/sqlserver/jdbc/SQLServerResource.java b/src/main/java/com/microsoft/sqlserver/jdbc/SQLServerResource.java
index 4386a6c5c..d4cc1bb7d 100644
--- a/src/main/java/com/microsoft/sqlserver/jdbc/SQLServerResource.java
+++ b/src/main/java/com/microsoft/sqlserver/jdbc/SQLServerResource.java
@@ -190,7 +190,6 @@ protected Object[][] getContents() {
{"R_socketTimeoutPropertyDescription", "The number of milliseconds to wait before the java.net.SocketTimeoutException is raised."},
{"R_serverPreparedStatementDiscardThresholdPropertyDescription", "The threshold for when to close discarded prepare statements on the server (calling a batch of sp_unprepares). A value of 1 or less will cause sp_unprepare to be called immediately on PreparedStatment close."},
{"R_enablePrepareOnFirstPreparedStatementCallPropertyDescription", "This setting specifies whether a prepared statement is prepared (sp_prepexec) on first use (property=true) or on second after first calling sp_executesql (property=false)."},
- {"R_statementPoolingCacheSizePropertyDescription", "This setting specifies the size of the prepared statement cache for a conection. A value less than 1 means no cache."},
{"R_gsscredentialPropertyDescription", "Impersonated GSS Credential to access SQL Server."},
{"R_noParserSupport", "An error occurred while instantiating the required parser. Error: \"{0}\""},
{"R_writeOnlyXML", "Cannot read from this SQLXML instance. This instance is for writing data only."},
@@ -380,7 +379,6 @@ protected Object[][] getContents() {
{"R_invalidFipsEncryptConfig", "Could not enable FIPS due to either encrypt is not true or using trusted certificate settings."},
{"R_invalidFipsProviderConfig", "Could not enable FIPS due to invalid FIPSProvider or TrustStoreType."},
{"R_serverPreparedStatementDiscardThreshold", "The serverPreparedStatementDiscardThreshold {0} is not valid."},
- {"R_statementPoolingCacheSize", "The statementPoolingCacheSize {0} is not valid."},
{"R_kerberosLoginFailedForUsername", "Cannot login with Kerberos principal {0}, check your credentials. {1}"},
{"R_kerberosLoginFailed", "Kerberos Login failed: {0} due to {1} ({2})"},
{"R_StoredProcedureNotFound", "Could not find stored procedure ''{0}''."},
diff --git a/src/main/java/com/microsoft/sqlserver/jdbc/SQLServerStatement.java b/src/main/java/com/microsoft/sqlserver/jdbc/SQLServerStatement.java
index a6a4e93ec..a49a8832d 100644
--- a/src/main/java/com/microsoft/sqlserver/jdbc/SQLServerStatement.java
+++ b/src/main/java/com/microsoft/sqlserver/jdbc/SQLServerStatement.java
@@ -8,9 +8,6 @@
package com.microsoft.sqlserver.jdbc;
-import static com.microsoft.sqlserver.jdbc.SQLServerConnection.getCachedParsedSQL;
-import static com.microsoft.sqlserver.jdbc.SQLServerConnection.parseAndCacheSQL;
-
import java.sql.BatchUpdateException;
import java.sql.ResultSet;
import java.sql.SQLException;
@@ -27,8 +24,6 @@
import java.util.regex.Matcher;
import java.util.regex.Pattern;
-import com.microsoft.sqlserver.jdbc.SQLServerConnection.Sha1HashKey;
-
/**
* SQLServerStatment provides the basic implementation of JDBC statement functionality. It also provides a number of base class implementation methods
* for the JDBC prepared statement and callable Statements. SQLServerStatement's basic role is to execute SQL statements and return update counts and
@@ -766,17 +761,10 @@ final void processResponse(TDSReader tdsReader) throws SQLServerException {
private String ensureSQLSyntax(String sql) throws SQLServerException {
if (sql.indexOf(LEFT_CURLY_BRACKET) >= 0) {
-
- Sha1HashKey cacheKey = new Sha1HashKey(sql);
-
- // Check for cached SQL metadata.
- ParsedSQLCacheItem cacheItem = getCachedParsedSQL(cacheKey);
- if (null == cacheItem)
- cacheItem = parseAndCacheSQL(cacheKey, sql);
-
- // Retrieve from cache item.
- procedureName = cacheItem.procedureName;
- return cacheItem.processedSQL;
+ JDBCSyntaxTranslator translator = new JDBCSyntaxTranslator();
+ String execSyntax = translator.translate(sql);
+ procedureName = translator.getProcedureName();
+ return execSyntax;
}
return sql;
diff --git a/src/main/java/mssql/googlecode/concurrentlinkedhashmap/ConcurrentLinkedHashMap.java b/src/main/java/mssql/googlecode/concurrentlinkedhashmap/ConcurrentLinkedHashMap.java
deleted file mode 100644
index a52c70e7b..000000000
--- a/src/main/java/mssql/googlecode/concurrentlinkedhashmap/ConcurrentLinkedHashMap.java
+++ /dev/null
@@ -1,1582 +0,0 @@
-/*
- * Copyright 2010 Google Inc. All Rights Reserved.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package mssql.googlecode.concurrentlinkedhashmap;
-
-import static mssql.googlecode.concurrentlinkedhashmap.ConcurrentLinkedHashMap.DrainStatus.IDLE;
-import static mssql.googlecode.concurrentlinkedhashmap.ConcurrentLinkedHashMap.DrainStatus.PROCESSING;
-import static mssql.googlecode.concurrentlinkedhashmap.ConcurrentLinkedHashMap.DrainStatus.REQUIRED;
-import static java.util.Collections.emptyList;
-import static java.util.Collections.unmodifiableMap;
-import static java.util.Collections.unmodifiableSet;
-
-import java.io.InvalidObjectException;
-import java.io.ObjectInputStream;
-import java.io.Serializable;
-import java.util.AbstractCollection;
-import java.util.AbstractMap;
-import java.util.AbstractQueue;
-import java.util.AbstractSet;
-import java.util.Collection;
-import java.util.HashMap;
-import java.util.Iterator;
-import java.util.LinkedHashMap;
-import java.util.LinkedHashSet;
-import java.util.Map;
-import java.util.Queue;
-import java.util.Set;
-import java.util.concurrent.ConcurrentHashMap;
-import java.util.concurrent.ConcurrentLinkedQueue;
-import java.util.concurrent.ConcurrentMap;
-import java.util.concurrent.atomic.AtomicLong;
-import java.util.concurrent.atomic.AtomicReference;
-import java.util.concurrent.locks.Lock;
-import java.util.concurrent.locks.ReentrantLock;
-
-/**
- * A hash table supporting full concurrency of retrievals, adjustable expected
- * concurrency for updates, and a maximum capacity to bound the map by. This
- * implementation differs from {@link ConcurrentHashMap} in that it maintains a
- * page replacement algorithm that is used to evict an entry when the map has
- * exceeded its capacity. Unlike the Java Collections Framework, this
- * map does not have a publicly visible constructor and instances are created
- * through a {@link Builder}.
- *
- * An entry is evicted from the map when the weighted capacity exceeds
- * its maximum weighted capacity threshold. A {@link EntryWeigher}
- * determines how many units of capacity that an entry consumes. The default
- * weigher assigns each value a weight of 1 to bound the map by the
- * total number of key-value pairs. A map that holds collections may choose to
- * weigh values by the number of elements in the collection and bound the map
- * by the total number of elements that it contains. A change to a value that
- * modifies its weight requires that an update operation is performed on the
- * map.
- *
- * An {@link EvictionListener} may be supplied for notification when an entry
- * is evicted from the map. This listener is invoked on a caller's thread and
- * will not block other threads from operating on the map. An implementation
- * should be aware that the caller's thread will not expect long execution
- * times or failures as a side effect of the listener being notified. Execution
- * safety and a fast turn around time can be achieved by performing the
- * operation asynchronously, such as by submitting a task to an
- * {@link java.util.concurrent.ExecutorService}.
- *
- * The concurrency level determines the number of threads that can
- * concurrently modify the table. Using a significantly higher or lower value
- * than needed can waste space or lead to thread contention, but an estimate
- * within an order of magnitude of the ideal value does not usually have a
- * noticeable impact. Because placement in hash tables is essentially random,
- * the actual concurrency will vary.
- *
- * This class and its views and iterators implement all of the
- * optional methods of the {@link Map} and {@link Iterator}
- * interfaces.
- *
- * Like {@link java.util.Hashtable} but unlike {@link HashMap}, this class
- * does not allow null to be used as a key or value. Unlike
- * {@link java.util.LinkedHashMap}, this class does not provide
- * predictable iteration order. A snapshot of the keys and entries may be
- * obtained in ascending and descending order of retention.
- *
- * @author ben.manes@gmail.com (Ben Manes)
- * @param the type of keys maintained by this map
- * @param the type of mapped values
- * @see
- * http://code.google.com/p/concurrentlinkedhashmap/
- */
-public final class ConcurrentLinkedHashMap extends AbstractMap
- implements ConcurrentMap, Serializable {
-
- /*
- * This class performs a best-effort bounding of a ConcurrentHashMap using a
- * page-replacement algorithm to determine which entries to evict when the
- * capacity is exceeded.
- *
- * The page replacement algorithm's data structures are kept eventually
- * consistent with the map. An update to the map and recording of reads may
- * not be immediately reflected on the algorithm's data structures. These
- * structures are guarded by a lock and operations are applied in batches to
- * avoid lock contention. The penalty of applying the batches is spread across
- * threads so that the amortized cost is slightly higher than performing just
- * the ConcurrentHashMap operation.
- *
- * A memento of the reads and writes that were performed on the map are
- * recorded in buffers. These buffers are drained at the first opportunity
- * after a write or when the read buffer exceeds a threshold size. The reads
- * are recorded in a lossy buffer, allowing the reordering operations to be
- * discarded if the draining process cannot keep up. Due to the concurrent
- * nature of the read and write operations a strict policy ordering is not
- * possible, but is observably strict when single threaded.
- *
- * Due to a lack of a strict ordering guarantee, a task can be executed
- * out-of-order, such as a removal followed by its addition. The state of the
- * entry is encoded within the value's weight.
- *
- * Alive: The entry is in both the hash-table and the page replacement policy.
- * This is represented by a positive weight.
- *
- * Retired: The entry is not in the hash-table and is pending removal from the
- * page replacement policy. This is represented by a negative weight.
- *
- * Dead: The entry is not in the hash-table and is not in the page replacement
- * policy. This is represented by a weight of zero.
- *
- * The Least Recently Used page replacement algorithm was chosen due to its
- * simplicity, high hit rate, and ability to be implemented with O(1) time
- * complexity.
- */
-
- /** The number of CPUs */
- static final int NCPU = Runtime.getRuntime().availableProcessors();
-
- /** The maximum weighted capacity of the map. */
- static final long MAXIMUM_CAPACITY = Long.MAX_VALUE - Integer.MAX_VALUE;
-
- /** The number of read buffers to use. */
- static final int NUMBER_OF_READ_BUFFERS = ceilingNextPowerOfTwo(NCPU);
-
- /** Mask value for indexing into the read buffers. */
- static final int READ_BUFFERS_MASK = NUMBER_OF_READ_BUFFERS - 1;
-
- /** The number of pending read operations before attempting to drain. */
- static final int READ_BUFFER_THRESHOLD = 32;
-
- /** The maximum number of read operations to perform per amortized drain. */
- static final int READ_BUFFER_DRAIN_THRESHOLD = 2 * READ_BUFFER_THRESHOLD;
-
- /** The maximum number of pending reads per buffer. */
- static final int READ_BUFFER_SIZE = 2 * READ_BUFFER_DRAIN_THRESHOLD;
-
- /** Mask value for indexing into the read buffer. */
- static final int READ_BUFFER_INDEX_MASK = READ_BUFFER_SIZE - 1;
-
- /** The maximum number of write operations to perform per amortized drain. */
- static final int WRITE_BUFFER_DRAIN_THRESHOLD = 16;
-
- /** A queue that discards all entries. */
- static final Queue> DISCARDING_QUEUE = new DiscardingQueue();
-
- static int ceilingNextPowerOfTwo(int x) {
- // From Hacker's Delight, Chapter 3, Harry S. Warren Jr.
- return 1 << (Integer.SIZE - Integer.numberOfLeadingZeros(x - 1));
- }
-
- // The backing data store holding the key-value associations
- final ConcurrentMap> data;
- final int concurrencyLevel;
-
- // These fields provide support to bound the map by a maximum capacity
- final long[] readBufferReadCount;
- final LinkedDeque> evictionDeque;
-
- final AtomicLong weightedSize;
- final AtomicLong capacity;
-
- final Lock evictionLock;
- final Queue writeBuffer;
- final AtomicLong[] readBufferWriteCount;
- final AtomicLong[] readBufferDrainAtWriteCount;
- final AtomicReference>[][] readBuffers;
-
- final AtomicReference drainStatus;
- final EntryWeigher super K, ? super V> weigher;
-
- // These fields provide support for notifying a listener.
- final Queue> pendingNotifications;
- final EvictionListener listener;
-
- transient Set keySet;
- transient Collection values;
- transient Set> entrySet;
-
- /**
- * Creates an instance based on the builder's configuration.
- */
- @SuppressWarnings({"unchecked", "cast"})
- private ConcurrentLinkedHashMap(Builder builder) {
- // The data store and its maximum capacity
- concurrencyLevel = builder.concurrencyLevel;
- capacity = new AtomicLong(Math.min(builder.capacity, MAXIMUM_CAPACITY));
- data = new ConcurrentHashMap>(builder.initialCapacity, 0.75f, concurrencyLevel);
-
- // The eviction support
- weigher = builder.weigher;
- evictionLock = new ReentrantLock();
- weightedSize = new AtomicLong();
- evictionDeque = new LinkedDeque>();
- writeBuffer = new ConcurrentLinkedQueue();
- drainStatus = new AtomicReference(IDLE);
-
- readBufferReadCount = new long[NUMBER_OF_READ_BUFFERS];
- readBufferWriteCount = new AtomicLong[NUMBER_OF_READ_BUFFERS];
- readBufferDrainAtWriteCount = new AtomicLong[NUMBER_OF_READ_BUFFERS];
- readBuffers = new AtomicReference[NUMBER_OF_READ_BUFFERS][READ_BUFFER_SIZE];
- for (int i = 0; i < NUMBER_OF_READ_BUFFERS; i++) {
- readBufferWriteCount[i] = new AtomicLong();
- readBufferDrainAtWriteCount[i] = new AtomicLong();
- readBuffers[i] = new AtomicReference[READ_BUFFER_SIZE];
- for (int j = 0; j < READ_BUFFER_SIZE; j++) {
- readBuffers[i][j] = new AtomicReference>();
- }
- }
-
- // The notification queue and listener
- listener = builder.listener;
- pendingNotifications = (listener == DiscardingListener.INSTANCE)
- ? (Queue>) DISCARDING_QUEUE
- : new ConcurrentLinkedQueue>();
- }
-
- /** Ensures that the object is not null. */
- static void checkNotNull(Object o) {
- if (o == null) {
- throw new NullPointerException();
- }
- }
-
- /** Ensures that the argument expression is true. */
- static void checkArgument(boolean expression) {
- if (!expression) {
- throw new IllegalArgumentException();
- }
- }
-
- /** Ensures that the state expression is true. */
- static void checkState(boolean expression) {
- if (!expression) {
- throw new IllegalStateException();
- }
- }
-
- /* ---------------- Eviction Support -------------- */
-
- /**
- * Retrieves the maximum weighted capacity of the map.
- *
- * @return the maximum weighted capacity
- */
- public long capacity() {
- return capacity.get();
- }
-
- /**
- * Sets the maximum weighted capacity of the map and eagerly evicts entries
- * until it shrinks to the appropriate size.
- *
- * @param capacity the maximum weighted capacity of the map
- * @throws IllegalArgumentException if the capacity is negative
- */
- public void setCapacity(long capacity) {
- checkArgument(capacity >= 0);
- evictionLock.lock();
- try {
- this.capacity.lazySet(Math.min(capacity, MAXIMUM_CAPACITY));
- drainBuffers();
- evict();
- } finally {
- evictionLock.unlock();
- }
- notifyListener();
- }
-
- /** Determines whether the map has exceeded its capacity. */
- boolean hasOverflowed() {
- return weightedSize.get() > capacity.get();
- }
-
- /**
- * Evicts entries from the map while it exceeds the capacity and appends
- * evicted entries to the notification queue for processing.
- */
- void evict() {
- // Attempts to evict entries from the map if it exceeds the maximum
- // capacity. If the eviction fails due to a concurrent removal of the
- // victim, that removal may cancel out the addition that triggered this
- // eviction. The victim is eagerly unlinked before the removal task so
- // that if an eviction is still required then a new victim will be chosen
- // for removal.
- while (hasOverflowed()) {
- final Node node = evictionDeque.poll();
-
- // If weighted values are used, then the pending operations will adjust
- // the size to reflect the correct weight
- if (node == null) {
- return;
- }
-
- // Notify the listener only if the entry was evicted
- if (data.remove(node.key, node)) {
- pendingNotifications.add(node);
- }
-
- makeDead(node);
- }
- }
-
- /**
- * Performs the post-processing work required after a read.
- *
- * @param node the entry in the page replacement policy
- */
- void afterRead(Node node) {
- final int bufferIndex = readBufferIndex();
- final long writeCount = recordRead(bufferIndex, node);
- drainOnReadIfNeeded(bufferIndex, writeCount);
- notifyListener();
- }
-
- /** Returns the index to the read buffer to record into. */
- static int readBufferIndex() {
- // A buffer is chosen by the thread's id so that tasks are distributed in a
- // pseudo evenly manner. This helps avoid hot entries causing contention
- // due to other threads trying to append to the same buffer.
- return ((int) Thread.currentThread().getId()) & READ_BUFFERS_MASK;
- }
-
- /**
- * Records a read in the buffer and return its write count.
- *
- * @param bufferIndex the index to the chosen read buffer
- * @param node the entry in the page replacement policy
- * @return the number of writes on the chosen read buffer
- */
- long recordRead(int bufferIndex, Node node) {
- // The location in the buffer is chosen in a racy fashion as the increment
- // is not atomic with the insertion. This means that concurrent reads can
- // overlap and overwrite one another, resulting in a lossy buffer.
- final AtomicLong counter = readBufferWriteCount[bufferIndex];
- final long writeCount = counter.get();
- counter.lazySet(writeCount + 1);
-
- final int index = (int) (writeCount & READ_BUFFER_INDEX_MASK);
- readBuffers[bufferIndex][index].lazySet(node);
-
- return writeCount;
- }
-
- /**
- * Attempts to drain the buffers if it is determined to be needed when
- * post-processing a read.
- *
- * @param bufferIndex the index to the chosen read buffer
- * @param writeCount the number of writes on the chosen read buffer
- */
- void drainOnReadIfNeeded(int bufferIndex, long writeCount) {
- final long pending = (writeCount - readBufferDrainAtWriteCount[bufferIndex].get());
- final boolean delayable = (pending < READ_BUFFER_THRESHOLD);
- final DrainStatus status = drainStatus.get();
- if (status.shouldDrainBuffers(delayable)) {
- tryToDrainBuffers();
- }
- }
-
- /**
- * Performs the post-processing work required after a write.
- *
- * @param task the pending operation to be applied
- */
- void afterWrite(Runnable task) {
- writeBuffer.add(task);
- drainStatus.lazySet(REQUIRED);
- tryToDrainBuffers();
- notifyListener();
- }
-
- /**
- * Attempts to acquire the eviction lock and apply the pending operations, up
- * to the amortized threshold, to the page replacement policy.
- */
- void tryToDrainBuffers() {
- if (evictionLock.tryLock()) {
- try {
- drainStatus.lazySet(PROCESSING);
- drainBuffers();
- } finally {
- drainStatus.compareAndSet(PROCESSING, IDLE);
- evictionLock.unlock();
- }
- }
- }
-
- /** Drains the read and write buffers up to an amortized threshold. */
- void drainBuffers() {
- drainReadBuffers();
- drainWriteBuffer();
- }
-
- /** Drains the read buffers, each up to an amortized threshold. */
- void drainReadBuffers() {
- final int start = (int) Thread.currentThread().getId();
- final int end = start + NUMBER_OF_READ_BUFFERS;
- for (int i = start; i < end; i++) {
- drainReadBuffer(i & READ_BUFFERS_MASK);
- }
- }
-
- /** Drains the read buffer up to an amortized threshold. */
- void drainReadBuffer(int bufferIndex) {
- final long writeCount = readBufferWriteCount[bufferIndex].get();
- for (int i = 0; i < READ_BUFFER_DRAIN_THRESHOLD; i++) {
- final int index = (int) (readBufferReadCount[bufferIndex] & READ_BUFFER_INDEX_MASK);
- final AtomicReference> slot = readBuffers[bufferIndex][index];
- final Node node = slot.get();
- if (node == null) {
- break;
- }
-
- slot.lazySet(null);
- applyRead(node);
- readBufferReadCount[bufferIndex]++;
- }
- readBufferDrainAtWriteCount[bufferIndex].lazySet(writeCount);
- }
-
- /** Updates the node's location in the page replacement policy. */
- void applyRead(Node node) {
- // An entry may be scheduled for reordering despite having been removed.
- // This can occur when the entry was concurrently read while a writer was
- // removing it. If the entry is no longer linked then it does not need to
- // be processed.
- if (evictionDeque.contains(node)) {
- evictionDeque.moveToBack(node);
- }
- }
-
- /** Drains the read buffer up to an amortized threshold. */
- void drainWriteBuffer() {
- for (int i = 0; i < WRITE_BUFFER_DRAIN_THRESHOLD; i++) {
- final Runnable task = writeBuffer.poll();
- if (task == null) {
- break;
- }
- task.run();
- }
- }
-
- /**
- * Attempts to transition the node from the alive state to the
- * retired state.
- *
- * @param node the entry in the page replacement policy
- * @param expect the expected weighted value
- * @return if successful
- */
- boolean tryToRetire(Node node, WeightedValue expect) {
- if (expect.isAlive()) {
- final WeightedValue retired = new WeightedValue(expect.value, -expect.weight);
- return node.compareAndSet(expect, retired);
- }
- return false;
- }
-
- /**
- * Atomically transitions the node from the alive state to the
- * retired state, if a valid transition.
- *
- * @param node the entry in the page replacement policy
- */
- void makeRetired(Node node) {
- for (;;) {
- final WeightedValue current = node.get();
- if (!current.isAlive()) {
- return;
- }
- final WeightedValue retired = new WeightedValue(current.value, -current.weight);
- if (node.compareAndSet(current, retired)) {
- return;
- }
- }
- }
-
- /**
- * Atomically transitions the node to the dead state and decrements
- * the weightedSize.
- *
- * @param node the entry in the page replacement policy
- */
- void makeDead(Node node) {
- for (;;) {
- WeightedValue current = node.get();
- WeightedValue dead = new WeightedValue(current.value, 0);
- if (node.compareAndSet(current, dead)) {
- weightedSize.lazySet(weightedSize.get() - Math.abs(current.weight));
- return;
- }
- }
- }
-
- /** Notifies the listener of entries that were evicted. */
- void notifyListener() {
- Node node;
- while ((node = pendingNotifications.poll()) != null) {
- listener.onEviction(node.key, node.getValue());
- }
- }
-
- /** Adds the node to the page replacement policy. */
- final class AddTask implements Runnable {
- final Node node;
- final int weight;
-
- AddTask(Node node, int weight) {
- this.weight = weight;
- this.node = node;
- }
-
- @Override
- public void run() {
- weightedSize.lazySet(weightedSize.get() + weight);
-
- // ignore out-of-order write operations
- if (node.get().isAlive()) {
- evictionDeque.add(node);
- evict();
- }
- }
- }
-
- /** Removes a node from the page replacement policy. */
- final class RemovalTask implements Runnable {
- final Node node;
-
- RemovalTask(Node node) {
- this.node = node;
- }
-
- @Override
- public void run() {
- // add may not have been processed yet
- evictionDeque.remove(node);
- makeDead(node);
- }
- }
-
- /** Updates the weighted size and evicts an entry on overflow. */
- final class UpdateTask implements Runnable {
- final int weightDifference;
- final Node node;
-
- public UpdateTask(Node node, int weightDifference) {
- this.weightDifference = weightDifference;
- this.node = node;
- }
-
- @Override
- public void run() {
- weightedSize.lazySet(weightedSize.get() + weightDifference);
- applyRead(node);
- evict();
- }
- }
-
- /* ---------------- Concurrent Map Support -------------- */
-
- @Override
- public boolean isEmpty() {
- return data.isEmpty();
- }
-
- @Override
- public int size() {
- return data.size();
- }
-
- /**
- * Returns the weighted size of this map.
- *
- * @return the combined weight of the values in this map
- */
- public long weightedSize() {
- return Math.max(0, weightedSize.get());
- }
-
- @Override
- public void clear() {
- evictionLock.lock();
- try {
- // Discard all entries
- Node node;
- while ((node = evictionDeque.poll()) != null) {
- data.remove(node.key, node);
- makeDead(node);
- }
-
- // Discard all pending reads
- for (AtomicReference>[] buffer : readBuffers) {
- for (AtomicReference> slot : buffer) {
- slot.lazySet(null);
- }
- }
-
- // Apply all pending writes
- Runnable task;
- while ((task = writeBuffer.poll()) != null) {
- task.run();
- }
- } finally {
- evictionLock.unlock();
- }
- }
-
- @Override
- public boolean containsKey(Object key) {
- return data.containsKey(key);
- }
-
- @Override
- public boolean containsValue(Object value) {
- checkNotNull(value);
-
- for (Node node : data.values()) {
- if (node.getValue().equals(value)) {
- return true;
- }
- }
- return false;
- }
-
- @Override
- public V get(Object key) {
- final Node node = data.get(key);
- if (node == null) {
- return null;
- }
- afterRead(node);
- return node.getValue();
- }
-
- /**
- * Returns the value to which the specified key is mapped, or {@code null}
- * if this map contains no mapping for the key. This method differs from
- * {@link #get(Object)} in that it does not record the operation with the
- * page replacement policy.
- *
- * @param key the key whose associated value is to be returned
- * @return the value to which the specified key is mapped, or
- * {@code null} if this map contains no mapping for the key
- * @throws NullPointerException if the specified key is null
- */
- public V getQuietly(Object key) {
- final Node node = data.get(key);
- return (node == null) ? null : node.getValue();
- }
-
- @Override
- public V put(K key, V value) {
- return put(key, value, false);
- }
-
- @Override
- public V putIfAbsent(K key, V value) {
- return put(key, value, true);
- }
-
- /**
- * Adds a node to the list and the data store. If an existing node is found,
- * then its value is updated if allowed.
- *
- * @param key key with which the specified value is to be associated
- * @param value value to be associated with the specified key
- * @param onlyIfAbsent a write is performed only if the key is not already
- * associated with a value
- * @return the prior value in the data store or null if no mapping was found
- */
- V put(K key, V value, boolean onlyIfAbsent) {
- checkNotNull(key);
- checkNotNull(value);
-
- final int weight = weigher.weightOf(key, value);
- final WeightedValue weightedValue = new WeightedValue(value, weight);
- final Node node = new Node(key, weightedValue);
-
- for (;;) {
- final Node prior = data.putIfAbsent(node.key, node);
- if (prior == null) {
- afterWrite(new AddTask(node, weight));
- return null;
- } else if (onlyIfAbsent) {
- afterRead(prior);
- return prior.getValue();
- }
- for (;;) {
- final WeightedValue oldWeightedValue = prior.get();
- if (!oldWeightedValue.isAlive()) {
- break;
- }
-
- if (prior.compareAndSet(oldWeightedValue, weightedValue)) {
- final int weightedDifference = weight - oldWeightedValue.weight;
- if (weightedDifference == 0) {
- afterRead(prior);
- } else {
- afterWrite(new UpdateTask(prior, weightedDifference));
- }
- return oldWeightedValue.value;
- }
- }
- }
- }
-
- @Override
- public V remove(Object key) {
- final Node node = data.remove(key);
- if (node == null) {
- return null;
- }
-
- makeRetired(node);
- afterWrite(new RemovalTask(node));
- return node.getValue();
- }
-
- @Override
- public boolean remove(Object key, Object value) {
- final Node node = data.get(key);
- if ((node == null) || (value == null)) {
- return false;
- }
-
- WeightedValue weightedValue = node.get();
- for (;;) {
- if (weightedValue.contains(value)) {
- if (tryToRetire(node, weightedValue)) {
- if (data.remove(key, node)) {
- afterWrite(new RemovalTask(node));
- return true;
- }
- } else {
- weightedValue = node.get();
- if (weightedValue.isAlive()) {
- // retry as an intermediate update may have replaced the value with
- // an equal instance that has a different reference identity
- continue;
- }
- }
- }
- return false;
- }
- }
-
- @Override
- public V replace(K key, V value) {
- checkNotNull(key);
- checkNotNull(value);
-
- final int weight = weigher.weightOf(key, value);
- final WeightedValue weightedValue = new WeightedValue(value, weight);
-
- final Node node = data.get(key);
- if (node == null) {
- return null;
- }
- for (;;) {
- final WeightedValue oldWeightedValue = node.get();
- if (!oldWeightedValue.isAlive()) {
- return null;
- }
- if (node.compareAndSet(oldWeightedValue, weightedValue)) {
- final int weightedDifference = weight - oldWeightedValue.weight;
- if (weightedDifference == 0) {
- afterRead(node);
- } else {
- afterWrite(new UpdateTask(node, weightedDifference));
- }
- return oldWeightedValue.value;
- }
- }
- }
-
- @Override
- public boolean replace(K key, V oldValue, V newValue) {
- checkNotNull(key);
- checkNotNull(oldValue);
- checkNotNull(newValue);
-
- final int weight = weigher.weightOf(key, newValue);
- final WeightedValue newWeightedValue = new WeightedValue(newValue, weight);
-
- final Node node = data.get(key);
- if (node == null) {
- return false;
- }
- for (;;) {
- final WeightedValue weightedValue = node.get();
- if (!weightedValue.isAlive() || !weightedValue.contains(oldValue)) {
- return false;
- }
- if (node.compareAndSet(weightedValue, newWeightedValue)) {
- final int weightedDifference = weight - weightedValue.weight;
- if (weightedDifference == 0) {
- afterRead(node);
- } else {
- afterWrite(new UpdateTask(node, weightedDifference));
- }
- return true;
- }
- }
- }
-
- @Override
- public Set keySet() {
- final Set ks = keySet;
- return (ks == null) ? (keySet = new KeySet()) : ks;
- }
-
- /**
- * Returns a unmodifiable snapshot {@link Set} view of the keys contained in
- * this map. The set's iterator returns the keys whose order of iteration is
- * the ascending order in which its entries are considered eligible for
- * retention, from the least-likely to be retained to the most-likely.
- *
- * Beware that, unlike in {@link #keySet()}, obtaining the set is NOT
- * a constant-time operation. Because of the asynchronous nature of the page
- * replacement policy, determining the retention ordering requires a traversal
- * of the keys.
- *
- * @return an ascending snapshot view of the keys in this map
- */
- public Set ascendingKeySet() {
- return ascendingKeySetWithLimit(Integer.MAX_VALUE);
- }
-
- /**
- * Returns an unmodifiable snapshot {@link Set} view of the keys contained in
- * this map. The set's iterator returns the keys whose order of iteration is
- * the ascending order in which its entries are considered eligible for
- * retention, from the least-likely to be retained to the most-likely.
- *
- * Beware that, unlike in {@link #keySet()}, obtaining the set is NOT
- * a constant-time operation. Because of the asynchronous nature of the page
- * replacement policy, determining the retention ordering requires a traversal
- * of the keys.
- *
- * @param limit the maximum size of the returned set
- * @return a ascending snapshot view of the keys in this map
- * @throws IllegalArgumentException if the limit is negative
- */
- public Set ascendingKeySetWithLimit(int limit) {
- return orderedKeySet(true, limit);
- }
-
- /**
- * Returns an unmodifiable snapshot {@link Set} view of the keys contained in
- * this map. The set's iterator returns the keys whose order of iteration is
- * the descending order in which its entries are considered eligible for
- * retention, from the most-likely to be retained to the least-likely.
- *
- * Beware that, unlike in {@link #keySet()}, obtaining the set is NOT
- * a constant-time operation. Because of the asynchronous nature of the page
- * replacement policy, determining the retention ordering requires a traversal
- * of the keys.
- *
- * @return a descending snapshot view of the keys in this map
- */
- public Set descendingKeySet() {
- return descendingKeySetWithLimit(Integer.MAX_VALUE);
- }
-
- /**
- * Returns an unmodifiable snapshot {@link Set} view of the keys contained in
- * this map. The set's iterator returns the keys whose order of iteration is
- * the descending order in which its entries are considered eligible for
- * retention, from the most-likely to be retained to the least-likely.
- *
- * Beware that, unlike in {@link #keySet()}, obtaining the set is NOT
- * a constant-time operation. Because of the asynchronous nature of the page
- * replacement policy, determining the retention ordering requires a traversal
- * of the keys.
- *
- * @param limit the maximum size of the returned set
- * @return a descending snapshot view of the keys in this map
- * @throws IllegalArgumentException if the limit is negative
- */
- public Set descendingKeySetWithLimit(int limit) {
- return orderedKeySet(false, limit);
- }
-
- Set orderedKeySet(boolean ascending, int limit) {
- checkArgument(limit >= 0);
- evictionLock.lock();
- try {
- drainBuffers();
-
- final int initialCapacity = (weigher == Weighers.entrySingleton())
- ? Math.min(limit, (int) weightedSize())
- : 16;
- final Set keys = new LinkedHashSet(initialCapacity);
- final Iterator> iterator = ascending
- ? evictionDeque.iterator()
- : evictionDeque.descendingIterator();
- while (iterator.hasNext() && (limit > keys.size())) {
- keys.add(iterator.next().key);
- }
- return unmodifiableSet(keys);
- } finally {
- evictionLock.unlock();
- }
- }
-
- @Override
- public Collection values() {
- final Collection vs = values;
- return (vs == null) ? (values = new Values()) : vs;
- }
-
- @Override
- public Set> entrySet() {
- final Set> es = entrySet;
- return (es == null) ? (entrySet = new EntrySet()) : es;
- }
-
- /**
- * Returns an unmodifiable snapshot {@link Map} view of the mappings contained
- * in this map. The map's collections return the mappings whose order of
- * iteration is the ascending order in which its entries are considered
- * eligible for retention, from the least-likely to be retained to the
- * most-likely.
- *
- * Beware that obtaining the mappings is NOT a constant-time
- * operation. Because of the asynchronous nature of the page replacement
- * policy, determining the retention ordering requires a traversal of the
- * entries.
- *
- * @return a ascending snapshot view of this map
- */
- public Map ascendingMap() {
- return ascendingMapWithLimit(Integer.MAX_VALUE);
- }
-
- /**
- * Returns an unmodifiable snapshot {@link Map} view of the mappings contained
- * in this map. The map's collections return the mappings whose order of
- * iteration is the ascending order in which its entries are considered
- * eligible for retention, from the least-likely to be retained to the
- * most-likely.
- *
- * Beware that obtaining the mappings is NOT a constant-time
- * operation. Because of the asynchronous nature of the page replacement
- * policy, determining the retention ordering requires a traversal of the
- * entries.
- *
- * @param limit the maximum size of the returned map
- * @return a ascending snapshot view of this map
- * @throws IllegalArgumentException if the limit is negative
- */
- public Map ascendingMapWithLimit(int limit) {
- return orderedMap(true, limit);
- }
-
- /**
- * Returns an unmodifiable snapshot {@link Map} view of the mappings contained
- * in this map. The map's collections return the mappings whose order of
- * iteration is the descending order in which its entries are considered
- * eligible for retention, from the most-likely to be retained to the
- * least-likely.
- *
- * Beware that obtaining the mappings is NOT a constant-time
- * operation. Because of the asynchronous nature of the page replacement
- * policy, determining the retention ordering requires a traversal of the
- * entries.
- *
- * @return a descending snapshot view of this map
- */
- public Map descendingMap() {
- return descendingMapWithLimit(Integer.MAX_VALUE);
- }
-
- /**
- * Returns an unmodifiable snapshot {@link Map} view of the mappings contained
- * in this map. The map's collections return the mappings whose order of
- * iteration is the descending order in which its entries are considered
- * eligible for retention, from the most-likely to be retained to the
- * least-likely.
- *
- * Beware that obtaining the mappings is NOT a constant-time
- * operation. Because of the asynchronous nature of the page replacement
- * policy, determining the retention ordering requires a traversal of the
- * entries.
- *
- * @param limit the maximum size of the returned map
- * @return a descending snapshot view of this map
- * @throws IllegalArgumentException if the limit is negative
- */
- public Map descendingMapWithLimit(int limit) {
- return orderedMap(false, limit);
- }
-
- Map orderedMap(boolean ascending, int limit) {
- checkArgument(limit >= 0);
- evictionLock.lock();
- try {
- drainBuffers();
-
- final int initialCapacity = (weigher == Weighers.entrySingleton())
- ? Math.min(limit, (int) weightedSize())
- : 16;
- final Map map = new LinkedHashMap(initialCapacity);
- final Iterator> iterator = ascending
- ? evictionDeque.iterator()
- : evictionDeque.descendingIterator();
- while (iterator.hasNext() && (limit > map.size())) {
- Node node = iterator.next();
- map.put(node.key, node.getValue());
- }
- return unmodifiableMap(map);
- } finally {
- evictionLock.unlock();
- }
- }
-
- /** The draining status of the buffers. */
- enum DrainStatus {
-
- /** A drain is not taking place. */
- IDLE {
- @Override boolean shouldDrainBuffers(boolean delayable) {
- return !delayable;
- }
- },
-
- /** A drain is required due to a pending write modification. */
- REQUIRED {
- @Override boolean shouldDrainBuffers(boolean delayable) {
- return true;
- }
- },
-
- /** A drain is in progress. */
- PROCESSING {
- @Override boolean shouldDrainBuffers(boolean delayable) {
- return false;
- }
- };
-
- /**
- * Determines whether the buffers should be drained.
- *
- * @param delayable if a drain should be delayed until required
- * @return if a drain should be attempted
- */
- abstract boolean shouldDrainBuffers(boolean delayable);
- }
-
- /** A value, its weight, and the entry's status. */
- static final class WeightedValue {
- final int weight;
- final V value;
-
- WeightedValue(V value, int weight) {
- this.weight = weight;
- this.value = value;
- }
-
- boolean contains(Object o) {
- return (o == value) || value.equals(o);
- }
-
- /**
- * If the entry is available in the hash-table and page replacement policy.
- */
- boolean isAlive() {
- return weight > 0;
- }
-
- /**
- * If the entry was removed from the hash-table and is awaiting removal from
- * the page replacement policy.
- */
- boolean isRetired() {
- return weight < 0;
- }
-
- /**
- * If the entry was removed from the hash-table and the page replacement
- * policy.
- */
- boolean isDead() {
- return weight == 0;
- }
- }
-
- /**
- * A node contains the key, the weighted value, and the linkage pointers on
- * the page-replacement algorithm's data structures.
- */
- @SuppressWarnings("serial")
- static final class Node extends AtomicReference>
- implements Linked> {
- final K key;
- Node prev;
- Node next;
-
- /** Creates a new, unlinked node. */
- Node(K key, WeightedValue weightedValue) {
- super(weightedValue);
- this.key = key;
- }
-
- @Override
- public Node getPrevious() {
- return prev;
- }
-
- @Override
- public void setPrevious(Node prev) {
- this.prev = prev;
- }
-
- @Override
- public Node getNext() {
- return next;
- }
-
- @Override
- public void setNext(Node next) {
- this.next = next;
- }
-
- /** Retrieves the value held by the current WeightedValue. */
- V getValue() {
- return get().value;
- }
- }
-
- /** An adapter to safely externalize the keys. */
- final class KeySet extends AbstractSet {
- final ConcurrentLinkedHashMap map = ConcurrentLinkedHashMap.this;
-
- @Override
- public int size() {
- return map.size();
- }
-
- @Override
- public void clear() {
- map.clear();
- }
-
- @Override
- public Iterator iterator() {
- return new KeyIterator();
- }
-
- @Override
- public boolean contains(Object obj) {
- return containsKey(obj);
- }
-
- @Override
- public boolean remove(Object obj) {
- return (map.remove(obj) != null);
- }
-
- @Override
- public Object[] toArray() {
- return map.data.keySet().toArray();
- }
-
- @Override
- public T[] toArray(T[] array) {
- return map.data.keySet().toArray(array);
- }
- }
-
- /** An adapter to safely externalize the key iterator. */
- final class KeyIterator implements Iterator {
- final Iterator iterator = data.keySet().iterator();
- K current;
-
- @Override
- public boolean hasNext() {
- return iterator.hasNext();
- }
-
- @Override
- public K next() {
- current = iterator.next();
- return current;
- }
-
- @Override
- public void remove() {
- checkState(current != null);
- ConcurrentLinkedHashMap.this.remove(current);
- current = null;
- }
- }
-
- /** An adapter to safely externalize the values. */
- final class Values extends AbstractCollection {
-
- @Override
- public int size() {
- return ConcurrentLinkedHashMap.this.size();
- }
-
- @Override
- public void clear() {
- ConcurrentLinkedHashMap.this.clear();
- }
-
- @Override
- public Iterator iterator() {
- return new ValueIterator();
- }
-
- @Override
- public boolean contains(Object o) {
- return containsValue(o);
- }
- }
-
- /** An adapter to safely externalize the value iterator. */
- final class ValueIterator implements Iterator {
- final Iterator> iterator = data.values().iterator();
- Node current;
-
- @Override
- public boolean hasNext() {
- return iterator.hasNext();
- }
-
- @Override
- public V next() {
- current = iterator.next();
- return current.getValue();
- }
-
- @Override
- public void remove() {
- checkState(current != null);
- ConcurrentLinkedHashMap.this.remove(current.key);
- current = null;
- }
- }
-
- /** An adapter to safely externalize the entries. */
- final class EntrySet extends AbstractSet> {
- final ConcurrentLinkedHashMap map = ConcurrentLinkedHashMap.this;
-
- @Override
- public int size() {
- return map.size();
- }
-
- @Override
- public void clear() {
- map.clear();
- }
-
- @Override
- public Iterator> iterator() {
- return new EntryIterator();
- }
-
- @Override
- public boolean contains(Object obj) {
- if (!(obj instanceof Entry, ?>)) {
- return false;
- }
- Entry, ?> entry = (Entry, ?>) obj;
- Node node = map.data.get(entry.getKey());
- return (node != null) && (node.getValue().equals(entry.getValue()));
- }
-
- @Override
- public boolean add(Entry entry) {
- return (map.putIfAbsent(entry.getKey(), entry.getValue()) == null);
- }
-
- @Override
- public boolean remove(Object obj) {
- if (!(obj instanceof Entry, ?>)) {
- return false;
- }
- Entry, ?> entry = (Entry, ?>) obj;
- return map.remove(entry.getKey(), entry.getValue());
- }
- }
-
- /** An adapter to safely externalize the entry iterator. */
- final class EntryIterator implements Iterator> {
- final Iterator> iterator = data.values().iterator();
- Node current;
-
- @Override
- public boolean hasNext() {
- return iterator.hasNext();
- }
-
- @Override
- public Entry next() {
- current = iterator.next();
- return new WriteThroughEntry(current);
- }
-
- @Override
- public void remove() {
- checkState(current != null);
- ConcurrentLinkedHashMap.this.remove(current.key);
- current = null;
- }
- }
-
- /** An entry that allows updates to write through to the map. */
- final class WriteThroughEntry extends SimpleEntry {
- static final long serialVersionUID = 1;
-
- WriteThroughEntry(Node node) {
- super(node.key, node.getValue());
- }
-
- @Override
- public V setValue(V value) {
- put(getKey(), value);
- return super.setValue(value);
- }
-
- Object writeReplace() {
- return new SimpleEntry(this);
- }
- }
-
- /** A weigher that enforces that the weight falls within a valid range. */
- static final class BoundedEntryWeigher implements EntryWeigher, Serializable {
- static final long serialVersionUID = 1;
- final EntryWeigher super K, ? super V> weigher;
-
- BoundedEntryWeigher(EntryWeigher super K, ? super V> weigher) {
- checkNotNull(weigher);
- this.weigher = weigher;
- }
-
- @Override
- public int weightOf(K key, V value) {
- int weight = weigher.weightOf(key, value);
- checkArgument(weight >= 1);
- return weight;
- }
-
- Object writeReplace() {
- return weigher;
- }
- }
-
- /** A queue that discards all additions and is always empty. */
- static final class DiscardingQueue extends AbstractQueue