Commit

Added a missing unlock in getDestinationMetadata() (#2484) (#2492)
* Added a missing unlock

* Clean up locks as per PR review

* Added test

* Resolve CodeQL error

* Extend timeout time

* Removing test to narrow down on pipeline failures

* Adding a clear to see if this fixes prior errors

* PR review changes

* Address PR comments

* More

Co-authored-by: Jeff Wasty <v-jeffwasty@microsoft.com>
tkyc and Jeffery-Wasty authored Aug 22, 2024
1 parent fbd401c commit 64d530e
Showing 2 changed files with 88 additions and 7 deletions.
@@ -1742,10 +1742,10 @@ private void getDestinationMetadata() throws SQLServerException {
         if (null == destColumnMetadata || destColumnMetadata.isEmpty()) {
             if (connection.getcacheBulkCopyMetadata()) {
                 DESTINATION_COL_METADATA_LOCK.lock();
-                destColumnMetadata = BULK_COPY_OPERATION_CACHE.get(key);
+                try {
+                    destColumnMetadata = BULK_COPY_OPERATION_CACHE.get(key);

-                if (null == destColumnMetadata || destColumnMetadata.isEmpty()) {
-                    try {
+                    if (null == destColumnMetadata || destColumnMetadata.isEmpty()) {
                         setDestinationColumnMetadata(escapedDestinationTableName);

                         // We are caching the following metadata about the table:
@@ -1759,9 +1759,9 @@ private void getDestinationMetadata() throws SQLServerException {
                         // scenario, we can't detect this without making an additional metadata query, which would
                         // defeat the purpose of caching.
                         BULK_COPY_OPERATION_CACHE.put(key, destColumnMetadata);
-                    } finally {
-                        DESTINATION_COL_METADATA_LOCK.unlock();
                     }
+                } finally {
+                    DESTINATION_COL_METADATA_LOCK.unlock();
                 }

                 if (loggerExternal.isLoggable(Level.FINER)) {
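Why the fix matters: the old code acquired DESTINATION_COL_METADATA_LOCK before the cache lookup, but the only unlock sat in a finally block inside the cache-miss branch. On a cache hit the method returned with the lock still held, so every subsequent bulk-copy metadata lookup blocked forever. The new code performs the lookup inside a try whose finally always releases the lock. A minimal sketch of the corrected pattern, using illustrative names (MetadataCacheSketch, loadMetadata) rather than the driver's internals:

import java.util.HashMap;
import java.util.Map;
import java.util.concurrent.locks.ReentrantLock;

// Sketch only: CACHE, LOCK, getMetadata(), and loadMetadata() are
// stand-ins for the driver's internals, not its real API.
public class MetadataCacheSketch {
    private static final Map<String, Object> CACHE = new HashMap<>();
    private static final ReentrantLock LOCK = new ReentrantLock();

    static Object getMetadata(String key) {
        LOCK.lock();
        try {
            // The lookup now happens inside the try, so the finally below
            // releases the lock on every path: cache hit, miss, or exception.
            Object metadata = CACHE.get(key);
            if (metadata == null) {
                metadata = loadMetadata(key); // the expensive metadata query
                CACHE.put(key, metadata);
            }
            return metadata;
        } finally {
            LOCK.unlock(); // before the fix, a cache hit skipped this
        }
    }

    private static Object loadMetadata(String key) {
        return new Object(); // placeholder for querying the destination table
    }
}

The second changed file adds a regression test that exercises this code path from several threads at once: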
@@ -7,8 +7,8 @@
 import static org.junit.Assert.assertNotNull;
 import static org.junit.Assume.assumeTrue;
 import static org.junit.jupiter.api.Assertions.assertArrayEquals;
-import static org.junit.jupiter.api.Assertions.assertTrue;
 import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertTrue;
 import static org.junit.jupiter.api.Assertions.fail;

 import java.lang.reflect.Field;
@@ -27,8 +27,12 @@
 import java.util.Calendar;
 import java.util.HashMap;
 import java.util.TimeZone;
+import java.util.Timer;
+import java.util.TimerTask;
+import java.util.concurrent.CountDownLatch;
+import java.util.concurrent.ExecutorService;
+import java.util.concurrent.Executors;

-import com.microsoft.sqlserver.jdbc.SQLServerPreparedStatement;
 import org.junit.jupiter.api.AfterAll;
 import org.junit.jupiter.api.BeforeAll;
 import org.junit.jupiter.api.Tag;
@@ -39,6 +43,7 @@

 import com.microsoft.sqlserver.jdbc.RandomUtil;
 import com.microsoft.sqlserver.jdbc.SQLServerConnection;
+import com.microsoft.sqlserver.jdbc.SQLServerPreparedStatement;
 import com.microsoft.sqlserver.jdbc.SQLServerStatement;
 import com.microsoft.sqlserver.jdbc.TestResource;
 import com.microsoft.sqlserver.jdbc.TestUtils;
@@ -206,6 +211,82 @@ public void testSqlServerBulkCopyCachingConnectionLevel() throws Exception {
         }
     }

+    @Test
+    public void testSqlServerBulkCopyCachingConnectionLevelMultiThreaded() throws Exception {
+        // Needs to be on a JDK version greater than 8
+        assumeTrue(TestUtils.getJVMVersion() > 8);
+
+        Calendar gmtCal = Calendar.getInstance(TimeZone.getTimeZone("GMT"));
+        long ms = 1578743412000L;
+        long timeOut = 30000;
+        int NUMBER_SIMULTANEOUS_INSERTS = 5;
+
+        try (SQLServerConnection con = (SQLServerConnection) DriverManager.getConnection(connectionString
+                + ";useBulkCopyForBatchInsert=true;cacheBulkCopyMetadata=true;sendTemporalDataTypesAsStringForBulkCopy=false;");
+                Statement stmt = con.createStatement()) {
+
+            TestUtils.dropTableIfExists(timestampTable1, stmt);
+            String createSqlTable1 = "CREATE TABLE " + timestampTable1 + " (c1 DATETIME2(3))";
+            stmt.execute(createSqlTable1);
+
+            Field bulkcopyMetadataCacheField;
+
+            if (con.getClass().getName().equals("com.microsoft.sqlserver.jdbc.SQLServerConnection43")) {
+                bulkcopyMetadataCacheField = con.getClass().getSuperclass()
+                        .getDeclaredField("BULK_COPY_OPERATION_CACHE");
+            } else {
+                bulkcopyMetadataCacheField = con.getClass().getDeclaredField("BULK_COPY_OPERATION_CACHE");
+            }
+
+            bulkcopyMetadataCacheField.setAccessible(true);
+            Object bulkcopyCache = bulkcopyMetadataCacheField.get(con);
+
+            ((HashMap<?, ?>) bulkcopyCache).clear();
+
+            TimerTask task = new TimerTask() {
+                public void run() {
+                    ((HashMap<?, ?>) bulkcopyCache).clear();
+                    fail(TestResource.getResource("R_executionTooLong"));
+                }
+            };
+            Timer timer = new Timer("Timer");
+            timer.schedule(task, timeOut); // Run a timer to help us exit if we get deadlocked
+
+            final CountDownLatch countDownLatch = new CountDownLatch(NUMBER_SIMULTANEOUS_INSERTS);
+            Runnable runnable = () -> {
+                try {
+                    for (int i = 0; i < 5; ++i) {
+                        PreparedStatement preparedStatement = con
+                                .prepareStatement("INSERT INTO " + timestampTable1 + " VALUES(?)");
+                        Timestamp timestamp = new Timestamp(ms);
+                        preparedStatement.setTimestamp(1, timestamp, gmtCal);
+                        preparedStatement.addBatch();
+                        preparedStatement.executeBatch();
+                    }
+                    countDownLatch.countDown();
+                    countDownLatch.await();
+                } catch (Exception e) {
+                    fail(TestResource.getResource("R_unexpectedException") + e.getMessage());
+                } finally {
+                    ((HashMap<?, ?>) bulkcopyCache).clear();
+                }
+            };
+
+            ExecutorService executor = Executors.newFixedThreadPool(NUMBER_SIMULTANEOUS_INSERTS);
+
+            try {
+                for (int i = 0; i < NUMBER_SIMULTANEOUS_INSERTS; i++) {
+                    executor.submit(runnable);
+                }
+                executor.shutdown();
+            } catch (Exception e) {
+                fail(TestResource.getResource("R_unexpectedException") + e.getMessage());
+            } finally {
+                ((HashMap<?, ?>) bulkcopyCache).clear();
+            }
+        }
+    }
+
     @Test
     public void testValidTimezoneForTimestampBatchInsertWithBulkCopy() throws Exception {
         Calendar gmtCal = Calendar.getInstance(TimeZone.getTimeZone("GMT"));
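The test leans on a watchdog idiom worth noting: worker threads rendezvous on a CountDownLatch, while a Timer fails the run after a timeout instead of letting a leaked lock hang the build. A standalone sketch of that idiom, with hypothetical names and simulated work standing in for the batched JDBC inserts:

import java.util.Timer;
import java.util.TimerTask;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;

// Sketch only: the thread count, timeout, and doContendedWork() are
// illustrative; the real test runs batched inserts over one connection.
public class WatchdogSketch {
    public static void main(String[] args) throws InterruptedException {
        final int threads = 5;
        final CountDownLatch latch = new CountDownLatch(threads);

        // If the workers deadlock, fail fast instead of hanging forever.
        Timer watchdog = new Timer("watchdog");
        watchdog.schedule(new TimerTask() {
            public void run() {
                System.err.println("Timed out; worker threads are likely deadlocked");
                System.exit(1);
            }
        }, 30_000);

        ExecutorService executor = Executors.newFixedThreadPool(threads);
        for (int i = 0; i < threads; i++) {
            executor.submit(() -> {
                doContendedWork(); // e.g. a lookup guarded by a shared lock
                latch.countDown();
                try {
                    latch.await(); // rendezvous: wait for every worker
                } catch (InterruptedException e) {
                    Thread.currentThread().interrupt();
                }
            });
        }
        executor.shutdown();

        latch.await();     // all workers got through the contended section
        watchdog.cancel(); // no deadlock: stop the failure timer
    }

    private static void doContendedWork() {
        // placeholder for work that contends on a shared lock
    }
}

With the lock leak present, the second thread to reach the metadata lookup would block indefinitely and the timer would fail the test; with the fix, all five threads complete and the latch releases.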
