
Commit

Merge pull request #537 from nscuro/issue-362
Track API key creation and last usage
stevespringett authored Jan 12, 2024
2 parents 5727c59 + 34d2d16 commit fe5de79
Showing 4 changed files with 220 additions and 1 deletion.
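For orientation, a minimal usage sketch (not part of this commit) of the new fields, assuming that the createApiKey and updateApiKey methods shown in the first hunk belong to AlpineQueryManager and that a Team instance named team is already available:

    try (final var qm = new AlpineQueryManager()) {
        // The commit populates "created" at key creation time.
        final ApiKey apiKey = qm.createApiKey(team);
        System.out.println("Created: " + apiKey.getCreated());
        // "lastUsed" remains null until the key authenticates a request
        // and the new ApiKeyUsageTracker flushes its event queue.
        System.out.println("Last used: " + apiKey.getLastUsed());
    }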
@@ -130,12 +130,21 @@ public ApiKey createApiKey(final Team team) {
pm.currentTransaction().begin();
final ApiKey apiKey = new ApiKey();
apiKey.setKey(ApiKeyGenerator.generate());
apiKey.setCreated(new Date());
apiKey.setTeams(teams);
pm.makePersistent(apiKey);
pm.currentTransaction().commit();
return pm.getObjectById(ApiKey.class, apiKey.getId());
}

public ApiKey updateApiKey(final ApiKey transientApiKey) {
pm.currentTransaction().begin();
final ApiKey apiKey = getObjectById(ApiKey.class, transientApiKey.getId());
apiKey.setComment(transientApiKey.getComment());
pm.currentTransaction().commit();
return pm.getObjectById(ApiKey.class, transientApiKey.getId());
}

/**
* Creates a new OidcUser object with the specified username.
* @param username The username of the new OidcUser. This must reference an
42 changes: 41 additions & 1 deletion alpine-model/src/main/java/alpine/model/ApiKey.java
@@ -37,6 +37,7 @@
import javax.validation.constraints.Size;
import java.io.Serializable;
import java.security.Principal;
import java.util.Date;
import java.util.List;

/**
@@ -65,6 +66,19 @@ public class ApiKey implements Serializable, Principal {
message = "The API key must contain only alpha, numeric and/or underscore characters")
private String key;

@Persistent
@Column(name = "COMMENT")
@Size(max = 255)
private String comment;

@Persistent
@Column(name = "CREATED")
private Date created;

@Persistent
@Column(name = "LAST_USED")
private Date lastUsed;

@Persistent(table = "APIKEYS_TEAMS", defaultFetchGroup = "true")
@Join(column = "APIKEY_ID")
@Element(column = "TEAM_ID")
@@ -91,6 +105,7 @@ public void setKey(String key) {
/**
* Masks all key characters except the prefix and last four characters with *. If the key does not have the
* currently configured prefix, do not return it.
*
* @return Masked key.
*/
public String getMaskedKey() {
@@ -109,15 +124,40 @@ public String getMaskedKey() {

/**
* Do not use - only here to satisfy Principal implementation requirement.
* @deprecated use {@link UserPrincipal#getUsername()}
*
* @return a String presentation of the username
* @deprecated use {@link UserPrincipal#getUsername()}
*/
@Deprecated
@JsonIgnore
public String getName() {
return getMaskedKey();
}

public String getComment() {
return comment;
}

public void setComment(final String comment) {
this.comment = comment;
}

public Date getCreated() {
return created;
}

public void setCreated(final Date created) {
this.created = created;
}

public Date getLastUsed() {
return lastUsed;
}

public void setLastUsed(final Date lastUsed) {
this.lastUsed = lastUsed;
}

public List<Team> getTeams() {
return teams;
}
@@ -0,0 +1,166 @@
/*
* This file is part of Alpine.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* SPDX-License-Identifier: Apache-2.0
* Copyright (c) Steve Springett. All Rights Reserved.
*/
package alpine.server.filters;

import alpine.common.logging.Logger;
import alpine.event.framework.LoggableUncaughtExceptionHandler;
import alpine.model.ApiKey;
import alpine.persistence.AlpineQueryManager;
import org.apache.commons.lang3.concurrent.BasicThreadFactory;
import org.glassfish.jersey.server.monitoring.ApplicationEvent;
import org.glassfish.jersey.server.monitoring.ApplicationEventListener;
import org.glassfish.jersey.server.monitoring.RequestEvent;
import org.glassfish.jersey.server.monitoring.RequestEventListener;

import javax.jdo.PersistenceManager;
import javax.jdo.PersistenceManagerFactory;
import javax.jdo.datastore.JDOConnection;
import javax.ws.rs.ext.Provider;
import java.sql.Connection;
import java.sql.PreparedStatement;
import java.sql.SQLException;
import java.sql.Timestamp;
import java.time.Instant;
import java.util.HashMap;
import java.util.Map;
import java.util.concurrent.ArrayBlockingQueue;
import java.util.concurrent.BlockingQueue;
import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.locks.Lock;
import java.util.concurrent.locks.ReentrantLock;

@Provider
public class ApiKeyUsageTracker implements ApplicationEventListener {

private record ApiKeyUsedEvent(long keyId, long timestamp) {
}

private static final Logger LOGGER = Logger.getLogger(ApiKeyUsageTracker.class);
private static final BlockingQueue<ApiKeyUsedEvent> EVENT_QUEUE = new ArrayBlockingQueue<>(10_000);

private final ScheduledExecutorService flushExecutor;
private final Lock flushLock;

public ApiKeyUsageTracker() {
final var threadFactory = new BasicThreadFactory.Builder()
.uncaughtExceptionHandler(new LoggableUncaughtExceptionHandler())
.namingPattern("Alpine-ApiKeyUsageTracker-%d")
.build();
this.flushExecutor = Executors.newSingleThreadScheduledExecutor(threadFactory);
this.flushLock = new ReentrantLock();
}

@Override
public void onEvent(final ApplicationEvent event) {
switch (event.getType()) {
case INITIALIZATION_FINISHED -> flushExecutor.scheduleAtFixedRate(this::flush, 5, 30, TimeUnit.SECONDS);
case DESTROY_FINISHED -> {
flushExecutor.shutdown();
try {
final boolean terminated = flushExecutor.awaitTermination(5, TimeUnit.SECONDS);
if (!terminated) {
LOGGER.warn("""
Flush executor did not terminate on time (waited for 5s); \
Remaining events in the queue: %d""".formatted(EVENT_QUEUE.size()));
}
} catch (InterruptedException e) {
LOGGER.warn("Interrupted while waiting for pending flush tasks to complete");
Thread.currentThread().interrupt();
}

flush();
}
}
}

@Override
public RequestEventListener onRequest(final RequestEvent requestEvent) {
return null;
}

static void onApiKeyUsed(final ApiKey apiKey) {
final var event = new ApiKeyUsedEvent(apiKey.getId(), Instant.now().toEpochMilli());
if (!EVENT_QUEUE.offer(event)) {
// Prefer lost events over blocking when the queue is saturated.
// We do not want to add additional latency to requests.
LOGGER.debug("Usage of API key %s can not be tracked because the event queue is already saturated"
.formatted(apiKey.getMaskedKey()));
}
}

private void flush() {
try {
flushLock.lock();
if (EVENT_QUEUE.isEmpty()) {
return;
}

final var lastUsedByKeyId = new HashMap<Long, Long>();
while (EVENT_QUEUE.peek() != null) {
final ApiKeyUsedEvent event = EVENT_QUEUE.poll();
lastUsedByKeyId.compute(event.keyId(), (ignored, prev) -> {
if (prev == null) {
return event.timestamp();
}

return Math.max(prev, event.timestamp());
});
}

LOGGER.debug("Updating last used timestamps for %d API keys".formatted(lastUsedByKeyId.size()));
updateLastUsed(lastUsedByKeyId);
} catch (Exception e) {
LOGGER.error("Failed to update last used timestamps of API keys", e);
} finally {
flushLock.unlock();
}
}

private void updateLastUsed(final Map<Long, Long> lastUsedByKeyId) throws SQLException {
try (final var qm = new AlpineQueryManager()) {
final PersistenceManager pm = qm.getPersistenceManager();
final var jdoConnection = (JDOConnection) pm.getDataStoreConnection();
final var connection = (Connection) jdoConnection.getNativeConnection();
try (final PreparedStatement ps = connection.prepareStatement("""
UPDATE "APIKEY" SET "LAST_USED" = ?
WHERE "ID" = ? AND ("LAST_USED" IS NULL OR "LAST_USED" < ?)
""")) {
for (final Map.Entry<Long, Long> entry : lastUsedByKeyId.entrySet()) {
final var lastUsed = new Timestamp(entry.getValue());
ps.setTimestamp(1, lastUsed);
ps.setLong(2, entry.getKey());
ps.setTimestamp(3, lastUsed);
ps.addBatch();
}

ps.executeBatch();
} finally {
jdoConnection.close();
}

// Evict ApiKey objects from L2 cache.
// DataNucleus does the same when using the bulk UPDATE feature ¯\_(ツ)_/¯
final PersistenceManagerFactory pmf = pm.getPersistenceManagerFactory();
pmf.getDataStoreCache().evictAll(false, ApiKey.class);
}
}

}
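The listener above is annotated with @Provider, so Jersey can discover it through classpath scanning. If an application registers its providers explicitly instead (an assumption about the deployment, not something this commit shows), a minimal sketch would be:

    import org.glassfish.jersey.server.ResourceConfig;

    final ResourceConfig config = new ResourceConfig();
    // Registers the ApplicationEventListener with Jersey explicitly.
    config.register(ApiKeyUsageTracker.class);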
@@ -19,6 +19,7 @@
package alpine.server.filters;

import alpine.common.logging.Logger;
import alpine.model.ApiKey;
import alpine.server.auth.ApiKeyAuthenticationService;
import alpine.server.auth.JwtAuthenticationService;
import org.glassfish.jersey.server.ContainerRequest;
@@ -66,6 +67,9 @@ public void filter(ContainerRequestContext requestContext) {
if (apiKeyAuthService.isSpecified()) {
try {
principal = apiKeyAuthService.authenticate();
if (principal instanceof final ApiKey apiKey) {
ApiKeyUsageTracker.onApiKeyUsed(apiKey);
}
} catch (AuthenticationException e) {
LOGGER.info(SecurityMarkers.SECURITY_FAILURE, "Invalid API key asserted");
requestContext.abortWith(Response.status(Response.Status.UNAUTHORIZED).build());
