New API for iterating DiskLruCache contents.
For #853
squarejesse committed Dec 29, 2014
1 parent f1a27df commit 755bd91
Showing 2 changed files with 201 additions and 27 deletions.
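Before the diff itself, here is a minimal usage sketch of the new snapshots() API, pieced together from the tests and implementation below. It is not part of the commit; the class and method names and the choice of value index 0 are illustrative assumptions.

import com.squareup.okhttp.internal.DiskLruCache;
import java.io.IOException;
import java.util.Iterator;

public final class SnapshotsTour {
  /**
   * Prints every key alongside its first value. Each Snapshot holds open
   * streams for the entry's values, so it is closed once it has been read.
   */
  static void printContents(DiskLruCache cache) throws IOException {
    Iterator<DiskLruCache.Snapshot> iterator = cache.snapshots();
    while (iterator.hasNext()) {
      DiskLruCache.Snapshot snapshot = iterator.next();
      try {
        System.out.println(snapshot.key() + " = " + snapshot.getString(0));
      } finally {
        snapshot.close();
      }
    }
  }
}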
@@ -27,7 +27,9 @@
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Deque;
import java.util.Iterator;
import java.util.List;
import java.util.NoSuchElementException;
import java.util.concurrent.Executor;
import okio.BufferedSink;
import okio.BufferedSource;
@@ -949,6 +951,108 @@ private void createNewCacheWithSize(int maxSize) throws IOException {
a.close();
}

@Test public void iterator() throws Exception {
set("a", "a1", "a2");
set("b", "b1", "b2");
set("c", "c1", "c2");
Iterator<DiskLruCache.Snapshot> iterator = cache.snapshots();

assertTrue(iterator.hasNext());
DiskLruCache.Snapshot a = iterator.next();
assertEquals("a", a.key());
assertEquals("a1", a.getString(0));
assertEquals("a2", a.getString(1));

assertTrue(iterator.hasNext());
DiskLruCache.Snapshot b = iterator.next();
assertEquals("b", b.key());
assertEquals("b1", b.getString(0));
assertEquals("b2", b.getString(1));

assertTrue(iterator.hasNext());
DiskLruCache.Snapshot c = iterator.next();
assertEquals("c", c.key());
assertEquals("c1", c.getString(0));
assertEquals("c2", c.getString(1));

assertFalse(iterator.hasNext());
try {
iterator.next();
fail();
} catch (NoSuchElementException expected) {
}
}

@Test public void iteratorElementsAddedDuringIterationAreOmitted() throws Exception {
set("a", "a1", "a2");
set("b", "b1", "b2");
Iterator<DiskLruCache.Snapshot> iterator = cache.snapshots();

assertEquals("a", iterator.next().key());

set("c", "c1", "c2");

assertEquals("b", iterator.next().key());
assertFalse(iterator.hasNext());
}

@Test public void iteratorElementsUpdatedDuringIterationAreUpdated() throws Exception {
set("a", "a1", "a2");
set("b", "b1", "b2");
Iterator<DiskLruCache.Snapshot> iterator = cache.snapshots();

assertEquals("a", iterator.next().key());

set("b", "b3", "b4");

DiskLruCache.Snapshot b = iterator.next();
assertEquals("b", b.key());
assertEquals("b3", b.getString(0));
assertEquals("b4", b.getString(1));
}

@Test public void iteratorElementsRemovedDuringIterationAreOmitted() throws Exception {
set("a", "a1", "a2");
set("b", "b1", "b2");
Iterator<DiskLruCache.Snapshot> iterator = cache.snapshots();

cache.remove("b");

assertEquals("a", iterator.next().key());

assertFalse(iterator.hasNext());
}

@Test public void iteratorRemove() throws Exception {
set("a", "a1", "a2");
Iterator<DiskLruCache.Snapshot> iterator = cache.snapshots();
iterator.next();
iterator.remove();
assertEquals(null, cache.get("a"));
}

@Test public void iteratorRemoveBeforeNext() throws Exception {
set("a", "a1", "a2");
Iterator<DiskLruCache.Snapshot> iterator = cache.snapshots();
try {
iterator.remove();
fail();
} catch (IllegalStateException expected) {
}
}

@Test public void iteratorRemoveOncePerCallToNext() throws Exception {
set("a", "a1", "a2");
Iterator<DiskLruCache.Snapshot> iterator = cache.snapshots();
iterator.next();
iterator.remove();
try {
iterator.remove();
fail();
} catch (IllegalStateException expected) {
}
}

private void assertJournalEquals(String... expectedBodyLines) throws Exception {
List<String> expectedLines = new ArrayList<>();
expectedLines.add(MAGIC);
124 changes: 97 additions & 27 deletions okhttp/src/main/java/com/squareup/okhttp/internal/DiskLruCache.java
@@ -21,9 +21,11 @@
import java.io.File;
import java.io.FileNotFoundException;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Iterator;
import java.util.LinkedHashMap;
import java.util.NoSuchElementException;
import java.util.concurrent.Executor;
import java.util.concurrent.LinkedBlockingQueue;
import java.util.concurrent.ThreadPoolExecutor;
@@ -411,41 +413,18 @@ public synchronized Snapshot get(String key) throws IOException {
checkNotClosed();
validateKey(key);
Entry entry = lruEntries.get(key);
-    if (entry == null) {
-      return null;
-    }
-
-    if (!entry.readable) {
-      return null;
-    }
-
-    // Open all streams eagerly to guarantee that we see a single published
-    // snapshot. If we opened streams lazily then the streams could come
-    // from different edits.
-    Source[] sources = new Source[valueCount];
-    try {
-      for (int i = 0; i < valueCount; i++) {
-        sources[i] = Okio.source(entry.cleanFiles[i]);
-      }
-    } catch (FileNotFoundException e) {
-      // A file must have been deleted manually!
-      for (int i = 0; i < valueCount; i++) {
-        if (sources[i] != null) {
-          Util.closeQuietly(sources[i]);
-        } else {
-          break;
-        }
-      }
-      return null;
-    }
+    if (entry == null || !entry.readable) return null;
+
+    Snapshot snapshot = entry.snapshot();
+    if (snapshot == null) return null;

    redundantOpCount++;
    journalWriter.writeUtf8(READ).writeByte(' ').writeUtf8(key).writeByte('\n');
    if (journalRebuildRequired()) {
      executor.execute(cleanupRunnable);
    }

-    return new Snapshot(key, entry.sequenceNumber, sources, entry.lengths);
+    return snapshot;
}

/**
@@ -697,6 +676,66 @@ private static String sourceToString(Source in) throws IOException {
}
}

/**
* Returns an iterator over the cache's current entries. This iterator doesn't throw {@code
* ConcurrentModificationException}, but if new entries are added while iterating, those new
* entries will not be returned by the iterator. If existing entries are removed during iteration,
* they will be absent (unless they were already returned).
*
* <p>If there are I/O problems during iteration, this iterator fails silently. For example, if
* the hosting filesystem becomes unreachable, the iterator will omit elements rather than
* throwing exceptions.
*
* <p>The returned iterator supports {@link Iterator#remove}.
*/
public synchronized Iterator<Snapshot> snapshots() {
return new Iterator<Snapshot>() {
/** Iterate a copy of the entries to defend against concurrent modification errors. */
final Iterator<Entry> delegate = new ArrayList<>(lruEntries.values()).iterator();

/** The snapshot to return from {@link #next}. Null if we haven't computed that yet. */
Snapshot nextSnapshot;

/** The snapshot to remove with {@link #remove}. Null if removal is illegal. */
Snapshot removeSnapshot;

@Override public boolean hasNext() {
if (nextSnapshot != null) return true;

synchronized (DiskLruCache.this) {
while (delegate.hasNext()) {
Entry entry = delegate.next();
Snapshot snapshot = entry.snapshot();
if (snapshot == null) continue; // Evicted since we copied the entries.
nextSnapshot = snapshot;
return true;
}
}

return false;
}

@Override public Snapshot next() {
if (!hasNext()) throw new NoSuchElementException();
removeSnapshot = nextSnapshot;
nextSnapshot = null;
return removeSnapshot;
}

@Override public void remove() {
if (removeSnapshot == null) throw new IllegalStateException("remove() before next()");
try {
DiskLruCache.this.remove(removeSnapshot.key);
} catch (IOException ignored) {
// Nothing useful to do here. We failed to remove from the cache. Most likely that's
// because we couldn't update the journal, but the cached entry will still be gone.
} finally {
removeSnapshot = null;
}
}
};
}

/** A snapshot of the values for an entry. */
public final class Snapshot implements Closeable {
private final String key;
@@ -711,6 +750,10 @@ private Snapshot(String key, long sequenceNumber, Source[] sources, long[] lengths)
this.lengths = lengths;
}

public String key() {
return key;
}

/**
* Returns an editor for this snapshot's entry, or null if either the
* entry has changed since this snapshot was created or if another edit
@@ -974,5 +1017,32 @@ void writeLengths(BufferedSink writer) throws IOException {
private IOException invalidLengths(String[] strings) throws IOException {
throw new IOException("unexpected journal line: " + Arrays.toString(strings));
}

/**
* Returns a snapshot of this entry. This opens all streams eagerly to guarantee that we see a
* single published snapshot. If we opened streams lazily then the streams could come from
* different edits.
*/
Snapshot snapshot() {
if (!Thread.holdsLock(DiskLruCache.this)) throw new AssertionError();

Source[] sources = new Source[valueCount];
try {
for (int i = 0; i < valueCount; i++) {
sources[i] = Okio.source(cleanFiles[i]);
}
return new Snapshot(key, sequenceNumber, sources, lengths);
} catch (FileNotFoundException e) {
// A file must have been deleted manually!
for (int i = 0; i < valueCount; i++) {
if (sources[i] != null) {
Util.closeQuietly(sources[i]);
} else {
break;
}
}
return null;
}
}
}
}
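To round out the picture, a second sketch showing the Iterator.remove support that the snapshots() Javadoc promises, again not part of the commit. The EvictByPrefix helper and its prefix test are assumptions; snapshots(), key(), remove(), and Snapshot's Closeable contract come from the code above.

import com.squareup.okhttp.internal.DiskLruCache;
import java.io.IOException;
import java.util.Iterator;

public final class EvictByPrefix {
  /** Removes every cache entry whose key starts with the given prefix. */
  static void evict(DiskLruCache cache, String prefix) throws IOException {
    Iterator<DiskLruCache.Snapshot> iterator = cache.snapshots();
    while (iterator.hasNext()) {
      DiskLruCache.Snapshot snapshot = iterator.next();
      try {
        if (snapshot.key().startsWith(prefix)) {
          // Removes the entry that the last call to next() returned.
          iterator.remove();
        }
      } finally {
        snapshot.close();
      }
    }
  }
}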
