fix!: rename carScope to dagScope #21

Merged · 1 commit · May 18, 2023
44 changes: 35 additions & 9 deletions index.d.ts
@@ -30,10 +30,36 @@ export interface Network {
handle: (protocol: string | string[], handler: StreamHandler) => Promise<void>
}

export type CarScope = 'all'|'file'|'block'
/**
* Transmit the entire contiguous DAG that begins at the end of the path query,
* after blocks required to verify path segments.
*/
export type DagScopeAll = 'all'

export interface CarScopeOptions {
carScope?: CarScope
/**
* For queries that traverse UnixFS data, `entity` roughly means return blocks
* needed to verify the terminating element of the requested content path. For
* UnixFS, all the blocks needed to read an entire UnixFS file, or enumerate a
* UnixFS directory. For all queries that reference non-UnixFS data, `entity`
* is equivalent to `block`.
*/
export type DagScopeEntity = 'entity'

/**
* Only the root block at the end of the path is returned after blocks required
* to verify the specified path segments.
*/
export type DagScopeBlock = 'block'

/**
* Describes the shape of the DAG fetched at the terminus of the specified path
* whose blocks are returned after the blocks required to traverse path
* segments.
*/
export type DagScope = DagScopeAll | DagScopeEntity | DagScopeBlock

export interface DagScopeOptions {
dagScope?: DagScope
}

export interface IDagula {
@@ -42,9 +68,9 @@ export interface IDagula {
*/
get (cid: CID|string, options?: AbortOptions): AsyncIterableIterator<Block>
/**
* Get a DAG for a cid+path
* Get a DAG for a cid+path.
*/
getPath (cidPath: string, options?: AbortOptions & CarScopeOptions): AsyncIterableIterator<Block>
getPath (cidPath: string, options?: AbortOptions & DagScopeOptions): AsyncIterableIterator<Block>
/**
* Get a single block.
*/
@@ -54,7 +80,7 @@
*/
getUnixfs (path: CID|string, options?: AbortOptions): Promise<UnixFSEntry>
/**
* Emit nodes for all path segments and get UnixFS files and directories
* Emit nodes for all path segments and get UnixFS files and directories.
*/
walkUnixfsPath (path: CID|string, options?: AbortOptions): AsyncIterableIterator<UnixFSEntry>
}
@@ -66,9 +92,9 @@ export declare class Dagula implements IDagula {
*/
get (cid: CID|string, options?: AbortOptions): AsyncIterableIterator<Block>
/**
* Get a DAG for a cid+path
* Get a DAG for a cid+path.
*/
getPath (cidPath: string, options?: AbortOptions & CarScopeOptions): AsyncIterableIterator<Block>
getPath (cidPath: string, options?: AbortOptions & DagScopeOptions): AsyncIterableIterator<Block>
/**
* Get a single block.
*/
@@ -78,7 +104,7 @@
*/
getUnixfs (path: CID|string, options?: AbortOptions): Promise<UnixFSEntry>
/**
* Emit nodes for all path segments and get UnixFS files and directories
* Emit nodes for all path segments and get UnixFS files and directories.
*/
walkUnixfsPath (path: CID|string, options?: AbortOptions): AsyncIterableIterator<UnixFSEntry>
}
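For callers migrating across this breaking rename, here is a minimal sketch of the new option in use. It assumes the types above are exported from the dagula package root as declared in index.d.ts; the helper name and surrounding setup are hypothetical and not part of this diff. The block/entity/all vocabulary matches the IPFS trustless gateway `dag-scope` parameter, with 'entity' replacing the old 'file' scope.

```ts
import type { IDagula, DagScopeOptions } from 'dagula'

// Hypothetical helper: collect the blocks needed to enumerate a directory.
// Construction of the Dagula instance is elided; the test changes below show
// a libp2p-backed setup.
async function listDirectoryBlocks (dagula: IDagula, dirCidPath: string) {
  // Before this change: { carScope: 'file' }. After it: { dagScope: 'entity' }.
  const options: DagScopeOptions = { dagScope: 'entity' }
  const blocks = []
  for await (const block of dagula.getPath(dirCidPath, options)) {
    blocks.push(block)
  }
  return blocks
}
```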
16 changes: 8 additions & 8 deletions index.js
@@ -93,24 +93,24 @@ export class Dagula {

/**
* Yield all blocks traversed to resolve the ipfs path.
* Then use carScope to determine the set of blocks of the targeted dag to yield.
* Then use dagScope to determine the set of blocks of the targeted dag to yield.
* Yield all blocks by default.
* Use carScope: 'block' to yield the terminal block.
* Use carScope: 'file' to yield all the blocks of a unixfs file, or enough blocks to list a directory.
* Use dagScope: 'block' to yield the terminal block.
* Use dagScope: 'entity' to yield all the blocks of a unixfs file, or enough blocks to list a directory.
*
* @param {string} cidPath
* @param {object} [options]
* @param {AbortSignal} [options.signal]
* @param {'dfs'|'unk'} [options.order] Specify desired block ordering. `dfs` - Depth First Search, `unk` - unknown ordering.
* @param {'all'|'file'|'block'} [options.carScope] control how many layers of the dag are returned
* @param {import('./index').DagScope} [options.dagScope] control how many layers of the dag are returned
* 'all': return the entire dag starting at path. (default)
* 'block': return the block identified by the path.
* 'file': Mimic gateway semantics: Return All blocks for a multi-block file or just enough blocks to enumerate a dir/map but not the dir contents.
* 'entity': Mimic gateway semantics: Return All blocks for a multi-block file or just enough blocks to enumerate a dir/map but not the dir contents.
* Where path points to a single block file, all three selectors would return the same thing.
* where path points to a sharded hamt: 'file' returns the blocks of the hamt so the dir can be listed. 'block' returns the root block of the hamt.
*/
async * getPath (cidPath, options = {}) {
const carScope = options.carScope ?? 'all'
const dagScope = options.dagScope ?? 'all'

/**
* The resolved dag root at the terminus of the cidPath
@@ -146,15 +146,15 @@
traversed = []
}

if (carScope === 'all' || (carScope === 'file' && base.type !== 'directory')) {
if (dagScope === 'all' || (dagScope === 'entity' && base.type !== 'directory')) {
const links = getLinks(base, this.#decoders)
// fetch the entire dag rooted at the end of the provided path
if (links.length) {
yield * this.get(links, { signal: options.signal, order: options.order })
}
}
// non-files, like directories, and IPLD Maps only return blocks necessary for their enumeration
if (carScope === 'file' && base.type === 'directory') {
if (dagScope === 'entity' && base.type === 'directory') {
// the single block for the root has already been yielded.
// For a hamt we must fetch all the blocks of the (current) hamt.
if (base.unixfs.type === 'hamt-sharded-directory') {
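To illustrate the semantics documented in the getPath JSDoc above, a hedged sketch comparing what each scope yields; `cidsForScope`, its parameters, and the expectations in the comments are illustrative only, not part of this change.

```ts
import type { IDagula, DagScope } from 'dagula'

// Hypothetical helper: collect the CIDs yielded by getPath for a given scope.
async function cidsForScope (dagula: IDagula, cidPath: string, dagScope: DagScope) {
  const cids: string[] = []
  for await (const block of dagula.getPath(cidPath, { dagScope })) {
    cids.push(block.cid.toString())
  }
  return cids
}

// For a path ending at a multi-block UnixFS file one would expect:
//   'block'  -> the path blocks plus only the file's root block
//   'entity' -> the path blocks plus every block of the file
//   'all'    -> the entire DAG rooted at the path terminus (the default);
//               the same as 'entity' when the file is the whole DAG
```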
48 changes: 24 additions & 24 deletions test/getPath.test.js
@@ -150,7 +150,7 @@ test('should getPath through identity encoded dag-cbor', async t => {
t.deepEqual(blocks.at(1).bytes, fileNode.bytes)
})

test('should getPath on file with carScope=file', async t => {
test('should getPath on file with dagScope=entity', async t => {
// return all blocks in path and all blocks for resolved target of path
const filePart1 = await Block.decode({ codec: raw, bytes: fromString(`MORE TEST DATA ${Date.now()}`), hasher: sha256 })
const filePart2 = await Block.decode({ codec: raw, bytes: fromString(`EVEN MORE TEST DATA ${Date.now()}`), hasher: sha256 })
@@ -184,8 +184,8 @@ test('should getPath on file with carScope=file', async t => {
const dagula = await fromNetwork(libp2p, { peer: peer.libp2p.getMultiaddrs()[0] })

const blocks = []
const carScope = 'file'
for await (const entry of dagula.getPath(`${dirNode.cid}/foo`, { carScope })) {
const dagScope = 'entity'
for await (const entry of dagula.getPath(`${dirNode.cid}/foo`, { dagScope })) {
blocks.push(entry)
}
// did not try and return block for `other`
@@ -200,7 +200,7 @@ test('should getPath on large file with carScope=file, default ordering', async
t.deepEqual(blocks.at(3).bytes, filePart2.bytes)
})

test('should getPath on large file with carScope=file, default ordering', async t => {
test('should getPath on large file with dagScope=entity, default ordering', async t => {
// return all blocks in path and all blocks for resolved target of path
const filePart1 = await Block.decode({ codec: raw, bytes: fromString(`MORE TEST DATA ${Date.now()}`), hasher: sha256 })
const filePart2 = await Block.decode({ codec: raw, bytes: fromString(`EVEN MORE TEST DATA ${Date.now()}`), hasher: sha256 })
@@ -259,8 +259,8 @@ test('should getPath on large file with carScope=file, default ordering', async
const dagula = await fromNetwork(libp2p, { peer: peer.libp2p.getMultiaddrs()[0] })

const blocks = []
const carScope = 'file'
for await (const entry of dagula.getPath(`${dirNode.cid}/foo`, { carScope })) {
const dagScope = 'entity'
for await (const entry of dagula.getPath(`${dirNode.cid}/foo`, { dagScope })) {
blocks.push(entry)
}
// did not try and return block for `other`
@@ -283,7 +283,7 @@ test('should getPath on large file with carScope=file, dfs ordering', async t =>
t.deepEqual(blocks.at(7).bytes, filePart4.bytes)
})

test('should getPath on large file with carScope=file, dfs ordering', async t => {
test('should getPath on large file with dagScope=entity, dfs ordering', async t => {
// return all blocks in path and all blocks for resolved target of path
const filePart1 = await Block.decode({ codec: raw, bytes: fromString(`MORE TEST DATA ${Date.now()}`), hasher: sha256 })
const filePart2 = await Block.decode({ codec: raw, bytes: fromString(`EVEN MORE TEST DATA ${Date.now()}`), hasher: sha256 })
@@ -342,8 +342,8 @@ test('should getPath on large file with carScope=file, dfs ordering', async t =>
const dagula = await fromNetwork(libp2p, { peer: peer.libp2p.getMultiaddrs()[0] })

const blocks = []
const carScope = 'file'
for await (const entry of dagula.getPath(`${dirNode.cid}/foo`, { carScope, order: 'dfs' })) {
const dagScope = 'entity'
for await (const entry of dagula.getPath(`${dirNode.cid}/foo`, { dagScope, order: 'dfs' })) {
blocks.push(entry)
}
// did not try and return block for `other`
@@ -365,7 +365,7 @@ test('should getPath on large file with carScope=file, dfs ordering', async t =>
t.deepEqual(blocks.at(7).cid, filePart4.cid)
t.deepEqual(blocks.at(7).bytes, filePart4.bytes)
})
test('should getPath on file with carScope=block', async t => {
test('should getPath on file with dagScope=block', async t => {
// return all blocks in path and all blocks for resolved target of path
const filePart1 = await Block.decode({ codec: raw, bytes: fromString(`MORE TEST DATA ${Date.now()}`), hasher: sha256 })
const filePart2 = await Block.decode({ codec: raw, bytes: fromString(`EVEN MORE TEST DATA ${Date.now()}`), hasher: sha256 })
@@ -398,8 +398,8 @@ test('should getPath on file with carScope=block', async t => {
const libp2p = await getLibp2p()
const dagula = await fromNetwork(libp2p, { peer: peer.libp2p.getMultiaddrs()[0] })
const blocks = []
const carScope = 'block'
for await (const entry of dagula.getPath(`${dirNode.cid}/foo`, { carScope })) {
const dagScope = 'block'
for await (const entry of dagula.getPath(`${dirNode.cid}/foo`, { dagScope })) {
blocks.push(entry)
}
// did not try and return block for `other`
@@ -410,7 +410,7 @@ test('should getPath on file with carScope=block', async t => {
t.deepEqual(blocks.at(1).bytes, fileNode.bytes)
})

test('should getPath on dir with carScope=file', async t => {
test('should getPath on dir with dagScope=file', async t => {
// return all blocks in path. as it's a dir, it should stop there
const file = await Block.decode({ codec: raw, bytes: fromString(`MORE TEST DATA ${Date.now()}`), hasher: sha256 })

@@ -431,16 +431,16 @@ test('should getPath on dir with carScope=file', async t => {
const libp2p = await getLibp2p()
const dagula = await fromNetwork(libp2p, { peer: peer.libp2p.getMultiaddrs()[0] })
const blocks = []
for await (const entry of dagula.getPath(`${dirNode.cid}`, { carScope: 'file' })) {
for await (const entry of dagula.getPath(`${dirNode.cid}`, { dagScope: 'entity' })) {
blocks.push(entry)
}
// only return the dir if carScope=file and target is a dir
// only return the dir if dagScope=entity and target is a dir
t.is(blocks.length, 1)
t.deepEqual(blocks.at(0).cid, dirNode.cid)
t.deepEqual(blocks.at(0).bytes, dirNode.bytes)
})

test('should getPath to a hamt dir with carScope=file', async t => {
test('should getPath to a hamt dir with dagScope=entity', async t => {
const { readable, writable } = new TransformStream(undefined, UnixFS.withCapacity(1048576 * 32))
const writer = writable.getWriter()

@@ -459,16 +459,16 @@ test('should getPath to a hamt dir with carScope=file', async t => {
const libp2p = await getLibp2p()
const dagula = await fromNetwork(libp2p, { peer: peer.libp2p.getMultiaddrs()[0] })
const blocks = []
for await (const entry of dagula.getPath(`${dirLink.cid}`, { carScope: 'file' })) {
for await (const entry of dagula.getPath(`${dirLink.cid}`, { dagScope: 'entity' })) {
blocks.push(entry)
}

// only return the dir if carScope=file and target is a dir
// only return the dir if dagScope=entity and target is a dir
t.is(blocks.length, 1)
t.deepEqual(blocks.at(0).cid, dirLink.cid)
})

test('should getPath to a sharded hamt dir with carScope=file', async t => {
test('should getPath to a sharded hamt dir with dagScope=entity', async t => {
const { readable, writable } = new TransformStream(undefined, UnixFS.withCapacity(1048576 * 32))
const writer = writable.getWriter()

@@ -494,17 +494,17 @@ test('should getPath to a sharded hamt dir with carScope=file', async t => {
const libp2p = await getLibp2p()
const dagula = await fromNetwork(libp2p, { peer: peer.libp2p.getMultiaddrs()[0] })
const blocks = []
for await (const block of dagula.getPath(`${dirLink.cid}`, { carScope: 'file' })) {
for await (const block of dagula.getPath(`${dirLink.cid}`, { dagScope: 'entity' })) {
blocks.push(block)
}

// return only the dir if carScope=file and target is a dir. file block should be missing
// return only the dir if dagScope=entity and target is a dir. file block should be missing
t.is(blocks.length, allBlocks.length - 1, 'all blocks for sharded dir were included')
t.deepEqual(blocks[0].cid, dirLink.cid, 'first block is root of dir')
t.false(blocks.some(b => b.cid.toString() === fileLink.cid.toString()), 'linked file was not returned because carScope: file')
t.false(blocks.some(b => b.cid.toString() === fileLink.cid.toString()), 'linked file was not returned because dagScope: entity')
})

test('should getPath through sharded hamt dir with carScope=file', async t => {
test('should getPath through sharded hamt dir with dagScope=entity', async t => {
const { readable, writable } = new TransformStream(undefined, UnixFS.withCapacity(1048576 * 32))
const writer = writable.getWriter()

@@ -530,7 +530,7 @@ test('should getPath through sharded hamt dir with carScope=file', async t => {
const libp2p = await getLibp2p()
const dagula = await fromNetwork(libp2p, { peer: peer.libp2p.getMultiaddrs()[0] })
const blocks = []
for await (const block of dagula.getPath(`${dirLink.cid}/foo`, { carScope: 'file' })) {
for await (const block of dagula.getPath(`${dirLink.cid}/foo`, { dagScope: 'entity' })) {
blocks.push(block)
}
