From 89c3602ff651b19710b06becc686d0a0664d3c38 Mon Sep 17 00:00:00 2001 From: David Dias Date: Fri, 21 Oct 2016 10:03:04 +0100 Subject: [PATCH] feat: migrate importer to use IPLD Resolver and the new IPLD format async interface --- README.md | 8 + package.json | 20 ++- src/chunker-fixed-size.js | 7 - src/chunker/fixed-size.js | 7 + src/{exporters => exporter}/dir.js | 7 +- src/{exporters => exporter}/file.js | 5 +- src/{exporter.js => exporter/index.js} | 19 ++- src/importer.js | 155 ------------------ src/importer/flush-tree.js | 167 +++++++++++++++++++ src/importer/index.js | 217 +++++++++++++++++++++++++ src/index.js | 4 +- src/tree.js | 130 --------------- test/test-exporter.js | 39 +++-- test/test-fixed-size-chunker.js | 2 +- test/test-importer.js | 24 +-- 15 files changed, 465 insertions(+), 346 deletions(-) delete mode 100644 src/chunker-fixed-size.js create mode 100644 src/chunker/fixed-size.js rename src/{exporters => exporter}/dir.js (81%) rename src/{exporters => exporter}/file.js (83%) rename src/{exporter.js => exporter/index.js} (54%) delete mode 100644 src/importer.js create mode 100644 src/importer/flush-tree.js create mode 100644 src/importer/index.js delete mode 100644 src/tree.js diff --git a/README.md b/README.md index 27e91107..19592aed 100644 --- a/README.md +++ b/README.md @@ -25,6 +25,14 @@ IPFS unixFS Engine - [Contribute](#contribute) - [License](#license) +## BEWARE BEWARE BEWARE there might be 🐉 + +This module has passed through several iterations and still is far from a nice and easy understandable codebase. Currently missing features: + +- tar importer +- trickle dag exporter +- sharding + ## Install With [npm](https://npmjs.org/) installed, run diff --git a/package.json b/package.json index 887a1e91..6a6b1510 100644 --- a/package.json +++ b/package.json @@ -34,31 +34,33 @@ }, "homepage": "https://github.com/ipfs/js-ipfs-unixfs-engineg#readme", "devDependencies": { - "aegir": "^8.0.1", + "aegir": "^8.1.2", "buffer-loader": "0.0.1", "chai": "^3.5.0", - "fs-pull-blob-store": "^0.3.0", + "fs-pull-blob-store": "^0.4.1", "idb-pull-blob-store": "^0.5.1", - "ipfs-block-service": "^0.5.0", - "ipfs-repo": "^0.9.0", + "ipfs-block-service": "^0.6.0", + "ipfs-repo": "^0.10.0", "ncp": "^2.0.0", "pre-commit": "^1.1.3", - "pull-zip": "^2.0.0", + "pull-zip": "^2.0.1", "raw-loader": "^0.5.1", "rimraf": "^2.5.4", "run-series": "^1.1.4" }, "dependencies": { - "ipfs-merkle-dag": "^0.7.0", + "cids": "^0.2.0", "ipfs-unixfs": "^0.1.4", - "is-ipfs": "^0.2.0", + "ipld-dag-pb": "^0.1.3", + "ipld-resolver": "^0.1.1", + "is-ipfs": "^0.2.1", "multihashes": "^0.2.2", "pull-block": "^1.0.2", - "pull-paramap": "^1.1.6", + "pull-paramap": "^1.2.0", "pull-pushable": "^2.0.1", "pull-stream": "^3.4.5", "pull-traverse": "^1.0.3", - "pull-write": "^1.1.0", + "pull-write": "^1.1.1", "run-parallel": "^1.1.6" }, "contributors": [ diff --git a/src/chunker-fixed-size.js b/src/chunker-fixed-size.js deleted file mode 100644 index 2d0663ad..00000000 --- a/src/chunker-fixed-size.js +++ /dev/null @@ -1,7 +0,0 @@ -'use strict' - -const block = require('pull-block') - -exports = module.exports = function (size) { - return block(size, {zeroPadding: false}) -} diff --git a/src/chunker/fixed-size.js b/src/chunker/fixed-size.js new file mode 100644 index 00000000..0533a389 --- /dev/null +++ b/src/chunker/fixed-size.js @@ -0,0 +1,7 @@ +'use strict' + +const pullBlock = require('pull-block') + +module.exports = (size) => { + return pullBlock(size, { zeroPadding: false }) +} diff --git a/src/exporters/dir.js 
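For reference, the new `src/chunker/fixed-size.js` introduced above is a thin wrapper around `pull-block` with zero-padding disabled. Below is a minimal sketch (not part of the patch) of how that chunker slices a stream of buffers into fixed-size blocks; the 600000-byte input buffer is an illustrative assumption.

```js
'use strict'

const pull = require('pull-stream')
const pullBlock = require('pull-block')

// Same shape as src/chunker/fixed-size.js: re-chunk a binary stream into
// `size`-byte blocks, leaving the final block unpadded.
const fixedSizeChunker = (size) => pullBlock(size, { zeroPadding: false })

pull(
  pull.values([Buffer.alloc(600000, 'a')]), // illustrative input
  fixedSizeChunker(262144),                 // CHUNK_SIZE used by the importer
  pull.collect((err, chunks) => {
    if (err) { throw err }
    // Expect two full 262144-byte chunks plus a shorter trailing chunk
    console.log(chunks.map((chunk) => chunk.length))
  })
)
```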
b/src/exporter/dir.js similarity index 81% rename from src/exporters/dir.js rename to src/exporter/dir.js index 75aa08f6..dbb4d361 100644 --- a/src/exporters/dir.js +++ b/src/exporter/dir.js @@ -3,12 +3,13 @@ const path = require('path') const pull = require('pull-stream') const paramap = require('pull-paramap') +const CID = require('cids') const fileExporter = require('./file') const switchType = require('../util').switchType // Logic to export a unixfs directory. -module.exports = (node, name, dagService) => { +module.exports = (node, name, ipldResolver) => { // The algorithm below is as follows // // 1. Take all links from a given directory node @@ -25,7 +26,7 @@ module.exports = (node, name, dagService) => { path: path.join(name, link.name), hash: link.hash })), - paramap((item, cb) => dagService.get(item.hash, (err, n) => { + paramap((item, cb) => ipldResolver.get(new CID(item.hash), (err, n) => { if (err) { return cb(err) } @@ -33,7 +34,7 @@ module.exports = (node, name, dagService) => { cb(null, switchType( n, () => pull.values([item]), - () => fileExporter(n, item.path, dagService) + () => fileExporter(n, item.path, ipldResolver) )) })), pull.flatten() diff --git a/src/exporters/file.js b/src/exporter/file.js similarity index 83% rename from src/exporters/file.js rename to src/exporter/file.js index 49deb561..0595efa7 100644 --- a/src/exporters/file.js +++ b/src/exporter/file.js @@ -2,11 +2,12 @@ const traverse = require('pull-traverse') const UnixFS = require('ipfs-unixfs') +const CID = require('cids') const pull = require('pull-stream') const paramap = require('pull-paramap') // Logic to export a single (possibly chunked) unixfs file. -module.exports = (node, name, ds) => { +module.exports = (node, name, ipldResolver) => { function getData (node) { try { const file = UnixFS.unmarshal(node.data) @@ -19,7 +20,7 @@ module.exports = (node, name, ds) => { function visitor (node) { return pull( pull.values(node.links), - paramap((link, cb) => ds.get(link.hash, cb)) + paramap((link, cb) => ipldResolver.get(new CID(link.hash), cb)) ) } diff --git a/src/exporter.js b/src/exporter/index.js similarity index 54% rename from src/exporter.js rename to src/exporter/index.js index 6489bd01..e87970fb 100644 --- a/src/exporter.js +++ b/src/exporter/index.js @@ -2,25 +2,26 @@ const traverse = require('pull-traverse') const pull = require('pull-stream') +const CID = require('cids') -const util = require('./util') +const util = require('./../util') const switchType = util.switchType const cleanMultihash = util.cleanMultihash -const dirExporter = require('./exporters/dir') -const fileExporter = require('./exporters/file') +const dirExporter = require('./dir') +const fileExporter = require('./file') -module.exports = (hash, dagService, options) => { +module.exports = (hash, ipldResolver, options) => { hash = cleanMultihash(hash) options = options || {} function visitor (item) { return pull( - dagService.getStream(item.hash), + ipldResolver.getStream(new CID(item.hash)), pull.map((node) => switchType( node, - () => dirExporter(node, item.path, dagService), - () => fileExporter(node, item.path, dagService) + () => dirExporter(node, item.path, ipldResolver), + () => fileExporter(node, item.path, ipldResolver) )), pull.flatten() ) @@ -28,11 +29,11 @@ module.exports = (hash, dagService, options) => { // Traverse the DAG return pull( - dagService.getStream(hash), + ipldResolver.getStream(new CID(hash)), pull.map((node) => switchType( node, () => traverse.widthFirst({path: hash, hash}, visitor), - () => 
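Both exporters now talk to the IPLD resolver instead of the old DAG service, wrapping each raw multihash in a CID before the lookup. The sketch below illustrates that recurring pattern; `resolveLinks` is a hypothetical helper, and `ipldResolver` is assumed to be an `ipld-resolver` instance whose `node.links` entries carry `{ name, hash }` as in the patch.

```js
'use strict'

const CID = require('cids')
const pull = require('pull-stream')
const paramap = require('pull-paramap')

// Resolve every child of a DAG node, mirroring exporter/dir.js and
// exporter/file.js: the raw multihash buffer is wrapped in a CID first.
function resolveLinks (node, ipldResolver, done) {
  pull(
    pull.values(node.links),
    paramap((link, cb) => ipldResolver.get(new CID(link.hash), cb)),
    pull.collect(done)
  )
}
```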
fileExporter(node, hash, dagService) + () => fileExporter(node, hash, ipldResolver) )), pull.flatten() ) diff --git a/src/importer.js b/src/importer.js deleted file mode 100644 index c832119f..00000000 --- a/src/importer.js +++ /dev/null @@ -1,155 +0,0 @@ -'use strict' - -const merkleDAG = require('ipfs-merkle-dag') -const UnixFS = require('ipfs-unixfs') -const assert = require('assert') -const pull = require('pull-stream') -const pushable = require('pull-pushable') -const write = require('pull-write') -const parallel = require('run-parallel') - -const fsc = require('./chunker-fixed-size') -const createAndStoreTree = require('./tree') - -const DAGNode = merkleDAG.DAGNode - -const CHUNK_SIZE = 262144 - -module.exports = (dagService, options) => { - assert(dagService, 'Missing dagService') - - const files = [] - - const source = pushable() - const sink = write( - makeWriter(source, files, dagService), - null, - 100, - (err) => { - if (err) return source.end(err) - - createAndStoreTree(files, dagService, source, () => { - source.end() - }) - } - ) - - return {source, sink} -} - -function makeWriter (source, files, dagService) { - return (items, cb) => { - parallel(items.map((item) => (cb) => { - if (!item.content) { - return createAndStoreDir(item, dagService, (err, node) => { - if (err) return cb(err) - source.push(node) - files.push(node) - cb() - }) - } - - createAndStoreFile(item, dagService, (err, node) => { - if (err) return cb(err) - source.push(node) - files.push(node) - cb() - }) - }), cb) - } -} - -function createAndStoreDir (item, ds, cb) { - // 1. create the empty dir dag node - // 2. write it to the dag store - - const d = new UnixFS('directory') - const n = new DAGNode() - n.data = d.marshal() - - ds.put(n, (err) => { - if (err) return cb(err) - cb(null, { - path: item.path, - multihash: n.multihash(), - size: n.size() - // dataSize: d.fileSize() - }) - }) -} - -function createAndStoreFile (file, ds, cb) { - if (Buffer.isBuffer(file.content)) { - file.content = pull.values([file.content]) - } - - if (typeof file.content !== 'function') { - return cb(new Error('invalid content')) - } - - // 1. create the unixfs merkledag node - // 2. 
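The importer, both the deleted version and its replacement further down, keeps the same duplex shape: a `pull-write` sink that batches incoming entries and a `pull-pushable` source that emits stored nodes, with directories flushed only once the sink has drained. A stripped-down sketch of that plumbing follows; `store` and `flush` are placeholder callbacks standing in for the real per-entry work and the tree flush.

```js
'use strict'

const pullPushable = require('pull-pushable')
const pullWrite = require('pull-write')

// Duplex skeleton matching the importer: `sink` consumes entries in batches
// of up to 100, `source` emits results, and the final flush happens once the
// sink is done.
function makeDuplex (store, flush) {
  const source = pullPushable()

  const sink = pullWrite(
    (items, cb) => store(items, source, cb), // per-batch work
    null,                                    // no custom reducer
    100,                                     // max batch size
    (err) => {
      if (err) { return source.end(err) }
      flush(source, () => source.end())      // e.g. flush-tree for directories
    }
  )

  return { source: source, sink: sink }
}
```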
add its hash and size to the leafs array - - // TODO - Support really large files - // a) check if we already reach max chunks if yes - // a.1) create a parent node for all of the current leaves - // b.2) clean up the leaves array and add just the parent node - - pull( - file.content, - fsc(CHUNK_SIZE), - pull.asyncMap((chunk, cb) => { - const l = new UnixFS('file', Buffer(chunk)) - const n = new DAGNode(l.marshal()) - - ds.put(n, (err) => { - if (err) { - return cb(new Error('Failed to store chunk')) - } - - cb(null, { - Hash: n.multihash(), - Size: n.size(), - leafSize: l.fileSize(), - Name: '' - }) - }) - }), - pull.collect((err, leaves) => { - if (err) return cb(err) - - if (leaves.length === 1) { - return cb(null, { - path: file.path, - multihash: leaves[0].Hash, - size: leaves[0].Size - // dataSize: leaves[0].leafSize - }) - } - - // create a parent node and add all the leafs - - const f = new UnixFS('file') - const n = new merkleDAG.DAGNode() - - for (let leaf of leaves) { - f.addBlockSize(leaf.leafSize) - n.addRawLink( - new merkleDAG.DAGLink(leaf.Name, leaf.Size, leaf.Hash) - ) - } - - n.data = f.marshal() - ds.put(n, (err) => { - if (err) return cb(err) - - cb(null, { - path: file.path, - multihash: n.multihash(), - size: n.size() - // dataSize: f.fileSize() - }) - }) - }) - ) -} diff --git a/src/importer/flush-tree.js b/src/importer/flush-tree.js new file mode 100644 index 00000000..e71395e7 --- /dev/null +++ b/src/importer/flush-tree.js @@ -0,0 +1,167 @@ +'use strict' + +const mh = require('multihashes') +const UnixFS = require('ipfs-unixfs') +const CID = require('cids') +const dagPB = require('ipld-dag-pb') +const mapValues = require('async/mapValues') +const parallel = require('async/parallel') + +const DAGLink = dagPB.DAGLink +const DAGNode = dagPB.DAGNode + +module.exports = (files, ipldResolver, source, callback) => { + // 1) convert files to a tree + const fileTree = createTree(files) + + if (Object.keys(fileTree).length === 0) { + return callback()// no dirs to be created + } + + // 2) create sizeIndex + const sizeIndex = createSizeIndex(files) + + // 3) bottom up flushing + traverse(fileTree, sizeIndex, null, ipldResolver, source, callback) +} + +/* + * createTree + * + * received an array of files with the format: + * { + * path: // full path + * multihash: // multihash of the dagNode + * size: // cumulative size + * } + * + * returns a JSON object that represents a tree where branches are the paths + * and the leaves are objects with file names and respective multihashes, such + * as: + * { + * foo: { + * bar: { + * baz.txt: + * } + * } + * } + */ +function createTree (files) { + const fileTree = {} + + files.forEach((file) => { + let splitted = file.path.split('/') + if (splitted.length === 1) { + return // adding just one file + } + if (splitted[0] === '') { + splitted = splitted.slice(1) + } + var tmpTree = fileTree + + for (var i = 0; i < splitted.length; i++) { + if (!tmpTree[splitted[i]]) { + tmpTree[splitted[i]] = {} + } + if (i === splitted.length - 1) { + tmpTree[splitted[i]] = file.multihash + } else { + tmpTree = tmpTree[splitted[i]] + } + } + }) + + return fileTree +} + +/* + * create a size index that goes like: + * { : } + */ +function createSizeIndex (files) { + const sizeIndex = {} + + files.forEach((file) => { + sizeIndex[mh.toB58String(file.multihash)] = file.size + }) + + return sizeIndex +} + +/* + * expand the branches recursively (depth first), flush them first + * and then traverse through the bottoum up, flushing everynode + * + * Algorithm 
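To make the `createTree`/`createSizeIndex` contract above concrete, here is a small worked example (the multihash values are placeholder buffers, not real hashes): single-segment paths are skipped, nested paths become branches, and each leaf value is the file's multihash.

```js
'use strict'

// Placeholder buffers standing in for real multihashes (illustration only).
const mhBaz = Buffer.from('baz')
const mhQux = Buffer.from('qux')
const mhTop = Buffer.from('top')

const files = [
  { path: 'foo/bar/baz.txt', multihash: mhBaz, size: 24 },
  { path: 'foo/qux.txt', multihash: mhQux, size: 512 },
  { path: 'top.txt', multihash: mhTop, size: 7 } // single segment: skipped by createTree
]

// createTree(files) returns:
// { foo: { bar: { 'baz.txt': mhBaz }, 'qux.txt': mhQux } }
//
// createSizeIndex(files) returns a base58(multihash) -> size map, one entry
// per file, so the directory flush can fill in DAGLink sizes later.
```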
tl;dr; + * create a dirNode + * Object.keys + * If the value is an Object + * create a dir Node + * Object.keys + * Once finished, add the result as a link to the dir node + * If the value is not an object + * add as a link to the dirNode + */ +function traverse (tree, sizeIndex, path, ipldResolver, source, done) { + mapValues(tree, (node, key, cb) => { + if (isLeaf(node)) { + return cb(null, node) + } + + traverse(node, sizeIndex, path ? `${path}/${key}` : key, ipldResolver, source, cb) + }, (err, tree) => { + if (err) { + return done(err) + } + + // at this stage, all keys are multihashes + // create a dir node + // add all the multihashes as links + // return this new node multihash + + const keys = Object.keys(tree) + + const ufsDir = new UnixFS('directory') + const node = new DAGNode(ufsDir.marshal()) + + keys.forEach((key) => { + const b58mh = mh.toB58String(tree[key]) + const link = new DAGLink(key, sizeIndex[b58mh], tree[key]) + node.addRawLink(link) + }) + + parallel([ + (cb) => node.multihash(cb), + (cb) => node.size(cb) + ], (err, res) => { + if (err) { + return done(err) + } + + const multihash = res[0] + const size = res[1] + + sizeIndex[mh.toB58String(multihash)] = size + ipldResolver.put({ + node: node, + cid: new CID(multihash) + }, (err) => { + if (err) { + source.push(new Error('failed to store dirNode')) + } else if (path) { + source.push({ + path: path, + multihash: multihash, + size: size + }) + } + + done(null, multihash) + }) + }) + }) +} + +function isLeaf (value) { + return !(typeof value === 'object' && !Buffer.isBuffer(value)) +} diff --git a/src/importer/index.js b/src/importer/index.js new file mode 100644 index 00000000..bcf770d7 --- /dev/null +++ b/src/importer/index.js @@ -0,0 +1,217 @@ +'use strict' + +const UnixFS = require('ipfs-unixfs') +const assert = require('assert') +const pull = require('pull-stream') +const pullPushable = require('pull-pushable') +const pullWrite = require('pull-write') +const parallel = require('run-parallel') +const dagPB = require('ipld-dag-pb') +const CID = require('cids') + +const fsc = require('./../chunker/fixed-size') +const createAndStoreTree = require('./flush-tree') + +const DAGNode = dagPB.DAGNode +const DAGLink = dagPB.DAGLink + +const CHUNK_SIZE = 262144 + +module.exports = (ipldResolver, options) => { + assert(ipldResolver, 'Missing IPLD Resolver') + + const files = [] + + const source = pullPushable() + + const sink = pullWrite( + makeWriter(source, files, ipldResolver), + null, + 100, + (err) => { + if (err) { + return source.end(err) + } + + createAndStoreTree(files, ipldResolver, source, () => { + source.end() + }) + } + ) + + return { + source: source, + sink: sink + } +} + +function makeWriter (source, files, ipldResolver) { + return (items, cb) => { + parallel(items.map((item) => (cb) => { + if (!item.content) { + return createAndStoreDir(item, ipldResolver, (err, node) => { + if (err) { + return cb(err) + } + source.push(node) + files.push(node) + cb() + }) + } + + createAndStoreFile(item, ipldResolver, (err, node) => { + if (err) { + return cb(err) + } + source.push(node) + files.push(node) + cb() + }) + }), cb) + } +} + +function createAndStoreDir (item, ipldResolver, cb) { + // 1. create the empty dir dag node + // 2. 
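The same store-a-node dance recurs in `flush-tree.js` and in both `createAndStore*` helpers below: compute the node's multihash and size through the new async `ipld-dag-pb` interface, then `put` it via the resolver keyed by a CID. The sketch factors that shared pattern into a hypothetical `storeNode` helper, using the calls exactly as the patch uses them.

```js
'use strict'

const CID = require('cids')
const parallel = require('async/parallel')

// Store a dag-pb DAGNode and report back its multihash and size, mirroring
// the async interface used throughout this patch.
function storeNode (node, ipldResolver, callback) {
  parallel([
    (cb) => node.multihash(cb),
    (cb) => node.size(cb)
  ], (err, results) => {
    if (err) { return callback(err) }

    const multihash = results[0]
    const size = results[1]

    ipldResolver.put({
      node: node,
      cid: new CID(multihash)
    }, (err) => {
      if (err) { return callback(err) }
      callback(null, { multihash: multihash, size: size })
    })
  })
}
```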
write it to the dag store + + const d = new UnixFS('directory') + const n = new DAGNode() + n.data = d.marshal() + + n.multihash((err, multihash) => { + if (err) { + return cb(err) + } + + ipldResolver.put({ + node: n, + cid: new CID(multihash) + }, (err) => { + if (err) { + return cb(err) + } + + n.size((err, size) => { + if (err) { + return cb(err) + } + + cb(null, { + path: item.path, + multihash: multihash, + size: size + }) + }) + }) + }) +} + +function createAndStoreFile (file, ipldResolver, cb) { + if (Buffer.isBuffer(file.content)) { + file.content = pull.values([file.content]) + } + + if (typeof file.content !== 'function') { + return cb(new Error('invalid content')) + } + + // 1. create the unixfs merkledag node + // 2. add its hash and size to the leafs array + + // TODO - Support really large files + // a) check if we already reach max chunks if yes + // a.1) create a parent node for all of the current leaves + // b.2) clean up the leaves array and add just the parent node + + pull( + file.content, + fsc(CHUNK_SIZE), + pull.asyncMap((chunk, cb) => { + const l = new UnixFS('file', Buffer(chunk)) + const n = new DAGNode(l.marshal()) + + n.multihash((err, multihash) => { + if (err) { + return cb(err) + } + + ipldResolver.put({ + node: n, + cid: new CID(multihash) + }, (err) => { + if (err) { + return cb(new Error('Failed to store chunk')) + } + + n.size((err, size) => { + if (err) { + return cb(err) + } + + cb(null, { + Hash: multihash, + Size: size, + leafSize: l.fileSize(), + Name: '' + }) + }) + }) + }) + }), + pull.collect((err, leaves) => { + if (err) { + return cb(err) + } + + if (leaves.length === 1) { + return cb(null, { + path: file.path, + multihash: leaves[0].Hash, + size: leaves[0].Size + }) + } + + // create a parent node and add all the leafs + + const f = new UnixFS('file') + const n = new DAGNode() + + for (let leaf of leaves) { + f.addBlockSize(leaf.leafSize) + n.addRawLink( + new DAGLink(leaf.Name, leaf.Size, leaf.Hash) + ) + } + + n.data = f.marshal() + + n.multihash((err, multihash) => { + if (err) { + return cb(err) + } + + ipldResolver.put({ + node: n, + cid: new CID(multihash) + }, (err) => { + if (err) { + return cb(err) + } + + n.size((err, size) => { + if (err) { + return cb(err) + } + + cb(null, { + path: file.path, + multihash: multihash, + size: size + }) + }) + }) + }) + }) + ) +} diff --git a/src/index.js b/src/index.js index ef611068..9ca42824 100644 --- a/src/index.js +++ b/src/index.js @@ -1,4 +1,4 @@ 'use strict' -exports.importer = exports.Importer = require('./importer.js') -exports.exporter = exports.Exporter = require('./exporter.js') +exports.importer = exports.Importer = require('./importer') +exports.exporter = exports.Exporter = require('./exporter') diff --git a/src/tree.js b/src/tree.js deleted file mode 100644 index 45ff9539..00000000 --- a/src/tree.js +++ /dev/null @@ -1,130 +0,0 @@ -'use strict' - -const mh = require('multihashes') -const UnixFS = require('ipfs-unixfs') -const merkleDAG = require('ipfs-merkle-dag') - -const DAGLink = merkleDAG.DAGLink -const DAGNode = merkleDAG.DAGNode - -module.exports = (files, dagService, source, cb) => { - // file struct - // { - // path: // full path - // multihash: // multihash of the dagNode - // size: // cumulative size - // dataSize: // dagNode size - // } - - // 1) convert files to a tree - // for each path, split, add to a json tree and in the end the name of the - // file points to an object that is has a key multihash and respective value - // { foo: { bar: { baz.txt: }}} - // the stop 
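End to end, the importer is still driven as a duplex inside a single `pull` pipeline, now fed an IPLD resolver instead of a DAG service; `content` may be a buffer or a pull-stream source. A usage sketch in line with the tests further down (the published package name is assumed; inside the repo the tests use `require('./../src')`, and `repo` is an `ipfs-repo` instance set up elsewhere):

```js
'use strict'

const pull = require('pull-stream')
const BlockService = require('ipfs-block-service')
const IPLDResolver = require('ipld-resolver')
const unixFSEngine = require('ipfs-unixfs-engine') // assumed package name

function importFile (repo, done) {
  const ipldResolver = new IPLDResolver(new BlockService(repo))

  pull(
    pull.values([{
      path: 'hello.txt',
      content: Buffer.from('hello world') // a pull-stream source also works
    }]),
    unixFSEngine.importer(ipldResolver),
    pull.collect((err, files) => {
      if (err) { return done(err) }
      // each entry carries { path, multihash, size }
      done(null, files)
    })
  )
}
```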
condition is if the value is not an object - const fileTree = {} - files.forEach((file) => { - let splitted = file.path.split('/') - if (splitted.length === 1) { - return // adding just one file - // fileTree[file.path] = bs58.encode(file.multihash).toString() - } - if (splitted[0] === '') { - splitted = splitted.slice(1) - } - var tmpTree = fileTree - - for (var i = 0; i < splitted.length; i++) { - if (!tmpTree[splitted[i]]) { - tmpTree[splitted[i]] = {} - } - if (i === splitted.length - 1) { - tmpTree[splitted[i]] = file.multihash - } else { - tmpTree = tmpTree[splitted[i]] - } - } - }) - - if (Object.keys(fileTree).length === 0) { - return cb()// no dirs to be created - } - - // 2) create a index for multihash: { size, dataSize } so - // that we can fetch these when creating the merkle dag nodes - - const mhIndex = {} - - files.forEach((file) => { - mhIndex[mh.toB58String(file.multihash)] = { - size: file.size, - dataSize: file.dataSize - } - }) - - // 3) expand leaves recursively - // create a dirNode - // Object.keys - // If the value is an Object - // create a dir Node - // Object.keys - // Once finished, add the result as a link to the dir node - // If the value is not an object - // add as a link to the dirNode - - let pendingWrites = 0 - - function traverse (tree, path, done) { - const keys = Object.keys(tree) - let tmpTree = tree - keys.map((key) => { - if (typeof tmpTree[key] === 'object' && - !Buffer.isBuffer(tmpTree[key])) { - tmpTree[key] = traverse.call(this, tmpTree[key], path ? path + '/' + key : key, done) - } - }) - - // at this stage, all keys are multihashes - // create a dir node - // add all the multihashes as links - // return this new node multihash - - const d = new UnixFS('directory') - const n = new DAGNode() - - keys.forEach((key) => { - const b58mh = mh.toB58String(tmpTree[key]) - const l = new DAGLink( - key, mhIndex[b58mh].size, tmpTree[key]) - n.addRawLink(l) - }) - - n.data = d.marshal() - - pendingWrites++ - dagService.put(n, (err) => { - pendingWrites-- - if (err) { - source.push(new Error('failed to store dirNode')) - } else if (path) { - source.push({ - path: path, - multihash: n.multihash(), - size: n.size() - }) - } - - if (pendingWrites <= 0) { - done() - } - }) - - if (!path) { - return - } - - mhIndex[mh.toB58String(n.multihash())] = { size: n.size() } - return n.multihash() - } - - traverse(fileTree, null, cb) -} diff --git a/test/test-exporter.js b/test/test-exporter.js index 9f3a8cad..e93db266 100644 --- a/test/test-exporter.js +++ b/test/test-exporter.js @@ -3,38 +3,43 @@ const expect = require('chai').expect const BlockService = require('ipfs-block-service') -const DAGService = require('ipfs-merkle-dag').DAGService +const IPLDResolver = require('ipld-resolver') const UnixFS = require('ipfs-unixfs') const fs = require('fs') const path = require('path') const bs58 = require('bs58') const pull = require('pull-stream') const zip = require('pull-zip') +const CID = require('cids') const unixFSEngine = require('./../src') const exporter = unixFSEngine.exporter module.exports = (repo) => { describe('exporter', () => { - let ds + let ipldResolver const bigFile = fs.readFileSync(path.join(__dirname, '/test-data/1.2MiB.txt')) + before(() => { const bs = new BlockService(repo) - ds = new DAGService(bs) + ipldResolver = new IPLDResolver(bs) }) it('import and export', (done) => { pull( pull.values([{ path: '1.2MiB.txt', - content: pull.values([bigFile, Buffer('hello world')]) + content: pull.values([ + bigFile, + Buffer('hello world') + ]) }]), - 
unixFSEngine.importer(ds), + unixFSEngine.importer(ipldResolver), pull.map((file) => { expect(file.path).to.be.eql('1.2MiB.txt') - return exporter(file.multihash, ds) + return exporter(file.multihash, ipldResolver) }), pull.flatten(), pull.collect((err, files) => { @@ -50,7 +55,7 @@ module.exports = (repo) => { const mhBuf = new Buffer(bs58.decode(hash)) pull( - ds.getStream(hash), + ipldResolver.getStream(new CID(hash)), pull.map((node) => UnixFS.unmarshal(node.data)), pull.collect((err, nodes) => { expect(err).to.not.exist @@ -58,7 +63,7 @@ module.exports = (repo) => { const unmarsh = nodes[0] pull( - exporter(mhBuf, ds), + exporter(mhBuf, ipldResolver), pull.collect(onFiles) ) @@ -79,10 +84,10 @@ module.exports = (repo) => { pull( zip( pull( - ds.getStream(hash), + ipldResolver.getStream(new CID(hash)), pull.map((node) => UnixFS.unmarshal(node.data)) ), - exporter(hash, ds) + exporter(hash, ipldResolver) ), pull.collect((err, values) => { expect(err).to.not.exist @@ -97,7 +102,7 @@ module.exports = (repo) => { it('export a small file with links', (done) => { const hash = 'QmW7BDxEbGqxxSYVtn3peNPQgdDXbWkoQ6J1EFYAEuQV3Q' pull( - exporter(hash, ds), + exporter(hash, ipldResolver), pull.collect((err, files) => { expect(err).to.not.exist @@ -109,7 +114,7 @@ module.exports = (repo) => { it('export a large file > 5mb', (done) => { const hash = 'QmRQgufjp9vLE8XK2LGKZSsPCFCF6e4iynCQtNB5X2HBKE' pull( - exporter(hash, ds), + exporter(hash, ipldResolver), pull.collect((err, files) => { expect(err).to.not.exist @@ -123,7 +128,7 @@ module.exports = (repo) => { const hash = 'QmWChcSFMNcFkfeJtNd8Yru1rE6PhtCRfewi1tMwjkwKjN' pull( - exporter(hash, ds), + exporter(hash, ipldResolver), pull.collect((err, files) => { expect(err).to.not.exist @@ -162,7 +167,7 @@ module.exports = (repo) => { const hash = 'QmUNLLsPACCz1vLxQVkXqqLX5R1X345qqfHbsf67hvA3Nn' pull( - exporter(hash, ds), + exporter(hash, ipldResolver), pull.collect((err, files) => { expect(err).to.not.exist expect(files[0].content).to.not.exist @@ -176,7 +181,7 @@ module.exports = (repo) => { const hash = 'QmWChcSFMNcFkfeJtNd8Yru1rE6PhtCRfewi1tMwjkwKj3' pull( - exporter(hash, ds), + exporter(hash, ipldResolver), pull.collect((err, files) => { expect(err).to.exist done() @@ -190,7 +195,9 @@ function fileEql (f1, f2, done) { pull( f1.content, pull.collect((err, data) => { - if (err) return done(err) + if (err) { + return done(err) + } try { if (f2) { diff --git a/test/test-fixed-size-chunker.js b/test/test-fixed-size-chunker.js index 08a1a250..e3e8a076 100644 --- a/test/test-fixed-size-chunker.js +++ b/test/test-fixed-size-chunker.js @@ -1,7 +1,7 @@ /* eslint-env mocha */ 'use strict' -const chunker = require('./../src/chunker-fixed-size') +const chunker = require('./../src/chunker/fixed-size') const fs = require('fs') const expect = require('chai').expect const path = require('path') diff --git a/test/test-importer.js b/test/test-importer.js index d2eaa0e0..5b4fb9cb 100644 --- a/test/test-importer.js +++ b/test/test-importer.js @@ -4,11 +4,11 @@ const importer = require('./../src').importer const expect = require('chai').expect const BlockService = require('ipfs-block-service') -const DAGService = require('ipfs-merkle-dag').DAGService const fs = require('fs') const path = require('path') const pull = require('pull-stream') const mh = require('multihashes') +const IPLDResolver = require('ipld-resolver') function stringifyMh (files) { return files.map((file) => { @@ -19,7 +19,7 @@ function stringifyMh (files) { module.exports = function (repo) { 
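The exporter side of the round trip mirrors the tests above: hand it a multihash (or base58 string) plus the same resolver and collect the file entries, each of which exposes its data as a pull-stream `content` source. The sketch below is illustrative; `readFile` is a hypothetical helper and `ipldResolver` is assumed to be built the same way as in the test setup.

```js
'use strict'

const pull = require('pull-stream')
const exporter = require('ipfs-unixfs-engine').exporter // assumed package name

function readFile (hash, ipldResolver, done) {
  pull(
    exporter(hash, ipldResolver),
    pull.collect((err, files) => {
      if (err) { return done(err) }

      // Drain the first entry's content stream into a single buffer.
      pull(
        files[0].content,
        pull.collect((err, chunks) => {
          if (err) { return done(err) }
          done(null, Buffer.concat(chunks))
        })
      )
    })
  )
}
```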
describe('importer', function () { - let ds + let ipldResolver const bigFile = fs.readFileSync(path.join(__dirname, '/test-data/1.2MiB.txt')) const smallFile = fs.readFileSync(path.join(__dirname, '/test-data/200Bytes.txt')) @@ -30,7 +30,7 @@ module.exports = function (repo) { before(() => { const bs = new BlockService(repo) - ds = new DAGService(bs) + ipldResolver = new IPLDResolver(bs) }) it('bad input', (done) => { @@ -39,7 +39,7 @@ module.exports = function (repo) { path: '200Bytes.txt', content: 'banana' }]), - importer(ds), + importer(ipldResolver), pull.onEnd((err) => { expect(err).to.exist done() @@ -53,7 +53,7 @@ module.exports = function (repo) { path: '200Bytes.txt', content: pull.values([smallFile]) }]), - importer(ds), + importer(ipldResolver), pull.collect((err, files) => { expect(err).to.not.exist expect(stringifyMh(files)).to.be.eql([{ @@ -72,7 +72,7 @@ module.exports = function (repo) { path: '200Bytes.txt', content: smallFile }]), - importer(ds), + importer(ipldResolver), pull.collect((err, files) => { expect(err).to.not.exist expect(stringifyMh(files)).to.be.eql([{ @@ -91,7 +91,7 @@ module.exports = function (repo) { path: 'foo/bar/200Bytes.txt', content: pull.values([smallFile]) }]), - importer(ds), + importer(ipldResolver), pull.collect((err, files) => { expect(err).to.not.exist expect(files.length).to.equal(3) @@ -129,7 +129,7 @@ module.exports = function (repo) { path: '1.2MiB.txt', content: pull.values([bigFile]) }]), - importer(ds), + importer(ipldResolver), pull.collect((err, files) => { expect(err).to.not.exist expect(stringifyMh(files)).to.be.eql([{ @@ -148,7 +148,7 @@ module.exports = function (repo) { path: 'foo-big/1.2MiB.txt', content: pull.values([bigFile]) }]), - importer(ds), + importer(ipldResolver), pull.collect((err, files) => { expect(err).to.not.exist @@ -176,7 +176,7 @@ module.exports = function (repo) { pull.values([{ path: 'empty-dir' }]), - importer(ds), + importer(ipldResolver), pull.collect((err, files) => { expect(err).to.not.exist @@ -200,7 +200,7 @@ module.exports = function (repo) { path: 'pim/1.2MiB.txt', content: pull.values([bigFile]) }]), - importer(ds), + importer(ipldResolver), pull.collect((err, files) => { expect(err).to.not.exist @@ -235,7 +235,7 @@ module.exports = function (repo) { path: 'pam/1.2MiB.txt', content: pull.values([bigFile]) }]), - importer(ds), + importer(ipldResolver), pull.collect((err, files) => { expect(err).to.not.exist