From e2bedd2a842a91f02f16736ef7f0ff3b17e3a74d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Maximilian=20G=C3=BCntner?= Date: Sat, 28 Jan 2017 02:33:28 +0100 Subject: [PATCH 001/104] libstore: add basic IPFS binary cache support either /ipfs or /ipns URIs can be used to address a store. To change the default values (127.0.0.1, 5001) for the local API `host` and `port` are used in the query such as /ipfs/QmUNLLsPACCz1vLxQVkXqqLX5R1X345qqfHbsf67hvA3Nn?host=192.168.42.1&port=1111 If a gateway should be used instead of a local daemon `use_gateway` is set to 1. If the another gateway should be used, `gateway` can be changed. In combination: /ipns/cache.example.com?gateway=https://gw.example.com --- src/libstore/download.cc | 21 ++++ src/libstore/download.hh | 2 + src/libstore/ipfs-binary-cache-store.cc | 158 ++++++++++++++++++++++++ src/libstore/ipfs.hh | 32 +++++ src/libstore/local.mk | 30 ++++- 5 files changed, 242 insertions(+), 1 deletion(-) create mode 100644 src/libstore/ipfs-binary-cache-store.cc create mode 100644 src/libstore/ipfs.hh diff --git a/src/libstore/download.cc b/src/libstore/download.cc index 8030e83b0dd..63442f3029d 100644 --- a/src/libstore/download.cc +++ b/src/libstore/download.cc @@ -18,6 +18,8 @@ namespace nix { +MakeError(URLEncodeError, Error); + double getTime() { struct timeval tv; @@ -485,6 +487,22 @@ struct CurlDownloader : public Downloader item->failure = failure; enqueueItem(item); } + + std::string urlEncode(const std::string & param) override { + //TODO reuse curl handle or move function to another class/file + CURL *curl = curl_easy_init(); + char *encoded = NULL; + if (curl) { + encoded = curl_easy_escape(curl, param.c_str(), 0); + } + if ((curl == NULL) || (encoded == NULL)) { + throw URLEncodeError("Could not encode param"); + } + std::string ret(encoded); + curl_free(encoded); + curl_easy_cleanup(curl); + return ret; + } }; ref getDownloader() @@ -622,6 +640,9 @@ Path Downloader::downloadCached(ref store, const string & url_, bool 
unpa return storePath; } +std::string Downloader::urlEncode(const std::string & param) { + throw URLEncodeError("not implemented"); +} bool isUri(const string & s) { diff --git a/src/libstore/download.hh b/src/libstore/download.hh index 82b5d641fde..77419973d6f 100644 --- a/src/libstore/download.hh +++ b/src/libstore/download.hh @@ -52,6 +52,8 @@ struct Downloader Path downloadCached(ref store, const string & uri, bool unpack, string name = "", const Hash & expectedHash = Hash(), string * effectiveUri = nullptr); + virtual std::string urlEncode(const std::string & param); + enum Error { NotFound, Forbidden, Misc, Transient, Interrupted }; }; diff --git a/src/libstore/ipfs-binary-cache-store.cc b/src/libstore/ipfs-binary-cache-store.cc new file mode 100644 index 00000000000..69d33ce3275 --- /dev/null +++ b/src/libstore/ipfs-binary-cache-store.cc @@ -0,0 +1,158 @@ +#include + +#include "binary-cache-store.hh" +#include "download.hh" +#include "nar-info-disk-cache.hh" +#include "ipfs.hh" + +namespace nix { + +MakeError(UploadToIPFS, Error); + +class IPFSBinaryCacheStore : public BinaryCacheStore +{ + +private: + + std::string cacheUri; + + /* Host where a IPFS API can be reached (usually localhost) */ + std::string ipfsAPIHost; + /* Port where a IPFS API can be reached (usually 5001) */ + uint16_t ipfsAPIPort; + /* Whether to use a IPFS Gateway instead of the API */ + bool useIpfsGateway; + /* Where to find a IPFS Gateway */ + std::string ipfsGatewayURL; + + std::string constructIPFSRequest(const std::string & path) { + std::string uri; + std::string ipfsPath = cacheUri + "/" + path; + if (useIpfsGateway == false) { + uri = ipfs::buildAPIURL(ipfsAPIHost, ipfsAPIPort) + + "/cat" + + ipfs::buildQuery({{"arg", ipfsPath}}); + } else { + uri = ipfsGatewayURL + ipfsPath; + } + return uri; + } + +public: + + IPFSBinaryCacheStore( + const Params & params, const Path & _cacheUri) + : BinaryCacheStore(params) + , cacheUri(_cacheUri) + , ipfsAPIHost(get(params, "host", 
"127.0.0.1")) + , ipfsAPIPort(std::stoi(get(params, "port", "5001"))) + , useIpfsGateway(get(params, "use_gateway", "0") == "1") + , ipfsGatewayURL(get(params, "gateway", "https://ipfs.io")) + { + if (cacheUri.back() == '/') + cacheUri.pop_back(); + /* + * A cache is still useful since the IPFS API or + * gateway may have a higher latency when not running on + * localhost + */ + diskCache = getNarInfoDiskCache(); + } + + std::string getUri() override + { + return cacheUri; + } + + void init() override + { + if (!diskCache->cacheExists(cacheUri, wantMassQuery_, priority)) { + try { + BinaryCacheStore::init(); + } catch (UploadToIPFS &) { + throw Error(format("‘%s’ does not appear to be a binary cache") % cacheUri); + } + diskCache->createCache(cacheUri, storeDir, wantMassQuery_, priority); + } + } + +protected: + + bool fileExists(const std::string & path) override + { + /* + * TODO: Try a local mount first, best to share code with + * LocalBinaryCacheStore + */ + + /* TODO: perform ipfs ls instead instead of trying to fetch it */ + auto uri = constructIPFSRequest(path); + try { + DownloadRequest request(uri); + request.showProgress = DownloadRequest::no; + request.tries = 5; + if (useIpfsGateway) + request.head = true; + getDownloader()->download(request); + return true; + } catch (DownloadError & e) { + if (e.error == Downloader::NotFound) + return false; + throw; + } + } + + void upsertFile(const std::string & path, const std::string & data) override + { + throw UploadToIPFS("uploading to an IPFS binary cache is not supported"); + } + + void getFile(const std::string & path, + std::function)> success, + std::function failure) override + { + /* + * TODO: Try local mount first, best to share code with + * LocalBinaryCacheStore + */ + auto uri = constructIPFSRequest(path); + DownloadRequest request(uri); + request.showProgress = DownloadRequest::no; + request.tries = 8; + + getDownloader()->enqueueDownload(request, + [success](const DownloadResult & result) { + 
success(result.data); + }, + [success, failure](std::exception_ptr exc) { + try { + std::rethrow_exception(exc); + } catch (DownloadError & e) { + if (e.error == Downloader::NotFound) + return success(0); + failure(exc); + } catch (...) { + failure(exc); + } + }); + } + +}; + +static RegisterStoreImplementation regStore([]( + const std::string & uri, const Store::Params & params) + -> std::shared_ptr +{ + /* + * TODO: maybe use ipfs:/ fs:/ipfs/ + * https://github.com/ipfs/go-ipfs/issues/1678#issuecomment-157478515 + */ + if (uri.substr(0, strlen("/ipfs/")) != "/ipfs/" && + uri.substr(0, strlen("/ipns/")) != "/ipns/") + return 0; + auto store = std::make_shared(params, uri); + store->init(); + return store; +}); + +} diff --git a/src/libstore/ipfs.hh b/src/libstore/ipfs.hh new file mode 100644 index 00000000000..c5e843e19f8 --- /dev/null +++ b/src/libstore/ipfs.hh @@ -0,0 +1,32 @@ +#pragma once + +#include +#include + +#include "types.hh" +#include "download.hh" + +namespace nix { +namespace ipfs { + +MakeError (CommandError, Error); + +inline std::string buildAPIURL(const std::string & host, + uint16_t port = 5001, + const std::string & version = "v0") +{ + return "http://" + host + ":" + std::to_string(port) + "/api/" + version; +} + +inline std::string buildQuery(const std::vector> & params = {}) { + std::string query = "?stream-channels=true&json=true&encoding=json"; + for (auto& param : params) { + std::string key = getDownloader()->urlEncode(param.first); + std::string value = getDownloader()->urlEncode(param.second); + query += "&" + key + "=" + value; + } + return query; +} + +} +} diff --git a/src/libstore/local.mk b/src/libstore/local.mk index 9d5c04dca0c..d17eb040d9d 100644 --- a/src/libstore/local.mk +++ b/src/libstore/local.mk @@ -4,7 +4,34 @@ libstore_NAME = libnixstore libstore_DIR := $(d) -libstore_SOURCES := $(wildcard $(d)/*.cc) +libstore_SOURCES := \ + $(d)/binary-cache-store.cc \ + $(d)/build.cc \ + $(d)/builtins.cc \ + $(d)/crypto.cc \ + 
$(d)/derivations.cc \ + $(d)/download.cc \ + $(d)/export-import.cc \ + $(d)/gc.cc \ + $(d)/globals.cc \ + $(d)/http-binary-cache-store.cc \ + $(d)/local-binary-cache-store.cc \ + $(d)/local-fs-store.cc \ + $(d)/local-store.cc \ + $(d)/misc.cc \ + $(d)/nar-accessor.cc \ + $(d)/nar-info.cc \ + $(d)/nar-info-disk-cache.cc \ + $(d)/optimise-store.cc \ + $(d)/pathlocks.cc \ + $(d)/profiles.cc \ + $(d)/references.cc \ + $(d)/remote-fs-accessor.cc \ + $(d)/remote-store.cc \ + $(d)/sqlite.cc \ + $(d)/ssh-store.cc \ + $(d)/store-api.cc \ + $(d)/ipfs-binary-cache-store.cc libstore_LIBS = libutil libformat @@ -12,6 +39,7 @@ libstore_LDFLAGS = $(SQLITE3_LIBS) -lbz2 $(LIBCURL_LIBS) $(SODIUM_LIBS) -pthread ifeq ($(ENABLE_S3), 1) libstore_LDFLAGS += -laws-cpp-sdk-s3 -laws-cpp-sdk-core + libstore_SOURCES += $(d)/s3-binary-cache-store.cc endif ifeq ($(OS), SunOS) From fb4724220430bee79391a345fed217e3d36858bf Mon Sep 17 00:00:00 2001 From: Sergei Trofimovich Date: Thu, 31 Aug 2017 21:41:36 +0100 Subject: [PATCH 002/104] src/libmain/stack.cc: fix 'ucontext' usage on glibc-2.26 Build fails as: $ make CXX src/libmain/stack.o src/libmain/stack.cc: In function 'void nix::sigsegvHandler(int, siginfo_t*, void*)': src/libmain/stack.cc:21:21: error: 'ucontext' was not declared in this scope sp = (char *) ((ucontext *) ctx)->uc_mcontext.gregs[REG_RSP]; ^~~~~~~~ src/libmain/stack.cc:21:21: note: suggested alternative: 'ucontext_t' sp = (char *) ((ucontext *) ctx)->uc_mcontext.gregs[REG_RSP]; ^~~~~~~~ ucontext_t It's caused by upstream rename: https://sourceware.org/git/?p=glibc.git;a=commitdiff;h=251287734e89a52da3db682a8241eb6bccc050c9 which basically changes typedef struct ucontext {} ucontext_t; to typedef struct ucontext_t {} ucontext_t; The change uses ucontext_t. 
Signed-off-by: Sergei Trofimovich --- src/libmain/stack.cc | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/libmain/stack.cc b/src/libmain/stack.cc index abf59dc4baa..b9eb2fa18e5 100644 --- a/src/libmain/stack.cc +++ b/src/libmain/stack.cc @@ -20,9 +20,9 @@ static void sigsegvHandler(int signo, siginfo_t * info, void * ctx) bool haveSP = true; char * sp = 0; #if defined(__x86_64__) && defined(REG_RSP) - sp = (char *) ((ucontext *) ctx)->uc_mcontext.gregs[REG_RSP]; + sp = (char *) ((ucontext_t *) ctx)->uc_mcontext.gregs[REG_RSP]; #elif defined(REG_ESP) - sp = (char *) ((ucontext *) ctx)->uc_mcontext.gregs[REG_ESP]; + sp = (char *) ((ucontext_t *) ctx)->uc_mcontext.gregs[REG_ESP]; #else haveSP = false; #endif From 62b6cb9f91f5727fa7a69e905150da9b8b477c05 Mon Sep 17 00:00:00 2001 From: Carlo Nucera Date: Mon, 8 Jun 2020 11:56:21 -0400 Subject: [PATCH 003/104] Update the ipfs binary store code --- src/libstore/ipfs-binary-cache-store.cc | 38 +++++++++++++------------ 1 file changed, 20 insertions(+), 18 deletions(-) diff --git a/src/libstore/ipfs-binary-cache-store.cc b/src/libstore/ipfs-binary-cache-store.cc index 05de85b4cbc..d6e4309ae7b 100644 --- a/src/libstore/ipfs-binary-cache-store.cc +++ b/src/libstore/ipfs-binary-cache-store.cc @@ -44,10 +44,10 @@ class IPFSBinaryCacheStore : public BinaryCacheStore const Params & params, const Path & _cacheUri) : BinaryCacheStore(params) , cacheUri(_cacheUri) - , ipfsAPIHost(get(params, "host", "127.0.0.1")) - , ipfsAPIPort(std::stoi(get(params, "port", "5001"))) - , useIpfsGateway(get(params, "use_gateway", "0") == "1") - , ipfsGatewayURL(get(params, "gateway", "https://ipfs.io")) + , ipfsAPIHost(get(params, "host").value_or("127.0.0.1")) + , ipfsAPIPort(std::stoi(get(params, "port").value_or("5001"))) + , useIpfsGateway(get(params, "use_gateway").value_or("0") == "1") + , ipfsGatewayURL(get(params, "gateway").value_or("https://ipfs.io")) { if (cacheUri.back() == '/') cacheUri.pop_back(); @@ -66,13 
+66,16 @@ class IPFSBinaryCacheStore : public BinaryCacheStore void init() override { - if (!diskCache->cacheExists(cacheUri, wantMassQuery_, priority)) { + if (auto cacheInfo = diskCache->cacheExists(getUri())) { + wantMassQuery.setDefault(cacheInfo->wantMassQuery ? "true" : "false"); + priority.setDefault(fmt("%d", cacheInfo->priority)); + } else { try { BinaryCacheStore::init(); } catch (UploadToIPFS &) { throw Error(format("‘%s’ does not appear to be a binary cache") % cacheUri); } - diskCache->createCache(cacheUri, storeDir, wantMassQuery_, priority); + diskCache->createCache(cacheUri, storeDir, wantMassQuery, priority); } } @@ -108,8 +111,7 @@ class IPFSBinaryCacheStore : public BinaryCacheStore } void getFile(const std::string & path, - std::function)> success, - std::function failure) override + Callback> callback) noexcept override { /* * TODO: Try local mount first, best to share code with @@ -120,21 +122,21 @@ class IPFSBinaryCacheStore : public BinaryCacheStore //request.showProgress = DownloadRequest::no; request.tries = 8; + auto callbackPtr = std::make_shared(std::move(callback)); + getDownloader()->enqueueDownload(request, - [success](const DownloadResult & result) { - success(result.data); - }, - [success, failure](std::exception_ptr exc) { + {[callbackPtr](std::future result){ try { - std::rethrow_exception(exc); + (*callbackPtr)(result.get().data); } catch (DownloadError & e) { - if (e.error == Downloader::NotFound) - return success(0); - failure(exc); + if (e.error == Downloader::NotFound || e.error == Downloader::Forbidden) + return (*callbackPtr)(std::shared_ptr()); + callbackPtr->rethrow(); } catch (...) { - failure(exc); + callbackPtr->rethrow(); } - }); + }} + ); } }; From aa2b8f0ac7a394f9413e229b601cbf25e4e6bd4b Mon Sep 17 00:00:00 2001 From: Matthew Bauer Date: Tue, 9 Jun 2020 18:46:08 -0500 Subject: [PATCH 004/104] Support POST in file transfer This adds a post bool that can be used to POST content instead of GET or PUT it. 
--- src/libstore/filetransfer.cc | 2 ++ src/libstore/filetransfer.hh | 1 + 2 files changed, 3 insertions(+) diff --git a/src/libstore/filetransfer.cc b/src/libstore/filetransfer.cc index b96fd084d9c..c907f628b26 100644 --- a/src/libstore/filetransfer.cc +++ b/src/libstore/filetransfer.cc @@ -282,6 +282,8 @@ struct curlFileTransfer : public FileTransfer if (request.head) curl_easy_setopt(req, CURLOPT_NOBODY, 1); + else if (request.post) + curl_easy_setopt(req, CURLOPT_POST, 1); if (request.data) { curl_easy_setopt(req, CURLOPT_UPLOAD, 1L); diff --git a/src/libstore/filetransfer.hh b/src/libstore/filetransfer.hh index 0fbda4c22ab..6f05c929c08 100644 --- a/src/libstore/filetransfer.hh +++ b/src/libstore/filetransfer.hh @@ -39,6 +39,7 @@ struct FileTransferRequest std::string expectedETag; bool verifyTLS = true; bool head = false; + bool post = false; size_t tries = fileTransferSettings.tries; unsigned int baseRetryTimeMs = 250; ActivityId parentAct; From f7f27a28756d83f653e70264f272acbf007316da Mon Sep 17 00:00:00 2001 From: Matthew Bauer Date: Tue, 9 Jun 2020 18:46:35 -0500 Subject: [PATCH 005/104] Support data with POST in file transfer need to use a multipart form to correctly insert the data. this mirrors how curl -Ffile=asdf works. 
--- src/libstore/filetransfer.cc | 16 ++++++++++++---- 1 file changed, 12 insertions(+), 4 deletions(-) diff --git a/src/libstore/filetransfer.cc b/src/libstore/filetransfer.cc index c907f628b26..353c3d098ad 100644 --- a/src/libstore/filetransfer.cc +++ b/src/libstore/filetransfer.cc @@ -286,10 +286,18 @@ struct curlFileTransfer : public FileTransfer curl_easy_setopt(req, CURLOPT_POST, 1); if (request.data) { - curl_easy_setopt(req, CURLOPT_UPLOAD, 1L); - curl_easy_setopt(req, CURLOPT_READFUNCTION, readCallbackWrapper); - curl_easy_setopt(req, CURLOPT_READDATA, this); - curl_easy_setopt(req, CURLOPT_INFILESIZE_LARGE, (curl_off_t) request.data->length()); + if (request.post) { + // based off of https://curl.haxx.se/libcurl/c/postit2.html + curl_mime *form = curl_mime_init(req); + curl_mimepart *field = curl_mime_addpart(form); + curl_mime_data(field, request.data->data(), request.data->length()); + curl_easy_setopt(req, CURLOPT_MIMEPOST, form); + } else { + curl_easy_setopt(req, CURLOPT_UPLOAD, 1L); + curl_easy_setopt(req, CURLOPT_READFUNCTION, readCallbackWrapper); + curl_easy_setopt(req, CURLOPT_READDATA, this); + curl_easy_setopt(req, CURLOPT_INFILESIZE_LARGE, (curl_off_t) request.data->length()); + } } if (request.verifyTLS) { From aa0e736b007230874e9742b82627c85cd5050c2e Mon Sep 17 00:00:00 2001 From: Matthew Bauer Date: Tue, 9 Jun 2020 18:49:42 -0500 Subject: [PATCH 006/104] Consolidate ipfs-binary-cache-store.cc to one file MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This makes a number of changes: - Use ipfs:// and ipns:// for URI, this is a proper URI and now documented by ipfs - Remove ipfs.hh, move everything into store - Store daemonUri, which is the, replacing individual API settings - Remove support for gateways - Remove nar info cache - ipfs should do all of the caching for us - Only retry getFile once - if it doesn’t work the ipfs daemon is probably down - Use /object/stat to check if file exists instead 
of cat --- src/libstore/ipfs-binary-cache-store.cc | 101 +++++++----------------- src/libstore/ipfs.hh | 32 -------- 2 files changed, 30 insertions(+), 103 deletions(-) delete mode 100644 src/libstore/ipfs.hh diff --git a/src/libstore/ipfs-binary-cache-store.cc b/src/libstore/ipfs-binary-cache-store.cc index 5a2d4b591cf..69bf38dcb72 100644 --- a/src/libstore/ipfs-binary-cache-store.cc +++ b/src/libstore/ipfs-binary-cache-store.cc @@ -3,7 +3,6 @@ #include "binary-cache-store.hh" #include "filetransfer.hh" #include "nar-info-disk-cache.hh" -#include "ipfs.hh" namespace nix { @@ -15,28 +14,9 @@ class IPFSBinaryCacheStore : public BinaryCacheStore private: std::string cacheUri; + std::string daemonUri; - /* Host where a IPFS API can be reached (usually localhost) */ - std::string ipfsAPIHost; - /* Port where a IPFS API can be reached (usually 5001) */ - uint16_t ipfsAPIPort; - /* Whether to use a IPFS Gateway instead of the API */ - bool useIpfsGateway; - /* Where to find a IPFS Gateway */ - std::string ipfsGatewayURL; - - std::string constructIPFSRequest(const std::string & path) { - std::string uri; - std::string ipfsPath = cacheUri + "/" + path; - if (useIpfsGateway == false) { - uri = ipfs::buildAPIURL(ipfsAPIHost, ipfsAPIPort) + - "/cat" + - ipfs::buildQuery({{"arg", ipfsPath}}); - } else { - uri = ipfsGatewayURL + ipfsPath; - } - return uri; - } + std::string ipfsPath; public: @@ -44,19 +24,20 @@ class IPFSBinaryCacheStore : public BinaryCacheStore const Params & params, const Path & _cacheUri) : BinaryCacheStore(params) , cacheUri(_cacheUri) - , ipfsAPIHost(get(params, "host").value_or("127.0.0.1")) - , ipfsAPIPort(std::stoi(get(params, "port").value_or("5001"))) - , useIpfsGateway(get(params, "use_gateway").value_or("0") == "1") - , ipfsGatewayURL(get(params, "gateway").value_or("https://ipfs.io")) { if (cacheUri.back() == '/') cacheUri.pop_back(); - /* - * A cache is still useful since the IPFS API or - * gateway may have a higher latency when not running on - 
* localhost - */ - diskCache = getNarInfoDiskCache(); + + if (hasPrefix(cacheUri, "ipfs://")) + ipfsPath = "/ipfs/" + std::string(cacheUri, 7); + else if (hasPrefix(cacheUri, "ipns://")) + ipfsPath = "/ipns/" + std::string(cacheUri, 7); + else + throw Error("unknown IPFS URI '%s'", cacheUri); + + std::string ipfsAPIHost(get(params, "host").value_or("127.0.0.1")); + std::string ipfsAPIPort(get(params, "port").value_or("5001")); + daemonUri = "http://" + ipfsAPIHost + ":" + ipfsAPIPort; } std::string getUri() override @@ -66,42 +47,28 @@ class IPFSBinaryCacheStore : public BinaryCacheStore void init() override { - if (auto cacheInfo = diskCache->cacheExists(getUri())) { - wantMassQuery.setDefault(cacheInfo->wantMassQuery ? "true" : "false"); - priority.setDefault(fmt("%d", cacheInfo->priority)); - } else { - try { - BinaryCacheStore::init(); - } catch (UploadToIPFS &) { - throw Error(format("‘%s’ does not appear to be a binary cache") % cacheUri); - } - diskCache->createCache(cacheUri, storeDir, wantMassQuery, priority); } + BinaryCacheStore::init(); } protected: bool fileExists(const std::string & path) override { - /* - * TODO: Try a local mount first, best to share code with - * LocalBinaryCacheStore - */ + auto uri = daemonUri + "/api/v0/object/stat?arg=" + getFileTransfer()->urlEncode(ipfsPath + "/" + path); - /* TODO: perform ipfs ls instead instead of trying to fetch it */ - auto uri = constructIPFSRequest(path); + FileTransferRequest request(uri); + request.post = true; + request.tries = 1; try { - FileTransferRequest request(uri); - //request.showProgress = FileTransferRequest::no; - request.tries = 5; - if (useIpfsGateway) - request.head = true; - getFileTransfer()->download(request); - return true; + auto res = getFileTransfer()->download(request); + auto json = nlohmann::json::parse(*res.data); + + return json.find("Hash") != json.end(); } catch (FileTransferError & e) { - if (e.error == FileTransfer::NotFound) - return false; - throw; + // probably 
should verify this is a not found error but + // ipfs gives us a 500 + return false; } } @@ -113,14 +80,10 @@ class IPFSBinaryCacheStore : public BinaryCacheStore void getFile(const std::string & path, Callback> callback) noexcept override { - /* - * TODO: Try local mount first, best to share code with - * LocalBinaryCacheStore - */ - auto uri = constructIPFSRequest(path); + auto uri = daemonUri + "/api/v0/cat?arg=" + getFileTransfer()->urlEncode(ipfsPath + "/" + path); + FileTransferRequest request(uri); - //request.showProgress = FileTransferRequest::no; - request.tries = 8; + request.tries = 1; auto callbackPtr = std::make_shared(std::move(callback)); @@ -145,12 +108,8 @@ static RegisterStoreImplementation regStore([]( const std::string & uri, const Store::Params & params) -> std::shared_ptr { - /* - * TODO: maybe use ipfs:/ fs:/ipfs/ - * https://github.com/ipfs/go-ipfs/issues/1678#issuecomment-157478515 - */ - if (uri.substr(0, strlen("/ipfs/")) != "/ipfs/" && - uri.substr(0, strlen("/ipns/")) != "/ipns/") + if (uri.substr(0, strlen("ipfs://")) != "ipfs://" && + uri.substr(0, strlen("ipns://")) != "ipns://") return 0; auto store = std::make_shared(params, uri); store->init(); diff --git a/src/libstore/ipfs.hh b/src/libstore/ipfs.hh deleted file mode 100644 index 00e99dbbf09..00000000000 --- a/src/libstore/ipfs.hh +++ /dev/null @@ -1,32 +0,0 @@ -#pragma once - -#include -#include - -#include "types.hh" -#include "filetransfer.hh" - -namespace nix { -namespace ipfs { - -MakeError (CommandError, Error); - -inline std::string buildAPIURL(const std::string & host, - uint16_t port = 5001, - const std::string & version = "v0") -{ - return "http://" + host + ":" + std::to_string(port) + "/api/" + version; -} - -inline std::string buildQuery(const std::vector> & params = {}) { - std::string query = "?stream-channels=true&json=true&encoding=json"; - for (auto& param : params) { - std::string key = getFileTransfer()->urlEncode(param.first); - std::string value = 
getFileTransfer()->urlEncode(param.second); - query += "&" + key + "=" + value; - } - return query; -} - -} -} From adec10c8d80ae78d39e29ee8380afbe0934fe025 Mon Sep 17 00:00:00 2001 From: Matthew Bauer Date: Tue, 9 Jun 2020 18:52:40 -0500 Subject: [PATCH 007/104] Use /api/v0/version to check if ipfs daemon is running To check if IPFS daemon works, we need to do a test that should alway work. In the future, we may want to have a min IPFS daemon that we support, but for now any version will work. --- src/libstore/ipfs-binary-cache-store.cc | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/src/libstore/ipfs-binary-cache-store.cc b/src/libstore/ipfs-binary-cache-store.cc index 69bf38dcb72..debdbd5cdcb 100644 --- a/src/libstore/ipfs-binary-cache-store.cc +++ b/src/libstore/ipfs-binary-cache-store.cc @@ -1,4 +1,5 @@ #include +#include #include "binary-cache-store.hh" #include "filetransfer.hh" @@ -38,6 +39,15 @@ class IPFSBinaryCacheStore : public BinaryCacheStore std::string ipfsAPIHost(get(params, "host").value_or("127.0.0.1")); std::string ipfsAPIPort(get(params, "port").value_or("5001")); daemonUri = "http://" + ipfsAPIHost + ":" + ipfsAPIPort; + + // Check the IPFS daemon is running + FileTransferRequest request(daemonUri + "/api/v0/version"); + request.post = true; + request.tries = 1; + auto res = getFileTransfer()->download(request); + auto versionInfo = nlohmann::json::parse(*res.data); + if (versionInfo.find("Version") == versionInfo.end()) + throw Error("daemon for IPFS is not running properly"); } std::string getUri() override From e39ce69c379e5ab0a47d1e399b5236540e8f883e Mon Sep 17 00:00:00 2001 From: Matthew Bauer Date: Tue, 9 Jun 2020 18:55:52 -0500 Subject: [PATCH 008/104] =?UTF-8?q?Throw=20error=20if=20/ipfs/=20is=20used?= =?UTF-8?q?=20but=20path=20doesn=E2=80=99t=20exist?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This is okay for /ipns/ because we can always mutate it, but it’s a hard error 
for ipfs. --- src/libstore/ipfs-binary-cache-store.cc | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/src/libstore/ipfs-binary-cache-store.cc b/src/libstore/ipfs-binary-cache-store.cc index debdbd5cdcb..afeea684ea7 100644 --- a/src/libstore/ipfs-binary-cache-store.cc +++ b/src/libstore/ipfs-binary-cache-store.cc @@ -48,6 +48,10 @@ class IPFSBinaryCacheStore : public BinaryCacheStore auto versionInfo = nlohmann::json::parse(*res.data); if (versionInfo.find("Version") == versionInfo.end()) throw Error("daemon for IPFS is not running properly"); + + // root should already exist + if (!fileExists("") && hasPrefix(ipfsPath, "/ipfs/")) + throw Error("path '%s' is not found", ipfsPath); } std::string getUri() override From f33f8e6dae5a470aeda681144ab3c3e83f9c4938 Mon Sep 17 00:00:00 2001 From: Matthew Bauer Date: Tue, 9 Jun 2020 18:57:02 -0500 Subject: [PATCH 009/104] Support uploading with /ipns/ in ipfs-binary-cache-store This is really slow, but is useful to test things. It does a few API calls to work: - /api/v0/add : to add the new file - /api/v0/object/patch/add-link : to insert the file into unixfs - /api/v0/name/publish : to publish the new ipfs object to ipns The last step is the slow part since we need to wait for it to clear. 
--- src/libstore/ipfs-binary-cache-store.cc | 40 ++++++++++++++++++++++++- 1 file changed, 39 insertions(+), 1 deletion(-) diff --git a/src/libstore/ipfs-binary-cache-store.cc b/src/libstore/ipfs-binary-cache-store.cc index afeea684ea7..1203577f27d 100644 --- a/src/libstore/ipfs-binary-cache-store.cc +++ b/src/libstore/ipfs-binary-cache-store.cc @@ -88,7 +88,45 @@ class IPFSBinaryCacheStore : public BinaryCacheStore void upsertFile(const std::string & path, const std::string & data, const std::string & mimeType) override { - throw UploadToIPFS("uploading to an IPFS binary cache is not supported"); + if (hasPrefix(ipfsPath, "/ipfs/")) + throw Error("%s is immutable, cannot modify", ipfsPath); + + // TODO: use callbacks + + auto req1 = FileTransferRequest(daemonUri + "/api/v0/add"); + req1.data = std::make_shared(data); + req1.post = true; + req1.tries = 1; + try { + auto res1 = getFileTransfer()->upload(req1); + auto json1 = nlohmann::json::parse(*res1.data); + + auto addedPath = "/ipfs/" + (std::string) json1["Hash"]; + + auto uri1 = daemonUri + "/api/v0/object/patch/add-link?create=true"; + uri1 += "&arg=" + getFileTransfer()->urlEncode(ipfsPath); + uri1 += "&arg=" + getFileTransfer()->urlEncode(path); + uri1 += "&arg=" + getFileTransfer()->urlEncode(addedPath); + + auto req2 = FileTransferRequest(uri1); + req2.post = true; + req2.tries = 1; + auto res2 = getFileTransfer()->download(req2); + auto json2 = nlohmann::json::parse(*res2.data); + + auto newRoot = json2["Hash"]; + + auto uri2 = daemonUri + "/api/v0/name/publish?arg=" + getFileTransfer()->urlEncode(newRoot); + uri2 += "&key=" + std::string(ipfsPath, 6); + + // WARNING: this can be really slow + auto req3 = FileTransferRequest(uri2); + req3.post = true; + req3.tries = 1; + getFileTransfer()->download(req3); + } catch (FileTransferError & e) { + throw UploadToIPFS("while uploading to IPFS binary cache at '%s': %s", cacheUri, e.msg()); + } } void getFile(const std::string & path, From 
37393d188d76e63d22f579d0288560563124981d Mon Sep 17 00:00:00 2001 From: Matthew Bauer Date: Tue, 9 Jun 2020 18:58:47 -0500 Subject: [PATCH 010/104] =?UTF-8?q?Create=20file=20if=20nix-cache-info=20d?= =?UTF-8?q?oesn=E2=80=99t=20exist?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- src/libstore/ipfs-binary-cache-store.cc | 3 +++ 1 file changed, 3 insertions(+) diff --git a/src/libstore/ipfs-binary-cache-store.cc b/src/libstore/ipfs-binary-cache-store.cc index 1203577f27d..238a8078e24 100644 --- a/src/libstore/ipfs-binary-cache-store.cc +++ b/src/libstore/ipfs-binary-cache-store.cc @@ -61,6 +61,9 @@ class IPFSBinaryCacheStore : public BinaryCacheStore void init() override { + std::string cacheInfoFile = "nix-cache-info"; + if (!fileExists(cacheInfoFile)) { + upsertFile(cacheInfoFile, "StoreDir: " + storeDir + "\n", "text/x-nix-cache-info"); } BinaryCacheStore::init(); } From a9343ed69e53ec4dfadfa25894842bb987c40ad8 Mon Sep 17 00:00:00 2001 From: Matthew Bauer Date: Tue, 9 Jun 2020 19:42:57 -0500 Subject: [PATCH 011/104] Use POST for IPFS getFile --- src/libstore/ipfs-binary-cache-store.cc | 1 + 1 file changed, 1 insertion(+) diff --git a/src/libstore/ipfs-binary-cache-store.cc b/src/libstore/ipfs-binary-cache-store.cc index 238a8078e24..46257c9e127 100644 --- a/src/libstore/ipfs-binary-cache-store.cc +++ b/src/libstore/ipfs-binary-cache-store.cc @@ -138,6 +138,7 @@ class IPFSBinaryCacheStore : public BinaryCacheStore auto uri = daemonUri + "/api/v0/cat?arg=" + getFileTransfer()->urlEncode(ipfsPath + "/" + path); FileTransferRequest request(uri); + request.post = true; request.tries = 1; auto callbackPtr = std::make_shared(std::move(callback)); From ab712b4b4d45d5017722764bcaeb20b28ff37300 Mon Sep 17 00:00:00 2001 From: Matthew Bauer Date: Tue, 9 Jun 2020 19:53:13 -0500 Subject: [PATCH 012/104] Ignore 500 errors in ipfs binary cache MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit 
unfortunately we don’t have a good way to handle this for now, avoid rethrowing and just accept 500 --- src/libstore/ipfs-binary-cache-store.cc | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/src/libstore/ipfs-binary-cache-store.cc b/src/libstore/ipfs-binary-cache-store.cc index 46257c9e127..74a58a5386d 100644 --- a/src/libstore/ipfs-binary-cache-store.cc +++ b/src/libstore/ipfs-binary-cache-store.cc @@ -148,9 +148,7 @@ class IPFSBinaryCacheStore : public BinaryCacheStore try { (*callbackPtr)(result.get().data); } catch (FileTransferError & e) { - if (e.error == FileTransfer::NotFound || e.error == FileTransfer::Forbidden) - return (*callbackPtr)(std::shared_ptr()); - callbackPtr->rethrow(); + return (*callbackPtr)(std::shared_ptr()); } catch (...) { callbackPtr->rethrow(); } From 3530ab10b02aa75c66bf5d619f9f1aacd539ac73 Mon Sep 17 00:00:00 2001 From: Matthew Bauer Date: Wed, 10 Jun 2020 10:46:02 -0500 Subject: [PATCH 013/104] Use offline to speed up IPFS publish MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit this doesn’t wait for the final response, but updates ipns internally. 
--- src/libstore/ipfs-binary-cache-store.cc | 9 ++++----- 1 file changed, 4 insertions(+), 5 deletions(-) diff --git a/src/libstore/ipfs-binary-cache-store.cc b/src/libstore/ipfs-binary-cache-store.cc index 74a58a5386d..df13013e8c6 100644 --- a/src/libstore/ipfs-binary-cache-store.cc +++ b/src/libstore/ipfs-binary-cache-store.cc @@ -72,7 +72,7 @@ class IPFSBinaryCacheStore : public BinaryCacheStore bool fileExists(const std::string & path) override { - auto uri = daemonUri + "/api/v0/object/stat?arg=" + getFileTransfer()->urlEncode(ipfsPath + "/" + path); + auto uri = daemonUri + "/api/v0/object/stat?offline=true&arg=" + getFileTransfer()->urlEncode(ipfsPath + "/" + path); FileTransferRequest request(uri); request.post = true; @@ -106,7 +106,7 @@ class IPFSBinaryCacheStore : public BinaryCacheStore auto addedPath = "/ipfs/" + (std::string) json1["Hash"]; - auto uri1 = daemonUri + "/api/v0/object/patch/add-link?create=true"; + auto uri1 = daemonUri + "/api/v0/object/patch/add-link?offline=true&create=true"; uri1 += "&arg=" + getFileTransfer()->urlEncode(ipfsPath); uri1 += "&arg=" + getFileTransfer()->urlEncode(path); uri1 += "&arg=" + getFileTransfer()->urlEncode(addedPath); @@ -119,10 +119,9 @@ class IPFSBinaryCacheStore : public BinaryCacheStore auto newRoot = json2["Hash"]; - auto uri2 = daemonUri + "/api/v0/name/publish?arg=" + getFileTransfer()->urlEncode(newRoot); + auto uri2 = daemonUri + "/api/v0/name/publish?offline=true&arg=" + getFileTransfer()->urlEncode(newRoot); uri2 += "&key=" + std::string(ipfsPath, 6); - // WARNING: this can be really slow auto req3 = FileTransferRequest(uri2); req3.post = true; req3.tries = 1; @@ -135,7 +134,7 @@ class IPFSBinaryCacheStore : public BinaryCacheStore void getFile(const std::string & path, Callback> callback) noexcept override { - auto uri = daemonUri + "/api/v0/cat?arg=" + getFileTransfer()->urlEncode(ipfsPath + "/" + path); + auto uri = daemonUri + "/api/v0/cat?offline=true&arg=" + 
getFileTransfer()->urlEncode(ipfsPath + "/" + path); FileTransferRequest request(uri); request.post = true; From c135e05a5eb7074bbb156df6d108dbe9ce9809e5 Mon Sep 17 00:00:00 2001 From: Matthew Bauer Date: Wed, 10 Jun 2020 11:39:31 -0500 Subject: [PATCH 014/104] Keep track of in progress upsert To avoid race conditions while doing /object/patch/add-link & /name/publish, we need to lock some state. This adds a bool for inProgressUpsert so that we can know when other threads are also trying to publish to IPNS. --- src/libstore/ipfs-binary-cache-store.cc | 15 +++++++++++++++ 1 file changed, 15 insertions(+) diff --git a/src/libstore/ipfs-binary-cache-store.cc b/src/libstore/ipfs-binary-cache-store.cc index df13013e8c6..270624a1eaf 100644 --- a/src/libstore/ipfs-binary-cache-store.cc +++ b/src/libstore/ipfs-binary-cache-store.cc @@ -19,6 +19,12 @@ class IPFSBinaryCacheStore : public BinaryCacheStore std::string ipfsPath; + struct State + { + bool inProgressUpsert = false; + }; + Sync _state; + public: IPFSBinaryCacheStore( @@ -106,6 +112,13 @@ class IPFSBinaryCacheStore : public BinaryCacheStore auto addedPath = "/ipfs/" + (std::string) json1["Hash"]; + auto state(_state.lock()); + + if (state->inProgressUpsert) + throw Error("a modification to the IPNS is already in progress"); + + state->inProgressUpsert = true; + auto uri1 = daemonUri + "/api/v0/object/patch/add-link?offline=true&create=true"; uri1 += "&arg=" + getFileTransfer()->urlEncode(ipfsPath); uri1 += "&arg=" + getFileTransfer()->urlEncode(path); @@ -126,6 +139,8 @@ class IPFSBinaryCacheStore : public BinaryCacheStore req3.post = true; req3.tries = 1; getFileTransfer()->download(req3); + + state->inProgressUpsert = false; } catch (FileTransferError & e) { throw UploadToIPFS("while uploading to IPFS binary cache at '%s': %s", cacheUri, e.msg()); } From 6be117fcadb4e9bb7326e02f0aa165225cf70272 Mon Sep 17 00:00:00 2001 From: Matthew Bauer Date: Wed, 10 Jun 2020 12:32:07 -0500 Subject: [PATCH 015/104] Use addrType 
enum in IPFSBinaryCacheStore --- src/libstore/ipfs-binary-cache-store.cc | 43 +++++++++++++++++-------- 1 file changed, 30 insertions(+), 13 deletions(-) diff --git a/src/libstore/ipfs-binary-cache-store.cc b/src/libstore/ipfs-binary-cache-store.cc index 270624a1eaf..40db6450e67 100644 --- a/src/libstore/ipfs-binary-cache-store.cc +++ b/src/libstore/ipfs-binary-cache-store.cc @@ -17,7 +17,20 @@ class IPFSBinaryCacheStore : public BinaryCacheStore std::string cacheUri; std::string daemonUri; - std::string ipfsPath; + std::string ipfsHash; + + enum struct AddrType { IPFS, IPNS } addrType; + + std::string getIpfsPath() { + switch (addrType) { + case AddrType::IPFS: { + return "/ipfs/" + ipfsHash; + } + case AddrType::IPNS: { + return "/ipns/" + ipfsHash; + } + } + } struct State { @@ -35,10 +48,14 @@ class IPFSBinaryCacheStore : public BinaryCacheStore if (cacheUri.back() == '/') cacheUri.pop_back(); - if (hasPrefix(cacheUri, "ipfs://")) - ipfsPath = "/ipfs/" + std::string(cacheUri, 7); - else if (hasPrefix(cacheUri, "ipns://")) - ipfsPath = "/ipns/" + std::string(cacheUri, 7); + if (hasPrefix(cacheUri, "ipfs://")) { + ipfsHash = std::string(cacheUri, 7); + addrType = AddrType::IPFS; + } + else if (hasPrefix(cacheUri, "ipns://")) { + ipfsHash = std::string(cacheUri, 7); + addrType = AddrType::IPNS; + } else throw Error("unknown IPFS URI '%s'", cacheUri); @@ -56,8 +73,8 @@ class IPFSBinaryCacheStore : public BinaryCacheStore throw Error("daemon for IPFS is not running properly"); // root should already exist - if (!fileExists("") && hasPrefix(ipfsPath, "/ipfs/")) - throw Error("path '%s' is not found", ipfsPath); + if (!fileExists("") && addrType == AddrType::IPFS) + throw Error("path '%s' is not found", getIpfsPath()); } std::string getUri() override @@ -78,7 +95,7 @@ class IPFSBinaryCacheStore : public BinaryCacheStore bool fileExists(const std::string & path) override { - auto uri = daemonUri + "/api/v0/object/stat?offline=true&arg=" + 
getFileTransfer()->urlEncode(ipfsPath + "/" + path); + auto uri = daemonUri + "/api/v0/object/stat?offline=true&arg=" + getFileTransfer()->urlEncode(getIpfsPath()); FileTransferRequest request(uri); request.post = true; @@ -97,8 +114,8 @@ class IPFSBinaryCacheStore : public BinaryCacheStore void upsertFile(const std::string & path, const std::string & data, const std::string & mimeType) override { - if (hasPrefix(ipfsPath, "/ipfs/")) - throw Error("%s is immutable, cannot modify", ipfsPath); + if (addrType == AddrType::IPFS) + throw Error("%s is immutable, cannot modify", getIpfsPath()); // TODO: use callbacks @@ -120,7 +137,7 @@ class IPFSBinaryCacheStore : public BinaryCacheStore state->inProgressUpsert = true; auto uri1 = daemonUri + "/api/v0/object/patch/add-link?offline=true&create=true"; - uri1 += "&arg=" + getFileTransfer()->urlEncode(ipfsPath); + uri1 += "&arg=" + getFileTransfer()->urlEncode(getIpfsPath()); uri1 += "&arg=" + getFileTransfer()->urlEncode(path); uri1 += "&arg=" + getFileTransfer()->urlEncode(addedPath); @@ -133,7 +150,7 @@ class IPFSBinaryCacheStore : public BinaryCacheStore auto newRoot = json2["Hash"]; auto uri2 = daemonUri + "/api/v0/name/publish?offline=true&arg=" + getFileTransfer()->urlEncode(newRoot); - uri2 += "&key=" + std::string(ipfsPath, 6); + uri2 += "&key=" + std::string(getIpfsPath(), 6); auto req3 = FileTransferRequest(uri2); req3.post = true; @@ -149,7 +166,7 @@ class IPFSBinaryCacheStore : public BinaryCacheStore void getFile(const std::string & path, Callback> callback) noexcept override { - auto uri = daemonUri + "/api/v0/cat?offline=true&arg=" + getFileTransfer()->urlEncode(ipfsPath + "/" + path); + auto uri = daemonUri + "/api/v0/cat?offline=true&arg=" + getFileTransfer()->urlEncode(getIpfsPath() + "/" + path); FileTransferRequest request(uri); request.post = true; From b62782dadf42ae69e4bd92c695e00773715304c6 Mon Sep 17 00:00:00 2001 From: Matthew Bauer Date: Wed, 10 Jun 2020 14:48:10 -0500 Subject: [PATCH 016/104] 
Cleanup --- src/libstore/ipfs-binary-cache-store.cc | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/src/libstore/ipfs-binary-cache-store.cc b/src/libstore/ipfs-binary-cache-store.cc index 40db6450e67..7f82768aa1f 100644 --- a/src/libstore/ipfs-binary-cache-store.cc +++ b/src/libstore/ipfs-binary-cache-store.cc @@ -85,9 +85,8 @@ class IPFSBinaryCacheStore : public BinaryCacheStore void init() override { std::string cacheInfoFile = "nix-cache-info"; - if (!fileExists(cacheInfoFile)) { + if (!fileExists(cacheInfoFile)) upsertFile(cacheInfoFile, "StoreDir: " + storeDir + "\n", "text/x-nix-cache-info"); - } BinaryCacheStore::init(); } From bc012e51917422f2b23eb34c5735d0d403f18832 Mon Sep 17 00:00:00 2001 From: Matthew Bauer Date: Wed, 10 Jun 2020 15:38:20 -0500 Subject: [PATCH 017/104] Add sync() to store API this is called at the end of a sequence of addToStores calls to tell the store that it can now commit the data. --- src/libstore/store-api.cc | 2 ++ src/libstore/store-api.hh | 4 ++++ src/nix-store/nix-store.cc | 5 +++++ src/nix/add-to-store.cc | 1 + src/nix/make-content-addressable.cc | 2 ++ 5 files changed, 14 insertions(+) diff --git a/src/libstore/store-api.cc b/src/libstore/store-api.cc index 095363d0c9b..5f86f4dba14 100644 --- a/src/libstore/store-api.cc +++ b/src/libstore/store-api.cc @@ -680,6 +680,8 @@ void copyPaths(ref srcStore, ref dstStore, const StorePathSet & st nrDone++; showProgress(); }); + + dstStore->sync(); } diff --git a/src/libstore/store-api.hh b/src/libstore/store-api.hh index b1e25fc7d66..7c6ee3595b4 100644 --- a/src/libstore/store-api.hh +++ b/src/libstore/store-api.hh @@ -680,6 +680,10 @@ public: virtual void createUser(const std::string & userName, uid_t userId) { } + /* Sync writes to commits written data, usually a no-op. 
*/ + virtual void sync() + { }; + protected: Stats stats; diff --git a/src/nix-store/nix-store.cc b/src/nix-store/nix-store.cc index 3a3060ad82d..bdde54dcff5 100644 --- a/src/nix-store/nix-store.cc +++ b/src/nix-store/nix-store.cc @@ -167,6 +167,8 @@ static void opAdd(Strings opFlags, Strings opArgs) for (auto & i : opArgs) cout << fmt("%s\n", store->printStorePath(store->addToStore(std::string(baseNameOf(i)), i))); + + store->sync(); } @@ -188,6 +190,8 @@ static void opAddFixed(Strings opFlags, Strings opArgs) for (auto & i : opArgs) cout << fmt("%s\n", store->printStorePath(store->addToStore(std::string(baseNameOf(i)), i, recursive, hashAlgo))); + + store->sync(); } @@ -952,6 +956,7 @@ static void opServe(Strings opFlags, Strings opArgs) SizedSource sizedSource(in, info.narSize); store->addToStore(info, sizedSource, NoRepair, NoCheckSigs); + store->sync(); // consume all the data that has been sent before continuing. sizedSource.drainAll(); diff --git a/src/nix/add-to-store.cc b/src/nix/add-to-store.cc index f43f774c1c8..8b7a5997a11 100644 --- a/src/nix/add-to-store.cc +++ b/src/nix/add-to-store.cc @@ -53,6 +53,7 @@ struct CmdAddToStore : MixDryRun, StoreCommand if (!dryRun) { auto source = StringSource { *sink.s }; store->addToStore(info, source); + store->sync(); } logger->stdout("%s", store->printStorePath(info.path)); diff --git a/src/nix/make-content-addressable.cc b/src/nix/make-content-addressable.cc index 3e7ff544d6d..96bc119b32f 100644 --- a/src/nix/make-content-addressable.cc +++ b/src/nix/make-content-addressable.cc @@ -100,6 +100,8 @@ struct CmdMakeContentAddressable : StorePathsCommand, MixJSON remappings.insert_or_assign(std::move(path), std::move(info.path)); } + + store->sync(); } }; From 168cb9ee066c11310fc48f3abbdcc8ec8f33b53a Mon Sep 17 00:00:00 2001 From: Matthew Bauer Date: Wed, 10 Jun 2020 15:39:10 -0500 Subject: [PATCH 018/104] Fixup fileExists method in IPFSBinaryCacheStore --- src/libstore/ipfs-binary-cache-store.cc | 2 +- 1 file changed, 
1 insertion(+), 1 deletion(-) diff --git a/src/libstore/ipfs-binary-cache-store.cc b/src/libstore/ipfs-binary-cache-store.cc index 7f82768aa1f..9c63cfab27f 100644 --- a/src/libstore/ipfs-binary-cache-store.cc +++ b/src/libstore/ipfs-binary-cache-store.cc @@ -94,7 +94,7 @@ class IPFSBinaryCacheStore : public BinaryCacheStore bool fileExists(const std::string & path) override { - auto uri = daemonUri + "/api/v0/object/stat?offline=true&arg=" + getFileTransfer()->urlEncode(getIpfsPath()); + auto uri = daemonUri + "/api/v0/object/stat?arg=" + getFileTransfer()->urlEncode(getIpfsPath() + "/" + path); FileTransferRequest request(uri); request.post = true; From 4b24d04bd9ee9f972adc26dcfae1968e3dc3bed8 Mon Sep 17 00:00:00 2001 From: John Ericson Date: Mon, 1 Jun 2020 16:03:48 -0400 Subject: [PATCH 019/104] Temporarily reduce CI platforms for sake of Obsidian's own CI Hopefully we can instead pass arguments like Hydra does, soon. (cherry picked from commit 4ced63cd3b5f5be34465a0a46b44a0c3928e1c8a) --- release.nix | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/release.nix b/release.nix index 2a320e1c3bc..23147d67654 100644 --- a/release.nix +++ b/release.nix @@ -1,7 +1,8 @@ { nix ? builtins.fetchGit ./. , nixpkgs ? builtins.fetchTarball https://github.com/NixOS/nixpkgs/archive/nixos-20.03-small.tar.gz , officialRelease ? false -, systems ? [ "x86_64-linux" "i686-linux" "x86_64-darwin" "aarch64-linux" ] +, systems ? [ "x86_64-linux" "x86_64-darwin" ] + # ^ Temporary shorten list for sake of Obsidian's CI. }: let From 816ff043802793deda1b6813ba72c017693c8b89 Mon Sep 17 00:00:00 2001 From: Matthew Bauer Date: Wed, 10 Jun 2020 15:40:24 -0500 Subject: [PATCH 020/104] Store IPFS path in state This uses the Sync primitive to store an ipfsPath that is written to as we add stuff to IPNS. IPNS is still optional. When finished, we do a publish on the ipnsPath.
--- src/libstore/ipfs-binary-cache-store.cc | 124 ++++++++++++------------ 1 file changed, 63 insertions(+), 61 deletions(-) diff --git a/src/libstore/ipfs-binary-cache-store.cc b/src/libstore/ipfs-binary-cache-store.cc index 9c63cfab27f..45a7ae7d2b5 100644 --- a/src/libstore/ipfs-binary-cache-store.cc +++ b/src/libstore/ipfs-binary-cache-store.cc @@ -17,24 +17,15 @@ class IPFSBinaryCacheStore : public BinaryCacheStore std::string cacheUri; std::string daemonUri; - std::string ipfsHash; - - enum struct AddrType { IPFS, IPNS } addrType; - std::string getIpfsPath() { - switch (addrType) { - case AddrType::IPFS: { - return "/ipfs/" + ipfsHash; - } - case AddrType::IPNS: { - return "/ipns/" + ipfsHash; - } - } + auto state(_state.lock()); + return state->ipfsPath; } + std::optional ipnsPath; struct State { - bool inProgressUpsert = false; + std::string ipfsPath; }; Sync _state; @@ -45,19 +36,17 @@ class IPFSBinaryCacheStore : public BinaryCacheStore : BinaryCacheStore(params) , cacheUri(_cacheUri) { + auto state(_state.lock()); + if (cacheUri.back() == '/') cacheUri.pop_back(); - if (hasPrefix(cacheUri, "ipfs://")) { - ipfsHash = std::string(cacheUri, 7); - addrType = AddrType::IPFS; - } - else if (hasPrefix(cacheUri, "ipns://")) { - ipfsHash = std::string(cacheUri, 7); - addrType = AddrType::IPNS; - } + if (hasPrefix(cacheUri, "ipfs://")) + state->ipfsPath = "/ipfs/" + std::string(cacheUri, 7); + else if (hasPrefix(cacheUri, "ipns://")) + ipnsPath = "/ipns/" + std::string(cacheUri, 7); else - throw Error("unknown IPFS URI '%s'", cacheUri); + throw Error("unknown IPNS URI '%s'", cacheUri); std::string ipfsAPIHost(get(params, "host").value_or("127.0.0.1")); std::string ipfsAPIPort(get(params, "port").value_or("5001")); @@ -72,9 +61,19 @@ class IPFSBinaryCacheStore : public BinaryCacheStore if (versionInfo.find("Version") == versionInfo.end()) throw Error("daemon for IPFS is not running properly"); - // root should already exist - if (!fileExists("") && addrType == 
AddrType::IPFS) - throw Error("path '%s' is not found", getIpfsPath()); + // Resolve the IPNS name to an IPFS object + if (ipnsPath) { + debug("Resolving IPFS object of '%s', this could take a while.", *ipnsPath); + auto uri = daemonUri + "/api/v0/name/resolve?offline=true&arg=" + getFileTransfer()->urlEncode(*ipnsPath); + FileTransferRequest request(uri); + request.post = true; + request.tries = 1; + auto res = getFileTransfer()->download(request); + auto json = nlohmann::json::parse(*res.data); + if (json.find("Path") == json.end()) + throw Error("daemon for IPFS is not running properly"); + state->ipfsPath = json["Path"]; + } } std::string getUri() override @@ -111,52 +110,55 @@ class IPFSBinaryCacheStore : public BinaryCacheStore } } - void upsertFile(const std::string & path, const std::string & data, const std::string & mimeType) override + // IPNS publish can be slow, we try to do it rarely. + void sync() override { - if (addrType == AddrType::IPFS) - throw Error("%s is immutable, cannot modify", getIpfsPath()); + if (!ipnsPath) + return; - // TODO: use callbacks - - auto req1 = FileTransferRequest(daemonUri + "/api/v0/add"); - req1.data = std::make_shared(data); - req1.post = true; - req1.tries = 1; - try { - auto res1 = getFileTransfer()->upload(req1); - auto json1 = nlohmann::json::parse(*res1.data); + auto state(_state.lock()); - auto addedPath = "/ipfs/" + (std::string) json1["Hash"]; + debug("Publishing '%s' to '%s', this could take a while.", state->ipfsPath, *ipnsPath); - auto state(_state.lock()); + auto uri = daemonUri + "/api/v0/name/publish?offline=true&arg=" + getFileTransfer()->urlEncode(state->ipfsPath); + uri += "&key=" + std::string(*ipnsPath, 6); - if (state->inProgressUpsert) - throw Error("a modification to the IPNS is already in progress"); - - state->inProgressUpsert = true; + auto req = FileTransferRequest(uri); + req.post = true; + req.tries = 1; + getFileTransfer()->download(req); + } - auto uri1 = daemonUri + 
"/api/v0/object/patch/add-link?offline=true&create=true"; - uri1 += "&arg=" + getFileTransfer()->urlEncode(getIpfsPath()); - uri1 += "&arg=" + getFileTransfer()->urlEncode(path); - uri1 += "&arg=" + getFileTransfer()->urlEncode(addedPath); + void addLink(std::string name, std::string ipfsObject) + { + auto state(_state.lock()); - auto req2 = FileTransferRequest(uri1); - req2.post = true; - req2.tries = 1; - auto res2 = getFileTransfer()->download(req2); - auto json2 = nlohmann::json::parse(*res2.data); + auto uri = daemonUri + "/api/v0/object/patch/add-link?create=true"; + uri += "&arg=" + getFileTransfer()->urlEncode(state->ipfsPath); + uri += "&arg=" + getFileTransfer()->urlEncode(name); + uri += "&arg=" + getFileTransfer()->urlEncode(ipfsObject); - auto newRoot = json2["Hash"]; + auto req = FileTransferRequest(uri); + req.post = true; + req.tries = 1; + auto res = getFileTransfer()->download(req); + auto json = nlohmann::json::parse(*res.data); - auto uri2 = daemonUri + "/api/v0/name/publish?offline=true&arg=" + getFileTransfer()->urlEncode(newRoot); - uri2 += "&key=" + std::string(getIpfsPath(), 6); + state->ipfsPath = "/ipfs/" + (std::string) json["Hash"]; + } - auto req3 = FileTransferRequest(uri2); - req3.post = true; - req3.tries = 1; - getFileTransfer()->download(req3); + void upsertFile(const std::string & path, const std::string & data, const std::string & mimeType) override + { + // TODO: use callbacks - state->inProgressUpsert = false; + auto req = FileTransferRequest(daemonUri + "/api/v0/add"); + req.data = std::make_shared(data); + req.post = true; + req.tries = 1; + try { + auto res = getFileTransfer()->upload(req); + auto json = nlohmann::json::parse(*res.data); + addLink(path, "/ipfs/" + (std::string) json["Hash"]); } catch (FileTransferError & e) { throw UploadToIPFS("while uploading to IPFS binary cache at '%s': %s", cacheUri, e.msg()); } @@ -165,7 +167,7 @@ class IPFSBinaryCacheStore : public BinaryCacheStore void getFile(const std::string & 
path, Callback> callback) noexcept override { - auto uri = daemonUri + "/api/v0/cat?offline=true&arg=" + getFileTransfer()->urlEncode(getIpfsPath() + "/" + path); + auto uri = daemonUri + "/api/v0/cat?arg=" + getFileTransfer()->urlEncode(getIpfsPath() + "/" + path); FileTransferRequest request(uri); request.post = true; From 03a90fc9485b8152a701f84048876a3d299d3751 Mon Sep 17 00:00:00 2001 From: John Ericson Date: Thu, 11 Jun 2020 00:16:56 -0400 Subject: [PATCH 021/104] ipnsPath -> optIpnsPath --- src/libstore/ipfs-binary-cache-store.cc | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/src/libstore/ipfs-binary-cache-store.cc b/src/libstore/ipfs-binary-cache-store.cc index 45a7ae7d2b5..783966471f2 100644 --- a/src/libstore/ipfs-binary-cache-store.cc +++ b/src/libstore/ipfs-binary-cache-store.cc @@ -21,7 +21,7 @@ class IPFSBinaryCacheStore : public BinaryCacheStore auto state(_state.lock()); return state->ipfsPath; } - std::optional ipnsPath; + std::optional optIpnsPath; struct State { @@ -44,7 +44,7 @@ class IPFSBinaryCacheStore : public BinaryCacheStore if (hasPrefix(cacheUri, "ipfs://")) state->ipfsPath = "/ipfs/" + std::string(cacheUri, 7); else if (hasPrefix(cacheUri, "ipns://")) - ipnsPath = "/ipns/" + std::string(cacheUri, 7); + optIpnsPath = "/ipns/" + std::string(cacheUri, 7); else throw Error("unknown IPNS URI '%s'", cacheUri); @@ -62,9 +62,9 @@ class IPFSBinaryCacheStore : public BinaryCacheStore throw Error("daemon for IPFS is not running properly"); // Resolve the IPNS name to an IPFS object - if (ipnsPath) { - debug("Resolving IPFS object of '%s', this could take a while.", *ipnsPath); - auto uri = daemonUri + "/api/v0/name/resolve?offline=true&arg=" + getFileTransfer()->urlEncode(*ipnsPath); + if (optIpnsPath) { + debug("Resolving IPFS object of '%s', this could take a while.", *optIpnsPath); + auto uri = daemonUri + "/api/v0/name/resolve?offline=true&arg=" + getFileTransfer()->urlEncode(*optIpnsPath); 
FileTransferRequest request(uri); request.post = true; request.tries = 1; @@ -113,15 +113,15 @@ class IPFSBinaryCacheStore : public BinaryCacheStore // IPNS publish can be slow, we try to do it rarely. void sync() override { - if (!ipnsPath) + if (!optIpnsPath) return; auto state(_state.lock()); - debug("Publishing '%s' to '%s', this could take a while.", state->ipfsPath, *ipnsPath); + debug("Publishing '%s' to '%s', this could take a while.", state->ipfsPath, *optIpnsPath); auto uri = daemonUri + "/api/v0/name/publish?offline=true&arg=" + getFileTransfer()->urlEncode(state->ipfsPath); - uri += "&key=" + std::string(*ipnsPath, 6); + uri += "&key=" + std::string(*optIpnsPath, 6); auto req = FileTransferRequest(uri); req.post = true; From 25c5ced81d6af521b035349c52035aa19d9e4147 Mon Sep 17 00:00:00 2001 From: John Ericson Date: Thu, 11 Jun 2020 00:29:34 -0400 Subject: [PATCH 022/104] Avoid too many `optIpnsPath` It isn't very safe, so best to do it once right after the condition. If only we had real pattern matching... 
--- src/libstore/ipfs-binary-cache-store.cc | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) diff --git a/src/libstore/ipfs-binary-cache-store.cc b/src/libstore/ipfs-binary-cache-store.cc index 783966471f2..8de3b035934 100644 --- a/src/libstore/ipfs-binary-cache-store.cc +++ b/src/libstore/ipfs-binary-cache-store.cc @@ -63,8 +63,9 @@ class IPFSBinaryCacheStore : public BinaryCacheStore // Resolve the IPNS name to an IPFS object if (optIpnsPath) { - debug("Resolving IPFS object of '%s', this could take a while.", *optIpnsPath); - auto uri = daemonUri + "/api/v0/name/resolve?offline=true&arg=" + getFileTransfer()->urlEncode(*optIpnsPath); + auto ipnsPath = *optIpnsPath; + debug("Resolving IPFS object of '%s', this could take a while.", ipnsPath); + auto uri = daemonUri + "/api/v0/name/resolve?offline=true&arg=" + getFileTransfer()->urlEncode(ipnsPath); FileTransferRequest request(uri); request.post = true; request.tries = 1; @@ -115,13 +116,14 @@ { if (!optIpnsPath) return; + auto ipnsPath = *optIpnsPath; auto state(_state.lock()); - debug("Publishing '%s' to '%s', this could take a while.", state->ipfsPath, *optIpnsPath); + debug("Publishing '%s' to '%s', this could take a while.", state->ipfsPath, ipnsPath); auto uri = daemonUri + "/api/v0/name/publish?offline=true&arg=" + getFileTransfer()->urlEncode(state->ipfsPath); - uri += "&key=" + std::string(*optIpnsPath, 6); + uri += "&key=" + std::string(ipnsPath, 6); auto req = FileTransferRequest(uri); req.post = true; From 1f88199285cdd99a8c38fb2ec940ece845c70b86 Mon Sep 17 00:00:00 2001 From: Carlo Nucera Date: Thu, 11 Jun 2020 10:40:54 -0400 Subject: [PATCH 023/104] WIP test suite automation --- tests/ipfs.sh | 145 +++++++++++++++++++++++++++++++++++++++++++++++++ tests/local.mk | 75 +++++++++++++------------ 2 files changed, 185 insertions(+), 35 deletions(-) create mode 100644 tests/ipfs.sh diff --git a/tests/ipfs.sh b/tests/ipfs.sh new file
mode 100644 index 00000000000..3df3543ed36 --- /dev/null +++ b/tests/ipfs.sh @@ -0,0 +1,145 @@ +source common.sh + +################################################################################ +## Check that the ipfs daemon is enabled in your environment +################################################################################ + +# To see if ipfs is connected to the network, we check if we can see some peers +# other than ourselves. +NPEERS=$(ipfs swarm peers | wc -l) +echo $NPEERS +if (( $NPEERS < 2 )); then + echo "The ipfs daemon doesn't seem to be enabled (can't find peers)" + exit 1 +fi + +################################################################################ +## Create the folders for the source and destination stores +################################################################################ + +IPFS_TESTS=$TEST_ROOT/ipfs_tests + +# Here we define some store locations, one for the initial store we upload, and +# the other three for the destination stores to which we'll copy (one for each +# method) +IPFS_SRC_STORE=$IPFS_TESTS/ipfs_source_store + +IPFS_DST_HTTP_STORE=$IPFS_TESTS/ipfs_dest_http_store +IPFS_DST_HTTP_LOCAL_STORE=$IPFS_TESTS/ipfs_dest_http_local_store +IPFS_DST_IPFS_STORE=$IPFS_TESTS/ipfs_dest_ipfs_store +IPFS_DST_IPNS_STORE=$IPFS_TESTS/ipfs_dest_ipns_store + +################################################################################ +## Generate the keys to sign the store +################################################################################ + +SIGNING_KEY_NAME='nixcache.for.ipfs-1' +SIGNING_KEY_PRI_FILE='nix-cache-key.sec' +SIGNING_KEY_PUB_FILE='nix-cache-key.pub' + +nix-store --generate-binary-cache-key $SIGNING_KEY_NAME $SIGNING_KEY_PRI_FILE $SIGNING_KEY_PUB_FILE + +################################################################################ +## Create, sign and upload the source store +################################################################################ + +mkdir -p $IPFS_SRC_STORE 
+BUILD_COMMAND='nix-build "" -A hello.src' + +nix copy --to file://$IPFS_SRC_STORE $($BUILD_COMMAND) + +nix sign-paths --store file://$IPFS_SRC_STORE \ + -k ~/nix-cache-key.sec \ + $($BUILD_COMMAND) -r + +IPFS_HASH=$(ipfs add -r $IPFS_SRC_STORE 2>/dev/null | tail -n 1 | awk '{print $2}') + + +# ################################################################################ +# ## Create the http store and download the derivation there +# ################################################################################ + +# mkdir IPFS_DST_HTTP_STORE + +# IPFS_HTTP_PREFIX='https://gateway.ipfs.io/ipfs' + + +# DOWNLOAD_LOCATION=$(NIX_REMOTE=local $BUILD_COMMAND \ +# --option substituters $IPFS_HTTP_PREFIX/$IPFS_HASH \ +# --store $IPFS_DST_HTTP_STORE \ +# --option trusted-public-keys $(cat $SIGNING_KEY_PUB_FILE) \ +# | tail -2 | head -1 | awk '{print $5}') + +# ### TODO +# ### Check that the download location coincides + +# ################################################################################ +# ## Create the local http store and download the derivation there +# ################################################################################ + +# mkdir IPFS_DST_HTTP_LOCAL_STORE + +# IPFS_HTTP_LOCAL_PREFIX='http://localhost:8080/ipfs' + +# DOWNLOAD_LOCATION=$(NIX_REMOTE=local $BUILD_COMMAND \ +# --option substituters $IPFS_HTTP_LOCAL_PREFIX/$IPFS_HASH \ +# --store $IPFS_DST_HTTP_LOCAL_STORE \ +# --option trusted-public-keys $(cat $SIGNING_KEY_PUB_FILE) \ +# | tail -2 | head -1 | awk '{print $5}') + +# ### TODO +# ### Check that the download location coincides + +# ################################################################################ +# ## Create the ipfs store and download the derivation there +# ################################################################################ + +# mkdir IPFS_DST_IPFS_STORE + +# IPFS_IPFS_PREFIX='/ipfs' + +# DOWNLOAD_LOCATION=$(NIX_REMOTE=local $BUILD_COMMAND \ +# --option substituters $IPFS_IPFS_PREFIX/$IPFS_HASH \ 
+# --store $IPFS_DST_IPFS_STORE \ +# --option trusted-public-keys $(cat $SIGNING_KEY_PUB_FILE) \ +# | tail -2 | head -1 | awk '{print $5}') + +# ### TODO +# ### Check that the download location coincides + +# ################################################################################ +# ## Create the ipns store and download the derivation there +# ################################################################################ + +# # First I have to publish: +# IPNS_ID=$(ipfs name publish $IPFS_HASH | awk '{print substr($3,1,length($3)-1)}') + +# mkdir IPFS_DST_IPNS_STORE +# IPFS_IPNS_PREFIX='/ipns' + +# DOWNLOAD_LOCATION=$(NIX_REMOTE=local $BUILD_COMMAND \ +# --option substituters $IPFS_IPNS_PREFIX/$IPNS_ID \ +# --store $IPFS_DST_IPFS_STORE \ +# --option trusted-public-keys $(cat $SIGNING_KEY_PUB_FILE) \ +# | tail -2 | head -1 | awk '{print $5}') + +# ### TODO +# ### Check that the download location coincides + +################################################################################ +## Cleanup +################################################################################ + +# Delete the keys used to sign the store +rm $SIGNING_KEY_PRI_FILE $SIGNING_KEY_PUB_FILE + +# Remove all the stores +rm -rf $IPFS_SRC_STORE \ + $IPFS_DST_HTTP_STORE $IPFS_DST_HTTP_LOCAL_STORE \ + $IPFS_DST_IPFS_STORE $IPFS_DST_IPNS_STORE + +# Remove the result of the build +rm result + + +exit 1 diff --git a/tests/local.mk b/tests/local.mk index 536661af88e..e5062b1ba3c 100644 --- a/tests/local.mk +++ b/tests/local.mk @@ -1,38 +1,43 @@ -nix_tests = \ - init.sh hash.sh lang.sh add.sh simple.sh dependencies.sh \ - config.sh \ - gc.sh \ - gc-concurrent.sh \ - gc-auto.sh \ - referrers.sh user-envs.sh logging.sh nix-build.sh misc.sh fixed.sh \ - gc-runtime.sh check-refs.sh filter-source.sh \ - remote-store.sh export.sh export-graph.sh \ - timeout.sh secure-drv-outputs.sh nix-channel.sh \ - multiple-outputs.sh import-derivation.sh fetchurl.sh optimise-store.sh \ - binary-cache.sh 
nix-profile.sh repair.sh dump-db.sh case-hack.sh \ - check-reqs.sh pass-as-file.sh tarball.sh restricted.sh \ - placeholders.sh nix-shell.sh \ - linux-sandbox.sh \ - build-dry.sh \ - build-remote.sh \ - nar-access.sh \ - structured-attrs.sh \ - fetchGit.sh \ - fetchGitRefs.sh \ - fetchGitSubmodules.sh \ - fetchMercurial.sh \ - signing.sh \ - shell.sh \ - brotli.sh \ - pure-eval.sh \ - check.sh \ - plugins.sh \ - search.sh \ - nix-copy-ssh.sh \ - post-hook.sh \ - function-trace.sh \ - recursive.sh - # parallel.sh +nix_tests = ipfs.sh \ +# add.sh + +# nix_tests = add.sh + +# nix_tests = \ +# init.sh hash.sh lang.sh add.sh simple.sh dependencies.sh \ +# config.sh \ +# gc.sh \ +# gc-concurrent.sh \ +# gc-auto.sh \ +# referrers.sh user-envs.sh logging.sh nix-build.sh misc.sh fixed.sh \ +# gc-runtime.sh check-refs.sh filter-source.sh \ +# remote-store.sh export.sh export-graph.sh \ +# timeout.sh secure-drv-outputs.sh nix-channel.sh \ +# multiple-outputs.sh import-derivation.sh fetchurl.sh optimise-store.sh \ +# binary-cache.sh nix-profile.sh repair.sh dump-db.sh case-hack.sh \ +# check-reqs.sh pass-as-file.sh tarball.sh restricted.sh \ +# placeholders.sh nix-shell.sh \ +# linux-sandbox.sh \ +# build-dry.sh \ +# build-remote.sh \ +# nar-access.sh \ +# structured-attrs.sh \ +# fetchGit.sh \ +# fetchGitRefs.sh \ +# fetchGitSubmodules.sh \ +# fetchMercurial.sh \ +# signing.sh \ +# shell.sh \ +# brotli.sh \ +# pure-eval.sh \ +# check.sh \ +# plugins.sh \ +# search.sh \ +# nix-copy-ssh.sh \ +# post-hook.sh \ +# function-trace.sh \ +# recursive.sh +# # parallel.sh install-tests += $(foreach x, $(nix_tests), tests/$(x)) From 33c61498e973a5fca8a2f8b632f245d69b01f5da Mon Sep 17 00:00:00 2001 From: Carlo Nucera Date: Thu, 11 Jun 2020 14:45:27 -0400 Subject: [PATCH 024/104] With a derivation that doesn't use nixpkgs --- tests/ipfs.sh | 6 ++++-- tests/simple-derivation-builder.sh | 3 +++ tests/simple-derivation.nix | 12 ++++++++++++ 3 files changed, 19 insertions(+), 2 deletions(-) 
create mode 100755 tests/simple-derivation-builder.sh create mode 100644 tests/simple-derivation.nix diff --git a/tests/ipfs.sh b/tests/ipfs.sh index 3df3543ed36..a982c92e261 100644 --- a/tests/ipfs.sh +++ b/tests/ipfs.sh @@ -44,9 +44,11 @@ nix-store --generate-binary-cache-key $SIGNING_KEY_NAME $SIGNING_KEY_PRI_FILE $S ################################################################################ mkdir -p $IPFS_SRC_STORE -BUILD_COMMAND='nix-build "" -A hello.src' +# BUILD_COMMAND="nix-build '' -A hello.src" -nix copy --to file://$IPFS_SRC_STORE $($BUILD_COMMAND) +nix copy --to file://$IPFS_SRC_STORE \ + --experimental-features nix-command \ + $(nix-build ./simple-derivation.nix) nix sign-paths --store file://$IPFS_SRC_STORE \ -k ~/nix-cache-key.sec \ diff --git a/tests/simple-derivation-builder.sh b/tests/simple-derivation-builder.sh new file mode 100755 index 00000000000..a47a2d79902 --- /dev/null +++ b/tests/simple-derivation-builder.sh @@ -0,0 +1,3 @@ +#!/usr/bin/env bash + +ln -s $input $out diff --git a/tests/simple-derivation.nix b/tests/simple-derivation.nix new file mode 100644 index 00000000000..883e715cf7d --- /dev/null +++ b/tests/simple-derivation.nix @@ -0,0 +1,12 @@ +# This derivation doesn't depend from nixpkgs, so that we can build it during +# our test suite. 
+ +with import ./config.nix; + +mkDerivation { + name = "simple-test-derivation"; + builder = builtins.toFile "builder" "ln -s $input $out"; + # builder = "/home/carlo/code/obsidian/nix/tests/simple-derivation-builder.sh"; + input = + builtins.fetchTarball("http://alpha.gnu.org/gnu/hello/hello-2.6.90.tar.gz"); +} From 25f71d39d9c29f6cb65b7a0e1ed10971b544107f Mon Sep 17 00:00:00 2001 From: John Ericson Date: Thu, 11 Jun 2020 15:30:39 -0400 Subject: [PATCH 025/104] need to run init.sh first, sigh --- tests/local.mk | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/tests/local.mk b/tests/local.mk index e5062b1ba3c..a8c45cc73e3 100644 --- a/tests/local.mk +++ b/tests/local.mk @@ -1,5 +1,4 @@ -nix_tests = ipfs.sh \ -# add.sh +nix_tests = init.sh check.sh ipfs.sh # nix_tests = add.sh From 8a3c07326bb5e9ea79c4088729c23ba6ec4e7a21 Mon Sep 17 00:00:00 2001 From: Carlo Nucera Date: Thu, 11 Jun 2020 17:18:08 -0400 Subject: [PATCH 026/104] Complete the tests for ipfs --- tests/ipfs.sh | 127 +++++++++++------------------ tests/local.mk | 75 ++++++++--------- tests/simple-derivation-builder.sh | 3 - tests/simple-derivation.nix | 12 --- 4 files changed, 84 insertions(+), 133 deletions(-) delete mode 100755 tests/simple-derivation-builder.sh delete mode 100644 tests/simple-derivation.nix diff --git a/tests/ipfs.sh b/tests/ipfs.sh index a982c92e261..239e1f42f0b 100644 --- a/tests/ipfs.sh +++ b/tests/ipfs.sh @@ -1,5 +1,9 @@ source common.sh +# This are for ./fixed.nix +export IMPURE_VAR1=foo +export IMPURE_VAR2=bar + ################################################################################ ## Check that the ipfs daemon is enabled in your environment ################################################################################ @@ -18,6 +22,7 @@ fi ################################################################################ IPFS_TESTS=$TEST_ROOT/ipfs_tests +mkdir $IPFS_TESTS # Here we define some store locations, one for the initial store we upload, and 
# the other three for the destination stores to which we'll copy (one for each @@ -33,9 +38,9 @@ IPFS_DST_IPNS_STORE=$IPFS_TESTS/ipfs_dest_ipns_store ## Generate the keys to sign the store ################################################################################ -SIGNING_KEY_NAME='nixcache.for.ipfs-1' -SIGNING_KEY_PRI_FILE='nix-cache-key.sec' -SIGNING_KEY_PUB_FILE='nix-cache-key.pub' +SIGNING_KEY_NAME=nixcache.for.ipfs-1 +SIGNING_KEY_PRI_FILE=$IPFS_TESTS/nix-cache-key.sec +SIGNING_KEY_PUB_FILE=$IPFS_TESTS/nix-cache-key.pub nix-store --generate-binary-cache-key $SIGNING_KEY_NAME $SIGNING_KEY_PRI_FILE $SIGNING_KEY_PUB_FILE @@ -44,104 +49,68 @@ nix-store --generate-binary-cache-key $SIGNING_KEY_NAME $SIGNING_KEY_PRI_FILE $S ################################################################################ mkdir -p $IPFS_SRC_STORE -# BUILD_COMMAND="nix-build '' -A hello.src" +# BUILD_COMMAND="nix-build ./dependencies.nix -A input1_drv" +BUILD_COMMAND="nix-build ./fixed.nix -A good" nix copy --to file://$IPFS_SRC_STORE \ - --experimental-features nix-command \ - $(nix-build ./simple-derivation.nix) + $($BUILD_COMMAND) nix sign-paths --store file://$IPFS_SRC_STORE \ - -k ~/nix-cache-key.sec \ + -k $SIGNING_KEY_PRI_FILE \ $($BUILD_COMMAND) -r IPFS_HASH=$(ipfs add -r $IPFS_SRC_STORE 2>/dev/null | tail -n 1 | awk '{print $2}') +################################################################################ +## Create the local http store and download the derivation there +################################################################################ -# ################################################################################ -# ## Create the http store and download the derivation there -# ################################################################################ - -# mkdir IPFS_DST_HTTP_STORE - -# IPFS_HTTP_PREFIX='https://gateway.ipfs.io/ipfs' - - -# DOWNLOAD_LOCATION=$(NIX_REMOTE=local $BUILD_COMMAND \ -# --option substituters 
$IPFS_HTTP_PREFIX/$IPFS_HASH \ -# --store $IPFS_DST_HTTP_STORE \ -# --option trusted-public-keys $(cat $SIGNING_KEY_PUB_FILE) \ -# | tail -2 | head -1 | awk '{print $5}') - -# ### TODO -# ### Check that the download location coincides - -# ################################################################################ -# ## Create the local http store and download the derivation there -# ################################################################################ - -# mkdir IPFS_DST_HTTP_LOCAL_STORE - -# IPFS_HTTP_LOCAL_PREFIX='http://localhost:8080/ipfs' - -# DOWNLOAD_LOCATION=$(NIX_REMOTE=local $BUILD_COMMAND \ -# --option substituters $IPFS_HTTP_LOCAL_PREFIX/$IPFS_HASH \ -# --store $IPFS_DST_HTTP_LOCAL_STORE \ -# --option trusted-public-keys $(cat $SIGNING_KEY_PUB_FILE) \ -# | tail -2 | head -1 | awk '{print $5}') +mkdir $IPFS_DST_HTTP_LOCAL_STORE -# ### TODO -# ### Check that the download location coincides +IPFS_HTTP_LOCAL_PREFIX='http://localhost:8080/ipfs' -# ################################################################################ -# ## Create the ipfs store and download the derivation there -# ################################################################################ +DOWNLOAD_LOCATION=$(NIX_REMOTE=local $BUILD_COMMAND \ + --option substituters $IPFS_HTTP_LOCAL_PREFIX/$IPFS_HASH \ + --store $IPFS_DST_HTTP_LOCAL_STORE \ + --no-out-link \ + -j0 \ + --option trusted-public-keys $(cat $SIGNING_KEY_PUB_FILE)) -# mkdir IPFS_DST_IPFS_STORE +################################################################################ +## Create the ipfs store and download the derivation there +################################################################################ -# IPFS_IPFS_PREFIX='/ipfs' +mkdir $IPFS_DST_IPFS_STORE -# DOWNLOAD_LOCATION=$(NIX_REMOTE=local $BUILD_COMMAND \ -# --option substituters $IPFS_IPFS_PREFIX/$IPFS_HASH \ -# --store $IPFS_DST_IPFS_STORE \ -# --option trusted-public-keys $(cat $SIGNING_KEY_PUB_FILE) \ -# | tail -2 | head -1 | 
awk '{print $5}') +IPFS_IPFS_PREFIX='/ipfs' -# ### TODO -# ### Check that the download location coincides +DOWNLOAD_LOCATION=$(NIX_REMOTE=local $BUILD_COMMAND \ + --option substituters $IPFS_IPFS_PREFIX/$IPFS_HASH \ + --store $IPFS_DST_IPFS_STORE \ + --no-out-link \ + -j0 \ + --option trusted-public-keys $(cat $SIGNING_KEY_PUB_FILE)) -# ################################################################################ -# ## Create the ipns store and download the derivation there -# ################################################################################ -# # First I have to publish: -# IPNS_ID=$(ipfs name publish $IPFS_HASH | awk '{print substr($3,1,length($3)-1)}') +################################################################################ +## Create the ipns store and download the derivation there +################################################################################ -# mkdir IPFS_DST_IPNS_STORE -# IPFS_IPNS_PREFIX='/ipns' +# First I have to publish: +IPNS_ID=$(ipfs name publish $IPFS_HASH | awk '{print substr($3,1,length($3)-1)}') -# DOWNLOAD_LOCATION=$(NIX_REMOTE=local $BUILD_COMMAND \ -# --option substituters $IPFS_IPNS_PREFIX/$IPNS_ID \ -# --store $IPFS_DST_IPFS_STORE \ -# --option trusted-public-keys $(cat $SIGNING_KEY_PUB_FILE) \ -# | tail -2 | head -1 | awk '{print $5}') +mkdir $IPFS_DST_IPNS_STORE +IPFS_IPNS_PREFIX='/ipns' -# ### TODO -# ### Check that the download location coincides +DOWNLOAD_LOCATION=$(NIX_REMOTE=local $BUILD_COMMAND \ + --option substituters $IPFS_IPNS_PREFIX/$IPNS_ID \ + --store $IPFS_DST_IPNS_STORE \ + --no-out-link \ + -j0 \ + --option trusted-public-keys $(cat $SIGNING_KEY_PUB_FILE)) ################################################################################ ## Cleanup ################################################################################ -# Delete the keys used to sign the store -rm $SIGNING_KEY_PRI_FILE $SIGNING_KEY_PUB_FILE - -# Remove all the stores -rm -rf $IPFS_SRC_STORE \ - 
$IPFS_DST_HTTP_STORE $IPFS_DST_HTTP_LOCAL_STORE \ - $IPFS_DST_IPFS_STORE $IPFS_DST_IPNS_STORE - -# Remove the result of the build -rm result - - -exit 1 +# The cleanup is done automatically by nix/tests/init.sh diff --git a/tests/local.mk b/tests/local.mk index a8c45cc73e3..f8af448d5df 100644 --- a/tests/local.mk +++ b/tests/local.mk @@ -1,42 +1,39 @@ -nix_tests = init.sh check.sh ipfs.sh - -# nix_tests = add.sh - -# nix_tests = \ -# init.sh hash.sh lang.sh add.sh simple.sh dependencies.sh \ -# config.sh \ -# gc.sh \ -# gc-concurrent.sh \ -# gc-auto.sh \ -# referrers.sh user-envs.sh logging.sh nix-build.sh misc.sh fixed.sh \ -# gc-runtime.sh check-refs.sh filter-source.sh \ -# remote-store.sh export.sh export-graph.sh \ -# timeout.sh secure-drv-outputs.sh nix-channel.sh \ -# multiple-outputs.sh import-derivation.sh fetchurl.sh optimise-store.sh \ -# binary-cache.sh nix-profile.sh repair.sh dump-db.sh case-hack.sh \ -# check-reqs.sh pass-as-file.sh tarball.sh restricted.sh \ -# placeholders.sh nix-shell.sh \ -# linux-sandbox.sh \ -# build-dry.sh \ -# build-remote.sh \ -# nar-access.sh \ -# structured-attrs.sh \ -# fetchGit.sh \ -# fetchGitRefs.sh \ -# fetchGitSubmodules.sh \ -# fetchMercurial.sh \ -# signing.sh \ -# shell.sh \ -# brotli.sh \ -# pure-eval.sh \ -# check.sh \ -# plugins.sh \ -# search.sh \ -# nix-copy-ssh.sh \ -# post-hook.sh \ -# function-trace.sh \ -# recursive.sh -# # parallel.sh +nix_tests = \ + init.sh hash.sh lang.sh add.sh simple.sh dependencies.sh \ + config.sh \ + gc.sh \ + gc-concurrent.sh \ + gc-auto.sh \ + referrers.sh user-envs.sh logging.sh nix-build.sh misc.sh fixed.sh \ + gc-runtime.sh check-refs.sh filter-source.sh \ + remote-store.sh export.sh export-graph.sh \ + timeout.sh secure-drv-outputs.sh nix-channel.sh \ + multiple-outputs.sh import-derivation.sh fetchurl.sh optimise-store.sh \ + binary-cache.sh nix-profile.sh repair.sh dump-db.sh case-hack.sh \ + check-reqs.sh pass-as-file.sh tarball.sh restricted.sh \ + placeholders.sh 
nix-shell.sh \ + linux-sandbox.sh \ + build-dry.sh \ + build-remote.sh \ + nar-access.sh \ + structured-attrs.sh \ + fetchGit.sh \ + fetchGitRefs.sh \ + fetchGitSubmodules.sh \ + fetchMercurial.sh \ + signing.sh \ + shell.sh \ + brotli.sh \ + pure-eval.sh \ + check.sh \ + plugins.sh \ + search.sh \ + nix-copy-ssh.sh \ + post-hook.sh \ + function-trace.sh \ + recursive.sh \ + ipfs.sh + # parallel.sh install-tests += $(foreach x, $(nix_tests), tests/$(x)) diff --git a/tests/simple-derivation-builder.sh b/tests/simple-derivation-builder.sh deleted file mode 100755 index a47a2d79902..00000000000 --- a/tests/simple-derivation-builder.sh +++ /dev/null @@ -1,3 +0,0 @@ -#!/usr/bin/env bash - -ln -s $input $out diff --git a/tests/simple-derivation.nix b/tests/simple-derivation.nix deleted file mode 100644 index 883e715cf7d..00000000000 --- a/tests/simple-derivation.nix +++ /dev/null @@ -1,12 +0,0 @@ -# This derivation doesn't depend from nixpkgs, so that we can build it during -# our test suite. 
- -with import ./config.nix; - -mkDerivation { - name = "simple-test-derivation"; - builder = builtins.toFile "builder" "ln -s $input $out"; - # builder = "/home/carlo/code/obsidian/nix/tests/simple-derivation-builder.sh"; - input = - builtins.fetchTarball("http://alpha.gnu.org/gnu/hello/hello-2.6.90.tar.gz"); -} From 0da205bdc097135f0972fe373a7446d51fa1fd5f Mon Sep 17 00:00:00 2001 From: Carlo Nucera Date: Thu, 11 Jun 2020 17:34:57 -0400 Subject: [PATCH 027/104] Update prefixes for ipfs and ipns in test suite --- tests/ipfs.sh | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/tests/ipfs.sh b/tests/ipfs.sh index 239e1f42f0b..864519acf64 100644 --- a/tests/ipfs.sh +++ b/tests/ipfs.sh @@ -82,10 +82,10 @@ DOWNLOAD_LOCATION=$(NIX_REMOTE=local $BUILD_COMMAND \ mkdir $IPFS_DST_IPFS_STORE -IPFS_IPFS_PREFIX='/ipfs' +IPFS_IPFS_PREFIX='ipfs://' DOWNLOAD_LOCATION=$(NIX_REMOTE=local $BUILD_COMMAND \ - --option substituters $IPFS_IPFS_PREFIX/$IPFS_HASH \ + --option substituters $IPFS_IPFS_PREFIX$IPFS_HASH \ --store $IPFS_DST_IPFS_STORE \ --no-out-link \ -j0 \ @@ -100,10 +100,10 @@ DOWNLOAD_LOCATION=$(NIX_REMOTE=local $BUILD_COMMAND \ IPNS_ID=$(ipfs name publish $IPFS_HASH | awk '{print substr($3,1,length($3)-1)}') mkdir $IPFS_DST_IPNS_STORE -IPFS_IPNS_PREFIX='/ipns' +IPFS_IPNS_PREFIX='ipns://' DOWNLOAD_LOCATION=$(NIX_REMOTE=local $BUILD_COMMAND \ - --option substituters $IPFS_IPNS_PREFIX/$IPNS_ID \ + --option substituters $IPFS_IPNS_PREFIX$IPNS_ID \ --store $IPFS_DST_IPNS_STORE \ --no-out-link \ -j0 \ From 753ed622af9b704594717b35575a02017e4a5f2c Mon Sep 17 00:00:00 2001 From: John Ericson Date: Thu, 11 Jun 2020 21:45:53 +0000 Subject: [PATCH 028/104] Add ipfs as a dep for tests Eventually we should separate test deps. 
--- release-common.nix | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/release-common.nix b/release-common.nix index 7e7de005d2f..16832618546 100644 --- a/release-common.nix +++ b/release-common.nix @@ -70,7 +70,8 @@ rec { sha256 = "1pij0v449p166f9l29x7ppzk8j7g9k9mp15ilh5qxp29c7fnvxy2"; }) ]; */ - })); + })) + ++ lib.optional (stdenv.isLinux || stdenv.isDarwin) ipfs; propagatedDeps = [ (boehmgc.override { enableLargeConfig = true; }) From 47502232585acde19100cf2c3a9fa47ca49a98a6 Mon Sep 17 00:00:00 2001 From: Carlo Nucera Date: Thu, 11 Jun 2020 18:42:58 -0400 Subject: [PATCH 029/104] Restart ipfs when the daemon is not running --- tests/ipfs.sh | 18 +++++++++++------- 1 file changed, 11 insertions(+), 7 deletions(-) diff --git a/tests/ipfs.sh b/tests/ipfs.sh index 864519acf64..8dc96354138 100644 --- a/tests/ipfs.sh +++ b/tests/ipfs.sh @@ -5,16 +5,20 @@ export IMPURE_VAR1=foo export IMPURE_VAR2=bar ################################################################################ -## Check that the ipfs daemon is enabled in your environment +## Check that the ipfs daemon is present and enabled in your environment ################################################################################ +if [[ -z $(type -p ipfs) ]]; then + echo "Ipfs not installed; skipping ipfs tests" + exit 99 +fi + # To see if ipfs is connected to the network, we check if we can see some peers # other than ourselves. -NPEERS=$(ipfs swarm peers | wc -l) -echo $NPEERS -if (( $NPEERS < 2 )); then - echo "The ipfs daemon doesn't seem to be enabled (can't find peers)" - exit 1 +if (! (ipfs log ls)); +then + echo "Ipfs daemon not detected; initializing.." 
+    ipfs daemon --offline & fi ################################################################################ @@ -97,7 +101,7 @@ DOWNLOAD_LOCATION=$(NIX_REMOTE=local $BUILD_COMMAND \ ################################################################################ # First I have to publish: -IPNS_ID=$(ipfs name publish $IPFS_HASH | awk '{print substr($3,1,length($3)-1)}') +IPNS_ID=$(ipfs name publish $IPFS_HASH --allow-offline | awk '{print substr($3,1,length($3)-1)}') mkdir $IPFS_DST_IPNS_STORE IPFS_IPNS_PREFIX='ipns://' From b2eccb98f20d17748fe6d64e942f62a6ecdb879d Mon Sep 17 00:00:00 2001 From: John Ericson Date: Fri, 12 Jun 2020 15:23:24 +0000 Subject: [PATCH 030/104] Sometimes read permissions disappear on the test data for some reason --- tests/init.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/init.sh b/tests/init.sh index c62c4856a93..8b9ded4a446 100644 --- a/tests/init.sh +++ b/tests/init.sh @@ -2,7 +2,7 @@ source common.sh test -n "$TEST_ROOT" if test -d "$TEST_ROOT"; then - chmod -R u+w "$TEST_ROOT" + chmod -R u+rw "$TEST_ROOT" rm -rf "$TEST_ROOT" fi mkdir "$TEST_ROOT" From 0068f7d6783da6391d2bac1398a6706e90dfdc79 Mon Sep 17 00:00:00 2001 From: John Ericson Date: Fri, 12 Jun 2020 15:23:51 +0000 Subject: [PATCH 031/104] Ensure IPFS daemon is killed after test --- tests/ipfs.sh | 9 +++------ 1 file changed, 3 insertions(+), 6 deletions(-) diff --git a/tests/ipfs.sh b/tests/ipfs.sh index 8dc96354138..bf2886400f9 100644 --- a/tests/ipfs.sh +++ b/tests/ipfs.sh @@ -18,7 +18,10 @@ fi if (! (ipfs log ls)); then echo "Ipfs daemon not detected; initializing.." + ipfs init ipfs daemon --offline & + pidIpfsDaemon=$!
+ trap "kill -9 $pidIpfsDaemon" EXIT fi ################################################################################ @@ -112,9 +115,3 @@ DOWNLOAD_LOCATION=$(NIX_REMOTE=local $BUILD_COMMAND \ --no-out-link \ -j0 \ --option trusted-public-keys $(cat $SIGNING_KEY_PUB_FILE)) - -################################################################################ -## Cleanup -################################################################################ - -# The cleanup is done automatically by nix/tests/init.sh From 5352bcbfbc3dfe7bb9d55fb6db392f9f0b2eaf7a Mon Sep 17 00:00:00 2001 From: John Ericson Date: Fri, 12 Jun 2020 16:12:20 +0000 Subject: [PATCH 032/104] Just run ipfs tests on linux for now, sigh Not sure what this `nix copy` problem is. I'm pretty sure it's a preexisting bug. --- release-common.nix | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/release-common.nix b/release-common.nix index 16832618546..9c212c9b575 100644 --- a/release-common.nix +++ b/release-common.nix @@ -71,7 +71,9 @@ rec { }) ]; */ })) - ++ lib.optional (stdenv.isLinux || stdenv.isDarwin) ipfs; + # TODO fix `nix copy` on darwin + # ++ lib.optional (stdenv.isLinux || stdenv.isDarwin) ipfs; + # ++ lib.optional stdenv.isLinux ipfs; propagatedDeps = [ (boehmgc.override { enableLargeConfig = true; }) From 7599cf040d0a3f03a593169fd7f48030e739e476 Mon Sep 17 00:00:00 2001 From: John Ericson Date: Fri, 12 Jun 2020 16:13:52 +0000 Subject: [PATCH 033/104] Fix syntax error from bad paste --- release-common.nix | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/release-common.nix b/release-common.nix index 9c212c9b575..e9e85956af4 100644 --- a/release-common.nix +++ b/release-common.nix @@ -73,7 +73,7 @@ rec { })) # TODO fix `nix copy` on darwin # ++ lib.optional (stdenv.isLinux || stdenv.isDarwin) ipfs; - # ++ lib.optional stdenv.isLinux ipfs; + ++ lib.optional stdenv.isLinux ipfs; propagatedDeps = [ (boehmgc.override { enableLargeConfig = true; }) From 
4348615044e0058114e81a4ea7e588725aca9a7c Mon Sep 17 00:00:00 2001 From: John Ericson Date: Fri, 12 Jun 2020 19:49:32 +0000 Subject: [PATCH 034/104] Remove `NIX_REMOTE=local` from ipfs test The test infra will handle NIX_REMOTE --- tests/ipfs.sh | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/tests/ipfs.sh b/tests/ipfs.sh index bf2886400f9..3fb08bf49a9 100644 --- a/tests/ipfs.sh +++ b/tests/ipfs.sh @@ -76,7 +76,7 @@ mkdir $IPFS_DST_HTTP_LOCAL_STORE IPFS_HTTP_LOCAL_PREFIX='http://localhost:8080/ipfs' -DOWNLOAD_LOCATION=$(NIX_REMOTE=local $BUILD_COMMAND \ +DOWNLOAD_LOCATION=$($BUILD_COMMAND \ --option substituters $IPFS_HTTP_LOCAL_PREFIX/$IPFS_HASH \ --store $IPFS_DST_HTTP_LOCAL_STORE \ --no-out-link \ @@ -91,7 +91,7 @@ mkdir $IPFS_DST_IPFS_STORE IPFS_IPFS_PREFIX='ipfs://' -DOWNLOAD_LOCATION=$(NIX_REMOTE=local $BUILD_COMMAND \ +DOWNLOAD_LOCATION=$($BUILD_COMMAND \ --option substituters $IPFS_IPFS_PREFIX$IPFS_HASH \ --store $IPFS_DST_IPFS_STORE \ --no-out-link \ @@ -109,7 +109,7 @@ IPNS_ID=$(ipfs name publish $IPFS_HASH --allow-offline | awk '{print substr($3,1 mkdir $IPFS_DST_IPNS_STORE IPFS_IPNS_PREFIX='ipns://' -DOWNLOAD_LOCATION=$(NIX_REMOTE=local $BUILD_COMMAND \ +DOWNLOAD_LOCATION=$($BUILD_COMMAND \ --option substituters $IPFS_IPNS_PREFIX$IPNS_ID \ --store $IPFS_DST_IPNS_STORE \ --no-out-link \ From 67c63b954cccbdbaf17947b3d57f79a55d9f98c5 Mon Sep 17 00:00:00 2001 From: Matthew Bauer Date: Fri, 12 Jun 2020 15:51:39 -0500 Subject: [PATCH 035/104] Only call build once in tests/ipfs.sh --- tests/ipfs.sh | 9 +++------ 1 file changed, 3 insertions(+), 6 deletions(-) diff --git a/tests/ipfs.sh b/tests/ipfs.sh index 3fb08bf49a9..5fd97741fa6 100644 --- a/tests/ipfs.sh +++ b/tests/ipfs.sh @@ -56,15 +56,12 @@ nix-store --generate-binary-cache-key $SIGNING_KEY_NAME $SIGNING_KEY_PRI_FILE $S ################################################################################ mkdir -p $IPFS_SRC_STORE -# BUILD_COMMAND="nix-build ./dependencies.nix -A 
input1_drv" -BUILD_COMMAND="nix-build ./fixed.nix -A good" +storePaths=$(nix-build ./fixed.nix -A good) -nix copy --to file://$IPFS_SRC_STORE \ - $($BUILD_COMMAND) +nix copy --to file://$IPFS_SRC_STORE $storePaths nix sign-paths --store file://$IPFS_SRC_STORE \ - -k $SIGNING_KEY_PRI_FILE \ - $($BUILD_COMMAND) -r + -k $SIGNING_KEY_PRI_FILE storePaths IPFS_HASH=$(ipfs add -r $IPFS_SRC_STORE 2>/dev/null | tail -n 1 | awk '{print $2}') From e1d00917ab4a1ad38da95636849c356bfad2b224 Mon Sep 17 00:00:00 2001 From: Matthew Bauer Date: Fri, 12 Jun 2020 15:52:01 -0500 Subject: [PATCH 036/104] Clear store when beginning ipfs test --- tests/ipfs.sh | 2 ++ 1 file changed, 2 insertions(+) diff --git a/tests/ipfs.sh b/tests/ipfs.sh index 5fd97741fa6..307958b4df6 100644 --- a/tests/ipfs.sh +++ b/tests/ipfs.sh @@ -24,6 +24,8 @@ then trap "kill -9 $pidIpfsDaemon" EXIT fi +clearStore + ################################################################################ ## Create the folders for the source and destination stores ################################################################################ From 4f3a4c9e354647e26907a3e99ccdded35015ccb5 Mon Sep 17 00:00:00 2001 From: Matthew Bauer Date: Fri, 12 Jun 2020 16:44:16 -0500 Subject: [PATCH 037/104] Fix typo in ipfs.sh --- tests/ipfs.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/ipfs.sh b/tests/ipfs.sh index 307958b4df6..adeef876cad 100644 --- a/tests/ipfs.sh +++ b/tests/ipfs.sh @@ -63,7 +63,7 @@ storePaths=$(nix-build ./fixed.nix -A good) nix copy --to file://$IPFS_SRC_STORE $storePaths nix sign-paths --store file://$IPFS_SRC_STORE \ - -k $SIGNING_KEY_PRI_FILE storePaths + -k $SIGNING_KEY_PRI_FILE $storePaths IPFS_HASH=$(ipfs add -r $IPFS_SRC_STORE 2>/dev/null | tail -n 1 | awk '{print $2}') From 4b83835502b99764f7200eac4d7c427366238391 Mon Sep 17 00:00:00 2001 From: Matthew Bauer Date: Fri, 12 Jun 2020 17:52:25 -0500 Subject: [PATCH 038/104] Fixup unset BUILD_COMMAND --- tests/ipfs.sh | 6 +++--- 1 
file changed, 3 insertions(+), 3 deletions(-) diff --git a/tests/ipfs.sh b/tests/ipfs.sh index adeef876cad..4841a7ccc1a 100644 --- a/tests/ipfs.sh +++ b/tests/ipfs.sh @@ -75,7 +75,7 @@ mkdir $IPFS_DST_HTTP_LOCAL_STORE IPFS_HTTP_LOCAL_PREFIX='http://localhost:8080/ipfs' -DOWNLOAD_LOCATION=$($BUILD_COMMAND \ +DOWNLOAD_LOCATION=$(nix-build ./fixed.nix -A good \ --option substituters $IPFS_HTTP_LOCAL_PREFIX/$IPFS_HASH \ --store $IPFS_DST_HTTP_LOCAL_STORE \ --no-out-link \ @@ -90,7 +90,7 @@ mkdir $IPFS_DST_IPFS_STORE IPFS_IPFS_PREFIX='ipfs://' -DOWNLOAD_LOCATION=$($BUILD_COMMAND \ +DOWNLOAD_LOCATION=$(nix-build ./fixed.nix -A good \ --option substituters $IPFS_IPFS_PREFIX$IPFS_HASH \ --store $IPFS_DST_IPFS_STORE \ --no-out-link \ @@ -108,7 +108,7 @@ IPNS_ID=$(ipfs name publish $IPFS_HASH --allow-offline | awk '{print substr($3,1 mkdir $IPFS_DST_IPNS_STORE IPFS_IPNS_PREFIX='ipns://' -DOWNLOAD_LOCATION=$($BUILD_COMMAND \ +DOWNLOAD_LOCATION=$(nix-build ./fixed.nix -A good \ --option substituters $IPFS_IPNS_PREFIX$IPNS_ID \ --store $IPFS_DST_IPNS_STORE \ --no-out-link \ From 3cb4a0baadb0e3f0616b1fc8367452334dbf20bf Mon Sep 17 00:00:00 2001 From: Matthew Bauer Date: Sat, 13 Jun 2020 00:29:35 -0500 Subject: [PATCH 039/104] Try to make /nar before nix copy in tests/ipfs.sh --- tests/ipfs.sh | 1 + 1 file changed, 1 insertion(+) diff --git a/tests/ipfs.sh b/tests/ipfs.sh index 4841a7ccc1a..01765b5446d 100644 --- a/tests/ipfs.sh +++ b/tests/ipfs.sh @@ -58,6 +58,7 @@ nix-store --generate-binary-cache-key $SIGNING_KEY_NAME $SIGNING_KEY_PRI_FILE $S ################################################################################ mkdir -p $IPFS_SRC_STORE +mkdir $IPFS_SRC_STORE/nar storePaths=$(nix-build ./fixed.nix -A good) nix copy --to file://$IPFS_SRC_STORE $storePaths From ad96d2111e4dd8a9011bb66d790c964df6c8d296 Mon Sep 17 00:00:00 2001 From: John Ericson Date: Sat, 13 Jun 2020 14:37:07 +0000 Subject: [PATCH 040/104] Revert "Just run ipfs tests on linux for now, sigh" This 
reverts commit 5352bcbfbc3dfe7bb9d55fb6db392f9f0b2eaf7a. --- release-common.nix | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/release-common.nix b/release-common.nix index e9e85956af4..16832618546 100644 --- a/release-common.nix +++ b/release-common.nix @@ -71,9 +71,7 @@ rec { }) ]; */ })) - # TODO fix `nix copy` on darwin - # ++ lib.optional (stdenv.isLinux || stdenv.isDarwin) ipfs; - ++ lib.optional stdenv.isLinux ipfs; + ++ lib.optional (stdenv.isLinux || stdenv.isDarwin) ipfs; propagatedDeps = [ (boehmgc.override { enableLargeConfig = true; }) From 08ee3665210d1958e6c3bd7244c7a7078d9d94de Mon Sep 17 00:00:00 2001 From: John Ericson Date: Sat, 13 Jun 2020 15:44:57 +0000 Subject: [PATCH 041/104] ipfs test: Inline some vars used once --- tests/ipfs.sh | 7 ++----- 1 file changed, 2 insertions(+), 5 deletions(-) diff --git a/tests/ipfs.sh b/tests/ipfs.sh index 01765b5446d..257fa81a7c2 100644 --- a/tests/ipfs.sh +++ b/tests/ipfs.sh @@ -89,10 +89,8 @@ DOWNLOAD_LOCATION=$(nix-build ./fixed.nix -A good \ mkdir $IPFS_DST_IPFS_STORE -IPFS_IPFS_PREFIX='ipfs://' - DOWNLOAD_LOCATION=$(nix-build ./fixed.nix -A good \ - --option substituters $IPFS_IPFS_PREFIX$IPFS_HASH \ + --option substituters 'ipfs://'$IPFS_HASH \ --store $IPFS_DST_IPFS_STORE \ --no-out-link \ -j0 \ @@ -107,10 +105,9 @@ DOWNLOAD_LOCATION=$(nix-build ./fixed.nix -A good \ IPNS_ID=$(ipfs name publish $IPFS_HASH --allow-offline | awk '{print substr($3,1,length($3)-1)}') mkdir $IPFS_DST_IPNS_STORE -IPFS_IPNS_PREFIX='ipns://' DOWNLOAD_LOCATION=$(nix-build ./fixed.nix -A good \ - --option substituters $IPFS_IPNS_PREFIX$IPNS_ID \ + --option substituters 'ipns://'$IPNS_ID \ --store $IPFS_DST_IPNS_STORE \ --no-out-link \ -j0 \ From 953ceba4cd6a38c439dc737eee525a1fc7ffc5d6 Mon Sep 17 00:00:00 2001 From: John Ericson Date: Sat, 13 Jun 2020 12:53:41 -0400 Subject: [PATCH 042/104] Identify real issue (#3695) and hack around it --- tests/ipfs.sh | 9 ++++++--- 1 file changed, 6 insertions(+), 3 
deletions(-) diff --git a/tests/ipfs.sh b/tests/ipfs.sh index 257fa81a7c2..9cad1c381f6 100644 --- a/tests/ipfs.sh +++ b/tests/ipfs.sh @@ -58,10 +58,13 @@ nix-store --generate-binary-cache-key $SIGNING_KEY_NAME $SIGNING_KEY_PRI_FILE $S ################################################################################ mkdir -p $IPFS_SRC_STORE -mkdir $IPFS_SRC_STORE/nar -storePaths=$(nix-build ./fixed.nix -A good) +storePaths=$(nix-build ./fixed.nix -A good | sort | uniq) -nix copy --to file://$IPFS_SRC_STORE $storePaths +# Hack around https://github.com/NixOS/nix/issues/3695 +for path in $storePaths; do + nix copy --to file://$IPFS_SRC_STORE $path +done +unset path nix sign-paths --store file://$IPFS_SRC_STORE \ -k $SIGNING_KEY_PRI_FILE $storePaths From 77d27f05147377bdd6caea138c52afee70e082eb Mon Sep 17 00:00:00 2001 From: Carlo Nucera Date: Mon, 15 Jun 2020 11:20:49 -0400 Subject: [PATCH 043/104] Check that ipfs and ipns hashes correspond --- src/libstore/ipfs-binary-cache-store.cc | 31 +++++++++++++++++-------- 1 file changed, 21 insertions(+), 10 deletions(-) diff --git a/src/libstore/ipfs-binary-cache-store.cc b/src/libstore/ipfs-binary-cache-store.cc index 8de3b035934..a74acd067e3 100644 --- a/src/libstore/ipfs-binary-cache-store.cc +++ b/src/libstore/ipfs-binary-cache-store.cc @@ -64,16 +64,7 @@ class IPFSBinaryCacheStore : public BinaryCacheStore // Resolve the IPNS name to an IPFS object if (optIpnsPath) { auto ipnsPath = *optIpnsPath; - debug("Resolving IPFS object of '%s', this could take a while.", ipnsPath); - auto uri = daemonUri + "/api/v0/name/resolve?offline=true&arg=" + getFileTransfer()->urlEncode(ipnsPath); - FileTransferRequest request(uri); - request.post = true; - request.tries = 1; - auto res = getFileTransfer()->download(request); - auto json = nlohmann::json::parse(*res.data); - if (json.find("Path") == json.end()) - throw Error("daemon for IPFS is not running properly"); - state->ipfsPath = json["Path"]; + state->ipfsPath = 
resolveIPNSName(ipnsPath, true); } } @@ -111,6 +102,20 @@ class IPFSBinaryCacheStore : public BinaryCacheStore } } + // Resolve the IPNS name to an IPFS object + std::string resolveIPNSName(std::string ipnsPath, bool offline) { + debug("Resolving IPFS object of '%s', this could take a while.", ipnsPath); + auto uri = daemonUri + "/api/v0/name/resolve?offline=" + (offline?"true":"false") + "&arg=" + getFileTransfer()->urlEncode(ipnsPath); + FileTransferRequest request(uri); + request.post = true; + request.tries = 1; + auto res = getFileTransfer()->download(request); + auto json = nlohmann::json::parse(*res.data); + if (json.find("Path") == json.end()) + throw Error("daemon for IPFS is not running properly"); + return json["Path"]; + } + // IPNS publish can be slow, we try to do it rarely. void sync() override { @@ -120,6 +125,12 @@ class IPFSBinaryCacheStore : public BinaryCacheStore auto state(_state.lock()); + auto resolvedIpfsName = resolveIPNSName(ipnsPath, false); + if (resolvedIpfsName != state->ipfsPath) { + throw Error("The ipns hash %s doesn't correspond to the correct ipfs hash;\n wanted: %s\n got %s", + state->ipfsPath, resolvedIpfsName); + } + debug("Publishing '%s' to '%s', this could take a while.", state->ipfsPath, ipnsPath); auto uri = daemonUri + "/api/v0/name/publish?offline=true&arg=" + getFileTransfer()->urlEncode(state->ipfsPath); From caf4db0f38c5a9e136f7e913f94e8e4341de201b Mon Sep 17 00:00:00 2001 From: Carlo Nucera Date: Mon, 15 Jun 2020 12:58:12 -0400 Subject: [PATCH 044/104] Add initialIpfsPath and refine sync checks --- src/libstore/ipfs-binary-cache-store.cc | 43 ++++++++++++++++++------- 1 file changed, 32 insertions(+), 11 deletions(-) diff --git a/src/libstore/ipfs-binary-cache-store.cc b/src/libstore/ipfs-binary-cache-store.cc index a74acd067e3..c5c6816b22b 100644 --- a/src/libstore/ipfs-binary-cache-store.cc +++ b/src/libstore/ipfs-binary-cache-store.cc @@ -21,6 +21,7 @@ class IPFSBinaryCacheStore : public BinaryCacheStore auto 
state(_state.lock()); return state->ipfsPath; } + std::string initialIpfsPath; std::optional optIpnsPath; struct State @@ -41,9 +42,10 @@ class IPFSBinaryCacheStore : public BinaryCacheStore if (cacheUri.back() == '/') cacheUri.pop_back(); - if (hasPrefix(cacheUri, "ipfs://")) - state->ipfsPath = "/ipfs/" + std::string(cacheUri, 7); - else if (hasPrefix(cacheUri, "ipns://")) + if (hasPrefix(cacheUri, "ipfs://")) { + initialIpfsPath = "/ipfs/" + std::string(cacheUri, 7); + state->ipfsPath = initialIpfsPath; + } else if (hasPrefix(cacheUri, "ipns://")) optIpnsPath = "/ipns/" + std::string(cacheUri, 7); else throw Error("unknown IPNS URI '%s'", cacheUri); @@ -64,7 +66,8 @@ class IPFSBinaryCacheStore : public BinaryCacheStore // Resolve the IPNS name to an IPFS object if (optIpnsPath) { auto ipnsPath = *optIpnsPath; - state->ipfsPath = resolveIPNSName(ipnsPath, true); + initialIpfsPath = resolveIPNSName(ipnsPath, true); + state->ipfsPath = initialIpfsPath; } } @@ -83,6 +86,17 @@ class IPFSBinaryCacheStore : public BinaryCacheStore protected: + // Given a ipns path, checks if it corresponds to a DNSLink path. + bool isDNSLinkPath(std::string path) { + if (path.find("/ipns/") != 0) { + auto subpath = std::string(path, 6); + if (subpath.find(".") == std::string::npos) + return false; + return true; + } + throw Error("The provided path is not a ipns path"); + } + bool fileExists(const std::string & path) override { auto uri = daemonUri + "/api/v0/object/stat?arg=" + getFileTransfer()->urlEncode(getIpfsPath() + "/" + path); @@ -119,16 +133,23 @@ class IPFSBinaryCacheStore : public BinaryCacheStore // IPNS publish can be slow, we try to do it rarely. 
void sync() override { - if (!optIpnsPath) - return; + auto state(_state.lock()); + + if (!optIpnsPath) { + throw Error("We don't have an ipns path and the current ipfs address doesn't match the initial one.\n current: %s\n initial: %s", + state->ipfsPath, initialIpfsPath); + } + auto ipnsPath = *optIpnsPath; - auto state(_state.lock()); + if (isDNSLinkPath(ipnsPath)) { + throw Error("The provided ipns path is a DNSLink, and syncing those is not supported.\n ipns path: %s", ipnsPath); + } - auto resolvedIpfsName = resolveIPNSName(ipnsPath, false); - if (resolvedIpfsName != state->ipfsPath) { - throw Error("The ipns hash %s doesn't correspond to the correct ipfs hash;\n wanted: %s\n got %s", - state->ipfsPath, resolvedIpfsName); + auto resolvedIpfsPath = resolveIPNSName(ipnsPath, false); + if (resolvedIpfsPath != initialIpfsPath) { + throw Error("The ipns hash %s doesn't correspond to the initial ipfs hash;\n wanted: %s\n got %s", + initialIpfsPath, resolvedIpfsPath); } debug("Publishing '%s' to '%s', this could take a while.", state->ipfsPath, ipnsPath); From 6f01ad854f65b569e9c15c4c7103894e17e42c8e Mon Sep 17 00:00:00 2001 From: Carlo Nucera Date: Mon, 15 Jun 2020 13:12:08 -0400 Subject: [PATCH 045/104] Deal with non happy paths first Co-authored-by: John Ericson --- src/libstore/ipfs-binary-cache-store.cc | 11 ++++------- 1 file changed, 4 insertions(+), 7 deletions(-) diff --git a/src/libstore/ipfs-binary-cache-store.cc b/src/libstore/ipfs-binary-cache-store.cc index c5c6816b22b..6bb0a6e0004 100644 --- a/src/libstore/ipfs-binary-cache-store.cc +++ b/src/libstore/ipfs-binary-cache-store.cc @@ -88,13 +88,10 @@ class IPFSBinaryCacheStore : public BinaryCacheStore // Given a ipns path, checks if it corresponds to a DNSLink path. 
bool isDNSLinkPath(std::string path) { - if (path.find("/ipns/") != 0) { - auto subpath = std::string(path, 6); - if (subpath.find(".") == std::string::npos) - return false; - return true; - } - throw Error("The provided path is not a ipns path"); + if (path.find("/ipns/") == 0) + throw Error("The provided path is not a ipns path"); + auto subpath = std::string(path, 6); + return subpath.find(".") != std::string::npos; } bool fileExists(const std::string & path) override From c33307e486060626480a97421c2db9eedb6ff982 Mon Sep 17 00:00:00 2001 From: Carlo Nucera Date: Mon, 15 Jun 2020 13:13:06 -0400 Subject: [PATCH 046/104] Correct condition --- src/libstore/ipfs-binary-cache-store.cc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/libstore/ipfs-binary-cache-store.cc b/src/libstore/ipfs-binary-cache-store.cc index 6bb0a6e0004..1347cd2c97b 100644 --- a/src/libstore/ipfs-binary-cache-store.cc +++ b/src/libstore/ipfs-binary-cache-store.cc @@ -88,7 +88,7 @@ class IPFSBinaryCacheStore : public BinaryCacheStore // Given a ipns path, checks if it corresponds to a DNSLink path. 
bool isDNSLinkPath(std::string path) { - if (path.find("/ipns/") == 0) + if (path.find("/ipns/") != 0) throw Error("The provided path is not a ipns path"); auto subpath = std::string(path, 6); return subpath.find(".") != std::string::npos; From 5125a7cf012a88f4dd3473280820c52f5b94a1eb Mon Sep 17 00:00:00 2001 From: Carlo Nucera Date: Mon, 15 Jun 2020 13:15:04 -0400 Subject: [PATCH 047/104] Better error message Co-authored-by: John Ericson --- src/libstore/ipfs-binary-cache-store.cc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/libstore/ipfs-binary-cache-store.cc b/src/libstore/ipfs-binary-cache-store.cc index 1347cd2c97b..5c32e50f269 100644 --- a/src/libstore/ipfs-binary-cache-store.cc +++ b/src/libstore/ipfs-binary-cache-store.cc @@ -145,7 +145,7 @@ class IPFSBinaryCacheStore : public BinaryCacheStore auto resolvedIpfsPath = resolveIPNSName(ipnsPath, false); if (resolvedIpfsPath != initialIpfsPath) { - throw Error("The ipns hash %s doesn't correspond to the initial ipfs hash;\n wanted: %s\n got %s", + throw Error("The IPNS hash or DNS link %s resolves now to something different from the value it had when Nix was started;\n wanted: %s\n got %s\nPerhaps something else updated it in the meantime?", initialIpfsPath, resolvedIpfsPath); } From 16ecf60bcf1cefdf2c6e555fae964b6bfe26c649 Mon Sep 17 00:00:00 2001 From: Carlo Nucera Date: Mon, 15 Jun 2020 13:29:49 -0400 Subject: [PATCH 048/104] Better logic --- src/libstore/ipfs-binary-cache-store.cc | 14 ++++++++++---- 1 file changed, 10 insertions(+), 4 deletions(-) diff --git a/src/libstore/ipfs-binary-cache-store.cc b/src/libstore/ipfs-binary-cache-store.cc index 5c32e50f269..295bc116274 100644 --- a/src/libstore/ipfs-binary-cache-store.cc +++ b/src/libstore/ipfs-binary-cache-store.cc @@ -139,16 +139,22 @@ class IPFSBinaryCacheStore : public BinaryCacheStore auto ipnsPath = *optIpnsPath; - if (isDNSLinkPath(ipnsPath)) { - throw Error("The provided ipns path is a DNSLink, and syncing those is not 
supported.\n ipns path: %s", ipnsPath); - } - auto resolvedIpfsPath = resolveIPNSName(ipnsPath, false); if (resolvedIpfsPath != initialIpfsPath) { throw Error("The IPNS hash or DNS link %s resolves now to something different from the value it had when Nix was started;\n wanted: %s\n got %s\nPerhaps something else updated it in the meantime?", initialIpfsPath, resolvedIpfsPath); } + if resolvedIpfsPath == state->ipfsPath { + printMsg(lvlInfo, "The hash is already up to date, nothing to do"); + return; + } + + // Now, we know that paths are not up to date but also not changed due to updates in DNS or IPNS hash. + if (isDNSLinkPath(ipnsPath)) { + throw Error("The provided ipns path is a DNSLink, and syncing those is not supported.\n ipns path: %s\nYou should update your DNS settings", ipnsPath); + } + debug("Publishing '%s' to '%s', this could take a while.", state->ipfsPath, ipnsPath); auto uri = daemonUri + "/api/v0/name/publish?offline=true&arg=" + getFileTransfer()->urlEncode(state->ipfsPath); From 35619454f45523901d014b0fcf29a739b19df5af Mon Sep 17 00:00:00 2001 From: Carlo Nucera Date: Mon, 15 Jun 2020 13:38:59 -0400 Subject: [PATCH 049/104] Return the domain in isDNSLinkPath --- src/libstore/ipfs-binary-cache-store.cc | 19 +++++++++++++------ 1 file changed, 13 insertions(+), 6 deletions(-) diff --git a/src/libstore/ipfs-binary-cache-store.cc b/src/libstore/ipfs-binary-cache-store.cc index 295bc116274..43a5a664de1 100644 --- a/src/libstore/ipfs-binary-cache-store.cc +++ b/src/libstore/ipfs-binary-cache-store.cc @@ -86,12 +86,16 @@ class IPFSBinaryCacheStore : public BinaryCacheStore protected: - // Given a ipns path, checks if it corresponds to a DNSLink path. 
- bool isDNSLinkPath(std::string path) { + // Given a ipns path, checks if it corresponds to a DNSLink path, and in + // case returns the domain + std::optional isDNSLinkPath(std::string path) { if (path.find("/ipns/") != 0) throw Error("The provided path is not a ipns path"); auto subpath = std::string(path, 6); - return subpath.find(".") != std::string::npos; + if (subpath.find(".") != std::string::npos) { + return subpath; + } + return std::nullopt; } bool fileExists(const std::string & path) override @@ -145,14 +149,17 @@ class IPFSBinaryCacheStore : public BinaryCacheStore initialIpfsPath, resolvedIpfsPath); } - if resolvedIpfsPath == state->ipfsPath { + if (resolvedIpfsPath == state->ipfsPath) { printMsg(lvlInfo, "The hash is already up to date, nothing to do"); return; } // Now, we know that paths are not up to date but also not changed due to updates in DNS or IPNS hash. - if (isDNSLinkPath(ipnsPath)) { - throw Error("The provided ipns path is a DNSLink, and syncing those is not supported.\n ipns path: %s\nYou should update your DNS settings", ipnsPath); + auto optDomain = isDNSLinkPath(ipnsPath); + if (optDomain) { + auto domain = *optDomain; + throw Error("The provided ipns path is a DNSLink, and syncing those is not supported.\n Current DNSLink: %s\nYou should update your DNS settings" + , domain); } debug("Publishing '%s' to '%s', this could take a while.", state->ipfsPath, ipnsPath); From f1d60662a25b9c40b414748b2a66414ec36b9bfe Mon Sep 17 00:00:00 2001 From: Matthew Bauer Date: Mon, 15 Jun 2020 15:42:40 -0400 Subject: [PATCH 050/104] Move ipfs-specific method to own functions --- src/libstore/ipfs-binary-cache-store.cc | 21 ++++++++++++++++----- 1 file changed, 16 insertions(+), 5 deletions(-) diff --git a/src/libstore/ipfs-binary-cache-store.cc b/src/libstore/ipfs-binary-cache-store.cc index 43a5a664de1..a84e1c8cac2 100644 --- a/src/libstore/ipfs-binary-cache-store.cc +++ b/src/libstore/ipfs-binary-cache-store.cc @@ -191,7 +191,7 @@ class 
IPFSBinaryCacheStore : public BinaryCacheStore state->ipfsPath = "/ipfs/" + (std::string) json["Hash"]; } - void upsertFile(const std::string & path, const std::string & data, const std::string & mimeType) override + std::string addFile(const std::string & data) { // TODO: use callbacks @@ -199,10 +199,15 @@ class IPFSBinaryCacheStore : public BinaryCacheStore req.data = std::make_shared(data); req.post = true; req.tries = 1; + auto res = getFileTransfer()->upload(req); + auto json = nlohmann::json::parse(*res.data); + return (std::string) json["Hash"]; + } + + void upsertFile(const std::string & path, const std::string & data, const std::string & mimeType) override + { try { - auto res = getFileTransfer()->upload(req); - auto json = nlohmann::json::parse(*res.data); - addLink(path, "/ipfs/" + (std::string) json["Hash"]); + addLink(path, "/ipfs/" + addFile(data)); } catch (FileTransferError & e) { throw UploadToIPFS("while uploading to IPFS binary cache at '%s': %s", cacheUri, e.msg()); } @@ -211,7 +216,13 @@ class IPFSBinaryCacheStore : public BinaryCacheStore void getFile(const std::string & path, Callback> callback) noexcept override { - auto uri = daemonUri + "/api/v0/cat?arg=" + getFileTransfer()->urlEncode(getIpfsPath() + "/" + path); + getIpfsObject(getIpfsPath() + "/" + path, std::move(callback)); + } + + void getIpfsObject(const std::string & ipfsPath, + Callback> callback) noexcept + { + auto uri = daemonUri + "/api/v0/cat?arg=" + getFileTransfer()->urlEncode(ipfsPath); FileTransferRequest request(uri); request.post = true; From 4ca97246f89ecdc04aec725d61b04f43276dd8e3 Mon Sep 17 00:00:00 2001 From: Matthew Bauer Date: Mon, 15 Jun 2020 15:44:48 -0400 Subject: [PATCH 051/104] Correctly specify visibility of ipfs-binary-cache-store --- src/libstore/ipfs-binary-cache-store.cc | 10 +++++++++- 1 file changed, 9 insertions(+), 1 deletion(-) diff --git a/src/libstore/ipfs-binary-cache-store.cc b/src/libstore/ipfs-binary-cache-store.cc index 
a84e1c8cac2..be458cdb2ca 100644 --- a/src/libstore/ipfs-binary-cache-store.cc +++ b/src/libstore/ipfs-binary-cache-store.cc @@ -84,7 +84,7 @@ class IPFSBinaryCacheStore : public BinaryCacheStore BinaryCacheStore::init(); } -protected: +private: // Given a ipns path, checks if it corresponds to a DNSLink path, and in // case returns the domain @@ -98,6 +98,8 @@ class IPFSBinaryCacheStore : public BinaryCacheStore return std::nullopt; } +public: + bool fileExists(const std::string & path) override { auto uri = daemonUri + "/api/v0/object/stat?arg=" + getFileTransfer()->urlEncode(getIpfsPath() + "/" + path); @@ -173,6 +175,8 @@ class IPFSBinaryCacheStore : public BinaryCacheStore getFileTransfer()->download(req); } +private: + void addLink(std::string name, std::string ipfsObject) { auto state(_state.lock()); @@ -204,6 +208,8 @@ class IPFSBinaryCacheStore : public BinaryCacheStore return (std::string) json["Hash"]; } +public: + void upsertFile(const std::string & path, const std::string & data, const std::string & mimeType) override { try { @@ -219,6 +225,8 @@ class IPFSBinaryCacheStore : public BinaryCacheStore getIpfsObject(getIpfsPath() + "/" + path, std::move(callback)); } +private: + void getIpfsObject(const std::string & ipfsPath, Callback> callback) noexcept { From 784e786527fad09d75cb7ae7f83c4566a21ae69b Mon Sep 17 00:00:00 2001 From: Matthew Bauer Date: Mon, 15 Jun 2020 15:46:55 -0400 Subject: [PATCH 052/104] Add ipfsObjectExists --- src/libstore/ipfs-binary-cache-store.cc | 13 +++++++++---- 1 file changed, 9 insertions(+), 4 deletions(-) diff --git a/src/libstore/ipfs-binary-cache-store.cc b/src/libstore/ipfs-binary-cache-store.cc index be458cdb2ca..82e2f7d782c 100644 --- a/src/libstore/ipfs-binary-cache-store.cc +++ b/src/libstore/ipfs-binary-cache-store.cc @@ -98,11 +98,9 @@ class IPFSBinaryCacheStore : public BinaryCacheStore return std::nullopt; } -public: - - bool fileExists(const std::string & path) override + bool ipfsObjectExists(const std::string 
ipfsPath) { - auto uri = daemonUri + "/api/v0/object/stat?arg=" + getFileTransfer()->urlEncode(getIpfsPath() + "/" + path); + auto uri = daemonUri + "/api/v0/object/stat?arg=" + getFileTransfer()->urlEncode(ipfsPath); FileTransferRequest request(uri); request.post = true; @@ -119,6 +117,13 @@ class IPFSBinaryCacheStore : public BinaryCacheStore } } +public: + + bool fileExists(const std::string & path) override + { + return ipfsObjectExists(getIpfsRootDir() + "/" + path); + } + // Resolve the IPNS name to an IPFS object std::string resolveIPNSName(std::string ipnsPath, bool offline) { debug("Resolving IPFS object of '%s', this could take a while.", ipnsPath); From ca1a565e93cd1073b597130a4846542d791c9906 Mon Sep 17 00:00:00 2001 From: Matthew Bauer Date: Mon, 15 Jun 2020 17:13:16 -0400 Subject: [PATCH 053/104] Fixup ipfs-binary-cache-store modifiers --- src/libstore/ipfs-binary-cache-store.cc | 13 +++++++++---- 1 file changed, 9 insertions(+), 4 deletions(-) diff --git a/src/libstore/ipfs-binary-cache-store.cc b/src/libstore/ipfs-binary-cache-store.cc index 82e2f7d782c..a4ceee92037 100644 --- a/src/libstore/ipfs-binary-cache-store.cc +++ b/src/libstore/ipfs-binary-cache-store.cc @@ -88,7 +88,8 @@ class IPFSBinaryCacheStore : public BinaryCacheStore // Given a ipns path, checks if it corresponds to a DNSLink path, and in // case returns the domain - std::optional isDNSLinkPath(std::string path) { + static std::optional isDNSLinkPath(std::string path) + { if (path.find("/ipns/") != 0) throw Error("The provided path is not a ipns path"); auto subpath = std::string(path, 6); @@ -117,15 +118,17 @@ class IPFSBinaryCacheStore : public BinaryCacheStore } } -public: +protected: bool fileExists(const std::string & path) override { return ipfsObjectExists(getIpfsRootDir() + "/" + path); } +private: + // Resolve the IPNS name to an IPFS object - std::string resolveIPNSName(std::string ipnsPath, bool offline) { + static std::string resolveIPNSName(std::string ipnsPath, bool 
offline) { debug("Resolving IPFS object of '%s', this could take a while.", ipnsPath); auto uri = daemonUri + "/api/v0/name/resolve?offline=" + (offline?"true":"false") + "&arg=" + getFileTransfer()->urlEncode(ipnsPath); FileTransferRequest request(uri); @@ -138,6 +141,8 @@ class IPFSBinaryCacheStore : public BinaryCacheStore return json["Path"]; } +public: + // IPNS publish can be slow, we try to do it rarely. void sync() override { @@ -213,7 +218,7 @@ class IPFSBinaryCacheStore : public BinaryCacheStore return (std::string) json["Hash"]; } -public: +protected: void upsertFile(const std::string & path, const std::string & data, const std::string & mimeType) override { From d939738c4596b1f4adda8fde5be9a3f99fd0ba4e Mon Sep 17 00:00:00 2001 From: Carlo Nucera Date: Tue, 16 Jun 2020 10:55:49 -0400 Subject: [PATCH 054/104] Get and use proper key for uploading from the hash --- src/libstore/ipfs-binary-cache-store.cc | 22 +++++++++++++++++++++- 1 file changed, 21 insertions(+), 1 deletion(-) diff --git a/src/libstore/ipfs-binary-cache-store.cc b/src/libstore/ipfs-binary-cache-store.cc index 43a5a664de1..b93c0a0c558 100644 --- a/src/libstore/ipfs-binary-cache-store.cc +++ b/src/libstore/ipfs-binary-cache-store.cc @@ -165,7 +165,27 @@ class IPFSBinaryCacheStore : public BinaryCacheStore debug("Publishing '%s' to '%s', this could take a while.", state->ipfsPath, ipnsPath); auto uri = daemonUri + "/api/v0/name/publish?offline=true&arg=" + getFileTransfer()->urlEncode(state->ipfsPath); - uri += "&key=" + std::string(ipnsPath, 6); + + // Given the hash, we want to discover the corresponding name in the + // `ipfs key list` command, so that we publish to the right address in + // case the user has multiple ones available. 
+ + auto ipnsPathHash = std::string(ipnsPath, 6); + debug("Getting the name corresponding to hash %s", ipnsPathHash); + + auto keyListRequest = FileTransferRequest(daemonUri + "/api/v0/key/list/"); + keyListRequest.post = true; + keyListRequest.tries = 1; + + auto keyListResponse = nlohmann::json::parse(*(getFileTransfer()->download(keyListRequest)).data); + + std::string keyName; + for (nlohmann::json::iterator it = keyListResponse["Keys"].begin(); it != keyListResponse["Keys"].end(); ++it) + if ((*it)["Id"] == ipnsPathHash) + keyName = (*it)["Name"]; + + // Now we can append the keyname to our original request + uri += "&key=" + keyName; auto req = FileTransferRequest(uri); req.post = true; From 054cabfab90b7102a75b0ebe194c32fae4670219 Mon Sep 17 00:00:00 2001 From: Matthew Bauer Date: Mon, 15 Jun 2020 17:35:13 -0400 Subject: [PATCH 055/104] Add dag helper methods --- src/libstore/ipfs-binary-cache-store.cc | 24 ++++++++++++++++++++++-- 1 file changed, 22 insertions(+), 2 deletions(-) diff --git a/src/libstore/ipfs-binary-cache-store.cc b/src/libstore/ipfs-binary-cache-store.cc index a4ceee92037..7b2db46133e 100644 --- a/src/libstore/ipfs-binary-cache-store.cc +++ b/src/libstore/ipfs-binary-cache-store.cc @@ -86,6 +86,26 @@ class IPFSBinaryCacheStore : public BinaryCacheStore private: + std::string putIpfsDag(std::string data) + { + auto req = FileTransferRequest(daemonUri + "/api/v0/dag/put"); + req.data = std::make_shared(data); + req.post = true; + req.tries = 1; + auto res = getFileTransfer()->upload(req); + auto json = nlohmann::json::parse(*res.data); + return json["Cid"]["/"]; + } + + std::string getIpfsDag(std::string objectPath) + { + auto req = FileTransferRequest(daemonUri + "/api/v0/dag/get?arg=" + objectPath); + req.post = true; + req.tries = 1; + auto res = getFileTransfer()->download(req); + return *res.data; + } + // Given a ipns path, checks if it corresponds to a DNSLink path, and in // case returns the domain static std::optional 
isDNSLinkPath(std::string path) @@ -122,13 +142,13 @@ class IPFSBinaryCacheStore : public BinaryCacheStore bool fileExists(const std::string & path) override { - return ipfsObjectExists(getIpfsRootDir() + "/" + path); + return ipfsObjectExists(getIpfsPath() + "/" + path); } private: // Resolve the IPNS name to an IPFS object - static std::string resolveIPNSName(std::string ipnsPath, bool offline) { + std::string resolveIPNSName(std::string ipnsPath, bool offline) { debug("Resolving IPFS object of '%s', this could take a while.", ipnsPath); auto uri = daemonUri + "/api/v0/name/resolve?offline=" + (offline?"true":"false") + "&arg=" + getFileTransfer()->urlEncode(ipnsPath); FileTransferRequest request(uri); From df3aea2e1f37189526950e77413c1b0151df0262 Mon Sep 17 00:00:00 2001 From: Matthew Bauer Date: Tue, 16 Jun 2020 13:26:04 -0400 Subject: [PATCH 056/104] Implement Store directly instead of via BinaryCacheStore This leads to some duplication, but we will slowly remove that as we continue to add IPLD meta info. 
--- src/libstore/ipfs-binary-cache-store.cc | 309 ++++++++++++++++++++++-- 1 file changed, 292 insertions(+), 17 deletions(-) diff --git a/src/libstore/ipfs-binary-cache-store.cc b/src/libstore/ipfs-binary-cache-store.cc index 7b2db46133e..0e1d95c278e 100644 --- a/src/libstore/ipfs-binary-cache-store.cc +++ b/src/libstore/ipfs-binary-cache-store.cc @@ -4,16 +4,28 @@ #include "binary-cache-store.hh" #include "filetransfer.hh" #include "nar-info-disk-cache.hh" +#include "archive.hh" +#include "compression.hh" namespace nix { MakeError(UploadToIPFS, Error); -class IPFSBinaryCacheStore : public BinaryCacheStore +class IPFSBinaryCacheStore : public Store { +public: + + const Setting compression{this, "xz", "compression", "NAR compression method ('xz', 'bzip2', or 'none')"}; + const Setting secretKeyFile{this, "", "secret-key", "path to secret key used to sign the binary cache"}; + const Setting parallelCompression{this, false, "parallel-compression", + "enable multi-threading compression, available for xz only currently"}; + private: + std::unique_ptr secretKey; + std::string narMagic; + std::string cacheUri; std::string daemonUri; @@ -34,11 +46,18 @@ class IPFSBinaryCacheStore : public BinaryCacheStore IPFSBinaryCacheStore( const Params & params, const Path & _cacheUri) - : BinaryCacheStore(params) + : Store(params) , cacheUri(_cacheUri) { auto state(_state.lock()); + if (secretKeyFile != "") + secretKey = std::unique_ptr(new SecretKey(readFile(secretKeyFile))); + + StringSink sink; + sink << narVersionMagic1; + narMagic = *sink.s; + if (cacheUri.back() == '/') cacheUri.pop_back(); @@ -76,14 +95,6 @@ class IPFSBinaryCacheStore : public BinaryCacheStore return cacheUri; } - void init() override - { - std::string cacheInfoFile = "nix-cache-info"; - if (!fileExists(cacheInfoFile)) - upsertFile(cacheInfoFile, "StoreDir: " + storeDir + "\n", "text/x-nix-cache-info"); - BinaryCacheStore::init(); - } - private: std::string putIpfsDag(std::string data) @@ -140,7 +151,7 @@ 
class IPFSBinaryCacheStore : public BinaryCacheStore protected: - bool fileExists(const std::string & path) override + bool fileExists(const std::string & path) { return ipfsObjectExists(getIpfsPath() + "/" + path); } @@ -238,9 +249,7 @@ class IPFSBinaryCacheStore : public BinaryCacheStore return (std::string) json["Hash"]; } -protected: - - void upsertFile(const std::string & path, const std::string & data, const std::string & mimeType) override + void upsertFile(const std::string & path, const std::string & data, const std::string & mimeType) { try { addLink(path, "/ipfs/" + addFile(data)); @@ -250,12 +259,36 @@ class IPFSBinaryCacheStore : public BinaryCacheStore } void getFile(const std::string & path, - Callback> callback) noexcept override + Callback> callback) noexcept { getIpfsObject(getIpfsPath() + "/" + path, std::move(callback)); } -private: + void getFile(const std::string & path, Sink & sink) + { + std::promise> promise; + getFile(path, + {[&](std::future> result) { + try { + promise.set_value(result.get()); + } catch (...) 
{ + promise.set_exception(std::current_exception()); + } + }}); + auto data = promise.get_future().get(); + sink((unsigned char *) data->data(), data->size()); + } + + std::shared_ptr getFile(const std::string & path) + { + StringSink sink; + try { + getFile(path, sink); + } catch (NoSuchBinaryCacheFile &) { + return nullptr; + } + return sink.s; + } void getIpfsObject(const std::string & ipfsPath, Callback> callback) noexcept @@ -281,6 +314,249 @@ class IPFSBinaryCacheStore : public BinaryCacheStore ); } + std::string narInfoFileFor(const StorePath & storePath) + { + return storePathToHash(printStorePath(storePath)) + ".narinfo"; + } + + void writeNarInfo(ref narInfo) + { + auto narInfoFile = narInfoFileFor(narInfo->path); + + upsertFile(narInfoFile, narInfo->to_string(*this), "text/x-nix-narinfo"); + + auto hashPart = storePathToHash(printStorePath(narInfo->path)); + + { + auto state_(state.lock()); + state_->pathInfoCache.upsert(hashPart, PathInfoCacheValue { .value = std::shared_ptr(narInfo) }); + } + } + +public: + + void addToStore(const ValidPathInfo & info, Source & narSource, + RepairFlag repair, CheckSigsFlag checkSigs, std::shared_ptr accessor) override + { + // FIXME: See if we can use the original source to reduce memory usage. + auto nar = make_ref(narSource.drain()); + + if (!repair && isValidPath(info.path)) return; + + /* Verify that all references are valid. This may do some .narinfo + reads, but typically they'll already be cached. 
*/ + for (auto & ref : info.references) + try { + if (ref != info.path) + queryPathInfo(ref); + } catch (InvalidPath &) { + throw Error("cannot add '%s' to the binary cache because the reference '%s' is not valid", + printStorePath(info.path), printStorePath(ref)); + } + + assert(nar->compare(0, narMagic.size(), narMagic) == 0); + + auto narInfo = make_ref(info); + + narInfo->narSize = nar->size(); + narInfo->narHash = hashString(htSHA256, *nar); + + if (info.narHash && info.narHash != narInfo->narHash) + throw Error("refusing to copy corrupted path '%1%' to binary cache", printStorePath(info.path)); + + /* Compress the NAR. */ + narInfo->compression = compression; + auto now1 = std::chrono::steady_clock::now(); + auto narCompressed = compress(compression, *nar, parallelCompression); + auto now2 = std::chrono::steady_clock::now(); + narInfo->fileHash = hashString(htSHA256, *narCompressed); + narInfo->fileSize = narCompressed->size(); + + auto duration = std::chrono::duration_cast(now2 - now1).count(); + printMsg(lvlTalkative, "copying path '%1%' (%2% bytes, compressed %3$.1f%% in %4% ms) to binary cache", + printStorePath(narInfo->path), narInfo->narSize, + ((1.0 - (double) narCompressed->size() / nar->size()) * 100.0), + duration); + + narInfo->url = "nar/" + narInfo->fileHash.to_string(Base32, false) + ".nar" + + (compression == "xz" ? ".xz" : + compression == "bzip2" ? ".bz2" : + compression == "br" ? ".br" : + ""); + + /* Atomically write the NAR file. 
*/ + if (repair || !fileExists(narInfo->url)) { + stats.narWrite++; + upsertFile(narInfo->url, *narCompressed, "application/x-nix-nar"); + } else + stats.narWriteAverted++; + + stats.narWriteBytes += nar->size(); + stats.narWriteCompressedBytes += narCompressed->size(); + stats.narWriteCompressionTimeMs += duration; + + /* Atomically write the NAR info file.*/ + if (secretKey) narInfo->sign(*this, *secretKey); + + writeNarInfo(narInfo); + + stats.narInfoWrite++; + } + + bool isValidPathUncached(const StorePath & storePath) override + { + return fileExists(narInfoFileFor(storePath)); + } + + void narFromPath(const StorePath & storePath, Sink & sink) override + { + auto info = queryPathInfo(storePath).cast(); + + uint64_t narSize = 0; + + LambdaSink wrapperSink([&](const unsigned char * data, size_t len) { + sink(data, len); + narSize += len; + }); + + auto decompressor = makeDecompressionSink(info->compression, wrapperSink); + + try { + getFile(info->url, *decompressor); + } catch (NoSuchBinaryCacheFile & e) { + throw SubstituteGone(e.what()); + } + + decompressor->finish(); + + stats.narRead++; + //stats.narReadCompressedBytes += nar->size(); // FIXME + stats.narReadBytes += narSize; + } + + void queryPathInfoUncached(const StorePath & storePath, + Callback> callback) noexcept override + { + auto uri = getUri(); + auto storePathS = printStorePath(storePath); + auto act = std::make_shared(*logger, lvlTalkative, actQueryPathInfo, + fmt("querying info about '%s' on '%s'", storePathS, uri), Logger::Fields{storePathS, uri}); + PushActivity pact(act->id); + + auto narInfoFile = narInfoFileFor(storePath); + + auto callbackPtr = std::make_shared(std::move(callback)); + + getFile(narInfoFile, + {[=](std::future> fut) { + try { + auto data = fut.get(); + + if (!data) return (*callbackPtr)(nullptr); + + stats.narInfoRead++; + + (*callbackPtr)((std::shared_ptr) + std::make_shared(*this, *data, narInfoFile)); + + (void) act; // force Activity into this lambda to ensure it stays 
alive + } catch (...) { + callbackPtr->rethrow(); + } + }}); + } + + StorePath addToStore(const string & name, const Path & srcPath, + FileIngestionMethod method, HashType hashAlgo, PathFilter & filter, RepairFlag repair) override + { + // FIXME: some cut&paste from LocalStore::addToStore(). + + /* Read the whole path into memory. This is not a very scalable + method for very large paths, but `copyPath' is mainly used for + small files. */ + StringSink sink; + Hash h; + if (method == FileIngestionMethod::Recursive) { + dumpPath(srcPath, sink, filter); + h = hashString(hashAlgo, *sink.s); + } else { + auto s = readFile(srcPath); + dumpString(s, sink); + h = hashString(hashAlgo, s); + } + + ValidPathInfo info(makeFixedOutputPath(method, h, name)); + + auto source = StringSource { *sink.s }; + addToStore(info, source, repair, CheckSigs, nullptr); + + return std::move(info.path); + } + + StorePath addTextToStore(const string & name, const string & s, + const StorePathSet & references, RepairFlag repair) override + { + ValidPathInfo info(computeStorePathForText(name, s, references)); + info.references = cloneStorePathSet(references); + + if (repair || !isValidPath(info.path)) { + StringSink sink; + dumpString(s, sink); + auto source = StringSource { *sink.s }; + addToStore(info, source, repair, CheckSigs, nullptr); + } + + return std::move(info.path); + } + + void addSignatures(const StorePath & storePath, const StringSet & sigs) override + { + /* Note: this is inherently racy since there is no locking on + binary caches. In particular, with S3 this unreliable, even + when addSignatures() is called sequentially on a path, because + S3 might return an outdated cached version. 
*/ + + auto narInfo = make_ref((NarInfo &) *queryPathInfo(storePath)); + + narInfo->sigs.insert(sigs.begin(), sigs.end()); + + auto narInfoFile = narInfoFileFor(narInfo->path); + + writeNarInfo(narInfo); + } + + std::shared_ptr getBuildLog(const StorePath & path) override + { + auto drvPath = path.clone(); + + if (!path.isDerivation()) { + try { + auto info = queryPathInfo(path); + // FIXME: add a "Log" field to .narinfo + if (!info->deriver) return nullptr; + drvPath = info->deriver->clone(); + } catch (InvalidPath &) { + return nullptr; + } + } + + auto logPath = "log/" + std::string(baseNameOf(printStorePath(drvPath))); + + debug("fetching build log from binary cache '%s/%s'", getUri(), logPath); + + return getFile(logPath); + } + + BuildResult buildDerivation(const StorePath & drvPath, const BasicDerivation & drv, + BuildMode buildMode) override + { unsupported("buildDerivation"); } + + void ensurePath(const StorePath & path) override + { unsupported("ensurePath"); } + + std::optional queryPathFromHashPart(const std::string & hashPart) override + { unsupported("queryPathFromHashPart"); } + }; static RegisterStoreImplementation regStore([]( @@ -291,7 +567,6 @@ static RegisterStoreImplementation regStore([]( uri.substr(0, strlen("ipns://")) != "ipns://") return 0; auto store = std::make_shared(params, uri); - store->init(); return store; }); From 213ded33b52a5adbbe9a177dd904c62f8eed500c Mon Sep 17 00:00:00 2001 From: Matthew Bauer Date: Tue, 16 Jun 2020 14:01:34 -0400 Subject: [PATCH 057/104] Remove offline from publish --- src/libstore/ipfs-binary-cache-store.cc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/libstore/ipfs-binary-cache-store.cc b/src/libstore/ipfs-binary-cache-store.cc index 0e1d95c278e..bdc8e787531 100644 --- a/src/libstore/ipfs-binary-cache-store.cc +++ b/src/libstore/ipfs-binary-cache-store.cc @@ -207,7 +207,7 @@ class IPFSBinaryCacheStore : public Store debug("Publishing '%s' to '%s', this could take a while.", 
state->ipfsPath, ipnsPath); - auto uri = daemonUri + "/api/v0/name/publish?offline=true&arg=" + getFileTransfer()->urlEncode(state->ipfsPath); + auto uri = daemonUri + "/api/v0/name/publish?arg=" + getFileTransfer()->urlEncode(state->ipfsPath); uri += "&key=" + std::string(ipnsPath, 6); auto req = FileTransferRequest(uri); From 93d71aed2042e01692e69c1c81bd45e7c7062f58 Mon Sep 17 00:00:00 2001 From: Matthew Bauer Date: Tue, 16 Jun 2020 18:35:17 -0400 Subject: [PATCH 058/104] Use IPLD instead of UnixFS This lets us have some more structure to our data. We can store key value for meta and narinfos. --- src/libstore/ipfs-binary-cache-store.cc | 210 ++++++++++++++++-------- 1 file changed, 142 insertions(+), 68 deletions(-) diff --git a/src/libstore/ipfs-binary-cache-store.cc b/src/libstore/ipfs-binary-cache-store.cc index bdc8e787531..00b04a9ce5c 100644 --- a/src/libstore/ipfs-binary-cache-store.cc +++ b/src/libstore/ipfs-binary-cache-store.cc @@ -88,6 +88,22 @@ class IPFSBinaryCacheStore : public Store initialIpfsPath = resolveIPNSName(ipnsPath, true); state->ipfsPath = initialIpfsPath; } + + auto json = getIpfsDag(state->ipfsPath); + + // Verify StoreDir is correct + if (json.find("StoreDir") == json.end()) { + json["StoreDir"] = storeDir; + state->ipfsPath = putIpfsDag(json); + } else if (json["StoreDir"] != storeDir) + throw Error(format("binary cache '%s' is for Nix stores with prefix '%s', not '%s'") + % getUri() % json["StoreDir"] % storeDir); + + if (json.find("WantMassQuery") != json.end()) + wantMassQuery.setDefault(json["WantMassQuery"] ? 
"true" : "false"); + + if (json.find("Priority") != json.end()) + priority.setDefault(fmt("%d", json["Priority"])); } std::string getUri() override @@ -97,24 +113,27 @@ class IPFSBinaryCacheStore : public Store private: - std::string putIpfsDag(std::string data) + std::string putIpfsDag(nlohmann::json data) { auto req = FileTransferRequest(daemonUri + "/api/v0/dag/put"); - req.data = std::make_shared(data); + req.data = std::make_shared(data.dump()); req.post = true; req.tries = 1; auto res = getFileTransfer()->upload(req); auto json = nlohmann::json::parse(*res.data); - return json["Cid"]["/"]; + return "/ipfs/" + (std::string) json["Cid"]["/"]; } - std::string getIpfsDag(std::string objectPath) + nlohmann::json getIpfsDag(std::string objectPath) { + debug("get ipfs dag %s", objectPath); + auto req = FileTransferRequest(daemonUri + "/api/v0/dag/get?arg=" + objectPath); req.post = true; req.tries = 1; auto res = getFileTransfer()->download(req); - return *res.data; + auto json = nlohmann::json::parse(*res.data); + return json; } // Given a ipns path, checks if it corresponds to a DNSLink path, and in @@ -149,15 +168,11 @@ class IPFSBinaryCacheStore : public Store } } -protected: - bool fileExists(const std::string & path) { return ipfsObjectExists(getIpfsPath() + "/" + path); } -private: - // Resolve the IPNS name to an IPFS object std::string resolveIPNSName(std::string ipnsPath, bool offline) { debug("Resolving IPFS object of '%s', this could take a while.", ipnsPath); @@ -261,7 +276,10 @@ class IPFSBinaryCacheStore : public Store void getFile(const std::string & path, Callback> callback) noexcept { - getIpfsObject(getIpfsPath() + "/" + path, std::move(callback)); + std::string path_(path); + if (hasPrefix(path, "ipfs://")) + path_ = "/ipfs/" + std::string(path, 7); + getIpfsObject(path_, std::move(callback)); } void getFile(const std::string & path, Sink & sink) @@ -314,21 +332,68 @@ class IPFSBinaryCacheStore : public Store ); } - std::string 
narInfoFileFor(const StorePath & storePath) - { - return storePathToHash(printStorePath(storePath)) + ".narinfo"; - } - void writeNarInfo(ref narInfo) { - auto narInfoFile = narInfoFileFor(narInfo->path); + auto json = nlohmann::json::object(); + json["narHash"] = narInfo->narHash.to_string(Base32); + json["narSize"] = narInfo->narSize; + + auto narMap = getIpfsDag(getIpfsPath())["nar"]; + + json["references"] = nlohmann::json::array(); + for (auto & ref : narInfo->references) { + if (ref == narInfo->path) { + json["references"].push_back(printStorePath(ref)); + } else { + json["references"].push_back(narMap[printStorePath(ref)]); + } + } - upsertFile(narInfoFile, narInfo->to_string(*this), "text/x-nix-narinfo"); + if (narInfo->ca != "") + json["ca"] = narInfo->ca; - auto hashPart = storePathToHash(printStorePath(narInfo->path)); + if (narInfo->deriver) + json["deriver"] = printStorePath(*narInfo->deriver); + + if (narInfo->registrationTime) + json["registrationTime"] = narInfo->registrationTime; + + if (narInfo->ultimate) + json["ultimate"] = narInfo->ultimate; + + if (!narInfo->sigs.empty()) { + json["sigs"] = nlohmann::json::array(); + for (auto & sig : narInfo->sigs) + json["sigs"].push_back(sig); + } + + if (!narInfo->url.empty()) { + json["url"] = nlohmann::json::object(); + json["url"]["/"] = std::string(narInfo->url, 7); + } + if (narInfo->fileHash) + json["downloadHash"] = narInfo->fileHash.to_string(); + if (narInfo->fileSize) + json["downloadSize"] = narInfo->fileSize; + + auto narObjectPath = putIpfsDag(json); + + auto state(_state.lock()); + json = getIpfsDag(state->ipfsPath); + + if (json.find("nar") == json.end()) + json["nar"] = nlohmann::json::object(); + + auto hashObject = nlohmann::json::object(); + hashObject.emplace("/", std::string(narObjectPath, 6)); + + json["nar"].emplace(printStorePath(narInfo->path), hashObject); + + state->ipfsPath = putIpfsDag(json); { - auto state_(state.lock()); + auto hashPart = 
storePathToHash(printStorePath(narInfo->path)); + auto state_(this->state.lock()); state_->pathInfoCache.upsert(hashPart, PathInfoCacheValue { .value = std::shared_ptr(narInfo) }); } } @@ -378,18 +443,9 @@ class IPFSBinaryCacheStore : public Store ((1.0 - (double) narCompressed->size() / nar->size()) * 100.0), duration); - narInfo->url = "nar/" + narInfo->fileHash.to_string(Base32, false) + ".nar" - + (compression == "xz" ? ".xz" : - compression == "bzip2" ? ".bz2" : - compression == "br" ? ".br" : - ""); - /* Atomically write the NAR file. */ - if (repair || !fileExists(narInfo->url)) { - stats.narWrite++; - upsertFile(narInfo->url, *narCompressed, "application/x-nix-nar"); - } else - stats.narWriteAverted++; + stats.narWrite++; + narInfo->url = "ipfs://" + addFile(*narCompressed); stats.narWriteBytes += nar->size(); stats.narWriteCompressedBytes += narCompressed->size(); @@ -405,7 +461,10 @@ class IPFSBinaryCacheStore : public Store bool isValidPathUncached(const StorePath & storePath) override { - return fileExists(narInfoFileFor(storePath)); + auto json = getIpfsDag(getIpfsPath()); + if (!json.contains("nar")) + return false; + return json["nar"].contains(printStorePath(storePath)); } void narFromPath(const StorePath & storePath, Sink & sink) override @@ -437,33 +496,69 @@ class IPFSBinaryCacheStore : public Store void queryPathInfoUncached(const StorePath & storePath, Callback> callback) noexcept override { + // TODO: properly use callbacks + + auto callbackPtr = std::make_shared(std::move(callback)); + auto uri = getUri(); auto storePathS = printStorePath(storePath); auto act = std::make_shared(*logger, lvlTalkative, actQueryPathInfo, fmt("querying info about '%s' on '%s'", storePathS, uri), Logger::Fields{storePathS, uri}); PushActivity pact(act->id); - auto narInfoFile = narInfoFileFor(storePath); + auto json = getIpfsDag(getIpfsPath()); - auto callbackPtr = std::make_shared(std::move(callback)); + if (!json.contains("nar") || 
!json["nar"].contains(printStorePath(storePath))) + return (*callbackPtr)(nullptr); - getFile(narInfoFile, - {[=](std::future> fut) { - try { - auto data = fut.get(); + auto narObjectHash = (std::string) json["nar"][printStorePath(storePath)]["/"]; + json = getIpfsDag("/ipfs/" + narObjectHash); - if (!data) return (*callbackPtr)(nullptr); + NarInfo narInfo(storePath.clone()); + narInfo.narHash = Hash((std::string) json["narHash"]); + narInfo.narSize = json["narSize"]; - stats.narInfoRead++; + auto narMap = getIpfsDag(getIpfsPath())["nar"]; + for (auto & ref : json["references"]) { + if (ref.type() == nlohmann::json::value_t::object) { + for (auto & v : narMap.items()) { + if (v.value() == ref) { + narInfo.references.insert(parseStorePath(v.key())); + break; + } + } + } else if (ref.type() == nlohmann::json::value_t::string) + narInfo.references.insert(parseStorePath((std::string) ref)); + } - (*callbackPtr)((std::shared_ptr) - std::make_shared(*this, *data, narInfoFile)); + if (json.find("ca") != json.end()) + narInfo.ca = json["ca"]; - (void) act; // force Activity into this lambda to ensure it stays alive - } catch (...) 
{ - callbackPtr->rethrow(); - } - }}); + if (json.find("deriver") != json.end()) + narInfo.deriver = parseStorePath((std::string) json["deriver"]); + + if (json.find("registrationTime") != json.end()) + narInfo.registrationTime = json["registrationTime"]; + + if (json.find("ultimate") != json.end()) + narInfo.ultimate = json["ultimate"]; + + if (json.find("sigs") != json.end()) + for (auto & sig : json["sigs"]) + narInfo.sigs.insert((std::string) sig); + + if (json.find("url") != json.end()) { + narInfo.url = "/ipfs/" + json["url"]["/"]; + } + + if (json.find("downloadHash") != json.end()) + narInfo.fileHash = Hash((std::string) json["downloadHash"]); + + if (json.find("downloadSize") != json.end()) + narInfo.fileSize = json["downloadSize"]; + + (*callbackPtr)((std::shared_ptr) + std::make_shared(narInfo)); } StorePath addToStore(const string & name, const Path & srcPath, @@ -520,32 +615,11 @@ class IPFSBinaryCacheStore : public Store narInfo->sigs.insert(sigs.begin(), sigs.end()); - auto narInfoFile = narInfoFileFor(narInfo->path); - writeNarInfo(narInfo); } std::shared_ptr getBuildLog(const StorePath & path) override - { - auto drvPath = path.clone(); - - if (!path.isDerivation()) { - try { - auto info = queryPathInfo(path); - // FIXME: add a "Log" field to .narinfo - if (!info->deriver) return nullptr; - drvPath = info->deriver->clone(); - } catch (InvalidPath &) { - return nullptr; - } - } - - auto logPath = "log/" + std::string(baseNameOf(printStorePath(drvPath))); - - debug("fetching build log from binary cache '%s/%s'", getUri(), logPath); - - return getFile(logPath); - } + { unsupported("getBuildLog"); } BuildResult buildDerivation(const StorePath & drvPath, const BasicDerivation & drv, BuildMode buildMode) override From d1fc0770e3e6c1c738f36262d8709c37b66987a0 Mon Sep 17 00:00:00 2001 From: Matthew Bauer Date: Tue, 16 Jun 2020 18:36:05 -0400 Subject: [PATCH 059/104] Make error message more idiomatic --- src/libstore/ipfs-binary-cache-store.cc | 2 +- 1 file 
changed, 1 insertion(+), 1 deletion(-) diff --git a/src/libstore/ipfs-binary-cache-store.cc b/src/libstore/ipfs-binary-cache-store.cc index 00b04a9ce5c..27366cc4e4d 100644 --- a/src/libstore/ipfs-binary-cache-store.cc +++ b/src/libstore/ipfs-binary-cache-store.cc @@ -141,7 +141,7 @@ class IPFSBinaryCacheStore : public Store static std::optional isDNSLinkPath(std::string path) { if (path.find("/ipns/") != 0) - throw Error("The provided path is not a ipns path"); + throw Error("path '%s' is not an ipns path", path); auto subpath = std::string(path, 6); if (subpath.find(".") != std::string::npos) { return subpath; From 7fa8391f3f6aafb6b1540b5e078e32bb5454e574 Mon Sep 17 00:00:00 2001 From: Matthew Bauer Date: Wed, 17 Jun 2020 10:55:59 -0400 Subject: [PATCH 060/104] Properly store compression / system in ipfs binary cache These values were originally missing in the json. They are needed to properly decompress the NAR. --- src/libstore/ipfs-binary-cache-store.cc | 16 +++++++++++----- 1 file changed, 11 insertions(+), 5 deletions(-) diff --git a/src/libstore/ipfs-binary-cache-store.cc b/src/libstore/ipfs-binary-cache-store.cc index bdcda86861f..ef259966e6f 100644 --- a/src/libstore/ipfs-binary-cache-store.cc +++ b/src/libstore/ipfs-binary-cache-store.cc @@ -126,8 +126,6 @@ class IPFSBinaryCacheStore : public Store nlohmann::json getIpfsDag(std::string objectPath) { - debug("get ipfs dag %s", objectPath); - auto req = FileTransferRequest(daemonUri + "/api/v0/dag/get?arg=" + objectPath); req.post = true; req.tries = 1; @@ -376,6 +374,9 @@ class IPFSBinaryCacheStore : public Store if (narInfo->fileSize) json["downloadSize"] = narInfo->fileSize; + json["compression"] = narInfo->compression; + json["system"] = narInfo->system; + auto narObjectPath = putIpfsDag(json); auto state(_state.lock()); @@ -549,9 +550,8 @@ class IPFSBinaryCacheStore : public Store for (auto & sig : json["sigs"]) narInfo.sigs.insert((std::string) sig); - if (json.find("url") != json.end()) { - 
narInfo.url = "/ipfs/" + json["url"]["/"].get(); - } + if (json.find("url") != json.end()) + narInfo.url = "ipfs://" + json["url"]["/"].get(); if (json.find("downloadHash") != json.end()) narInfo.fileHash = Hash((std::string) json["downloadHash"]); @@ -559,6 +559,12 @@ class IPFSBinaryCacheStore : public Store if (json.find("downloadSize") != json.end()) narInfo.fileSize = json["downloadSize"]; + if (json.find("compression") != json.end()) + narInfo.compression = json["compression"]; + + if (json.find("system") != json.end()) + narInfo.system = json["system"]; + (*callbackPtr)((std::shared_ptr) std::make_shared(narInfo)); } From 00e351b8fe71ec648ac6c55d08826013a72372f6 Mon Sep 17 00:00:00 2001 From: Carlo Nucera Date: Wed, 17 Jun 2020 13:40:05 -0400 Subject: [PATCH 061/104] Add direct uploading to ipfs and ipns --- src/libstore/ipfs-binary-cache-store.cc | 2 +- tests/ipfs.sh | 8 +++++++- 2 files changed, 8 insertions(+), 2 deletions(-) diff --git a/src/libstore/ipfs-binary-cache-store.cc b/src/libstore/ipfs-binary-cache-store.cc index b93c0a0c558..cc09a516290 100644 --- a/src/libstore/ipfs-binary-cache-store.cc +++ b/src/libstore/ipfs-binary-cache-store.cc @@ -143,7 +143,7 @@ class IPFSBinaryCacheStore : public BinaryCacheStore auto ipnsPath = *optIpnsPath; - auto resolvedIpfsPath = resolveIPNSName(ipnsPath, false); + auto resolvedIpfsPath = resolveIPNSName(ipnsPath, true); if (resolvedIpfsPath != initialIpfsPath) { throw Error("The IPNS hash or DNS link %s resolves now to something different from the value it had when Nix was started;\n wanted: %s\n got %s\nPerhaps something else updated it in the meantime?", initialIpfsPath, resolvedIpfsPath); diff --git a/tests/ipfs.sh b/tests/ipfs.sh index 9cad1c381f6..2903cc8d244 100644 --- a/tests/ipfs.sh +++ b/tests/ipfs.sh @@ -58,7 +58,7 @@ nix-store --generate-binary-cache-key $SIGNING_KEY_NAME $SIGNING_KEY_PRI_FILE $S ################################################################################ mkdir -p $IPFS_SRC_STORE 
-storePaths=$(nix-build ./fixed.nix -A good | sort | uniq) +storePaths=$(nix-build ./fixed.nix -A good) # Hack around https://github.com/NixOS/nix/issues/3695 for path in $storePaths; do @@ -90,6 +90,9 @@ DOWNLOAD_LOCATION=$(nix-build ./fixed.nix -A good \ ## Create the ipfs store and download the derivation there ################################################################################ +# Check that we can upload the ipfs store directly +nix copy --to ipfs://$IPFS_HASH $(nix-build ./fixed.nix -A good) --experimental-features nix-command + mkdir $IPFS_DST_IPFS_STORE DOWNLOAD_LOCATION=$(nix-build ./fixed.nix -A good \ @@ -107,6 +110,9 @@ DOWNLOAD_LOCATION=$(nix-build ./fixed.nix -A good \ # First I have to publish: IPNS_ID=$(ipfs name publish $IPFS_HASH --allow-offline | awk '{print substr($3,1,length($3)-1)}') +# Check that we can upload the ipns store directly +nix copy --to ipns://$IPNS_ID $(nix-build ./fixed.nix -A good) --experimental-features nix-command + mkdir $IPFS_DST_IPNS_STORE DOWNLOAD_LOCATION=$(nix-build ./fixed.nix -A good \ From 90c5199dc06b04eb4bf6e7c0541ed26cd429b81a Mon Sep 17 00:00:00 2001 From: Carlo Nucera Date: Wed, 17 Jun 2020 14:29:14 -0400 Subject: [PATCH 062/104] Add possibility of pushing directly to ipfs --- tests/ipfs.sh | 25 ++++++++++++++++++------- 1 file changed, 18 insertions(+), 7 deletions(-) diff --git a/tests/ipfs.sh b/tests/ipfs.sh index 2903cc8d244..61086c15b1e 100644 --- a/tests/ipfs.sh +++ b/tests/ipfs.sh @@ -43,6 +43,8 @@ IPFS_DST_HTTP_LOCAL_STORE=$IPFS_TESTS/ipfs_dest_http_local_store IPFS_DST_IPFS_STORE=$IPFS_TESTS/ipfs_dest_ipfs_store IPFS_DST_IPNS_STORE=$IPFS_TESTS/ipfs_dest_ipns_store +EMPTY_DIR_HASH=QmUNLLsPACCz1vLxQVkXqqLX5R1X345qqfHbsf67hvA3Nn + ################################################################################ ## Generate the keys to sign the store ################################################################################ @@ -54,22 +56,25 @@ 
SIGNING_KEY_PUB_FILE=$IPFS_TESTS/nix-cache-key.pub nix-store --generate-binary-cache-key $SIGNING_KEY_NAME $SIGNING_KEY_PRI_FILE $SIGNING_KEY_PUB_FILE ################################################################################ -## Create, sign and upload the source store +## Create and sign the source store ################################################################################ mkdir -p $IPFS_SRC_STORE storePaths=$(nix-build ./fixed.nix -A good) +nix sign-paths -k $SIGNING_KEY_PRI_FILE $storePaths + +################################################################################ +## Manually upload the source store +################################################################################ + # Hack around https://github.com/NixOS/nix/issues/3695 for path in $storePaths; do nix copy --to file://$IPFS_SRC_STORE $path done unset path -nix sign-paths --store file://$IPFS_SRC_STORE \ - -k $SIGNING_KEY_PRI_FILE $storePaths - -IPFS_HASH=$(ipfs add -r $IPFS_SRC_STORE 2>/dev/null | tail -n 1 | awk '{print $2}') +MANUAL_IPFS_HASH=$(ipfs add -r $IPFS_SRC_STORE 2>/dev/null | tail -n 1 | awk '{print $2}') ################################################################################ ## Create the local http store and download the derivation there @@ -80,7 +85,7 @@ mkdir $IPFS_DST_HTTP_LOCAL_STORE IPFS_HTTP_LOCAL_PREFIX='http://localhost:8080/ipfs' DOWNLOAD_LOCATION=$(nix-build ./fixed.nix -A good \ - --option substituters $IPFS_HTTP_LOCAL_PREFIX/$IPFS_HASH \ + --option substituters $IPFS_HTTP_LOCAL_PREFIX/$MANUAL_IPFS_HASH \ --store $IPFS_DST_HTTP_LOCAL_STORE \ --no-out-link \ -j0 \ @@ -90,7 +95,13 @@ DOWNLOAD_LOCATION=$(nix-build ./fixed.nix -A good \ ## Create the ipfs store and download the derivation there ################################################################################ -# Check that we can upload the ipfs store directly +# Try to upload the content to the empty directory, fail but grab the right hash +IPFS_HASH=$(set -e; \ + set -o 
pipefail; \ + ! nix copy --to ipfs://$EMPTY_DIR_HASH $(nix-build ./fixed.nix -A good) --experimental-features nix-command \ + |& grep current: | awk '{print substr($2, 7, length($2))}') + +# Upload the content with the right hash nix copy --to ipfs://$IPFS_HASH $(nix-build ./fixed.nix -A good) --experimental-features nix-command mkdir $IPFS_DST_IPFS_STORE From 23347cb3a946cc0adb10ecb8566d28ddd8f56cf9 Mon Sep 17 00:00:00 2001 From: Matthew Bauer Date: Wed, 17 Jun 2020 15:18:31 -0400 Subject: [PATCH 063/104] Move compareVersions to libutil this is useful outside of libexpr --- src/libexpr/names.cc | 56 +------------------------------------ src/libexpr/names.hh | 4 +-- src/libutil/versions.cc | 61 +++++++++++++++++++++++++++++++++++++++++ src/libutil/versions.hh | 14 ++++++++++ 4 files changed, 77 insertions(+), 58 deletions(-) create mode 100644 src/libutil/versions.cc create mode 100644 src/libutil/versions.hh diff --git a/src/libexpr/names.cc b/src/libexpr/names.cc index d1c8a6101f8..d9ed8fa68ef 100644 --- a/src/libexpr/names.cc +++ b/src/libexpr/names.cc @@ -1,5 +1,5 @@ #include "names.hh" -#include "util.hh" +#include "versions.hh" namespace nix { @@ -41,60 +41,6 @@ bool DrvName::matches(DrvName & n) } -string nextComponent(string::const_iterator & p, - const string::const_iterator end) -{ - /* Skip any dots and dashes (component separators). */ - while (p != end && (*p == '.' || *p == '-')) ++p; - - if (p == end) return ""; - - /* If the first character is a digit, consume the longest sequence - of digits. Otherwise, consume the longest sequence of - non-digit, non-separator characters. */ - string s; - if (isdigit(*p)) - while (p != end && isdigit(*p)) s += *p++; - else - while (p != end && (!isdigit(*p) && *p != '.' 
&& *p != '-')) - s += *p++; - - return s; -} - - -static bool componentsLT(const string & c1, const string & c2) -{ - int n1, n2; - bool c1Num = string2Int(c1, n1), c2Num = string2Int(c2, n2); - - if (c1Num && c2Num) return n1 < n2; - else if (c1 == "" && c2Num) return true; - else if (c1 == "pre" && c2 != "pre") return true; - else if (c2 == "pre") return false; - /* Assume that `2.3a' < `2.3.1'. */ - else if (c2Num) return true; - else if (c1Num) return false; - else return c1 < c2; -} - - -int compareVersions(const string & v1, const string & v2) -{ - string::const_iterator p1 = v1.begin(); - string::const_iterator p2 = v2.begin(); - - while (p1 != v1.end() || p2 != v2.end()) { - string c1 = nextComponent(p1, v1.end()); - string c2 = nextComponent(p2, v2.end()); - if (componentsLT(c1, c2)) return -1; - else if (componentsLT(c2, c1)) return 1; - } - - return 0; -} - - DrvNames drvNamesFromArgs(const Strings & opArgs) { DrvNames result; diff --git a/src/libexpr/names.hh b/src/libexpr/names.hh index 00e14b8c797..d682a69d078 100644 --- a/src/libexpr/names.hh +++ b/src/libexpr/names.hh @@ -3,6 +3,7 @@ #include #include "types.hh" +#include "versions.hh" #include namespace nix { @@ -24,9 +25,6 @@ private: typedef list DrvNames; -string nextComponent(string::const_iterator & p, - const string::const_iterator end); -int compareVersions(const string & v1, const string & v2); DrvNames drvNamesFromArgs(const Strings & opArgs); } diff --git a/src/libutil/versions.cc b/src/libutil/versions.cc new file mode 100644 index 00000000000..d38f3ec2518 --- /dev/null +++ b/src/libutil/versions.cc @@ -0,0 +1,61 @@ +#include "util.hh" +#include "versions.hh" + +namespace nix { + + +string nextComponent(string::const_iterator & p, + const string::const_iterator end) +{ + /* Skip any dots and dashes (component separators). */ + while (p != end && (*p == '.' || *p == '-')) ++p; + + if (p == end) return ""; + + /* If the first character is a digit, consume the longest sequence + of digits. 
Otherwise, consume the longest sequence of + non-digit, non-separator characters. */ + string s; + if (isdigit(*p)) + while (p != end && isdigit(*p)) s += *p++; + else + while (p != end && (!isdigit(*p) && *p != '.' && *p != '-')) + s += *p++; + + return s; +} + + +static bool componentsLT(const string & c1, const string & c2) +{ + int n1, n2; + bool c1Num = string2Int(c1, n1), c2Num = string2Int(c2, n2); + + if (c1Num && c2Num) return n1 < n2; + else if (c1 == "" && c2Num) return true; + else if (c1 == "pre" && c2 != "pre") return true; + else if (c2 == "pre") return false; + /* Assume that `2.3a' < `2.3.1'. */ + else if (c2Num) return true; + else if (c1Num) return false; + else return c1 < c2; +} + + +int compareVersions(const string & v1, const string & v2) +{ + string::const_iterator p1 = v1.begin(); + string::const_iterator p2 = v2.begin(); + + while (p1 != v1.end() || p2 != v2.end()) { + string c1 = nextComponent(p1, v1.end()); + string c2 = nextComponent(p2, v2.end()); + if (componentsLT(c1, c2)) return -1; + else if (componentsLT(c2, c1)) return 1; + } + + return 0; +} + + +} diff --git a/src/libutil/versions.hh b/src/libutil/versions.hh new file mode 100644 index 00000000000..41e6f0f417b --- /dev/null +++ b/src/libutil/versions.hh @@ -0,0 +1,14 @@ +#pragma once + +#include + +#include "types.hh" +#include + +namespace nix { + +string nextComponent(string::const_iterator & p, + const string::const_iterator end); +int compareVersions(const string & v1, const string & v2); + +} From 38ef66d16dc5a380f43eb2d57efd2385bfe6417c Mon Sep 17 00:00:00 2001 From: Matthew Bauer Date: Wed, 17 Jun 2020 15:21:52 -0400 Subject: [PATCH 064/104] Add check to make sure ipfs daemon is 0.4.0 In the future we may make this 0.5.0. 
--- src/libstore/ipfs-binary-cache-store.cc | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/src/libstore/ipfs-binary-cache-store.cc b/src/libstore/ipfs-binary-cache-store.cc index 8de3b035934..49daed131e6 100644 --- a/src/libstore/ipfs-binary-cache-store.cc +++ b/src/libstore/ipfs-binary-cache-store.cc @@ -4,6 +4,7 @@ #include "binary-cache-store.hh" #include "filetransfer.hh" #include "nar-info-disk-cache.hh" +#include "versions.hh" namespace nix { @@ -61,6 +62,9 @@ class IPFSBinaryCacheStore : public BinaryCacheStore if (versionInfo.find("Version") == versionInfo.end()) throw Error("daemon for IPFS is not running properly"); + if (compareVersions(versionInfo["Version"], "0.4.0") < 0) + throw Error("daemon for IPFS is %s, when a minimum of 0.4.0 is required", versionInfo["Version"]); + // Resolve the IPNS name to an IPFS object if (optIpnsPath) { auto ipnsPath = *optIpnsPath; From fadc5bfdf4eb6808e10b2df3c23afbd56fe249f4 Mon Sep 17 00:00:00 2001 From: Carlo Nucera Date: Wed, 17 Jun 2020 15:29:43 -0400 Subject: [PATCH 065/104] Publish the empty-dir to ipns to get the hash --- tests/ipfs.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/ipfs.sh b/tests/ipfs.sh index 61086c15b1e..5394a062ffa 100644 --- a/tests/ipfs.sh +++ b/tests/ipfs.sh @@ -119,7 +119,7 @@ DOWNLOAD_LOCATION=$(nix-build ./fixed.nix -A good \ ################################################################################ # First I have to publish: -IPNS_ID=$(ipfs name publish $IPFS_HASH --allow-offline | awk '{print substr($3,1,length($3)-1)}') +IPNS_ID=$(ipfs name publish $EMPTY_DIR_HASH --allow-offline | awk '{print substr($3,1,length($3)-1)}') # Check that we can upload the ipns store directly nix copy --to ipns://$IPNS_ID $(nix-build ./fixed.nix -A good) --experimental-features nix-command From 101e718a7bcd20370334bfde64feaecc18c30519 Mon Sep 17 00:00:00 2001 From: Carlo Nucera Date: Wed, 17 Jun 2020 15:37:01 -0400 Subject: [PATCH 066/104] Throw error if keyName is 
not found --- src/libstore/ipfs-binary-cache-store.cc | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/src/libstore/ipfs-binary-cache-store.cc b/src/libstore/ipfs-binary-cache-store.cc index cc09a516290..f386c1ad232 100644 --- a/src/libstore/ipfs-binary-cache-store.cc +++ b/src/libstore/ipfs-binary-cache-store.cc @@ -179,10 +179,13 @@ class IPFSBinaryCacheStore : public BinaryCacheStore auto keyListResponse = nlohmann::json::parse(*(getFileTransfer()->download(keyListRequest)).data); - std::string keyName; + std::string keyName {""}; for (nlohmann::json::iterator it = keyListResponse["Keys"].begin(); it != keyListResponse["Keys"].end(); ++it) if ((*it)["Id"] == ipnsPathHash) keyName = (*it)["Name"]; + if (keyName == "") { + throw Error("We couldn't find a name corresponding to the provided ipns hash:\n hash: %s", ipnsPathHash); + } // Now we can append the keyname to our original request uri += "&key=" + keyName; From 5caf620381dd809b08f0c881cc708f108b1e2a75 Mon Sep 17 00:00:00 2001 From: Matthew Bauer Date: Wed, 17 Jun 2020 16:53:53 -0400 Subject: [PATCH 067/104] Increment CURLOPT_EXPECT_100_TIMEOUT_MS to 5 min MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Curl’s Expect: 100-continue seems to mess up IPFS long response. I’m unsure why, but it may have something to do with how Nix is handling threads. 
Here, I set it to 5 minutes which should never be reached --- src/libstore/filetransfer.cc | 2 ++ src/libstore/ipfs-binary-cache-store.cc | 8 ++++---- 2 files changed, 6 insertions(+), 4 deletions(-) diff --git a/src/libstore/filetransfer.cc b/src/libstore/filetransfer.cc index b760c45671d..586ccbe14a1 100644 --- a/src/libstore/filetransfer.cc +++ b/src/libstore/filetransfer.cc @@ -324,6 +324,8 @@ struct curlFileTransfer : public FileTransfer curl_easy_setopt(req, CURLOPT_LOW_SPEED_LIMIT, 1L); curl_easy_setopt(req, CURLOPT_LOW_SPEED_TIME, fileTransferSettings.stalledDownloadTimeout.get()); + curl_easy_setopt(req, CURLOPT_EXPECT_100_TIMEOUT_MS, 300000); + /* If no file exist in the specified path, curl continues to work anyway as if netrc support was disabled. */ curl_easy_setopt(req, CURLOPT_NETRC_FILE, settings.netrcFile.get().c_str()); diff --git a/src/libstore/ipfs-binary-cache-store.cc b/src/libstore/ipfs-binary-cache-store.cc index ef259966e6f..b35322748ba 100644 --- a/src/libstore/ipfs-binary-cache-store.cc +++ b/src/libstore/ipfs-binary-cache-store.cc @@ -85,7 +85,7 @@ class IPFSBinaryCacheStore : public Store // Resolve the IPNS name to an IPFS object if (optIpnsPath) { auto ipnsPath = *optIpnsPath; - initialIpfsPath = resolveIPNSName(ipnsPath, true); + initialIpfsPath = resolveIPNSName(ipnsPath); state->ipfsPath = initialIpfsPath; } @@ -172,9 +172,9 @@ class IPFSBinaryCacheStore : public Store } // Resolve the IPNS name to an IPFS object - std::string resolveIPNSName(std::string ipnsPath, bool offline) { + std::string resolveIPNSName(std::string ipnsPath) { debug("Resolving IPFS object of '%s', this could take a while.", ipnsPath); - auto uri = daemonUri + "/api/v0/name/resolve?offline=" + (offline?"true":"false") + "&arg=" + getFileTransfer()->urlEncode(ipnsPath); + auto uri = daemonUri + "/api/v0/name/resolve?arg=" + getFileTransfer()->urlEncode(ipnsPath); FileTransferRequest request(uri); request.post = true; request.tries = 1; @@ -199,7 +199,7 @@ class 
IPFSBinaryCacheStore : public Store auto ipnsPath = *optIpnsPath; - auto resolvedIpfsPath = resolveIPNSName(ipnsPath, false); + auto resolvedIpfsPath = resolveIPNSName(ipnsPath); if (resolvedIpfsPath != initialIpfsPath) { throw Error("The IPNS hash or DNS link %s resolves now to something different from the value it had when Nix was started;\n wanted: %s\n got %s\nPerhaps something else updated it in the meantime?", initialIpfsPath, resolvedIpfsPath); From 96e2ebdef33e64c49ec9308821aa416459e68bb1 Mon Sep 17 00:00:00 2001 From: Matthew Bauer Date: Wed, 17 Jun 2020 18:01:29 -0400 Subject: [PATCH 068/104] Add ipnsPath to mismatch error --- src/libstore/ipfs-binary-cache-store.cc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/libstore/ipfs-binary-cache-store.cc b/src/libstore/ipfs-binary-cache-store.cc index f386c1ad232..994e71c4f90 100644 --- a/src/libstore/ipfs-binary-cache-store.cc +++ b/src/libstore/ipfs-binary-cache-store.cc @@ -146,7 +146,7 @@ class IPFSBinaryCacheStore : public BinaryCacheStore auto resolvedIpfsPath = resolveIPNSName(ipnsPath, true); if (resolvedIpfsPath != initialIpfsPath) { throw Error("The IPNS hash or DNS link %s resolves now to something different from the value it had when Nix was started;\n wanted: %s\n got %s\nPerhaps something else updated it in the meantime?", - initialIpfsPath, resolvedIpfsPath); + ipnsPath, initialIpfsPath, resolvedIpfsPath); } if (resolvedIpfsPath == state->ipfsPath) { From e54f5bf0d747c8b17ce6d8e9e6ef316a2c7800b4 Mon Sep 17 00:00:00 2001 From: Matthew Bauer Date: Thu, 18 Jun 2020 13:28:25 -0400 Subject: [PATCH 069/104] Try using e.info() instead of e.msg() --- src/libstore/ipfs-binary-cache-store.cc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/libstore/ipfs-binary-cache-store.cc b/src/libstore/ipfs-binary-cache-store.cc index 994e71c4f90..ea32c995d98 100644 --- a/src/libstore/ipfs-binary-cache-store.cc +++ b/src/libstore/ipfs-binary-cache-store.cc @@ -227,7 +227,7 @@ 
class IPFSBinaryCacheStore : public BinaryCacheStore auto json = nlohmann::json::parse(*res.data); addLink(path, "/ipfs/" + (std::string) json["Hash"]); } catch (FileTransferError & e) { - throw UploadToIPFS("while uploading to IPFS binary cache at '%s': %s", cacheUri, e.msg()); + throw UploadToIPFS("while uploading to IPFS binary cache at '%s': %s", cacheUri, e.info()); } } From 46701e0723a0f83df38c49ad2477db71b123a1cc Mon Sep 17 00:00:00 2001 From: Carlo Nucera Date: Thu, 18 Jun 2020 15:02:00 -0400 Subject: [PATCH 070/104] Also allow for offline use --- src/libstore/ipfs-binary-cache-store.cc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/libstore/ipfs-binary-cache-store.cc b/src/libstore/ipfs-binary-cache-store.cc index ea32c995d98..99d1aaa27b9 100644 --- a/src/libstore/ipfs-binary-cache-store.cc +++ b/src/libstore/ipfs-binary-cache-store.cc @@ -164,7 +164,7 @@ class IPFSBinaryCacheStore : public BinaryCacheStore debug("Publishing '%s' to '%s', this could take a while.", state->ipfsPath, ipnsPath); - auto uri = daemonUri + "/api/v0/name/publish?offline=true&arg=" + getFileTransfer()->urlEncode(state->ipfsPath); + auto uri = daemonUri + "/api/v0/name/publish?offline=true&allow-offline=true&arg=" + getFileTransfer()->urlEncode(state->ipfsPath); // Given the hash, we want to discover the corresponding name in the // `ipfs key list` command, so that we publish to the right address in From 2c89c62d1992d26396a0119c0c19c638845dcca2 Mon Sep 17 00:00:00 2001 From: Matthew Bauer Date: Thu, 18 Jun 2020 15:24:03 -0400 Subject: [PATCH 071/104] Set allow-offline=true on publish this is needed for our tests to succeed --- src/libstore/ipfs-binary-cache-store.cc | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/src/libstore/ipfs-binary-cache-store.cc b/src/libstore/ipfs-binary-cache-store.cc index b35322748ba..924d5d45b15 100644 --- a/src/libstore/ipfs-binary-cache-store.cc +++ b/src/libstore/ipfs-binary-cache-store.cc @@ -220,7 +220,8 
@@ class IPFSBinaryCacheStore : public Store debug("Publishing '%s' to '%s', this could take a while.", state->ipfsPath, ipnsPath); - auto uri = daemonUri + "/api/v0/name/publish?arg=" + getFileTransfer()->urlEncode(state->ipfsPath); + auto uri = daemonUri + "/api/v0/name/publish?allow-offline=true"; + uri += "&arg=" + getFileTransfer()->urlEncode(state->ipfsPath); uri += "&key=" + std::string(ipnsPath, 6); auto req = FileTransferRequest(uri); From e9d538e93ed288427a2eaf5cbf03ccc8699fa3ac Mon Sep 17 00:00:00 2001 From: Carlo Nucera Date: Thu, 18 Jun 2020 15:49:24 -0400 Subject: [PATCH 072/104] Simplify loop --- src/libstore/ipfs-binary-cache-store.cc | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/src/libstore/ipfs-binary-cache-store.cc b/src/libstore/ipfs-binary-cache-store.cc index a9866d82bae..32ed132ceab 100644 --- a/src/libstore/ipfs-binary-cache-store.cc +++ b/src/libstore/ipfs-binary-cache-store.cc @@ -184,9 +184,9 @@ class IPFSBinaryCacheStore : public BinaryCacheStore auto keyListResponse = nlohmann::json::parse(*(getFileTransfer()->download(keyListRequest)).data); std::string keyName {""}; - for (nlohmann::json::iterator it = keyListResponse["Keys"].begin(); it != keyListResponse["Keys"].end(); ++it) - if ((*it)["Id"] == ipnsPathHash) - keyName = (*it)["Name"]; + for (auto & key : keyListResponse["Keys"]) + if (key["Id"] == ipnsPathHash) + keyName = key["Name"]; if (keyName == "") { throw Error("We couldn't find a name corresponding to the provided ipns hash:\n hash: %s", ipnsPathHash); } From ed0e05d8c939e6c4f45c5aff8306002075aebaf4 Mon Sep 17 00:00:00 2001 From: Matthew Bauer Date: Thu, 18 Jun 2020 15:57:42 -0400 Subject: [PATCH 073/104] Add note regarding key lookup --- src/libstore/ipfs-binary-cache-store.cc | 3 +++ 1 file changed, 3 insertions(+) diff --git a/src/libstore/ipfs-binary-cache-store.cc b/src/libstore/ipfs-binary-cache-store.cc index 03c5768612f..3481ff2be8e 100644 --- a/src/libstore/ipfs-binary-cache-store.cc +++ 
b/src/libstore/ipfs-binary-cache-store.cc @@ -227,6 +227,9 @@ class IPFSBinaryCacheStore : public Store // `ipfs key list` command, so that we publish to the right address in // case the user has multiple ones available. + // NOTE: this is needed for ipfs < 0.5.0 because key must be a + // name, not an address. + auto ipnsPathHash = std::string(ipnsPath, 6); debug("Getting the name corresponding to hash %s", ipnsPathHash); From 81abf59a181c53bf836cd78f309b89aae4dea3f1 Mon Sep 17 00:00:00 2001 From: Matthew Bauer Date: Thu, 18 Jun 2020 15:57:51 -0400 Subject: [PATCH 074/104] Verify a derivation with dependencies works in ipfs --- tests/ipfs.sh | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/tests/ipfs.sh b/tests/ipfs.sh index 8acff2c2dbb..6a554ad3561 100644 --- a/tests/ipfs.sh +++ b/tests/ipfs.sh @@ -113,3 +113,11 @@ DOWNLOAD_LOCATION=$(nix-build ./fixed.nix -A good \ --no-out-link \ -j0 \ --option trusted-public-keys $(cat $SIGNING_KEY_PUB_FILE)) + +# Verify we can copy something with dependencies +outPath=$(nix-build dependencies.nix --no-out-link) + +nix copy $outPath --to ipns://$IPNS_ID --experimental-features nix-command + +# and copy back +nix copy $outPath --store file://$IPFS_DST_IPNS_STORE --from ipns://$IPNS_ID --experimental-features nix-command From d3608464168930a58669d0fef7af74f1fc5f2114 Mon Sep 17 00:00:00 2001 From: Matthew Bauer Date: Thu, 18 Jun 2020 16:01:19 -0400 Subject: [PATCH 075/104] Revert "Move compareVersions to libutil" This reverts commit 23347cb3a946cc0adb10ecb8566d28ddd8f56cf9. 
we no longer need this with dd9bb11d0d38139bb32411170403171c4c92f8cf --- src/libstore/ipfs-binary-cache-store.cc | 2 +- src/libstore/names.cc | 56 ++++++++++++++++++++++- src/libstore/names.hh | 4 +- src/libutil/versions.cc | 61 ------------------------- src/libutil/versions.hh | 14 ------ 5 files changed, 59 insertions(+), 78 deletions(-) delete mode 100644 src/libutil/versions.cc delete mode 100644 src/libutil/versions.hh diff --git a/src/libstore/ipfs-binary-cache-store.cc b/src/libstore/ipfs-binary-cache-store.cc index a9866d82bae..f3c6038c1b1 100644 --- a/src/libstore/ipfs-binary-cache-store.cc +++ b/src/libstore/ipfs-binary-cache-store.cc @@ -4,7 +4,7 @@ #include "binary-cache-store.hh" #include "filetransfer.hh" #include "nar-info-disk-cache.hh" -#include "versions.hh" +#include "names.hh" namespace nix { diff --git a/src/libstore/names.cc b/src/libstore/names.cc index d9ed8fa68ef..d1c8a6101f8 100644 --- a/src/libstore/names.cc +++ b/src/libstore/names.cc @@ -1,5 +1,5 @@ #include "names.hh" -#include "versions.hh" +#include "util.hh" namespace nix { @@ -41,6 +41,60 @@ bool DrvName::matches(DrvName & n) } +string nextComponent(string::const_iterator & p, + const string::const_iterator end) +{ + /* Skip any dots and dashes (component separators). */ + while (p != end && (*p == '.' || *p == '-')) ++p; + + if (p == end) return ""; + + /* If the first character is a digit, consume the longest sequence + of digits. Otherwise, consume the longest sequence of + non-digit, non-separator characters. */ + string s; + if (isdigit(*p)) + while (p != end && isdigit(*p)) s += *p++; + else + while (p != end && (!isdigit(*p) && *p != '.' 
&& *p != '-')) + s += *p++; + + return s; +} + + +static bool componentsLT(const string & c1, const string & c2) +{ + int n1, n2; + bool c1Num = string2Int(c1, n1), c2Num = string2Int(c2, n2); + + if (c1Num && c2Num) return n1 < n2; + else if (c1 == "" && c2Num) return true; + else if (c1 == "pre" && c2 != "pre") return true; + else if (c2 == "pre") return false; + /* Assume that `2.3a' < `2.3.1'. */ + else if (c2Num) return true; + else if (c1Num) return false; + else return c1 < c2; +} + + +int compareVersions(const string & v1, const string & v2) +{ + string::const_iterator p1 = v1.begin(); + string::const_iterator p2 = v2.begin(); + + while (p1 != v1.end() || p2 != v2.end()) { + string c1 = nextComponent(p1, v1.end()); + string c2 = nextComponent(p2, v2.end()); + if (componentsLT(c1, c2)) return -1; + else if (componentsLT(c2, c1)) return 1; + } + + return 0; +} + + DrvNames drvNamesFromArgs(const Strings & opArgs) { DrvNames result; diff --git a/src/libstore/names.hh b/src/libstore/names.hh index d682a69d078..00e14b8c797 100644 --- a/src/libstore/names.hh +++ b/src/libstore/names.hh @@ -3,7 +3,6 @@ #include #include "types.hh" -#include "versions.hh" #include namespace nix { @@ -25,6 +24,9 @@ private: typedef list DrvNames; +string nextComponent(string::const_iterator & p, + const string::const_iterator end); +int compareVersions(const string & v1, const string & v2); DrvNames drvNamesFromArgs(const Strings & opArgs); } diff --git a/src/libutil/versions.cc b/src/libutil/versions.cc deleted file mode 100644 index d38f3ec2518..00000000000 --- a/src/libutil/versions.cc +++ /dev/null @@ -1,61 +0,0 @@ -#include "util.hh" -#include "versions.hh" - -namespace nix { - - -string nextComponent(string::const_iterator & p, - const string::const_iterator end) -{ - /* Skip any dots and dashes (component separators). */ - while (p != end && (*p == '.' 
|| *p == '-')) ++p; - - if (p == end) return ""; - - /* If the first character is a digit, consume the longest sequence - of digits. Otherwise, consume the longest sequence of - non-digit, non-separator characters. */ - string s; - if (isdigit(*p)) - while (p != end && isdigit(*p)) s += *p++; - else - while (p != end && (!isdigit(*p) && *p != '.' && *p != '-')) - s += *p++; - - return s; -} - - -static bool componentsLT(const string & c1, const string & c2) -{ - int n1, n2; - bool c1Num = string2Int(c1, n1), c2Num = string2Int(c2, n2); - - if (c1Num && c2Num) return n1 < n2; - else if (c1 == "" && c2Num) return true; - else if (c1 == "pre" && c2 != "pre") return true; - else if (c2 == "pre") return false; - /* Assume that `2.3a' < `2.3.1'. */ - else if (c2Num) return true; - else if (c1Num) return false; - else return c1 < c2; -} - - -int compareVersions(const string & v1, const string & v2) -{ - string::const_iterator p1 = v1.begin(); - string::const_iterator p2 = v2.begin(); - - while (p1 != v1.end() || p2 != v2.end()) { - string c1 = nextComponent(p1, v1.end()); - string c2 = nextComponent(p2, v2.end()); - if (componentsLT(c1, c2)) return -1; - else if (componentsLT(c2, c1)) return 1; - } - - return 0; -} - - -} diff --git a/src/libutil/versions.hh b/src/libutil/versions.hh deleted file mode 100644 index 41e6f0f417b..00000000000 --- a/src/libutil/versions.hh +++ /dev/null @@ -1,14 +0,0 @@ -#pragma once - -#include - -#include "types.hh" -#include - -namespace nix { - -string nextComponent(string::const_iterator & p, - const string::const_iterator end); -int compareVersions(const string & v1, const string & v2); - -} From ba7a4b75a010717c1ea2c31e46f2792af2fe132e Mon Sep 17 00:00:00 2001 From: Matthew Bauer Date: Thu, 18 Jun 2020 16:32:17 -0400 Subject: [PATCH 076/104] Readd ipfs gateway tests --- tests/ipfs.sh | 27 +++++++++++++++++++++++---- 1 file changed, 23 insertions(+), 4 deletions(-) diff --git a/tests/ipfs.sh b/tests/ipfs.sh index 6a554ad3561..cc0cee4afba 
100644 --- a/tests/ipfs.sh +++ b/tests/ipfs.sh @@ -38,6 +38,8 @@ mkdir $IPFS_TESTS # method) IPFS_SRC_STORE=$IPFS_TESTS/ipfs_source_store +IPFS_DST_HTTP_STORE=$IPFS_TESTS/ipfs_dest_http_store +IPFS_DST_HTTP_LOCAL_STORE=$IPFS_TESTS/ipfs_dest_http_local_store IPFS_DST_IPFS_STORE=$IPFS_TESTS/ipfs_dest_ipfs_store IPFS_DST_IPNS_STORE=$IPFS_TESTS/ipfs_dest_ipns_store @@ -70,6 +72,23 @@ for path in $storePaths; do done unset path +MANUAL_IPFS_HASH=$(ipfs add -r $IPFS_SRC_STORE 2>/dev/null | tail -n 1 | awk '{print $2}') + +################################################################################ +## Create the local http store and download the derivation there +################################################################################ + +mkdir $IPFS_DST_HTTP_LOCAL_STORE + +IPFS_HTTP_LOCAL_PREFIX='http://localhost:8080/ipfs' + +nix-build ./fixed.nix -A good \ + --option substituters $IPFS_HTTP_LOCAL_PREFIX/$MANUAL_IPFS_HASH \ + --store $IPFS_DST_HTTP_LOCAL_STORE \ + --no-out-link \ + -j0 \ + --option trusted-public-keys $(cat $SIGNING_KEY_PUB_FILE) + ################################################################################ ## Create the ipfs store and download the derivation there ################################################################################ @@ -87,12 +106,12 @@ nix copy --to ipfs://$IPFS_HASH $(nix-build ./fixed.nix -A good) --experimental- mkdir $IPFS_DST_IPFS_STORE -DOWNLOAD_LOCATION=$(nix-build ./fixed.nix -A good \ +nix-build ./fixed.nix -A good \ --option substituters 'ipfs://'$IPFS_HASH \ --store $IPFS_DST_IPFS_STORE \ --no-out-link \ -j0 \ - --option trusted-public-keys $(cat $SIGNING_KEY_PUB_FILE)) + --option trusted-public-keys $(cat $SIGNING_KEY_PUB_FILE) ################################################################################ @@ -107,12 +126,12 @@ nix copy --to ipns://$IPNS_ID $(nix-build ./fixed.nix -A good) --experimental-fe mkdir $IPFS_DST_IPNS_STORE -DOWNLOAD_LOCATION=$(nix-build ./fixed.nix -A good \ 
+nix-build ./fixed.nix -A good \ --option substituters 'ipns://'$IPNS_ID \ --store $IPFS_DST_IPNS_STORE \ --no-out-link \ -j0 \ - --option trusted-public-keys $(cat $SIGNING_KEY_PUB_FILE)) + --option trusted-public-keys $(cat $SIGNING_KEY_PUB_FILE) # Verify we can copy something with dependencies outPath=$(nix-build dependencies.nix --no-out-link) From 9e4268d0cb440c0894ccd5303c171eaad82132bd Mon Sep 17 00:00:00 2001 From: Matthew Bauer Date: Thu, 18 Jun 2020 16:34:23 -0400 Subject: [PATCH 077/104] Add FIXME for CURLOPT_EXPECT_100_TIMEOUT_MS --- src/libstore/filetransfer.cc | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/src/libstore/filetransfer.cc b/src/libstore/filetransfer.cc index 8579d14ac3b..1f485d18c88 100644 --- a/src/libstore/filetransfer.cc +++ b/src/libstore/filetransfer.cc @@ -325,6 +325,12 @@ struct curlFileTransfer : public FileTransfer curl_easy_setopt(req, CURLOPT_LOW_SPEED_LIMIT, 1L); curl_easy_setopt(req, CURLOPT_LOW_SPEED_TIME, fileTransferSettings.stalledDownloadTimeout.get()); + /* FIXME: We hit a weird issue when 1 second goes by + * without Expect: 100-continue. curl_multi_perform + * appears to block indefinitely. To workaround this, we + * just set the timeout to a really big value unlikely to + * be hit in any server without Expect: 100-continue. This + * may specifically be a bug in the IPFS API. */ curl_easy_setopt(req, CURLOPT_EXPECT_100_TIMEOUT_MS, 300000); /* If no file exist in the specified path, curl continues to work From 96e642b41e632053f311613655c2bce87a19e56b Mon Sep 17 00:00:00 2001 From: Matthew Bauer Date: Fri, 19 Jun 2020 11:58:32 -0400 Subject: [PATCH 078/104] =?UTF-8?q?Don=E2=80=99t=20include=20storeDir=20in?= =?UTF-8?q?=20ipfs=20dictionary?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This makes things a little simpler to convert back and forth. In addition, ipfs viewer messes up keys with / in them, so this makes things look cleaner. 
--- src/libstore/ipfs-binary-cache-store.cc | 45 +++++++++++-------------- 1 file changed, 19 insertions(+), 26 deletions(-) diff --git a/src/libstore/ipfs-binary-cache-store.cc b/src/libstore/ipfs-binary-cache-store.cc index 87c924ea428..1d7b86636e8 100644 --- a/src/libstore/ipfs-binary-cache-store.cc +++ b/src/libstore/ipfs-binary-cache-store.cc @@ -369,13 +369,13 @@ class IPFSBinaryCacheStore : public Store auto narMap = getIpfsDag(getIpfsPath())["nar"]; - json["references"] = nlohmann::json::array(); + json["references"] = nlohmann::json::object(); + json["hasSelfReference"] = false; for (auto & ref : narInfo->references) { - if (ref == narInfo->path) { - json["references"].push_back(printStorePath(ref)); - } else { - json["references"].push_back(narMap[printStorePath(ref)]); - } + if (ref == narInfo->path) + json["hasSelfReference"] = true; + else + json["references"].emplace(ref.to_string(), narMap[(std::string) ref.to_string()]); } if (narInfo->ca != "") @@ -397,8 +397,8 @@ class IPFSBinaryCacheStore : public Store } if (!narInfo->url.empty()) { - json["url"] = nlohmann::json::object(); - json["url"]["/"] = std::string(narInfo->url, 7); + json["ipfsCid"] = nlohmann::json::object(); + json["ipfsCid"]["/"] = std::string(narInfo->url, 7); } if (narInfo->fileHash) json["downloadHash"] = narInfo->fileHash.to_string(Base32, true); @@ -419,7 +419,7 @@ class IPFSBinaryCacheStore : public Store auto hashObject = nlohmann::json::object(); hashObject.emplace("/", std::string(narObjectPath, 6)); - json["nar"].emplace(printStorePath(narInfo->path), hashObject); + json["nar"].emplace(narInfo->path.to_string(), hashObject); state->ipfsPath = putIpfsDag(json); @@ -498,7 +498,7 @@ class IPFSBinaryCacheStore : public Store auto json = getIpfsDag(getIpfsPath()); if (!json.contains("nar")) return false; - return json["nar"].contains(printStorePath(storePath)); + return json["nar"].contains(storePath.to_string()); } void narFromPath(const StorePath & storePath, Sink & sink) 
override @@ -542,28 +542,21 @@ class IPFSBinaryCacheStore : public Store auto json = getIpfsDag(getIpfsPath()); - if (!json.contains("nar") || !json["nar"].contains(printStorePath(storePath))) + if (!json.contains("nar") || !json["nar"].contains(storePath.to_string())) return (*callbackPtr)(nullptr); - auto narObjectHash = (std::string) json["nar"][printStorePath(storePath)]["/"]; + auto narObjectHash = (std::string) json["nar"][(std::string) storePath.to_string()]["/"]; json = getIpfsDag("/ipfs/" + narObjectHash); NarInfo narInfo { storePath }; narInfo.narHash = Hash((std::string) json["narHash"]); narInfo.narSize = json["narSize"]; - auto narMap = getIpfsDag(getIpfsPath())["nar"]; - for (auto & ref : json["references"]) { - if (ref.type() == nlohmann::json::value_t::object) { - for (auto & v : narMap.items()) { - if (v.value() == ref) { - narInfo.references.insert(parseStorePath(v.key())); - break; - } - } - } else if (ref.type() == nlohmann::json::value_t::string) - narInfo.references.insert(parseStorePath((std::string) ref)); - } + for (auto & ref : json["references"].items()) + narInfo.references.insert(StorePath(ref.key())); + + if (json["hasSelfReference"]) + narInfo.references.insert(storePath); if (json.find("ca") != json.end()) narInfo.ca = json["ca"]; @@ -581,8 +574,8 @@ class IPFSBinaryCacheStore : public Store for (auto & sig : json["sigs"]) narInfo.sigs.insert((std::string) sig); - if (json.find("url") != json.end()) - narInfo.url = "ipfs://" + json["url"]["/"].get(); + if (json.find("ipfsCid") != json.end()) + narInfo.url = "ipfs://" + json["ipfsCid"]["/"].get(); if (json.find("downloadHash") != json.end()) narInfo.fileHash = Hash((std::string) json["downloadHash"]); From 90535e82999719d85a67e6297fff02ace3821b01 Mon Sep 17 00:00:00 2001 From: Matthew Bauer Date: Fri, 19 Jun 2020 12:02:54 -0400 Subject: [PATCH 079/104] Always include empty fields in ipfs json MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit 
This makes things more predictable, so we don’t have to guess whether a value is in the dict or not. Also should make defining a JSON schema easier. --- src/libstore/ipfs-binary-cache-store.cc | 22 ++++++++-------------- 1 file changed, 8 insertions(+), 14 deletions(-) diff --git a/src/libstore/ipfs-binary-cache-store.cc b/src/libstore/ipfs-binary-cache-store.cc index 1d7b86636e8..2719749e848 100644 --- a/src/libstore/ipfs-binary-cache-store.cc +++ b/src/libstore/ipfs-binary-cache-store.cc @@ -378,33 +378,27 @@ class IPFSBinaryCacheStore : public Store json["references"].emplace(ref.to_string(), narMap[(std::string) ref.to_string()]); } - if (narInfo->ca != "") - json["ca"] = narInfo->ca; + json["ca"] = narInfo->ca; if (narInfo->deriver) json["deriver"] = printStorePath(*narInfo->deriver); - if (narInfo->registrationTime) - json["registrationTime"] = narInfo->registrationTime; + json["registrationTime"] = narInfo->registrationTime; + json["ultimate"] = narInfo->ultimate; - if (narInfo->ultimate) - json["ultimate"] = narInfo->ultimate; - - if (!narInfo->sigs.empty()) { - json["sigs"] = nlohmann::json::array(); - for (auto & sig : narInfo->sigs) - json["sigs"].push_back(sig); - } + json["sigs"] = nlohmann::json::array(); + for (auto & sig : narInfo->sigs) + json["sigs"].push_back(sig); if (!narInfo->url.empty()) { json["ipfsCid"] = nlohmann::json::object(); json["ipfsCid"]["/"] = std::string(narInfo->url, 7); } + if (narInfo->fileHash) json["downloadHash"] = narInfo->fileHash.to_string(Base32, true); - if (narInfo->fileSize) - json["downloadSize"] = narInfo->fileSize; + json["downloadSize"] = narInfo->fileSize; json["compression"] = narInfo->compression; json["system"] = narInfo->system; From dba20fa3b01a7f7aa9dca864b4d2bfa54090972f Mon Sep 17 00:00:00 2001 From: John Ericson Date: Mon, 22 Jun 2020 16:49:53 +0000 Subject: [PATCH 080/104] Allow blank CA field in IPFS store to indicate not content addressed --- src/libstore/ipfs-binary-cache-store.cc | 2 +- 1 file 
changed, 1 insertion(+), 1 deletion(-) diff --git a/src/libstore/ipfs-binary-cache-store.cc b/src/libstore/ipfs-binary-cache-store.cc index 62d6b9cd27e..0fafa1c316d 100644 --- a/src/libstore/ipfs-binary-cache-store.cc +++ b/src/libstore/ipfs-binary-cache-store.cc @@ -553,7 +553,7 @@ class IPFSBinaryCacheStore : public Store narInfo.references.insert(storePath); if (json.find("ca") != json.end()) - narInfo.ca = parseContentAddress(json["ca"].get()); + narInfo.ca = parseContentAddressOpt(json["ca"].get()); if (json.find("deriver") != json.end()) narInfo.deriver = parseStorePath((std::string) json["deriver"]); From ec61165e06196a6f2205ff03e750cd0bd6927f56 Mon Sep 17 00:00:00 2001 From: John Ericson Date: Mon, 22 Jun 2020 19:06:47 +0000 Subject: [PATCH 081/104] Structure content addresses in IPFS store --- src/libstore/content-address.cc | 58 +++++++++++++++++++++++++ src/libstore/content-address.hh | 9 ++++ src/libstore/ipfs-binary-cache-store.cc | 4 +- 3 files changed, 69 insertions(+), 2 deletions(-) diff --git a/src/libstore/content-address.cc b/src/libstore/content-address.cc index 3d753836f8a..0a679c5e046 100644 --- a/src/libstore/content-address.cc +++ b/src/libstore/content-address.cc @@ -1,3 +1,5 @@ +#include + #include "content-address.hh" namespace nix { @@ -82,4 +84,60 @@ std::string renderContentAddress(std::optional ca) { return ca ? renderContentAddress(*ca) : ""; } + +void to_json(nlohmann::json& j, const ContentAddress & ca) { + j = std::visit(overloaded { + [](TextHash th) { + return nlohmann::json { + { "type", "text" }, + { "hash", th.hash.to_string(Base32, false) }, + }; + }, + [](FixedOutputHash foh) { + return nlohmann::json { + { "type", "fixed" }, + { "method", foh.method == FileIngestionMethod::Flat ? 
"flat" : "recursive" }, + { "algo", printHashType(*foh.hash.type) }, + { "hash", foh.hash.to_string(Base32, false) }, + }; + } + }, ca); +} + +void from_json(const nlohmann::json& j, ContentAddress & ca) { + std::string_view type = j.at("type").get(); + if (type == "text") { + ca = TextHash { + .hash = Hash { j.at("hash").get(), htSHA256 }, + }; + } else if (type == "fixed") { + std::string_view methodRaw = j.at("method").get(); + auto method = methodRaw == "flat" ? FileIngestionMethod::Flat + : methodRaw == "recursive" ? FileIngestionMethod::Recursive + : throw Error("invalid file ingestion method: %s", methodRaw); + auto algo = parseHashType(j.at("algo").get()); + ca = FixedOutputHash { + .method = method, + .hash = Hash { j.at("hash").get(), algo }, + }; + } else + throw Error("invalid type: %s", type); +} + +// Needed until https://github.com/nlohmann/json/pull/2117 + +void to_json(nlohmann::json& j, const std::optional & c) { + if (!c) + j = nullptr; + else + to_json(j, *c); +} + +void from_json(const nlohmann::json& j, std::optional & c) { + if (j.is_null()) + c = std::nullopt; + else + c = j.get(); +} + } diff --git a/src/libstore/content-address.hh b/src/libstore/content-address.hh index ba4797f5b00..45a2b256cc4 100644 --- a/src/libstore/content-address.hh +++ b/src/libstore/content-address.hh @@ -1,6 +1,8 @@ #pragma once +#include #include + #include "hash.hh" namespace nix { @@ -53,4 +55,11 @@ ContentAddress parseContentAddress(std::string_view rawCa); std::optional parseContentAddressOpt(std::string_view rawCaOpt); +void to_json(nlohmann::json& j, const ContentAddress & c); +void from_json(const nlohmann::json& j, ContentAddress & c); + +// Needed until https://github.com/nlohmann/json/pull/211 + +void to_json(nlohmann::json& j, const std::optional & c); +void from_json(const nlohmann::json& j, std::optional & c); } diff --git a/src/libstore/ipfs-binary-cache-store.cc b/src/libstore/ipfs-binary-cache-store.cc index 0fafa1c316d..fa2cb4d2c29 100644 --- 
a/src/libstore/ipfs-binary-cache-store.cc +++ b/src/libstore/ipfs-binary-cache-store.cc @@ -378,7 +378,7 @@ class IPFSBinaryCacheStore : public Store json["references"].emplace(ref.to_string(), narMap[(std::string) ref.to_string()]); } - json["ca"] = renderContentAddress(narInfo->ca); + json["ca"] = narInfo->ca; if (narInfo->deriver) json["deriver"] = printStorePath(*narInfo->deriver); @@ -553,7 +553,7 @@ class IPFSBinaryCacheStore : public Store narInfo.references.insert(storePath); if (json.find("ca") != json.end()) - narInfo.ca = parseContentAddressOpt(json["ca"].get()); + json["ca"].get_to(narInfo.ca); if (json.find("deriver") != json.end()) narInfo.deriver = parseStorePath((std::string) json["deriver"]); From 97b86490ad16595bed779f0dbf4deb1bc27f9f40 Mon Sep 17 00:00:00 2001 From: John Ericson Date: Mon, 22 Jun 2020 20:42:17 +0000 Subject: [PATCH 082/104] Rename local variable `hash` -> `hashAlgo` --- src/libstore/content-address.cc | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/libstore/content-address.cc b/src/libstore/content-address.cc index 0a679c5e046..b9d7a29addd 100644 --- a/src/libstore/content-address.cc +++ b/src/libstore/content-address.cc @@ -115,10 +115,10 @@ void from_json(const nlohmann::json& j, ContentAddress & ca) { auto method = methodRaw == "flat" ? FileIngestionMethod::Flat : methodRaw == "recursive" ? 
FileIngestionMethod::Recursive : throw Error("invalid file ingestion method: %s", methodRaw); - auto algo = parseHashType(j.at("algo").get()); + auto hashAlgo = parseHashType(j.at("algo").get()); ca = FixedOutputHash { .method = method, - .hash = Hash { j.at("hash").get(), algo }, + .hash = Hash { j.at("hash").get(), hashAlgo }, }; } else throw Error("invalid type: %s", type); From 13512c1df623ff3cdfa4d8241c1eab945a3041d7 Mon Sep 17 00:00:00 2001 From: John Ericson Date: Mon, 22 Jun 2020 21:42:18 +0000 Subject: [PATCH 083/104] `nlohmann_json` is now used in a header used by the perl bindings That functionality isn't actually used by the perl bindings, however, so we could add some CPP to make it a private dep again. --- release.nix | 1 + 1 file changed, 1 insertion(+) diff --git a/release.nix b/release.nix index 4e62d619dae..2aa5dbfd4ae 100644 --- a/release.nix +++ b/release.nix @@ -91,6 +91,7 @@ let pkgconfig pkgs.perl boost + nlohmann_json ] ++ lib.optional (stdenv.isLinux || stdenv.isDarwin) libsodium; From 8d1e710826e61c8acadc2b4bbdcf5f9aec688a11 Mon Sep 17 00:00:00 2001 From: John Ericson Date: Mon, 22 Jun 2020 23:58:31 +0000 Subject: [PATCH 084/104] Create permanent IPFS roots for `addTempRoot` I am not sure whether this is a good idea. --- src/libstore/ipfs-binary-cache-store.cc | 13 +++++++++++++ 1 file changed, 13 insertions(+) diff --git a/src/libstore/ipfs-binary-cache-store.cc b/src/libstore/ipfs-binary-cache-store.cc index fa2cb4d2c29..258ce33101c 100644 --- a/src/libstore/ipfs-binary-cache-store.cc +++ b/src/libstore/ipfs-binary-cache-store.cc @@ -644,6 +644,19 @@ class IPFSBinaryCacheStore : public Store writeNarInfo(narInfo); } + virtual void addTempRoot(const StorePath & path) override + { + // TODO make temporary pin/addToStore, see + // https://github.com/ipfs/go-ipfs/issues/4559 and + // https://github.com/ipfs/go-ipfs/issues/4328 for some ideas. 
+ auto uri = daemonUri + "/api/v0/pin/add?arg=" + getIpfsPath() + "/" + string { path.to_string() }; + + FileTransferRequest request(uri); + request.post = true; + request.tries = 1; + getFileTransfer()->upload(request); + } + std::shared_ptr getBuildLog(const StorePath & path) override { unsupported("getBuildLog"); } From 6a779a544cdff1dce3a984b74a222ec8518a9aea Mon Sep 17 00:00:00 2001 From: Carlo Nucera Date: Tue, 23 Jun 2020 13:11:44 -0400 Subject: [PATCH 085/104] Better error for ipfs address mismatch --- src/libstore/ipfs-binary-cache-store.cc | 11 +++++++++-- 1 file changed, 9 insertions(+), 2 deletions(-) diff --git a/src/libstore/ipfs-binary-cache-store.cc b/src/libstore/ipfs-binary-cache-store.cc index fa2cb4d2c29..8c09036c410 100644 --- a/src/libstore/ipfs-binary-cache-store.cc +++ b/src/libstore/ipfs-binary-cache-store.cc @@ -190,6 +190,13 @@ class IPFSBinaryCacheStore : public Store } public: + Path formatPathAsProtocol(Path path) { + if (hasPrefix(path, "/ipfs/")) + return "ipfs://" + path.substr(strlen("/ipfs/"), string::npos); + else if (hasPrefix(path, "/ipns/")) + return "ipns://" + path.substr(strlen("/ipfs/"), string::npos); + else return path; + } // IPNS publish can be slow, we try to do it rarely. void sync() override @@ -197,8 +204,8 @@ class IPFSBinaryCacheStore : public Store auto state(_state.lock()); if (!optIpnsPath) { - throw Error("We don't have an ipns path and the current ipfs address doesn't match the initial one.\n current: %s\n initial: %s", - state->ipfsPath, initialIpfsPath); + throw Error("The current IPFS address doesn't match the configured one. 
\n initial: %s\n current: %s", + formatPathAsProtocol(initialIpfsPath), formatPathAsProtocol(state->ipfsPath)); } auto ipnsPath = *optIpnsPath; From c354ba61fd0816df294afa1185355c4bc1799675 Mon Sep 17 00:00:00 2001 From: Carlo Nucera Date: Tue, 23 Jun 2020 14:13:28 -0400 Subject: [PATCH 086/104] Fix the tests --- tests/ipfs.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/ipfs.sh b/tests/ipfs.sh index cc0cee4afba..2c5bbe3a74a 100644 --- a/tests/ipfs.sh +++ b/tests/ipfs.sh @@ -99,7 +99,7 @@ EMPTY_HASH=$(echo {} | ipfs dag put) IPFS_HASH=$(set -e; \ set -o pipefail; \ ! nix copy --to ipfs://$EMPTY_HASH $(nix-build ./fixed.nix -A good) --experimental-features nix-command \ - |& grep current: | awk '{print substr($2, 7, length($2))}') + |& grep current: | awk '{print substr($2, 8, length($2))}') # Verify that new path is valid. nix copy --to ipfs://$IPFS_HASH $(nix-build ./fixed.nix -A good) --experimental-features nix-command From d8bd3cf1c7f1447700d35edb1418bb117e512cfd Mon Sep 17 00:00:00 2001 From: Carlo Nucera Date: Tue, 23 Jun 2020 14:39:27 -0400 Subject: [PATCH 087/104] Clean up grepping for right ipfs address in tests --- tests/ipfs.sh | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/tests/ipfs.sh b/tests/ipfs.sh index 2c5bbe3a74a..3b3f25d420f 100644 --- a/tests/ipfs.sh +++ b/tests/ipfs.sh @@ -96,18 +96,18 @@ nix-build ./fixed.nix -A good \ EMPTY_HASH=$(echo {} | ipfs dag put) # Try to upload the content to the empty directory, fail but grab the right hash -IPFS_HASH=$(set -e; \ +IPFS_ADDRESS=$(set -e; \ set -o pipefail; \ ! nix copy --to ipfs://$EMPTY_HASH $(nix-build ./fixed.nix -A good) --experimental-features nix-command \ - |& grep current: | awk '{print substr($2, 8, length($2))}') + |& grep current: | awk '{print $2}') # Verify that new path is valid. 
-nix copy --to ipfs://$IPFS_HASH $(nix-build ./fixed.nix -A good) --experimental-features nix-command +nix copy --to $IPFS_ADDRESS $(nix-build ./fixed.nix -A good) --experimental-features nix-command mkdir $IPFS_DST_IPFS_STORE nix-build ./fixed.nix -A good \ - --option substituters 'ipfs://'$IPFS_HASH \ + --option substituters $IPFS_ADDRESS \ --store $IPFS_DST_IPFS_STORE \ --no-out-link \ -j0 \ From 0a9f1a54afe6a6c75597be295e9df842e0c0bad1 Mon Sep 17 00:00:00 2001 From: John Ericson Date: Tue, 23 Jun 2020 20:03:12 +0000 Subject: [PATCH 088/104] Fix pin relative path --- src/libstore/ipfs-binary-cache-store.cc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/libstore/ipfs-binary-cache-store.cc b/src/libstore/ipfs-binary-cache-store.cc index ed45defc829..8ba54a043f4 100644 --- a/src/libstore/ipfs-binary-cache-store.cc +++ b/src/libstore/ipfs-binary-cache-store.cc @@ -656,7 +656,7 @@ class IPFSBinaryCacheStore : public Store // TODO make temporary pin/addToStore, see // https://github.com/ipfs/go-ipfs/issues/4559 and // https://github.com/ipfs/go-ipfs/issues/4328 for some ideas. 
- auto uri = daemonUri + "/api/v0/pin/add?arg=" + getIpfsPath() + "/" + string { path.to_string() }; + auto uri = daemonUri + "/api/v0/pin/add?arg=" + getIpfsPath() + "/nar/" + string { path.to_string() }; FileTransferRequest request(uri); request.post = true; From 7477c774af1664271660f0ba6d186f2f7542eab0 Mon Sep 17 00:00:00 2001 From: John Ericson Date: Tue, 23 Jun 2020 20:05:30 +0000 Subject: [PATCH 089/104] Use adjacent string literals to better show intent --- src/libstore/ipfs-binary-cache-store.cc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/libstore/ipfs-binary-cache-store.cc b/src/libstore/ipfs-binary-cache-store.cc index 8ba54a043f4..0cf83a1f3fb 100644 --- a/src/libstore/ipfs-binary-cache-store.cc +++ b/src/libstore/ipfs-binary-cache-store.cc @@ -656,7 +656,7 @@ class IPFSBinaryCacheStore : public Store // TODO make temporary pin/addToStore, see // https://github.com/ipfs/go-ipfs/issues/4559 and // https://github.com/ipfs/go-ipfs/issues/4328 for some ideas. 
- auto uri = daemonUri + "/api/v0/pin/add?arg=" + getIpfsPath() + "/nar/" + string { path.to_string() }; + auto uri = daemonUri + "/api/v0/pin/add?arg=" + getIpfsPath() + "/" "nar" "/" + string { path.to_string() }; FileTransferRequest request(uri); request.post = true; From 2fa86763f99b05804d4d2b6995297a0d0163e5e4 Mon Sep 17 00:00:00 2001 From: Carlo Nucera Date: Wed, 24 Jun 2020 16:52:52 -0400 Subject: [PATCH 090/104] Add test case for this --- tests/ipfs.sh | 25 +++++++++++++++++++++++-- 1 file changed, 23 insertions(+), 2 deletions(-) diff --git a/tests/ipfs.sh b/tests/ipfs.sh index 2c5bbe3a74a..ac7bde716cc 100644 --- a/tests/ipfs.sh +++ b/tests/ipfs.sh @@ -43,6 +43,29 @@ IPFS_DST_HTTP_LOCAL_STORE=$IPFS_TESTS/ipfs_dest_http_local_store IPFS_DST_IPFS_STORE=$IPFS_TESTS/ipfs_dest_ipfs_store IPFS_DST_IPNS_STORE=$IPFS_TESTS/ipfs_dest_ipns_store +EMPTY_HASH=$(echo {} | ipfs dag put) + +################################################################################ +## Check that fetchurl works directly with the ipfs store +################################################################################ + +TEST_FILE=test-file.txt +touch $TEST_FILE + +# We try to do the evaluation with a known wrong hash to get the suggestion for +# the correct one +! 
CORRECT_ADDRESS=$(nix eval '(builtins.fetchurl 'file://$PWD/$TEST_FILE')' --store ipfs://$EMPTY_HASH |& \ + grep 'current:' | awk '{print $2}') + +# Then we eval and get back the hash-name part of the store path +RESULT=$(nix eval '(builtins.fetchurl 'file://$PWD/$TEST_FILE')' --store $CORRECT_ADDRESS --json \ + | jq -r | awk -F/ '{print $NF}') + +# Finally, we ask back the info from IPFS (formatting the address the right way +# beforehand) +ADDRESS_IPFS_FORMATTED=$(echo $CORRECT_ADDRESS | awk -F/ '{print $3}') +ipfs dag get /ipfs/$ADDRESS_IPFS_FORMATTED/nar/$RESULT + ################################################################################ ## Generate the keys to sign the store ################################################################################ @@ -93,8 +116,6 @@ nix-build ./fixed.nix -A good \ ## Create the ipfs store and download the derivation there ################################################################################ -EMPTY_HASH=$(echo {} | ipfs dag put) - # Try to upload the content to the empty directory, fail but grab the right hash IPFS_HASH=$(set -e; \ set -o pipefail; \ From 1d7ceb064aa4bc4ee314cdfd27f87d9fc6ad781c Mon Sep 17 00:00:00 2001 From: Carlo Nucera Date: Wed, 24 Jun 2020 16:53:13 -0400 Subject: [PATCH 091/104] Add sync and correct hash conditions --- src/libfetchers/tarball.cc | 2 ++ src/libstore/ipfs-binary-cache-store.cc | 7 +++++-- 2 files changed, 7 insertions(+), 2 deletions(-) diff --git a/src/libfetchers/tarball.cc b/src/libfetchers/tarball.cc index f5356f0af8b..68e6ba31db1 100644 --- a/src/libfetchers/tarball.cc +++ b/src/libfetchers/tarball.cc @@ -76,6 +76,7 @@ DownloadFileResult downloadFile( }; auto source = StringSource { *sink.s }; store->addToStore(info, source, NoRepair, NoCheckSigs); + store->sync(); storePath = std::move(info.path); } @@ -146,6 +147,7 @@ Tree downloadTarball( auto topDir = tmpDir + "/" + members.begin()->name; lastModified = lstat(topDir).st_mtime; unpackedStorePath = 
store->addToStore(name, topDir, FileIngestionMethod::Recursive, htSHA256, defaultPathFilter, NoRepair); + store->sync(); } Attrs infoAttrs({ diff --git a/src/libstore/ipfs-binary-cache-store.cc b/src/libstore/ipfs-binary-cache-store.cc index 0cf83a1f3fb..7505953e95d 100644 --- a/src/libstore/ipfs-binary-cache-store.cc +++ b/src/libstore/ipfs-binary-cache-store.cc @@ -204,8 +204,11 @@ class IPFSBinaryCacheStore : public Store auto state(_state.lock()); if (!optIpnsPath) { - throw Error("The current IPFS address doesn't match the configured one. \n initial: %s\n current: %s", - formatPathAsProtocol(initialIpfsPath), formatPathAsProtocol(state->ipfsPath)); + if (initialIpfsPath != state->ipfsPath) + throw Error("The current IPFS address doesn't match the configured one. \n initial: %s\n current: %s", + formatPathAsProtocol(initialIpfsPath), formatPathAsProtocol(state->ipfsPath)); + else + return; } auto ipnsPath = *optIpnsPath; From 64b25a6710339a3ab247dbcc565a03792aaed467 Mon Sep 17 00:00:00 2001 From: John Ericson Date: Tue, 30 Jun 2020 18:47:41 +0000 Subject: [PATCH 092/104] Add jq as test dep --- release-common.nix | 1 + 1 file changed, 1 insertion(+) diff --git a/release-common.nix b/release-common.nix index 340741412d4..5b72dc65bed 100644 --- a/release-common.nix +++ b/release-common.nix @@ -55,6 +55,7 @@ rec { git mercurial gmock + jq ] ++ lib.optionals stdenv.isLinux [libseccomp utillinuxMinimal] ++ lib.optional (stdenv.isLinux || stdenv.isDarwin) libsodium From bcd293ef53eddd7c169bd6c30dd6d4cc84888e1b Mon Sep 17 00:00:00 2001 From: John Ericson Date: Tue, 30 Jun 2020 18:20:18 +0000 Subject: [PATCH 093/104] Sync stores at the end of commands, not within This should make it easier to ensure we always sync once, and don't wastefully sync intermediate. 
--- src/libfetchers/tarball.cc | 2 -- src/libstore/store-api.cc | 2 -- src/nix-build/nix-build.cc | 2 ++ src/nix-channel/nix-channel.cc | 2 ++ src/nix-collect-garbage/nix-collect-garbage.cc | 1 + src/nix-copy-closure/nix-copy-closure.cc | 3 +++ src/nix-env/nix-env.cc | 2 ++ src/nix-instantiate/nix-instantiate.cc | 3 +++ src/nix-prefetch-url/nix-prefetch-url.cc | 2 ++ src/nix-store/nix-store.cc | 8 +++----- src/nix/add-to-store.cc | 1 - src/nix/command.cc | 4 +++- src/nix/copy.cc | 2 ++ src/nix/make-content-addressable.cc | 2 -- 14 files changed, 23 insertions(+), 13 deletions(-) diff --git a/src/libfetchers/tarball.cc b/src/libfetchers/tarball.cc index 68e6ba31db1..f5356f0af8b 100644 --- a/src/libfetchers/tarball.cc +++ b/src/libfetchers/tarball.cc @@ -76,7 +76,6 @@ DownloadFileResult downloadFile( }; auto source = StringSource { *sink.s }; store->addToStore(info, source, NoRepair, NoCheckSigs); - store->sync(); storePath = std::move(info.path); } @@ -147,7 +146,6 @@ Tree downloadTarball( auto topDir = tmpDir + "/" + members.begin()->name; lastModified = lstat(topDir).st_mtime; unpackedStorePath = store->addToStore(name, topDir, FileIngestionMethod::Recursive, htSHA256, defaultPathFilter, NoRepair); - store->sync(); } Attrs infoAttrs({ diff --git a/src/libstore/store-api.cc b/src/libstore/store-api.cc index 8c44d25ffef..e4a4ae11e11 100644 --- a/src/libstore/store-api.cc +++ b/src/libstore/store-api.cc @@ -681,8 +681,6 @@ void copyPaths(ref srcStore, ref dstStore, const StorePathSet & st nrDone++; showProgress(); }); - - dstStore->sync(); } diff --git a/src/nix-build/nix-build.cc b/src/nix-build/nix-build.cc index f77de56eab1..0eda87af282 100755 --- a/src/nix-build/nix-build.cc +++ b/src/nix-build/nix-build.cc @@ -527,6 +527,8 @@ static void _main(int argc, char * * argv) if (auto store2 = store.dynamic_pointer_cast()) store2->addPermRoot(store->parseStorePath(symlink.second), absPath(symlink.first), true); + store->sync(); + logger->stop(); for (auto & path : 
outPaths) diff --git a/src/nix-channel/nix-channel.cc b/src/nix-channel/nix-channel.cc index 3ccf620c946..1ca263a68ea 100755 --- a/src/nix-channel/nix-channel.cc +++ b/src/nix-channel/nix-channel.cc @@ -120,6 +120,8 @@ static void update(const StringSet & channelNames) // Regardless of where it came from, add the expression representing this channel to accumulated expression exprs.push_back("f: f { name = \"" + cname + "\"; channelName = \"" + name + "\"; src = builtins.storePath \"" + filename + "\"; " + extraAttrs + " }"); + + store->sync(); } // Unpack the channel tarballs into the Nix store and install them diff --git a/src/nix-collect-garbage/nix-collect-garbage.cc b/src/nix-collect-garbage/nix-collect-garbage.cc index aa5ada3a62b..d7e1d615f7c 100644 --- a/src/nix-collect-garbage/nix-collect-garbage.cc +++ b/src/nix-collect-garbage/nix-collect-garbage.cc @@ -88,6 +88,7 @@ static int _main(int argc, char * * argv) GCResults results; PrintFreed freed(true, results); store->collectGarbage(options, results); + store->sync(); } return 0; diff --git a/src/nix-copy-closure/nix-copy-closure.cc b/src/nix-copy-closure/nix-copy-closure.cc index b10184718d1..5fa676b67eb 100755 --- a/src/nix-copy-closure/nix-copy-closure.cc +++ b/src/nix-copy-closure/nix-copy-closure.cc @@ -61,6 +61,9 @@ static int _main(int argc, char ** argv) copyPaths(from, to, closure, NoRepair, NoCheckSigs, useSubstitutes); + from->sync(); + to->sync(); + return 0; } } diff --git a/src/nix-env/nix-env.cc b/src/nix-env/nix-env.cc index 8b069203503..41053714977 100644 --- a/src/nix-env/nix-env.cc +++ b/src/nix-env/nix-env.cc @@ -1455,6 +1455,8 @@ static int _main(int argc, char * * argv) globals.state->printStats(); + store->sync(); + logger->stop(); return 0; diff --git a/src/nix-instantiate/nix-instantiate.cc b/src/nix-instantiate/nix-instantiate.cc index bf353677a5b..e6d6646673b 100644 --- a/src/nix-instantiate/nix-instantiate.cc +++ b/src/nix-instantiate/nix-instantiate.cc @@ -169,6 +169,7 @@ static 
int _main(int argc, char * * argv) if (p == "") throw Error("unable to find '%1%'", i); std::cout << p << std::endl; } + store->sync(); return 0; } @@ -189,6 +190,8 @@ static int _main(int argc, char * * argv) state->printStats(); + store->sync(); + return 0; } } diff --git a/src/nix-prefetch-url/nix-prefetch-url.cc b/src/nix-prefetch-url/nix-prefetch-url.cc index 40b05a2f39e..242945924f3 100644 --- a/src/nix-prefetch-url/nix-prefetch-url.cc +++ b/src/nix-prefetch-url/nix-prefetch-url.cc @@ -227,6 +227,8 @@ static int _main(int argc, char * * argv) if (printPath) std::cout << store->printStorePath(*storePath) << std::endl; + store->sync(); + return 0; } } diff --git a/src/nix-store/nix-store.cc b/src/nix-store/nix-store.cc index 6d5063de0e0..67e92ed43e3 100644 --- a/src/nix-store/nix-store.cc +++ b/src/nix-store/nix-store.cc @@ -167,8 +167,6 @@ static void opAdd(Strings opFlags, Strings opArgs) for (auto & i : opArgs) cout << fmt("%s\n", store->printStorePath(store->addToStore(std::string(baseNameOf(i)), i))); - - store->sync(); } @@ -190,8 +188,6 @@ static void opAddFixed(Strings opFlags, Strings opArgs) for (auto & i : opArgs) cout << fmt("%s\n", store->printStorePath(store->addToStore(std::string(baseNameOf(i)), i, recursive, hashAlgo))); - - store->sync(); } @@ -964,7 +960,6 @@ static void opServe(Strings opFlags, Strings opArgs) SizedSource sizedSource(in, info.narSize); store->addToStore(info, sizedSource, NoRepair, NoCheckSigs); - store->sync(); // consume all the data that has been sent before continuing. 
sizedSource.drainAll(); @@ -1111,6 +1106,9 @@ static int _main(int argc, char * * argv) op(opFlags, opArgs); + if (store) + store->sync(); + logger->stop(); return 0; diff --git a/src/nix/add-to-store.cc b/src/nix/add-to-store.cc index 9f3ac2ebe4f..f9d6de16eea 100644 --- a/src/nix/add-to-store.cc +++ b/src/nix/add-to-store.cc @@ -56,7 +56,6 @@ struct CmdAddToStore : MixDryRun, StoreCommand if (!dryRun) { auto source = StringSource { *sink.s }; store->addToStore(info, source); - store->sync(); } logger->stdout("%s", store->printStorePath(info.path)); diff --git a/src/nix/command.cc b/src/nix/command.cc index 3651a9e9ca7..2d6f11bd106 100644 --- a/src/nix/command.cc +++ b/src/nix/command.cc @@ -28,7 +28,9 @@ ref StoreCommand::createStore() void StoreCommand::run() { - run(getStore()); + auto store = getStore(); + run(store); + store->sync(); } StorePathsCommand::StorePathsCommand(bool recursive) diff --git a/src/nix/copy.cc b/src/nix/copy.cc index 64099f47643..8f5e7251032 100644 --- a/src/nix/copy.cc +++ b/src/nix/copy.cc @@ -96,6 +96,8 @@ struct CmdCopy : StorePathsCommand copyPaths(srcStore, dstStore, StorePathSet(storePaths.begin(), storePaths.end()), NoRepair, checkSigs, substitute); + + dstStore->sync(); } }; diff --git a/src/nix/make-content-addressable.cc b/src/nix/make-content-addressable.cc index f938777b6df..fb36fc410f3 100644 --- a/src/nix/make-content-addressable.cc +++ b/src/nix/make-content-addressable.cc @@ -103,8 +103,6 @@ struct CmdMakeContentAddressable : StorePathsCommand, MixJSON remappings.insert_or_assign(std::move(path), std::move(info.path)); } - - store->sync(); } }; From 0838c21de9a26c2babfedfcd81af54000fe4793b Mon Sep 17 00:00:00 2001 From: John Ericson Date: Tue, 30 Jun 2020 20:23:27 +0000 Subject: [PATCH 094/104] Add Store::sync to daemon protocol --- src/libstore/daemon.cc | 8 ++++++++ src/libstore/remote-store.cc | 9 +++++++++ src/libstore/remote-store.hh | 2 ++ src/libstore/worker-protocol.hh | 1 + 4 files changed, 20 insertions(+) diff 
--git a/src/libstore/daemon.cc b/src/libstore/daemon.cc index 14920afcb0d..3e9db2b5dc0 100644 --- a/src/libstore/daemon.cc +++ b/src/libstore/daemon.cc @@ -755,6 +755,14 @@ static void performOp(TunnelLogger * logger, ref store, break; } + case wopSync: { + logger->startWork(); + store->sync(); + logger->stopWork(); + to << 1; + break; + } + default: throw Error("invalid operation %1%", op); } diff --git a/src/libstore/remote-store.cc b/src/libstore/remote-store.cc index b7cc7a5fc62..b0d66f90837 100644 --- a/src/libstore/remote-store.cc +++ b/src/libstore/remote-store.cc @@ -698,6 +698,15 @@ void RemoteStore::queryMissing(const std::vector & targets } +void RemoteStore::sync() +{ + auto conn(getConnection()); + conn->to << wopSync; + conn.processStderr(); + readInt(conn->from); +} + + void RemoteStore::connect() { auto conn(getConnection()); diff --git a/src/libstore/remote-store.hh b/src/libstore/remote-store.hh index 80c8e9f1168..2f8c8543264 100644 --- a/src/libstore/remote-store.hh +++ b/src/libstore/remote-store.hh @@ -100,6 +100,8 @@ public: unsigned int getProtocol() override; + void sync() override; + void flushBadConnections(); protected: diff --git a/src/libstore/worker-protocol.hh b/src/libstore/worker-protocol.hh index ac42457fc41..08f85702c0e 100644 --- a/src/libstore/worker-protocol.hh +++ b/src/libstore/worker-protocol.hh @@ -49,6 +49,7 @@ typedef enum { wopNarFromPath = 38, wopAddToStoreNar = 39, wopQueryMissing = 40, + wopSync = 41, } WorkerOp; From 58c772d205bc9eef29403c024057769468243395 Mon Sep 17 00:00:00 2001 From: John Ericson Date: Fri, 3 Jul 2020 00:16:20 +0000 Subject: [PATCH 095/104] IPFS tests: more no out-link --- tests/ipfs.sh | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/tests/ipfs.sh b/tests/ipfs.sh index 025aebebd55..c56c82638da 100644 --- a/tests/ipfs.sh +++ b/tests/ipfs.sh @@ -81,7 +81,7 @@ nix-store --generate-binary-cache-key $SIGNING_KEY_NAME $SIGNING_KEY_PRI_FILE $S 
################################################################################ mkdir -p $IPFS_SRC_STORE -storePaths=$(nix-build ./fixed.nix -A good) +storePaths=$(nix-build ./fixed.nix -A good --no-out-link) nix sign-paths -k $SIGNING_KEY_PRI_FILE $storePaths @@ -119,11 +119,11 @@ nix-build ./fixed.nix -A good \ # Try to upload the content to the empty directory, fail but grab the right hash IPFS_ADDRESS=$(set -e; \ set -o pipefail; \ - ! nix copy --to ipfs://$EMPTY_HASH $(nix-build ./fixed.nix -A good) --experimental-features nix-command \ + ! nix copy --to ipfs://$EMPTY_HASH $(nix-build ./fixed.nix -A good --no-out-link) --experimental-features nix-command \ |& grep current: | awk '{print $2}') # Verify that new path is valid. -nix copy --to $IPFS_ADDRESS $(nix-build ./fixed.nix -A good) --experimental-features nix-command +nix copy --to $IPFS_ADDRESS $(nix-build ./fixed.nix -A good --no-out-link) --experimental-features nix-command mkdir $IPFS_DST_IPFS_STORE @@ -143,7 +143,7 @@ nix-build ./fixed.nix -A good \ IPNS_ID=$(ipfs name publish $EMPTY_HASH --allow-offline | awk '{print substr($3,1,length($3)-1)}') # Check that we can upload the ipns store directly -nix copy --to ipns://$IPNS_ID $(nix-build ./fixed.nix -A good) --experimental-features nix-command +nix copy --to ipns://$IPNS_ID $(nix-build ./fixed.nix -A good --no-out-link) --experimental-features nix-command mkdir $IPFS_DST_IPNS_STORE From 222703d0c0f2e9be65b3a48714657d39bd91fa7d Mon Sep 17 00:00:00 2001 From: John Ericson Date: Mon, 6 Jul 2020 19:01:36 +0000 Subject: [PATCH 096/104] Improve IPFS error messages --- src/libstore/ipfs-binary-cache-store.cc | 16 ++++++++++++++-- tests/ipfs.sh | 4 ++-- 2 files changed, 16 insertions(+), 4 deletions(-) diff --git a/src/libstore/ipfs-binary-cache-store.cc b/src/libstore/ipfs-binary-cache-store.cc index 7505953e95d..431f806ed19 100644 --- a/src/libstore/ipfs-binary-cache-store.cc +++ b/src/libstore/ipfs-binary-cache-store.cc @@ -205,7 +205,14 @@ class 
IPFSBinaryCacheStore : public Store if (!optIpnsPath) { if (initialIpfsPath != state->ipfsPath) - throw Error("The current IPFS address doesn't match the configured one. \n initial: %s\n current: %s", + throw Error( + "You performed store-modifying actions, creating a new store whose IPFS address doesn't match the configured one:\n" + " configured: %s\n" + " modified: %s\n" + "\n" + "This happens when one has configured nix to use a store via an IPFS hash. Since the store is immutable a new one is made (functional update) and the \"modified\" is its hash. Nix isn't going to statefully switch to using that hash, however, because that would be in violation of the configuration Nix has been given.\n" + "\n" + "You can change you configuration to use this hash, and run the command again in which case it will succeed with a no-opt. But if you are going to modify the store on a regular basis you should use DNS-link or IPNS instead so you have a properly mutable store, and avoid getting this message again.", formatPathAsProtocol(initialIpfsPath), formatPathAsProtocol(state->ipfsPath)); else return; @@ -215,7 +222,12 @@ class IPFSBinaryCacheStore : public Store auto resolvedIpfsPath = resolveIPNSName(ipnsPath); if (resolvedIpfsPath != initialIpfsPath) { - throw Error("The IPNS hash or DNS link %s resolves now to something different from the value it had when Nix was started;\n wanted: %s\n got %s\nPerhaps something else updated it in the meantime?", + throw Error( + "The IPNS hash or DNS link %s resolves now to something different from the value it had when Nix was started:\n" + " expected: %s\n" + " got %s\n" + "\n" + "Perhaps something else updated it in the meantime?", ipnsPath, initialIpfsPath, resolvedIpfsPath); } diff --git a/tests/ipfs.sh b/tests/ipfs.sh index c56c82638da..673ed060a38 100644 --- a/tests/ipfs.sh +++ b/tests/ipfs.sh @@ -55,7 +55,7 @@ touch $TEST_FILE # We try to do the evaluation with a known wrong hash to get the suggestion for # the correct one ! 
CORRECT_ADDRESS=$(nix eval '(builtins.fetchurl 'file://$PWD/$TEST_FILE')' --store ipfs://$EMPTY_HASH |& \ - grep 'current:' | awk '{print $2}') + grep 'modified:' | awk '{print $2}') # Then we eval and get back the hash-name part of the store path RESULT=$(nix eval '(builtins.fetchurl 'file://$PWD/$TEST_FILE')' --store $CORRECT_ADDRESS --json \ @@ -120,7 +120,7 @@ nix-build ./fixed.nix -A good \ IPFS_ADDRESS=$(set -e; \ set -o pipefail; \ ! nix copy --to ipfs://$EMPTY_HASH $(nix-build ./fixed.nix -A good --no-out-link) --experimental-features nix-command \ - |& grep current: | awk '{print $2}') + |& grep modified: | awk '{print $2}') # Verify that new path is valid. nix copy --to $IPFS_ADDRESS $(nix-build ./fixed.nix -A good --no-out-link) --experimental-features nix-command From bd00f74134f32e1904f8e5528646ed0e4c2c52df Mon Sep 17 00:00:00 2001 From: Carlo Nucera Date: Tue, 7 Jul 2020 15:35:48 -0400 Subject: [PATCH 097/104] Create header for ipfs-binary-cache-store --- src/libstore/ipfs-binary-cache-store.cc | 1043 +++++++++++------------ src/libstore/ipfs-binary-cache-store.hh | 130 +++ 2 files changed, 617 insertions(+), 556 deletions(-) create mode 100644 src/libstore/ipfs-binary-cache-store.hh diff --git a/src/libstore/ipfs-binary-cache-store.cc b/src/libstore/ipfs-binary-cache-store.cc index 431f806ed19..e5a2ab799cd 100644 --- a/src/libstore/ipfs-binary-cache-store.cc +++ b/src/libstore/ipfs-binary-cache-store.cc @@ -7,692 +7,623 @@ #include "archive.hh" #include "compression.hh" #include "names.hh" +#include "ipfs-binary-cache-store.hh" namespace nix { -MakeError(UploadToIPFS, Error); - -class IPFSBinaryCacheStore : public Store +IPFSBinaryCacheStore::IPFSBinaryCacheStore( + const Params & params, const Path & _cacheUri) + : Store(params) + , cacheUri(_cacheUri) { + auto state(_state.lock()); + + if (secretKeyFile != "") + secretKey = std::unique_ptr(new SecretKey(readFile(secretKeyFile))); + + StringSink sink; + sink << narVersionMagic1; + narMagic = 
*sink.s; + + if (cacheUri.back() == '/') + cacheUri.pop_back(); + + if (hasPrefix(cacheUri, "ipfs://")) { + initialIpfsPath = "/ipfs/" + std::string(cacheUri, 7); + state->ipfsPath = initialIpfsPath; + } else if (hasPrefix(cacheUri, "ipns://")) + optIpnsPath = "/ipns/" + std::string(cacheUri, 7); + else + throw Error("unknown IPNS URI '%s'", cacheUri); + + std::string ipfsAPIHost(get(params, "host").value_or("127.0.0.1")); + std::string ipfsAPIPort(get(params, "port").value_or("5001")); + daemonUri = "http://" + ipfsAPIHost + ":" + ipfsAPIPort; + + // Check the IPFS daemon is running + FileTransferRequest request(daemonUri + "/api/v0/version"); + request.post = true; + request.tries = 1; + auto res = getFileTransfer()->download(request); + auto versionInfo = nlohmann::json::parse(*res.data); + if (versionInfo.find("Version") == versionInfo.end()) + throw Error("daemon for IPFS is not running properly"); + + if (compareVersions(versionInfo["Version"], "0.4.0") < 0) + throw Error("daemon for IPFS is %s, when a minimum of 0.4.0 is required", versionInfo["Version"]); -public: - - const Setting compression{this, "xz", "compression", "NAR compression method ('xz', 'bzip2', or 'none')"}; - const Setting secretKeyFile{this, "", "secret-key", "path to secret key used to sign the binary cache"}; - const Setting parallelCompression{this, false, "parallel-compression", - "enable multi-threading compression, available for xz only currently"}; - -private: - - std::unique_ptr secretKey; - std::string narMagic; - - std::string cacheUri; - std::string daemonUri; - - std::string getIpfsPath() { - auto state(_state.lock()); - return state->ipfsPath; + // Resolve the IPNS name to an IPFS object + if (optIpnsPath) { + auto ipnsPath = *optIpnsPath; + initialIpfsPath = resolveIPNSName(ipnsPath); + state->ipfsPath = initialIpfsPath; } - std::string initialIpfsPath; - std::optional optIpnsPath; - struct State - { - std::string ipfsPath; - }; - Sync _state; + auto json = 
getIpfsDag(state->ipfsPath); -public: + // Verify StoreDir is correct + if (json.find("StoreDir") == json.end()) { + json["StoreDir"] = storeDir; + state->ipfsPath = putIpfsDag(json); + } else if (json["StoreDir"] != storeDir) + throw Error("binary cache '%s' is for Nix stores with prefix '%s', not '%s'", + getUri(), json["StoreDir"], storeDir); - IPFSBinaryCacheStore( - const Params & params, const Path & _cacheUri) - : Store(params) - , cacheUri(_cacheUri) - { - auto state(_state.lock()); + if (json.find("WantMassQuery") != json.end()) + wantMassQuery.setDefault(json["WantMassQuery"] ? "true" : "false"); - if (secretKeyFile != "") - secretKey = std::unique_ptr(new SecretKey(readFile(secretKeyFile))); + if (json.find("Priority") != json.end()) + priority.setDefault(fmt("%d", json["Priority"])); +} - StringSink sink; - sink << narVersionMagic1; - narMagic = *sink.s; +std::string IPFSBinaryCacheStore::putIpfsDag(nlohmann::json data) +{ + auto req = FileTransferRequest(daemonUri + "/api/v0/dag/put"); + req.data = std::make_shared(data.dump()); + req.post = true; + req.tries = 1; + auto res = getFileTransfer()->upload(req); + auto json = nlohmann::json::parse(*res.data); + return "/ipfs/" + (std::string) json["Cid"]["/"]; +} - if (cacheUri.back() == '/') - cacheUri.pop_back(); +nlohmann::json IPFSBinaryCacheStore::getIpfsDag(std::string objectPath) +{ + auto req = FileTransferRequest(daemonUri + "/api/v0/dag/get?arg=" + objectPath); + req.post = true; + req.tries = 1; + auto res = getFileTransfer()->download(req); + auto json = nlohmann::json::parse(*res.data); + return json; +} - if (hasPrefix(cacheUri, "ipfs://")) { - initialIpfsPath = "/ipfs/" + std::string(cacheUri, 7); - state->ipfsPath = initialIpfsPath; - } else if (hasPrefix(cacheUri, "ipns://")) - optIpnsPath = "/ipns/" + std::string(cacheUri, 7); - else - throw Error("unknown IPNS URI '%s'", cacheUri); +std::optional IPFSBinaryCacheStore::isDNSLinkPath(std::string path) +{ + if (path.find("/ipns/") != 0) + 
throw Error("path '%s' is not an ipns path", path); + auto subpath = std::string(path, 6); + if (subpath.find(".") != std::string::npos) { + return subpath; + } + return std::nullopt; +} - std::string ipfsAPIHost(get(params, "host").value_or("127.0.0.1")); - std::string ipfsAPIPort(get(params, "port").value_or("5001")); - daemonUri = "http://" + ipfsAPIHost + ":" + ipfsAPIPort; +bool IPFSBinaryCacheStore::ipfsObjectExists(const std::string ipfsPath) +{ + auto uri = daemonUri + "/api/v0/object/stat?arg=" + getFileTransfer()->urlEncode(ipfsPath); - // Check the IPFS daemon is running - FileTransferRequest request(daemonUri + "/api/v0/version"); - request.post = true; - request.tries = 1; + FileTransferRequest request(uri); + request.post = true; + request.tries = 1; + try { auto res = getFileTransfer()->download(request); - auto versionInfo = nlohmann::json::parse(*res.data); - if (versionInfo.find("Version") == versionInfo.end()) - throw Error("daemon for IPFS is not running properly"); - - if (compareVersions(versionInfo["Version"], "0.4.0") < 0) - throw Error("daemon for IPFS is %s, when a minimum of 0.4.0 is required", versionInfo["Version"]); - - // Resolve the IPNS name to an IPFS object - if (optIpnsPath) { - auto ipnsPath = *optIpnsPath; - initialIpfsPath = resolveIPNSName(ipnsPath); - state->ipfsPath = initialIpfsPath; - } - - auto json = getIpfsDag(state->ipfsPath); - - // Verify StoreDir is correct - if (json.find("StoreDir") == json.end()) { - json["StoreDir"] = storeDir; - state->ipfsPath = putIpfsDag(json); - } else if (json["StoreDir"] != storeDir) - throw Error("binary cache '%s' is for Nix stores with prefix '%s', not '%s'", - getUri(), json["StoreDir"], storeDir); - - if (json.find("WantMassQuery") != json.end()) - wantMassQuery.setDefault(json["WantMassQuery"] ? 
"true" : "false"); - - if (json.find("Priority") != json.end()) - priority.setDefault(fmt("%d", json["Priority"])); - } + auto json = nlohmann::json::parse(*res.data); - std::string getUri() override - { - return cacheUri; + return json.find("Hash") != json.end(); + } catch (FileTransferError & e) { + // probably should verify this is a not found error but + // ipfs gives us a 500 + return false; } +} -private: +std::string IPFSBinaryCacheStore::resolveIPNSName(std::string ipnsPath) { + debug("Resolving IPFS object of '%s', this could take a while.", ipnsPath); + auto uri = daemonUri + "/api/v0/name/resolve?arg=" + getFileTransfer()->urlEncode(ipnsPath); + FileTransferRequest request(uri); + request.post = true; + request.tries = 1; + auto res = getFileTransfer()->download(request); + auto json = nlohmann::json::parse(*res.data); + if (json.find("Path") == json.end()) + throw Error("daemon for IPFS is not running properly"); + return json["Path"]; +} - std::string putIpfsDag(nlohmann::json data) - { - auto req = FileTransferRequest(daemonUri + "/api/v0/dag/put"); - req.data = std::make_shared(data.dump()); - req.post = true; - req.tries = 1; - auto res = getFileTransfer()->upload(req); - auto json = nlohmann::json::parse(*res.data); - return "/ipfs/" + (std::string) json["Cid"]["/"]; - } +Path IPFSBinaryCacheStore::formatPathAsProtocol(Path path) { + if (hasPrefix(path, "/ipfs/")) + return "ipfs://" + path.substr(strlen("/ipfs/"), string::npos); + else if (hasPrefix(path, "/ipns/")) + return "ipns://" + path.substr(strlen("/ipfs/"), string::npos); + else return path; +} - nlohmann::json getIpfsDag(std::string objectPath) - { - auto req = FileTransferRequest(daemonUri + "/api/v0/dag/get?arg=" + objectPath); - req.post = true; - req.tries = 1; - auto res = getFileTransfer()->download(req); - auto json = nlohmann::json::parse(*res.data); - return json; - } +// IPNS publish can be slow, we try to do it rarely. 
+void IPFSBinaryCacheStore::sync() +{ + auto state(_state.lock()); - // Given a ipns path, checks if it corresponds to a DNSLink path, and in - // case returns the domain - static std::optional isDNSLinkPath(std::string path) - { - if (path.find("/ipns/") != 0) - throw Error("path '%s' is not an ipns path", path); - auto subpath = std::string(path, 6); - if (subpath.find(".") != std::string::npos) { - return subpath; - } - return std::nullopt; + if (!optIpnsPath) { + if (initialIpfsPath != state->ipfsPath) + throw Error( + "You performed store-modifying actions, creating a new store whose IPFS address doesn't match the configured one:\n" + " configured: %s\n" + " modified: %s\n" + "\n" + "This happens when one has configured nix to use a store via an IPFS hash. Since the store is immutable a new one is made (functional update) and the \"modified\" is its hash. Nix isn't going to statefully switch to using that hash, however, because that would be in violation of the configuration Nix has been given.\n" + "\n" + "You can change you configuration to use this hash, and run the command again in which case it will succeed with a no-opt. 
But if you are going to modify the store on a regular basis you should use DNS-link or IPNS instead so you have a properly mutable store, and avoid getting this message again.", + formatPathAsProtocol(initialIpfsPath), formatPathAsProtocol(state->ipfsPath)); + else + return; } - bool ipfsObjectExists(const std::string ipfsPath) - { - auto uri = daemonUri + "/api/v0/object/stat?arg=" + getFileTransfer()->urlEncode(ipfsPath); - - FileTransferRequest request(uri); - request.post = true; - request.tries = 1; - try { - auto res = getFileTransfer()->download(request); - auto json = nlohmann::json::parse(*res.data); - - return json.find("Hash") != json.end(); - } catch (FileTransferError & e) { - // probably should verify this is a not found error but - // ipfs gives us a 500 - return false; - } + auto ipnsPath = *optIpnsPath; + + auto resolvedIpfsPath = resolveIPNSName(ipnsPath); + if (resolvedIpfsPath != initialIpfsPath) { + throw Error( + "The IPNS hash or DNS link %s resolves now to something different from the value it had when Nix was started:\n" + " expected: %s\n" + " got %s\n" + "\n" + "Perhaps something else updated it in the meantime?", + ipnsPath, initialIpfsPath, resolvedIpfsPath); } - bool fileExists(const std::string & path) - { - return ipfsObjectExists(getIpfsPath() + "/" + path); + if (resolvedIpfsPath == state->ipfsPath) { + printMsg(lvlInfo, "The hash is already up to date, nothing to do"); + return; } - // Resolve the IPNS name to an IPFS object - std::string resolveIPNSName(std::string ipnsPath) { - debug("Resolving IPFS object of '%s', this could take a while.", ipnsPath); - auto uri = daemonUri + "/api/v0/name/resolve?arg=" + getFileTransfer()->urlEncode(ipnsPath); - FileTransferRequest request(uri); - request.post = true; - request.tries = 1; - auto res = getFileTransfer()->download(request); - auto json = nlohmann::json::parse(*res.data); - if (json.find("Path") == json.end()) - throw Error("daemon for IPFS is not running properly"); - return 
json["Path"]; + // Now, we know that paths are not up to date but also not changed due to updates in DNS or IPNS hash. + auto optDomain = isDNSLinkPath(ipnsPath); + if (optDomain) { + auto domain = *optDomain; + throw Error("The provided ipns path is a DNSLink, and syncing those is not supported.\n Current DNSLink: %s\nYou should update your DNS settings" + , domain); } -public: - Path formatPathAsProtocol(Path path) { - if (hasPrefix(path, "/ipfs/")) - return "ipfs://" + path.substr(strlen("/ipfs/"), string::npos); - else if (hasPrefix(path, "/ipns/")) - return "ipns://" + path.substr(strlen("/ipfs/"), string::npos); - else return path; - } + debug("Publishing '%s' to '%s', this could take a while.", state->ipfsPath, ipnsPath); - // IPNS publish can be slow, we try to do it rarely. - void sync() override - { - auto state(_state.lock()); - - if (!optIpnsPath) { - if (initialIpfsPath != state->ipfsPath) - throw Error( - "You performed store-modifying actions, creating a new store whose IPFS address doesn't match the configured one:\n" - " configured: %s\n" - " modified: %s\n" - "\n" - "This happens when one has configured nix to use a store via an IPFS hash. Since the store is immutable a new one is made (functional update) and the \"modified\" is its hash. Nix isn't going to statefully switch to using that hash, however, because that would be in violation of the configuration Nix has been given.\n" - "\n" - "You can change you configuration to use this hash, and run the command again in which case it will succeed with a no-opt. 
But if you are going to modify the store on a regular basis you should use DNS-link or IPNS instead so you have a properly mutable store, and avoid getting this message again.", - formatPathAsProtocol(initialIpfsPath), formatPathAsProtocol(state->ipfsPath)); - else - return; - } + auto uri = daemonUri + "/api/v0/name/publish?allow-offline=true"; + uri += "&arg=" + getFileTransfer()->urlEncode(state->ipfsPath); - auto ipnsPath = *optIpnsPath; + // Given the hash, we want to discover the corresponding name in the + // `ipfs key list` command, so that we publish to the right address in + // case the user has multiple ones available. - auto resolvedIpfsPath = resolveIPNSName(ipnsPath); - if (resolvedIpfsPath != initialIpfsPath) { - throw Error( - "The IPNS hash or DNS link %s resolves now to something different from the value it had when Nix was started:\n" - " expected: %s\n" - " got %s\n" - "\n" - "Perhaps something else updated it in the meantime?", - ipnsPath, initialIpfsPath, resolvedIpfsPath); - } + // NOTE: this is needed for ipfs < 0.5.0 because key must be a + // name, not an address. - if (resolvedIpfsPath == state->ipfsPath) { - printMsg(lvlInfo, "The hash is already up to date, nothing to do"); - return; - } + auto ipnsPathHash = std::string(ipnsPath, 6); + debug("Getting the name corresponding to hash %s", ipnsPathHash); - // Now, we know that paths are not up to date but also not changed due to updates in DNS or IPNS hash. 
- auto optDomain = isDNSLinkPath(ipnsPath); - if (optDomain) { - auto domain = *optDomain; - throw Error("The provided ipns path is a DNSLink, and syncing those is not supported.\n Current DNSLink: %s\nYou should update your DNS settings" - , domain); - } + auto keyListRequest = FileTransferRequest(daemonUri + "/api/v0/key/list/"); + keyListRequest.post = true; + keyListRequest.tries = 1; - debug("Publishing '%s' to '%s', this could take a while.", state->ipfsPath, ipnsPath); + auto keyListResponse = nlohmann::json::parse(*(getFileTransfer()->download(keyListRequest)).data); - auto uri = daemonUri + "/api/v0/name/publish?allow-offline=true"; - uri += "&arg=" + getFileTransfer()->urlEncode(state->ipfsPath); + std::string keyName {""}; + for (auto & key : keyListResponse["Keys"]) + if (key["Id"] == ipnsPathHash) + keyName = key["Name"]; + if (keyName == "") { + throw Error("We couldn't find a name corresponding to the provided ipns hash:\n hash: %s", ipnsPathHash); + } - // Given the hash, we want to discover the corresponding name in the - // `ipfs key list` command, so that we publish to the right address in - // case the user has multiple ones available. + // Now we can append the keyname to our original request + uri += "&key=" + keyName; - // NOTE: this is needed for ipfs < 0.5.0 because key must be a - // name, not an address. 
+ auto req = FileTransferRequest(uri); + req.post = true; + req.tries = 1; + getFileTransfer()->download(req); +} - auto ipnsPathHash = std::string(ipnsPath, 6); - debug("Getting the name corresponding to hash %s", ipnsPathHash); +void IPFSBinaryCacheStore::addLink(std::string name, std::string ipfsObject) +{ + auto state(_state.lock()); - auto keyListRequest = FileTransferRequest(daemonUri + "/api/v0/key/list/"); - keyListRequest.post = true; - keyListRequest.tries = 1; + auto uri = daemonUri + "/api/v0/object/patch/add-link?create=true"; + uri += "&arg=" + getFileTransfer()->urlEncode(state->ipfsPath); + uri += "&arg=" + getFileTransfer()->urlEncode(name); + uri += "&arg=" + getFileTransfer()->urlEncode(ipfsObject); - auto keyListResponse = nlohmann::json::parse(*(getFileTransfer()->download(keyListRequest)).data); + auto req = FileTransferRequest(uri); + req.post = true; + req.tries = 1; + auto res = getFileTransfer()->download(req); + auto json = nlohmann::json::parse(*res.data); - std::string keyName {""}; - for (auto & key : keyListResponse["Keys"]) - if (key["Id"] == ipnsPathHash) - keyName = key["Name"]; - if (keyName == "") { - throw Error("We couldn't find a name corresponding to the provided ipns hash:\n hash: %s", ipnsPathHash); - } + state->ipfsPath = "/ipfs/" + (std::string) json["Hash"]; +} - // Now we can append the keyname to our original request - uri += "&key=" + keyName; +std::string IPFSBinaryCacheStore::addFile(const std::string & data) +{ + // TODO: use callbacks + + auto req = FileTransferRequest(daemonUri + "/api/v0/add"); + req.data = std::make_shared(data); + req.post = true; + req.tries = 1; + auto res = getFileTransfer()->upload(req); + auto json = nlohmann::json::parse(*res.data); + return (std::string) json["Hash"]; +} - auto req = FileTransferRequest(uri); - req.post = true; - req.tries = 1; - getFileTransfer()->download(req); +void IPFSBinaryCacheStore::upsertFile(const std::string & path, const std::string & data, const std::string 
& mimeType) +{ + try { + addLink(path, "/ipfs/" + addFile(data)); + } catch (FileTransferError & e) { + throw UploadToIPFS("while uploading to IPFS binary cache at '%s': %s", cacheUri, e.info()); } +} -private: - - void addLink(std::string name, std::string ipfsObject) - { - auto state(_state.lock()); - - auto uri = daemonUri + "/api/v0/object/patch/add-link?create=true"; - uri += "&arg=" + getFileTransfer()->urlEncode(state->ipfsPath); - uri += "&arg=" + getFileTransfer()->urlEncode(name); - uri += "&arg=" + getFileTransfer()->urlEncode(ipfsObject); +void IPFSBinaryCacheStore::getFile(const std::string & path, + Callback> callback) noexcept +{ + std::string path_(path); + if (hasPrefix(path, "ipfs://")) + path_ = "/ipfs/" + std::string(path, 7); + getIpfsObject(path_, std::move(callback)); +} - auto req = FileTransferRequest(uri); - req.post = true; - req.tries = 1; - auto res = getFileTransfer()->download(req); - auto json = nlohmann::json::parse(*res.data); +void IPFSBinaryCacheStore::getFile(const std::string & path, Sink & sink) +{ + std::promise> promise; + getFile(path, + {[&](std::future> result) { + try { + promise.set_value(result.get()); + } catch (...) 
{ + promise.set_exception(std::current_exception()); + } + }}); + auto data = promise.get_future().get(); + sink((unsigned char *) data->data(), data->size()); +} - state->ipfsPath = "/ipfs/" + (std::string) json["Hash"]; +std::shared_ptr IPFSBinaryCacheStore::getFile(const std::string & path) +{ + StringSink sink; + try { + getFile(path, sink); + } catch (NoSuchBinaryCacheFile &) { + return nullptr; } + return sink.s; +} - std::string addFile(const std::string & data) - { - // TODO: use callbacks +void IPFSBinaryCacheStore::getIpfsObject(const std::string & ipfsPath, + Callback> callback) noexcept +{ + auto uri = daemonUri + "/api/v0/cat?arg=" + getFileTransfer()->urlEncode(ipfsPath); - auto req = FileTransferRequest(daemonUri + "/api/v0/add"); - req.data = std::make_shared(data); - req.post = true; - req.tries = 1; - auto res = getFileTransfer()->upload(req); - auto json = nlohmann::json::parse(*res.data); - return (std::string) json["Hash"]; - } + FileTransferRequest request(uri); + request.post = true; + request.tries = 1; - void upsertFile(const std::string & path, const std::string & data, const std::string & mimeType) - { - try { - addLink(path, "/ipfs/" + addFile(data)); - } catch (FileTransferError & e) { - throw UploadToIPFS("while uploading to IPFS binary cache at '%s': %s", cacheUri, e.info()); - } - } + auto callbackPtr = std::make_shared(std::move(callback)); - void getFile(const std::string & path, - Callback> callback) noexcept - { - std::string path_(path); - if (hasPrefix(path, "ipfs://")) - path_ = "/ipfs/" + std::string(path, 7); - getIpfsObject(path_, std::move(callback)); - } + getFileTransfer()->enqueueFileTransfer(request, + {[callbackPtr](std::future result){ + try { + (*callbackPtr)(result.get().data); + } catch (FileTransferError & e) { + return (*callbackPtr)(std::shared_ptr()); + } catch (...) 
{ + callbackPtr->rethrow(); + } + }} + ); +} - void getFile(const std::string & path, Sink & sink) - { - std::promise> promise; - getFile(path, - {[&](std::future> result) { - try { - promise.set_value(result.get()); - } catch (...) { - promise.set_exception(std::current_exception()); - } - }}); - auto data = promise.get_future().get(); - sink((unsigned char *) data->data(), data->size()); - } +void IPFSBinaryCacheStore::writeNarInfo(ref narInfo) +{ + auto json = nlohmann::json::object(); + json["narHash"] = narInfo->narHash.to_string(Base32, true); + json["narSize"] = narInfo->narSize; - std::shared_ptr getFile(const std::string & path) - { - StringSink sink; - try { - getFile(path, sink); - } catch (NoSuchBinaryCacheFile &) { - return nullptr; - } - return sink.s; - } + auto narMap = getIpfsDag(getIpfsPath())["nar"]; - void getIpfsObject(const std::string & ipfsPath, - Callback> callback) noexcept - { - auto uri = daemonUri + "/api/v0/cat?arg=" + getFileTransfer()->urlEncode(ipfsPath); - - FileTransferRequest request(uri); - request.post = true; - request.tries = 1; - - auto callbackPtr = std::make_shared(std::move(callback)); - - getFileTransfer()->enqueueFileTransfer(request, - {[callbackPtr](std::future result){ - try { - (*callbackPtr)(result.get().data); - } catch (FileTransferError & e) { - return (*callbackPtr)(std::shared_ptr()); - } catch (...) 
{ - callbackPtr->rethrow(); - } - }} - ); + json["references"] = nlohmann::json::object(); + json["hasSelfReference"] = false; + for (auto & ref : narInfo->references) { + if (ref == narInfo->path) + json["hasSelfReference"] = true; + else + json["references"].emplace(ref.to_string(), narMap[(std::string) ref.to_string()]); } - void writeNarInfo(ref narInfo) - { - auto json = nlohmann::json::object(); - json["narHash"] = narInfo->narHash.to_string(Base32, true); - json["narSize"] = narInfo->narSize; - - auto narMap = getIpfsDag(getIpfsPath())["nar"]; - - json["references"] = nlohmann::json::object(); - json["hasSelfReference"] = false; - for (auto & ref : narInfo->references) { - if (ref == narInfo->path) - json["hasSelfReference"] = true; - else - json["references"].emplace(ref.to_string(), narMap[(std::string) ref.to_string()]); - } + json["ca"] = narInfo->ca; - json["ca"] = narInfo->ca; + if (narInfo->deriver) + json["deriver"] = printStorePath(*narInfo->deriver); - if (narInfo->deriver) - json["deriver"] = printStorePath(*narInfo->deriver); + json["registrationTime"] = narInfo->registrationTime; + json["ultimate"] = narInfo->ultimate; - json["registrationTime"] = narInfo->registrationTime; - json["ultimate"] = narInfo->ultimate; + json["sigs"] = nlohmann::json::array(); + for (auto & sig : narInfo->sigs) + json["sigs"].push_back(sig); - json["sigs"] = nlohmann::json::array(); - for (auto & sig : narInfo->sigs) - json["sigs"].push_back(sig); + if (!narInfo->url.empty()) { + json["ipfsCid"] = nlohmann::json::object(); + json["ipfsCid"]["/"] = std::string(narInfo->url, 7); + } - if (!narInfo->url.empty()) { - json["ipfsCid"] = nlohmann::json::object(); - json["ipfsCid"]["/"] = std::string(narInfo->url, 7); - } + if (narInfo->fileHash) + json["downloadHash"] = narInfo->fileHash.to_string(Base32, true); - if (narInfo->fileHash) - json["downloadHash"] = narInfo->fileHash.to_string(Base32, true); + json["downloadSize"] = narInfo->fileSize; + json["compression"] = 
narInfo->compression; + json["system"] = narInfo->system; - json["downloadSize"] = narInfo->fileSize; - json["compression"] = narInfo->compression; - json["system"] = narInfo->system; + auto narObjectPath = putIpfsDag(json); - auto narObjectPath = putIpfsDag(json); + auto state(_state.lock()); + json = getIpfsDag(state->ipfsPath); - auto state(_state.lock()); - json = getIpfsDag(state->ipfsPath); + if (json.find("nar") == json.end()) + json["nar"] = nlohmann::json::object(); - if (json.find("nar") == json.end()) - json["nar"] = nlohmann::json::object(); + auto hashObject = nlohmann::json::object(); + hashObject.emplace("/", std::string(narObjectPath, 6)); - auto hashObject = nlohmann::json::object(); - hashObject.emplace("/", std::string(narObjectPath, 6)); + json["nar"].emplace(narInfo->path.to_string(), hashObject); - json["nar"].emplace(narInfo->path.to_string(), hashObject); + state->ipfsPath = putIpfsDag(json); - state->ipfsPath = putIpfsDag(json); - - { - auto hashPart = narInfo->path.hashPart(); - auto state_(this->state.lock()); - state_->pathInfoCache.upsert( - std::string { hashPart }, - PathInfoCacheValue { .value = std::shared_ptr(narInfo) }); - } - } - -public: - - void addToStore(const ValidPathInfo & info, Source & narSource, - RepairFlag repair, CheckSigsFlag checkSigs, std::shared_ptr accessor) override { - // FIXME: See if we can use the original source to reduce memory usage. - auto nar = make_ref(narSource.drain()); - - if (!repair && isValidPath(info.path)) return; + auto hashPart = narInfo->path.hashPart(); + auto state_(this->state.lock()); + state_->pathInfoCache.upsert( + std::string { hashPart }, + PathInfoCacheValue { .value = std::shared_ptr(narInfo) }); + } +} - /* Verify that all references are valid. This may do some .narinfo - reads, but typically they'll already be cached. 
*/ - for (auto & ref : info.references) - try { - if (ref != info.path) - queryPathInfo(ref); - } catch (InvalidPath &) { - throw Error("cannot add '%s' to the binary cache because the reference '%s' is not valid", - printStorePath(info.path), printStorePath(ref)); - } +void IPFSBinaryCacheStore::addToStore(const ValidPathInfo & info, Source & narSource, + RepairFlag repair, CheckSigsFlag checkSigs, std::shared_ptr accessor) +{ + // FIXME: See if we can use the original source to reduce memory usage. + auto nar = make_ref(narSource.drain()); - assert(nar->compare(0, narMagic.size(), narMagic) == 0); + if (!repair && isValidPath(info.path)) return; - auto narInfo = make_ref(info); + /* Verify that all references are valid. This may do some .narinfo + reads, but typically they'll already be cached. */ + for (auto & ref : info.references) + try { + if (ref != info.path) + queryPathInfo(ref); + } catch (InvalidPath &) { + throw Error("cannot add '%s' to the binary cache because the reference '%s' is not valid", + printStorePath(info.path), printStorePath(ref)); + } - narInfo->narSize = nar->size(); - narInfo->narHash = hashString(htSHA256, *nar); + assert(nar->compare(0, narMagic.size(), narMagic) == 0); - if (info.narHash && info.narHash != narInfo->narHash) - throw Error("refusing to copy corrupted path '%1%' to binary cache", printStorePath(info.path)); + auto narInfo = make_ref(info); - /* Compress the NAR. 
*/ - narInfo->compression = compression; - auto now1 = std::chrono::steady_clock::now(); - auto narCompressed = compress(compression, *nar, parallelCompression); - auto now2 = std::chrono::steady_clock::now(); - narInfo->fileHash = hashString(htSHA256, *narCompressed); - narInfo->fileSize = narCompressed->size(); + narInfo->narSize = nar->size(); + narInfo->narHash = hashString(htSHA256, *nar); - auto duration = std::chrono::duration_cast(now2 - now1).count(); - printMsg(lvlTalkative, "copying path '%1%' (%2% bytes, compressed %3$.1f%% in %4% ms) to binary cache", - printStorePath(narInfo->path), narInfo->narSize, - ((1.0 - (double) narCompressed->size() / nar->size()) * 100.0), - duration); + if (info.narHash && info.narHash != narInfo->narHash) + throw Error("refusing to copy corrupted path '%1%' to binary cache", printStorePath(info.path)); - /* Atomically write the NAR file. */ - stats.narWrite++; - narInfo->url = "ipfs://" + addFile(*narCompressed); + /* Compress the NAR. */ + narInfo->compression = compression; + auto now1 = std::chrono::steady_clock::now(); + auto narCompressed = compress(compression, *nar, parallelCompression); + auto now2 = std::chrono::steady_clock::now(); + narInfo->fileHash = hashString(htSHA256, *narCompressed); + narInfo->fileSize = narCompressed->size(); - stats.narWriteBytes += nar->size(); - stats.narWriteCompressedBytes += narCompressed->size(); - stats.narWriteCompressionTimeMs += duration; + auto duration = std::chrono::duration_cast(now2 - now1).count(); + printMsg(lvlTalkative, "copying path '%1%' (%2% bytes, compressed %3$.1f%% in %4% ms) to binary cache", + printStorePath(narInfo->path), narInfo->narSize, + ((1.0 - (double) narCompressed->size() / nar->size()) * 100.0), + duration); - /* Atomically write the NAR info file.*/ - if (secretKey) narInfo->sign(*this, *secretKey); + /* Atomically write the NAR file. 
*/ + stats.narWrite++; + narInfo->url = "ipfs://" + addFile(*narCompressed); - writeNarInfo(narInfo); + stats.narWriteBytes += nar->size(); + stats.narWriteCompressedBytes += narCompressed->size(); + stats.narWriteCompressionTimeMs += duration; - stats.narInfoWrite++; - } + /* Atomically write the NAR info file.*/ + if (secretKey) narInfo->sign(*this, *secretKey); - bool isValidPathUncached(const StorePath & storePath) override - { - auto json = getIpfsDag(getIpfsPath()); - if (!json.contains("nar")) - return false; - return json["nar"].contains(storePath.to_string()); - } + writeNarInfo(narInfo); - void narFromPath(const StorePath & storePath, Sink & sink) override - { - auto info = queryPathInfo(storePath).cast(); + stats.narInfoWrite++; +} - uint64_t narSize = 0; +bool IPFSBinaryCacheStore::isValidPathUncached(const StorePath & storePath) +{ + auto json = getIpfsDag(getIpfsPath()); + if (!json.contains("nar")) + return false; + return json["nar"].contains(storePath.to_string()); +} - LambdaSink wrapperSink([&](const unsigned char * data, size_t len) { - sink(data, len); - narSize += len; - }); +void IPFSBinaryCacheStore::narFromPath(const StorePath & storePath, Sink & sink) +{ + auto info = queryPathInfo(storePath).cast(); - auto decompressor = makeDecompressionSink(info->compression, wrapperSink); + uint64_t narSize = 0; - try { - getFile(info->url, *decompressor); - } catch (NoSuchBinaryCacheFile & e) { - throw SubstituteGone(e.what()); - } + LambdaSink wrapperSink([&](const unsigned char * data, size_t len) { + sink(data, len); + narSize += len; + }); - decompressor->finish(); + auto decompressor = makeDecompressionSink(info->compression, wrapperSink); - stats.narRead++; - //stats.narReadCompressedBytes += nar->size(); // FIXME - stats.narReadBytes += narSize; + try { + getFile(info->url, *decompressor); + } catch (NoSuchBinaryCacheFile & e) { + throw SubstituteGone(e.what()); } - void queryPathInfoUncached(const StorePath & storePath, - Callback> callback) 
noexcept override - { - // TODO: properly use callbacks - - auto callbackPtr = std::make_shared(std::move(callback)); - - auto uri = getUri(); - auto storePathS = printStorePath(storePath); - auto act = std::make_shared(*logger, lvlTalkative, actQueryPathInfo, - fmt("querying info about '%s' on '%s'", storePathS, uri), Logger::Fields{storePathS, uri}); - PushActivity pact(act->id); + decompressor->finish(); - auto json = getIpfsDag(getIpfsPath()); + stats.narRead++; + //stats.narReadCompressedBytes += nar->size(); // FIXME + stats.narReadBytes += narSize; +} - if (!json.contains("nar") || !json["nar"].contains(storePath.to_string())) - return (*callbackPtr)(nullptr); +void IPFSBinaryCacheStore::queryPathInfoUncached(const StorePath & storePath, + Callback> callback) noexcept +{ + // TODO: properly use callbacks - auto narObjectHash = (std::string) json["nar"][(std::string) storePath.to_string()]["/"]; - json = getIpfsDag("/ipfs/" + narObjectHash); + auto callbackPtr = std::make_shared(std::move(callback)); - NarInfo narInfo { storePath }; - narInfo.narHash = Hash((std::string) json["narHash"]); - narInfo.narSize = json["narSize"]; + auto uri = getUri(); + auto storePathS = printStorePath(storePath); + auto act = std::make_shared(*logger, lvlTalkative, actQueryPathInfo, + fmt("querying info about '%s' on '%s'", storePathS, uri), Logger::Fields{storePathS, uri}); + PushActivity pact(act->id); - for (auto & ref : json["references"].items()) - narInfo.references.insert(StorePath(ref.key())); + auto json = getIpfsDag(getIpfsPath()); - if (json["hasSelfReference"]) - narInfo.references.insert(storePath); + if (!json.contains("nar") || !json["nar"].contains(storePath.to_string())) + return (*callbackPtr)(nullptr); - if (json.find("ca") != json.end()) - json["ca"].get_to(narInfo.ca); + auto narObjectHash = (std::string) json["nar"][(std::string) storePath.to_string()]["/"]; + json = getIpfsDag("/ipfs/" + narObjectHash); - if (json.find("deriver") != json.end()) - 
narInfo.deriver = parseStorePath((std::string) json["deriver"]); + NarInfo narInfo { storePath }; + narInfo.narHash = Hash((std::string) json["narHash"]); + narInfo.narSize = json["narSize"]; - if (json.find("registrationTime") != json.end()) - narInfo.registrationTime = json["registrationTime"]; + for (auto & ref : json["references"].items()) + narInfo.references.insert(StorePath(ref.key())); - if (json.find("ultimate") != json.end()) - narInfo.ultimate = json["ultimate"]; + if (json["hasSelfReference"]) + narInfo.references.insert(storePath); - if (json.find("sigs") != json.end()) - for (auto & sig : json["sigs"]) - narInfo.sigs.insert((std::string) sig); + if (json.find("ca") != json.end()) + json["ca"].get_to(narInfo.ca); - if (json.find("ipfsCid") != json.end()) - narInfo.url = "ipfs://" + json["ipfsCid"]["/"].get(); + if (json.find("deriver") != json.end()) + narInfo.deriver = parseStorePath((std::string) json["deriver"]); - if (json.find("downloadHash") != json.end()) - narInfo.fileHash = Hash((std::string) json["downloadHash"]); + if (json.find("registrationTime") != json.end()) + narInfo.registrationTime = json["registrationTime"]; - if (json.find("downloadSize") != json.end()) - narInfo.fileSize = json["downloadSize"]; + if (json.find("ultimate") != json.end()) + narInfo.ultimate = json["ultimate"]; - if (json.find("compression") != json.end()) - narInfo.compression = json["compression"]; + if (json.find("sigs") != json.end()) + for (auto & sig : json["sigs"]) + narInfo.sigs.insert((std::string) sig); - if (json.find("system") != json.end()) - narInfo.system = json["system"]; + if (json.find("ipfsCid") != json.end()) + narInfo.url = "ipfs://" + json["ipfsCid"]["/"].get(); - (*callbackPtr)((std::shared_ptr) - std::make_shared(narInfo)); - } + if (json.find("downloadHash") != json.end()) + narInfo.fileHash = Hash((std::string) json["downloadHash"]); - StorePath addToStore(const string & name, const Path & srcPath, - FileIngestionMethod method, HashType 
hashAlgo, PathFilter & filter, RepairFlag repair) override - { - // FIXME: some cut&paste from LocalStore::addToStore(). + if (json.find("downloadSize") != json.end()) + narInfo.fileSize = json["downloadSize"]; - /* Read the whole path into memory. This is not a very scalable - method for very large paths, but `copyPath' is mainly used for - small files. */ - StringSink sink; - Hash h; - if (method == FileIngestionMethod::Recursive) { - dumpPath(srcPath, sink, filter); - h = hashString(hashAlgo, *sink.s); - } else { - auto s = readFile(srcPath); - dumpString(s, sink); - h = hashString(hashAlgo, s); - } + if (json.find("compression") != json.end()) + narInfo.compression = json["compression"]; - ValidPathInfo info(makeFixedOutputPath(method, h, name)); + if (json.find("system") != json.end()) + narInfo.system = json["system"]; - auto source = StringSource { *sink.s }; - addToStore(info, source, repair, CheckSigs, nullptr); + (*callbackPtr)((std::shared_ptr) + std::make_shared(narInfo)); +} - return std::move(info.path); +StorePath IPFSBinaryCacheStore::addToStore(const string & name, const Path & srcPath, + FileIngestionMethod method, HashType hashAlgo, PathFilter & filter, RepairFlag repair) +{ + // FIXME: some cut&paste from LocalStore::addToStore(). + + /* Read the whole path into memory. This is not a very scalable + method for very large paths, but `copyPath' is mainly used for + small files. 
*/ + StringSink sink; + Hash h; + if (method == FileIngestionMethod::Recursive) { + dumpPath(srcPath, sink, filter); + h = hashString(hashAlgo, *sink.s); + } else { + auto s = readFile(srcPath); + dumpString(s, sink); + h = hashString(hashAlgo, s); } - StorePath addTextToStore(const string & name, const string & s, - const StorePathSet & references, RepairFlag repair) override - { - ValidPathInfo info(computeStorePathForText(name, s, references)); - info.references = references; - - if (repair || !isValidPath(info.path)) { - StringSink sink; - dumpString(s, sink); - auto source = StringSource { *sink.s }; - addToStore(info, source, repair, CheckSigs, nullptr); - } - - return std::move(info.path); - } + ValidPathInfo info(makeFixedOutputPath(method, h, name)); - void addSignatures(const StorePath & storePath, const StringSet & sigs) override - { - /* Note: this is inherently racy since there is no locking on - binary caches. In particular, with S3 this unreliable, even - when addSignatures() is called sequentially on a path, because - S3 might return an outdated cached version. 
*/ + auto source = StringSource { *sink.s }; + addToStore(info, source, repair, CheckSigs, nullptr); - auto narInfo = make_ref((NarInfo &) *queryPathInfo(storePath)); + return std::move(info.path); +} - narInfo->sigs.insert(sigs.begin(), sigs.end()); +StorePath IPFSBinaryCacheStore::addTextToStore(const string & name, const string & s, + const StorePathSet & references, RepairFlag repair) +{ + ValidPathInfo info(computeStorePathForText(name, s, references)); + info.references = references; - writeNarInfo(narInfo); + if (repair || !isValidPath(info.path)) { + StringSink sink; + dumpString(s, sink); + auto source = StringSource { *sink.s }; + addToStore(info, source, repair, CheckSigs, nullptr); } - virtual void addTempRoot(const StorePath & path) override - { - // TODO make temporary pin/addToStore, see - // https://github.com/ipfs/go-ipfs/issues/4559 and - // https://github.com/ipfs/go-ipfs/issues/4328 for some ideas. - auto uri = daemonUri + "/api/v0/pin/add?arg=" + getIpfsPath() + "/" "nar" "/" + string { path.to_string() }; - - FileTransferRequest request(uri); - request.post = true; - request.tries = 1; - getFileTransfer()->upload(request); - } + return std::move(info.path); +} - std::shared_ptr getBuildLog(const StorePath & path) override - { unsupported("getBuildLog"); } +void IPFSBinaryCacheStore::addSignatures(const StorePath & storePath, const StringSet & sigs) +{ + /* Note: this is inherently racy since there is no locking on + binary caches. In particular, with S3 this unreliable, even + when addSignatures() is called sequentially on a path, because + S3 might return an outdated cached version. 
*/ - BuildResult buildDerivation(const StorePath & drvPath, const BasicDerivation & drv, - BuildMode buildMode) override - { unsupported("buildDerivation"); } + auto narInfo = make_ref((NarInfo &) *queryPathInfo(storePath)); - void ensurePath(const StorePath & path) override - { unsupported("ensurePath"); } + narInfo->sigs.insert(sigs.begin(), sigs.end()); - std::optional queryPathFromHashPart(const std::string & hashPart) override - { unsupported("queryPathFromHashPart"); } + writeNarInfo(narInfo); +} -}; +void IPFSBinaryCacheStore::addTempRoot(const StorePath & path) +{ + // TODO make temporary pin/addToStore, see + // https://github.com/ipfs/go-ipfs/issues/4559 and + // https://github.com/ipfs/go-ipfs/issues/4328 for some ideas. + auto uri = daemonUri + "/api/v0/pin/add?arg=" + getIpfsPath() + "/" "nar" "/" + string { path.to_string() }; + + FileTransferRequest request(uri); + request.post = true; + request.tries = 1; + getFileTransfer()->upload(request); +} static RegisterStoreImplementation regStore([]( const std::string & uri, const Store::Params & params) diff --git a/src/libstore/ipfs-binary-cache-store.hh b/src/libstore/ipfs-binary-cache-store.hh new file mode 100644 index 00000000000..d1201a65184 --- /dev/null +++ b/src/libstore/ipfs-binary-cache-store.hh @@ -0,0 +1,130 @@ +#pragma once + +namespace nix { + +MakeError(UploadToIPFS, Error); + +class IPFSBinaryCacheStore : public Store +{ + +public: + + const Setting compression{this, "xz", "compression", "NAR compression method ('xz', 'bzip2', or 'none')"}; + const Setting secretKeyFile{this, "", "secret-key", "path to secret key used to sign the binary cache"}; + const Setting parallelCompression{this, false, "parallel-compression", + "enable multi-threading compression, available for xz only currently"}; + +private: + + std::unique_ptr secretKey; + std::string narMagic; + + std::string cacheUri; + std::string daemonUri; + + std::string getIpfsPath() { + auto state(_state.lock()); + return 
state->ipfsPath; + } + std::string initialIpfsPath; + std::optional optIpnsPath; + + struct State + { + std::string ipfsPath; + }; + Sync _state; + +public: + + IPFSBinaryCacheStore(const Params & params, const Path & _cacheUri); + + std::string getUri() override + { + return cacheUri; + } + +private: + + std::string putIpfsDag(nlohmann::json data); + + nlohmann::json getIpfsDag(std::string objectPath); + + // Given a ipns path, checks if it corresponds to a DNSLink path, and in + // case returns the domain + static std::optional isDNSLinkPath(std::string path); + + bool ipfsObjectExists(const std::string ipfsPath); + + bool fileExists(const std::string & path) + { + return ipfsObjectExists(getIpfsPath() + "/" + path); + } + + // Resolve the IPNS name to an IPFS object + std::string resolveIPNSName(std::string ipnsPath); + +public: + Path formatPathAsProtocol(Path path); + + // IPNS publish can be slow, we try to do it rarely. + void sync() override; + +private: + + void addLink(std::string name, std::string ipfsObject); + + std::string addFile(const std::string & data); + + void upsertFile(const std::string & path, const std::string & data, const std::string & mimeType); + + void getFile(const std::string & path, + Callback> callback) noexcept; + + void getFile(const std::string & path, Sink & sink); + + std::shared_ptr getFile(const std::string & path); + + void getIpfsObject(const std::string & ipfsPath, + Callback> callback) noexcept; + + void writeNarInfo(ref narInfo); + +public: + + void addToStore(const ValidPathInfo & info, Source & narSource, + RepairFlag repair, CheckSigsFlag checkSigs, std::shared_ptr accessor) override; + + bool isValidPathUncached(const StorePath & storePath) override; + + void narFromPath(const StorePath & storePath, Sink & sink) override; + + void queryPathInfoUncached(const StorePath & storePath, + Callback> callback) noexcept override; + + StorePath addToStore(const string & name, const Path & srcPath, + FileIngestionMethod method, 
HashType hashAlgo, PathFilter & filter, RepairFlag repair) override; + + StorePath addTextToStore(const string & name, const string & s, + const StorePathSet & references, RepairFlag repair) override; + + void addSignatures(const StorePath & storePath, const StringSet & sigs) override; + + virtual void addTempRoot(const StorePath & path) override; + + std::shared_ptr getBuildLog(const StorePath & path) override + { unsupported("getBuildLog"); } + + BuildResult buildDerivation(const StorePath & drvPath, const BasicDerivation & drv, + BuildMode buildMode) override + { unsupported("buildDerivation"); } + + void ensurePath(const StorePath & path) override + { unsupported("ensurePath"); } + + std::optional queryPathFromHashPart(const std::string & hashPart) override + { unsupported("queryPathFromHashPart"); } + +}; + +} From 1eb8ac7469034b9924cdcdcf9ab46c8a107e0a46 Mon Sep 17 00:00:00 2001 From: Carlo Nucera Date: Tue, 7 Jul 2020 16:34:16 -0400 Subject: [PATCH 098/104] Fix include directives for ipfs binary cache store --- src/libstore/ipfs-binary-cache-store.cc | 3 +-- src/libstore/ipfs-binary-cache-store.hh | 2 ++ 2 files changed, 3 insertions(+), 2 deletions(-) diff --git a/src/libstore/ipfs-binary-cache-store.cc b/src/libstore/ipfs-binary-cache-store.cc index e5a2ab799cd..da224aea021 100644 --- a/src/libstore/ipfs-binary-cache-store.cc +++ b/src/libstore/ipfs-binary-cache-store.cc @@ -1,13 +1,12 @@ #include #include -#include "binary-cache-store.hh" +#include "ipfs-binary-cache-store.hh" #include "filetransfer.hh" #include "nar-info-disk-cache.hh" #include "archive.hh" #include "compression.hh" #include "names.hh" -#include "ipfs-binary-cache-store.hh" namespace nix { diff --git a/src/libstore/ipfs-binary-cache-store.hh b/src/libstore/ipfs-binary-cache-store.hh index d1201a65184..12953397ad8 100644 --- a/src/libstore/ipfs-binary-cache-store.hh +++ b/src/libstore/ipfs-binary-cache-store.hh @@ -1,5 +1,7 @@ #pragma once +#include "binary-cache-store.hh" + namespace nix 
{ MakeError(UploadToIPFS, Error); From f618957c6701566c56b6d43ddfda2e246df14582 Mon Sep 17 00:00:00 2001 From: Carlo Nucera Date: Wed, 15 Jul 2020 14:36:44 -0400 Subject: [PATCH 099/104] Add todo for better error wrapping --- src/libstore/ipfs-binary-cache-store.cc | 1 + 1 file changed, 1 insertion(+) diff --git a/src/libstore/ipfs-binary-cache-store.cc b/src/libstore/ipfs-binary-cache-store.cc index 0bd89b1443a..62e0b1ffd14 100644 --- a/src/libstore/ipfs-binary-cache-store.cc +++ b/src/libstore/ipfs-binary-cache-store.cc @@ -267,6 +267,7 @@ void IPFSBinaryCacheStore::upsertFile(const std::string & path, const std::strin try { addLink(path, "/ipfs/" + addFile(data)); } catch (FileTransferError & e) { + // TODO: may wrap the inner error in a better way. throw UploadToIPFS("while uploading to IPFS binary cache at '%s': %s", cacheUri, e.msg()); } } From fa5ffd0f8ca624599558ce319d5f4c08505867cd Mon Sep 17 00:00:00 2001 From: Matthew Bauer Date: Fri, 10 Jul 2020 15:22:35 -0400 Subject: [PATCH 100/104] Add allow-modify flag to cache --- src/libstore/ipfs-binary-cache-store.cc | 66 ++++++++++++++----------- src/libstore/ipfs-binary-cache-store.hh | 8 ++- tests/ipfs.sh | 12 ++--- 3 files changed, 49 insertions(+), 37 deletions(-) diff --git a/src/libstore/ipfs-binary-cache-store.cc b/src/libstore/ipfs-binary-cache-store.cc index 62e0b1ffd14..8d672606840 100644 --- a/src/libstore/ipfs-binary-cache-store.cc +++ b/src/libstore/ipfs-binary-cache-store.cc @@ -28,11 +28,20 @@ IPFSBinaryCacheStore::IPFSBinaryCacheStore( cacheUri.pop_back(); if (hasPrefix(cacheUri, "ipfs://")) { - initialIpfsPath = "/ipfs/" + std::string(cacheUri, 7); - state->ipfsPath = initialIpfsPath; - } else if (hasPrefix(cacheUri, "ipns://")) - optIpnsPath = "/ipns/" + std::string(cacheUri, 7); - else + if (cacheUri == "ipfs://") { + allowModify = true; + } else { + initialIpfsPath = "/ipfs/" + std::string(cacheUri, 7); + state->ipfsPath = initialIpfsPath; + allowModify = get(params, 
"allow-modify").value_or("") == "true"; + } + } else if (hasPrefix(cacheUri, "ipns://")) { + ipnsPath = "/ipns/" + std::string(cacheUri, 7); + + // TODO: we should try to determine if we are able to modify + // this ipns + allowModify = true; + } else throw Error("unknown IPNS URI '%s'", cacheUri); std::string ipfsAPIHost(get(params, "host").value_or("127.0.0.1")); @@ -52,9 +61,8 @@ IPFSBinaryCacheStore::IPFSBinaryCacheStore( throw Error("daemon for IPFS is %s, when a minimum of 0.4.0 is required", versionInfo["Version"]); // Resolve the IPNS name to an IPFS object - if (optIpnsPath) { - auto ipnsPath = *optIpnsPath; - initialIpfsPath = resolveIPNSName(ipnsPath); + if (ipnsPath) { + initialIpfsPath = resolveIPNSName(*ipnsPath); state->ipfsPath = initialIpfsPath; } @@ -152,32 +160,26 @@ void IPFSBinaryCacheStore::sync() { auto state(_state.lock()); - if (!optIpnsPath) { - if (initialIpfsPath != state->ipfsPath) - throw Error( - "You performed store-modifying actions, creating a new store whose IPFS address doesn't match the configured one:\n" - " configured: %s\n" - " modified: %s\n" - "\n" - "This happens when one has configured nix to use a store via an IPFS hash. Since the store is immutable a new one is made (functional update) and the \"modified\" is its hash. Nix isn't going to statefully switch to using that hash, however, because that would be in violation of the configuration Nix has been given.\n" - "\n" - "You can change you configuration to use this hash, and run the command again in which case it will succeed with a no-opt. 
But if you are going to modify the store on a regular basis you should use DNS-link or IPNS instead so you have a properly mutable store, and avoid getting this message again.", - formatPathAsProtocol(initialIpfsPath), formatPathAsProtocol(state->ipfsPath)); - else - return; - } + if (state->ipfsPath == initialIpfsPath) + return; - auto ipnsPath = *optIpnsPath; + if (!allowModify) + throw Error("can't update '%s' to '%s'", cacheUri, state->ipfsPath); - auto resolvedIpfsPath = resolveIPNSName(ipnsPath); + if (!ipnsPath) { + warn("created new store at '%s', but can't update store at '%s'", "ipfs://" + std::string(state->ipfsPath, 6), cacheUri); + return; + } + + auto resolvedIpfsPath = resolveIPNSName(*ipnsPath); if (resolvedIpfsPath != initialIpfsPath) { throw Error( - "The IPNS hash or DNS link %s resolves now to something different from the value it had when Nix was started:\n" + "The IPNS hash or DNS link %s resolves to something different from the value it had when Nix was started:\n" " expected: %s\n" " got %s\n" "\n" "Perhaps something else updated it in the meantime?", - ipnsPath, initialIpfsPath, resolvedIpfsPath); + *ipnsPath, initialIpfsPath, resolvedIpfsPath); } if (resolvedIpfsPath == state->ipfsPath) { @@ -186,14 +188,14 @@ void IPFSBinaryCacheStore::sync() } // Now, we know that paths are not up to date but also not changed due to updates in DNS or IPNS hash. 
- auto optDomain = isDNSLinkPath(ipnsPath); + auto optDomain = isDNSLinkPath(*ipnsPath); if (optDomain) { auto domain = *optDomain; throw Error("The provided ipns path is a DNSLink, and syncing those is not supported.\n Current DNSLink: %s\nYou should update your DNS settings" , domain); } - debug("Publishing '%s' to '%s', this could take a while.", state->ipfsPath, ipnsPath); + debug("Publishing '%s' to '%s', this could take a while.", state->ipfsPath, *ipnsPath); auto uri = daemonUri + "/api/v0/name/publish?allow-offline=true"; uri += "&arg=" + getFileTransfer()->urlEncode(state->ipfsPath); @@ -205,7 +207,7 @@ void IPFSBinaryCacheStore::sync() // NOTE: this is needed for ipfs < 0.5.0 because key must be a // name, not an address. - auto ipnsPathHash = std::string(ipnsPath, 6); + auto ipnsPathHash = std::string(*ipnsPath, 6); debug("Getting the name corresponding to hash %s", ipnsPathHash); auto keyListRequest = FileTransferRequest(daemonUri + "/api/v0/key/list/"); @@ -404,6 +406,9 @@ void IPFSBinaryCacheStore::addToStore(const ValidPathInfo & info, Source & narSo if (!repair && isValidPath(info.path)) return; + if (!allowModify) + throw Error("can't update '%s'", cacheUri); + /* Verify that all references are valid. This may do some .narinfo reads, but typically they'll already be cached. */ for (auto & ref : info.references) @@ -600,6 +605,9 @@ StorePath IPFSBinaryCacheStore::addTextToStore(const string & name, const string void IPFSBinaryCacheStore::addSignatures(const StorePath & storePath, const StringSet & sigs) { + if (!allowModify) + throw Error("can't update '%s'", cacheUri); + /* Note: this is inherently racy since there is no locking on binary caches. 
In particular, with S3 this unreliable, even when addSignatures() is called sequentially on a path, because diff --git a/src/libstore/ipfs-binary-cache-store.hh b/src/libstore/ipfs-binary-cache-store.hh index 8a3502ea307..dcb7ad0ac25 100644 --- a/src/libstore/ipfs-binary-cache-store.hh +++ b/src/libstore/ipfs-binary-cache-store.hh @@ -16,8 +16,14 @@ public: const Setting parallelCompression{this, false, "parallel-compression", "enable multi-threading compression, available for xz only currently"}; + // FIXME: merge with allowModify bool + const Setting _allowModify{this, false, "allow-modify", + "allow Nix to update IPFS/IPNS address when appropriate"}; + private: + bool allowModify; + std::unique_ptr secretKey; std::string narMagic; @@ -29,7 +35,7 @@ private: return state->ipfsPath; } std::string initialIpfsPath; - std::optional optIpnsPath; + std::optional ipnsPath; struct State { diff --git a/tests/ipfs.sh b/tests/ipfs.sh index 673ed060a38..a544c2f6703 100644 --- a/tests/ipfs.sh +++ b/tests/ipfs.sh @@ -54,11 +54,11 @@ touch $TEST_FILE # We try to do the evaluation with a known wrong hash to get the suggestion for # the correct one -! 
CORRECT_ADDRESS=$(nix eval '(builtins.fetchurl 'file://$PWD/$TEST_FILE')' --store ipfs://$EMPTY_HASH |& \ - grep 'modified:' | awk '{print $2}') +CORRECT_ADDRESS=$(nix eval --raw '(builtins.fetchurl 'file://$PWD/$TEST_FILE')' --store ipfs://$EMPTY_HASH?allow-modify=true |& \ + grep '^warning: created new store' | sed "s/^warning: created new store at '\(.*\)', .*$/\1/") # Then we eval and get back the hash-name part of the store path -RESULT=$(nix eval '(builtins.fetchurl 'file://$PWD/$TEST_FILE')' --store $CORRECT_ADDRESS --json \ +RESULT=$(nix eval --raw '(builtins.fetchurl 'file://$PWD/$TEST_FILE')' --store $CORRECT_ADDRESS --json \ | jq -r | awk -F/ '{print $NF}') # Finally, we ask back the info from IPFS (formatting the address the right way @@ -117,10 +117,8 @@ nix-build ./fixed.nix -A good \ ################################################################################ # Try to upload the content to the empty directory, fail but grab the right hash -IPFS_ADDRESS=$(set -e; \ - set -o pipefail; \ - ! nix copy --to ipfs://$EMPTY_HASH $(nix-build ./fixed.nix -A good --no-out-link) --experimental-features nix-command \ - |& grep modified: | awk '{print $2}') +IPFS_ADDRESS=$(nix copy --to ipfs://$EMPTY_HASH?allow-modify=true $(nix-build ./fixed.nix -A good --no-out-link) --experimental-features nix-command |& \ + grep '^warning: created new store' | sed "s/^warning: created new store at '\(.*\)', .*$/\1/") # Verify that new path is valid. 
nix copy --to $IPFS_ADDRESS $(nix-build ./fixed.nix -A good --no-out-link) --experimental-features nix-command From 9d223c10fca060ea87d4a063b550c97f8f2ba61b Mon Sep 17 00:00:00 2001 From: Carlo Nucera Date: Mon, 13 Jul 2020 13:11:38 -0400 Subject: [PATCH 101/104] Change to assertion, better errors, new test --- src/libstore/ipfs-binary-cache-store.cc | 9 ++++++--- tests/ipfs.sh | 12 +++++++++--- 2 files changed, 15 insertions(+), 6 deletions(-) diff --git a/src/libstore/ipfs-binary-cache-store.cc b/src/libstore/ipfs-binary-cache-store.cc index 8d672606840..44f6c08addf 100644 --- a/src/libstore/ipfs-binary-cache-store.cc +++ b/src/libstore/ipfs-binary-cache-store.cc @@ -163,11 +163,14 @@ void IPFSBinaryCacheStore::sync() if (state->ipfsPath == initialIpfsPath) return; - if (!allowModify) - throw Error("can't update '%s' to '%s'", cacheUri, state->ipfsPath); + // If we aren't in trustless mode (handled above) and we don't allow + // modifications, state->ipfsPath should never be changed from the initial + // one, + assert(allowModify); if (!ipnsPath) { - warn("created new store at '%s', but can't update store at '%s'", "ipfs://" + std::string(state->ipfsPath, 6), cacheUri); + warn("created new store at '%s'. The old store at %s is immutable, so we can't update it", + "ipfs://" + std::string(state->ipfsPath, 6), cacheUri); return; } diff --git a/tests/ipfs.sh b/tests/ipfs.sh index a544c2f6703..0242ff2d6e5 100644 --- a/tests/ipfs.sh +++ b/tests/ipfs.sh @@ -55,10 +55,10 @@ touch $TEST_FILE # We try to do the evaluation with a known wrong hash to get the suggestion for # the correct one CORRECT_ADDRESS=$(nix eval --raw '(builtins.fetchurl 'file://$PWD/$TEST_FILE')' --store ipfs://$EMPTY_HASH?allow-modify=true |& \ - grep '^warning: created new store' | sed "s/^warning: created new store at '\(.*\)', .*$/\1/") + grep '^warning: created new store' | sed "s/^warning: created new store at '\(.*\)'\. 
.*$/\1/") # Then we eval and get back the hash-name part of the store path -RESULT=$(nix eval --raw '(builtins.fetchurl 'file://$PWD/$TEST_FILE')' --store $CORRECT_ADDRESS --json \ +RESULT=$(nix eval '(builtins.fetchurl 'file://$PWD/$TEST_FILE')' --store $CORRECT_ADDRESS --json \ | jq -r | awk -F/ '{print $NF}') # Finally, we ask back the info from IPFS (formatting the address the right way @@ -117,8 +117,14 @@ nix-build ./fixed.nix -A good \ ################################################################################ # Try to upload the content to the empty directory, fail but grab the right hash +# HERE do the same thing but expect failure IPFS_ADDRESS=$(nix copy --to ipfs://$EMPTY_HASH?allow-modify=true $(nix-build ./fixed.nix -A good --no-out-link) --experimental-features nix-command |& \ - grep '^warning: created new store' | sed "s/^warning: created new store at '\(.*\)', .*$/\1/") + grep '^warning: created new store' | sed "s/^warning: created new store at '\(.*\)'\. .*$/\1/") + +# We want to check that the `allow-modify` flag is required for the command to +# succeed. This is an invocation of the same command without that flag that we +# expect to fail +! nix copy --to ipfs://$EMPTY_HASH $(nix-build ./fixed.nix -A good --no-out-link) --experimental-features nix-command # Verify that new path is valid. 
nix copy --to $IPFS_ADDRESS $(nix-build ./fixed.nix -A good --no-out-link) --experimental-features nix-command From cde9c15d17ee5e38f5fcd55e97573ae7e6a4ebb4 Mon Sep 17 00:00:00 2001 From: John Ericson Date: Tue, 22 Sep 2020 03:38:31 +0000 Subject: [PATCH 102/104] WIP: Get IPFS store working with StoreConfig refactor --- src/libstore/ipfs-binary-cache-store.cc | 48 +++++++++++-------------- src/libstore/ipfs-binary-cache-store.hh | 23 ++++++++---- 2 files changed, 38 insertions(+), 33 deletions(-) diff --git a/src/libstore/ipfs-binary-cache-store.cc b/src/libstore/ipfs-binary-cache-store.cc index 8d65ca242bc..626dfdd938e 100644 --- a/src/libstore/ipfs-binary-cache-store.cc +++ b/src/libstore/ipfs-binary-cache-store.cc @@ -7,13 +7,20 @@ #include "archive.hh" #include "compression.hh" #include "names.hh" +#include "callback.hh" namespace nix { +IPFSBinaryCacheStore::IPFSBinaryCacheStore(const Params & params) + : IPFSBinaryCacheStore("ipfs", "", params) +{ } + IPFSBinaryCacheStore::IPFSBinaryCacheStore( - const Params & params, const Path & _cacheUri) + const std::string & scheme, const std::string & uri, const Params & params) : Store(params) - , cacheUri(_cacheUri) + , IPFSBinaryCacheStoreConfig(params) + , cacheScheme(scheme) + , cacheUri(uri) { auto state(_state.lock()); @@ -27,22 +34,18 @@ IPFSBinaryCacheStore::IPFSBinaryCacheStore( if (cacheUri.back() == '/') cacheUri.pop_back(); - if (hasPrefix(cacheUri, "ipfs://")) { - if (cacheUri == "ipfs://") { - allowModify = true; - } else { - initialIpfsPath = "/ipfs/" + std::string(cacheUri, 7); - state->ipfsPath = initialIpfsPath; - allowModify = get(params, "allow-modify").value_or("") == "true"; - } - } else if (hasPrefix(cacheUri, "ipns://")) { - ipnsPath = "/ipns/" + std::string(cacheUri, 7); + if (cacheScheme == "ipfs") { + initialIpfsPath = "/ipfs/" + cacheUri; + state->ipfsPath = initialIpfsPath; + allowModify = get(params, "allow-modify").value_or("") == "true"; + } else if (cacheScheme == "ipns") { + ipnsPath 
= "/ipns/" + cacheUri; // TODO: we should try to determine if we are able to modify // this ipns allowModify = true; } else - throw Error("unknown IPNS URI '%s'", cacheUri); + throw Error("unknown IPNS URI '%s'", getUri()); std::string ipfsAPIHost(get(params, "host").value_or("127.0.0.1")); std::string ipfsAPIPort(get(params, "port").value_or("5001")); @@ -173,7 +176,7 @@ void IPFSBinaryCacheStore::sync() if (!ipnsPath) { warn("created new store at '%s'. The old store at %s is immutable, so we can't update it", - "ipfs://" + std::string(state->ipfsPath, 6), cacheUri); + "ipfs://" + std::string(state->ipfsPath, 6), getUri()); return; } @@ -276,7 +279,7 @@ void IPFSBinaryCacheStore::upsertFile(const std::string & path, const std::strin addLink(path, "/ipfs/" + addFile(data)); } catch (FileTransferError & e) { // TODO: may wrap the inner error in a better way. - throw UploadToIPFS("while uploading to IPFS binary cache at '%s': %s", cacheUri, e.msg()); + throw UploadToIPFS("while uploading to IPFS binary cache at '%s': %s", getUri(), e.msg()); } } @@ -413,7 +416,7 @@ void IPFSBinaryCacheStore::addToStore(const ValidPathInfo & info, Source & narSo if (!repair && isValidPath(info.path)) return; if (!allowModify) - throw Error("can't update '%s'", cacheUri); + throw Error("can't update '%s'", getUri()); /* Verify that all references are valid. This may do some .narinfo reads, but typically they'll already be cached. */ @@ -624,7 +627,7 @@ StorePath IPFSBinaryCacheStore::addTextToStore(const string & name, const string void IPFSBinaryCacheStore::addSignatures(const StorePath & storePath, const StringSet & sigs) { if (!allowModify) - throw Error("can't update '%s'", cacheUri); + throw Error("can't update '%s'", getUri()); /* Note: this is inherently racy since there is no locking on binary caches. 
In particular, with S3 this unreliable, even @@ -651,15 +654,6 @@ void IPFSBinaryCacheStore::addTempRoot(const StorePath & path) getFileTransfer()->upload(request); } -static RegisterStoreImplementation regStore([]( - const std::string & uri, const Store::Params & params) - -> std::shared_ptr -{ - if (uri.substr(0, strlen("ipfs://")) != "ipfs://" && - uri.substr(0, strlen("ipns://")) != "ipns://") - return 0; - auto store = std::make_shared(params, uri); - return store; -}); +static RegisterStoreImplementation regStore; } diff --git a/src/libstore/ipfs-binary-cache-store.hh b/src/libstore/ipfs-binary-cache-store.hh index dcb7ad0ac25..1239d21cb13 100644 --- a/src/libstore/ipfs-binary-cache-store.hh +++ b/src/libstore/ipfs-binary-cache-store.hh @@ -6,10 +6,9 @@ namespace nix { MakeError(UploadToIPFS, Error); -class IPFSBinaryCacheStore : public Store +struct IPFSBinaryCacheStoreConfig : virtual StoreConfig { - -public: + using StoreConfig::StoreConfig; const Setting compression{this, "xz", "compression", "NAR compression method ('xz', 'bzip2', or 'none')"}; const Setting secretKeyFile{this, "", "secret-key", "path to secret key used to sign the binary cache"}; @@ -20,13 +19,20 @@ public: const Setting _allowModify{this, false, "allow-modify", "allow Nix to update IPFS/IPNS address when appropriate"}; -private: + const std::string name() override { return "IPFS Store"; } +}; + +class IPFSBinaryCacheStore : public virtual Store, public virtual IPFSBinaryCacheStoreConfig +{ + +public: bool allowModify; std::unique_ptr secretKey; std::string narMagic; + std::string cacheScheme; std::string cacheUri; std::string daemonUri; @@ -45,13 +51,18 @@ private: public: - IPFSBinaryCacheStore(const Params & params, const Path & _cacheUri); + IPFSBinaryCacheStore(const Params & params); + + IPFSBinaryCacheStore(const std::string & scheme, const std::string & uri, const Params & params); std::string getUri() override { - return cacheUri; + return cacheScheme + "://" + cacheUri; } + 
static std::set uriSchemes() + { return {"ipfs", "ipns"}; } + private: std::string putIpfsDag(nlohmann::json data); From d868b5880b773815a141cc09017dc81b5489afe3 Mon Sep 17 00:00:00 2001 From: John Ericson Date: Wed, 23 Sep 2020 20:47:30 +0000 Subject: [PATCH 103/104] Fix IPFS store constructors --- src/libstore/ipfs-binary-cache-store.cc | 8 ++------ src/libstore/ipfs-binary-cache-store.hh | 10 ++++------ 2 files changed, 6 insertions(+), 12 deletions(-) diff --git a/src/libstore/ipfs-binary-cache-store.cc b/src/libstore/ipfs-binary-cache-store.cc index 626dfdd938e..12048a9ea4c 100644 --- a/src/libstore/ipfs-binary-cache-store.cc +++ b/src/libstore/ipfs-binary-cache-store.cc @@ -11,14 +11,10 @@ namespace nix { -IPFSBinaryCacheStore::IPFSBinaryCacheStore(const Params & params) - : IPFSBinaryCacheStore("ipfs", "", params) -{ } - IPFSBinaryCacheStore::IPFSBinaryCacheStore( const std::string & scheme, const std::string & uri, const Params & params) - : Store(params) - , IPFSBinaryCacheStoreConfig(params) + : StoreConfig(params) + , Store(params) , cacheScheme(scheme) , cacheUri(uri) { diff --git a/src/libstore/ipfs-binary-cache-store.hh b/src/libstore/ipfs-binary-cache-store.hh index 1239d21cb13..ec589e8e998 100644 --- a/src/libstore/ipfs-binary-cache-store.hh +++ b/src/libstore/ipfs-binary-cache-store.hh @@ -10,13 +10,13 @@ struct IPFSBinaryCacheStoreConfig : virtual StoreConfig { using StoreConfig::StoreConfig; - const Setting compression{this, "xz", "compression", "NAR compression method ('xz', 'bzip2', or 'none')"}; - const Setting secretKeyFile{this, "", "secret-key", "path to secret key used to sign the binary cache"}; - const Setting parallelCompression{this, false, "parallel-compression", + const Setting compression{(StoreConfig *)this, "xz", "compression", "NAR compression method ('xz', 'bzip2', or 'none')"}; + const Setting secretKeyFile{(StoreConfig *)this, "", "secret-key", "path to secret key used to sign the binary cache"}; + const Setting 
parallelCompression{(StoreConfig *)this, false, "parallel-compression", "enable multi-threading compression, available for xz only currently"}; // FIXME: merge with allowModify bool - const Setting _allowModify{this, false, "allow-modify", + const Setting _allowModify{(StoreConfig *)this, false, "allow-modify", "allow Nix to update IPFS/IPNS address when appropriate"}; const std::string name() override { return "IPFS Store"; } @@ -51,8 +51,6 @@ public: public: - IPFSBinaryCacheStore(const Params & params); - IPFSBinaryCacheStore(const std::string & scheme, const std::string & uri, const Params & params); std::string getUri() override From b3008a2b4ae8a4fe2ba48d2d80d0927ec8f56a9b Mon Sep 17 00:00:00 2001 From: John Ericson Date: Wed, 23 Sep 2020 20:48:08 +0000 Subject: [PATCH 104/104] Improve IPFS test a bit --- tests/ipfs.sh | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/tests/ipfs.sh b/tests/ipfs.sh index 17d60b9a7ef..9526bf28aff 100644 --- a/tests/ipfs.sh +++ b/tests/ipfs.sh @@ -1,5 +1,7 @@ source common.sh +set -o pipefail + # This are for ./fixed.nix export IMPURE_VAR1=foo export IMPURE_VAR2=bar @@ -55,12 +57,12 @@ touch $TEST_FILE # We try to do the evaluation with a known wrong hash to get the suggestion for # the correct one CORRECT_ADDRESS=$( \ - nix eval --raw --expr "builtins.fetchurl \"file://$PWD/$TEST_FILE\"" --store ipfs://$EMPTY_HASH?allow-modify=true --impure \ + nix eval --raw --impure --expr "builtins.fetchurl \"file://$PWD/$TEST_FILE\"" --store ipfs://$EMPTY_HASH?allow-modify=true \ |& grep '^warning: created new store' \ | sed "s/^warning: created new store at '\(.*\)'\. 
.*$/\1/") # Then we eval and get back the hash-name part of the store path -RESULT=$(nix eval '(builtins.fetchurl 'file://$PWD/$TEST_FILE')' --store "$CORRECT_ADDRESS" --json \ +RESULT=$(nix eval --json --impure --expr '(builtins.fetchurl 'file://$PWD/$TEST_FILE')' --store "$CORRECT_ADDRESS" \ | jq -r | awk -F/ '{print $NF}') # Finally, we ask back the info from IPFS (formatting the address the right way @@ -126,7 +128,7 @@ IPFS_ADDRESS=$(nix copy --to ipfs://$EMPTY_HASH?allow-modify=true $(nix-build ./ # We want to check that the `allow-modify` flag is required for the command to # succeed. This is an invocation of the same command without that flag that we # expect to fail -! nix copy --to ipfs://$EMPTY_HASH $(nix-build ./fixed.nix -A good --no-out-link) --experimental-features nix-command +(! nix copy --to ipfs://$EMPTY_HASH $(nix-build ./fixed.nix -A good --no-out-link) --experimental-features nix-command) # Verify that new path is valid. nix copy --to $IPFS_ADDRESS $(nix-build ./fixed.nix -A good --no-out-link) --experimental-features nix-command