diff --git a/gvfs-helper-client.c b/gvfs-helper-client.c index fab06874d687fc..02da53f0845a0c 100644 --- a/gvfs-helper-client.c +++ b/gvfs-helper-client.c @@ -13,7 +13,6 @@ static struct oidset gh_client__oidset_queued = OIDSET_INIT; static unsigned long gh_client__oidset_count; -static int gh_client__includes_immediate; struct gh_server__process { struct subprocess_entry subprocess; /* must be first */ @@ -24,13 +23,20 @@ static int gh_server__subprocess_map_initialized; static struct hashmap gh_server__subprocess_map; static struct object_directory *gh_client__chosen_odb; -#define CAP_GET (1u<<1) +/* + * The "objects" capability has 2 verbs: "get" and "post". + */ +#define CAP_OBJECTS (1u<<1) +#define CAP_OBJECTS_NAME "objects" + +#define CAP_OBJECTS__VERB_GET1_NAME "get" +#define CAP_OBJECTS__VERB_POST_NAME "post" static int gh_client__start_fn(struct subprocess_entry *subprocess) { static int versions[] = {1, 0}; static struct subprocess_capability capabilities[] = { - { "get", CAP_GET }, + { CAP_OBJECTS_NAME, CAP_OBJECTS }, { NULL, 0 } }; @@ -42,14 +48,16 @@ static int gh_client__start_fn(struct subprocess_entry *subprocess) } /* - * Send: + * Send the queued OIDs in the OIDSET to gvfs-helper for it to + * fetch from the cache-server or main Git server using "/gvfs/objects" + * POST semantics. * - * get LF + * objects.post LF * ( LF)* * * */ -static int gh_client__get__send_command(struct child_process *process) +static int gh_client__send__objects_post(struct child_process *process) { struct oidset_iter iter; struct object_id *oid; @@ -60,7 +68,9 @@ static int gh_client__get__send_command(struct child_process *process) * so that we don't have to. */ - err = packet_write_fmt_gently(process->in, "get\n"); + err = packet_write_fmt_gently( + process->in, + (CAP_OBJECTS_NAME "." CAP_OBJECTS__VERB_POST_NAME "\n")); if (err) return err; @@ -79,6 +89,46 @@ static int gh_client__get__send_command(struct child_process *process) return 0; } +/* + * Send the given OID to gvfs-helper for it to fetch from the + * cache-server or main Git server using "/gvfs/objects" GET + * semantics. + * + * This ignores any queued OIDs. + * + * objects.get LF + * LF + * + * + */ +static int gh_client__send__objects_get(struct child_process *process, + const struct object_id *oid) +{ + int err; + + /* + * We assume that all of the packet_ routines call error() + * so that we don't have to. + */ + + err = packet_write_fmt_gently( + process->in, + (CAP_OBJECTS_NAME "." CAP_OBJECTS__VERB_GET1_NAME "\n")); + if (err) + return err; + + err = packet_write_fmt_gently(process->in, "%s\n", + oid_to_hex(oid)); + if (err) + return err; + + err = packet_flush_gently(process->in); + if (err) + return err; + + return 0; +} + /* * Update the loose object cache to include the newly created * object. @@ -126,7 +176,7 @@ static void gh_client__update_packed_git(const char *line) } /* - * We expect: + * Both CAP_OBJECTS verbs return the same format response: * * * * @@ -157,7 +207,7 @@ static void gh_client__update_packed_git(const char *line) * grouped with a queued request for a blob. The tree-walk *might* be * able to continue and let the 404 blob be handled later. 
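/*
 * [Editorial sketch; not part of the patch.]  The per-object response
 * lines described above ("loose <oid>" or "packfile <filename>", followed
 * by a status line and a flush packet) can be consumed with a simple
 * prefix dispatch.  This hypothetical helper only counts them, assuming
 * the usual gvfs-helper-client.c includes; the real parsing is done by
 * gh_client__objects__receive_response(), whose renamed signature follows.
 */
static void gh_client__count_result_lines_sketch(int fd_from_helper,
						 int *p_nr_loose,
						 int *p_nr_packfile)
{
	char *line;
	int len;

	while (1) {
		/* same read idiom as the server-side loop in gvfs-helper.c */
		len = packet_read_line_gently(fd_from_helper, NULL, &line);
		if (len < 0 || !line)
			break; /* flush packet (or EOF) ends the response */

		if (starts_with(line, "loose "))
			(*p_nr_loose)++;
		else if (starts_with(line, "packfile "))
			(*p_nr_packfile)++;
	}
}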
*/ -static int gh_client__get__receive_response( +static int gh_client__objects__receive_response( struct child_process *process, enum gh_client__created *p_ghc, int *p_nr_loose, int *p_nr_packfile) @@ -236,17 +286,12 @@ static void gh_client__choose_odb(void) } } -static int gh_client__get(enum gh_client__created *p_ghc) +static struct gh_server__process *gh_client__find_long_running_process( + unsigned int cap_needed) { struct gh_server__process *entry; - struct child_process *process; struct strvec argv = STRVEC_INIT; struct strbuf quoted = STRBUF_INIT; - int nr_loose = 0; - int nr_packfile = 0; - int err = 0; - - trace2_region_enter("gh-client", "get", the_repository); gh_client__choose_odb(); @@ -262,6 +307,11 @@ static int gh_client__get(enum gh_client__created *p_ghc) sq_quote_argv_pretty("ed, argv.v); + /* + * Find an existing long-running process with the above command + * line -or- create a new long-running process for this and + * subsequent 'get' requests. + */ if (!gh_server__subprocess_map_initialized) { gh_server__subprocess_map_initialized = 1; hashmap_init(&gh_server__subprocess_map, @@ -275,70 +325,24 @@ static int gh_client__get(enum gh_client__created *p_ghc) entry = xmalloc(sizeof(*entry)); entry->supported_capabilities = 0; - err = subprocess_start_strvec( - &gh_server__subprocess_map, &entry->subprocess, 1, - &argv, gh_client__start_fn); - if (err) { - free(entry); - goto leave_region; - } + if (subprocess_start_strvec(&gh_server__subprocess_map, + &entry->subprocess, 1, + &argv, gh_client__start_fn)) + FREE_AND_NULL(entry); } - process = &entry->subprocess.process; - - if (!(CAP_GET & entry->supported_capabilities)) { - error("gvfs-helper: does not support GET"); - subprocess_stop(&gh_server__subprocess_map, - (struct subprocess_entry *)entry); - free(entry); - err = -1; - goto leave_region; - } - - sigchain_push(SIGPIPE, SIG_IGN); - - err = gh_client__get__send_command(process); - if (!err) - err = gh_client__get__receive_response(process, p_ghc, - &nr_loose, &nr_packfile); - - sigchain_pop(SIGPIPE); - - if (err) { + if (entry && + (entry->supported_capabilities & cap_needed) != cap_needed) { + error("gvfs-helper: does not support needed capabilities"); subprocess_stop(&gh_server__subprocess_map, (struct subprocess_entry *)entry); - free(entry); + FREE_AND_NULL(entry); } -leave_region: strvec_clear(&argv); strbuf_release("ed); - trace2_data_intmax("gh-client", the_repository, - "get/immediate", gh_client__includes_immediate); - - trace2_data_intmax("gh-client", the_repository, - "get/nr_objects", gh_client__oidset_count); - - if (nr_loose) - trace2_data_intmax("gh-client", the_repository, - "get/nr_loose", nr_loose); - - if (nr_packfile) - trace2_data_intmax("gh-client", the_repository, - "get/nr_packfile", nr_packfile); - - if (err) - trace2_data_intmax("gh-client", the_repository, - "get/error", err); - - trace2_region_leave("gh-client", "get", the_repository); - - oidset_clear(&gh_client__oidset_queued); - gh_client__oidset_count = 0; - gh_client__includes_immediate = 0; - - return err; + return entry; } void gh_client__queue_oid(const struct object_id *oid) @@ -365,27 +369,97 @@ void gh_client__queue_oid_array(const struct object_id *oids, int oid_nr) gh_client__queue_oid(&oids[k]); } +/* + * Bulk fetch all of the queued OIDs in the OIDSET. 
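/*
 * [Editorial sketch; not part of the patch.]  How a caller is expected to
 * use the two paths implemented below: batch OIDs and drain them with a
 * single "objects.post", or demand-fetch one OID with "objects.get".
 * The function and its arguments are hypothetical; only the gh_client__*
 * calls are real.
 */
static int fetch_some_objects_sketch(const struct object_id *batch,
				     int nr_batch,
				     const struct object_id *needed_now)
{
	enum gh_client__created ghc = GHC__CREATED__NOTHING;
	int err;

	/* Bulk path: queue everything, then issue one objects.post. */
	gh_client__queue_oid_array(batch, nr_batch);
	err = gh_client__drain_queue(&ghc);
	if (err)
		return err;

	/* Immediate path: one objects.get; anything still queued is ignored. */
	return gh_client__get_immediate(needed_now, &ghc);
}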
+ */ int gh_client__drain_queue(enum gh_client__created *p_ghc) { + struct gh_server__process *entry; + struct child_process *process; + int nr_loose = 0; + int nr_packfile = 0; + int err = 0; + *p_ghc = GHC__CREATED__NOTHING; if (!gh_client__oidset_count) return 0; - return gh_client__get(p_ghc); + entry = gh_client__find_long_running_process(CAP_OBJECTS); + if (!entry) + return -1; + + trace2_region_enter("gh-client", "objects/post", the_repository); + + process = &entry->subprocess.process; + + sigchain_push(SIGPIPE, SIG_IGN); + + err = gh_client__send__objects_post(process); + if (!err) + err = gh_client__objects__receive_response( + process, p_ghc, &nr_loose, &nr_packfile); + + sigchain_pop(SIGPIPE); + + if (err) { + subprocess_stop(&gh_server__subprocess_map, + (struct subprocess_entry *)entry); + FREE_AND_NULL(entry); + } + + trace2_data_intmax("gh-client", the_repository, + "objects/post/nr_objects", gh_client__oidset_count); + trace2_region_leave("gh-client", "objects/post", the_repository); + + oidset_clear(&gh_client__oidset_queued); + gh_client__oidset_count = 0; + + return err; } + +/* + * Get exactly 1 object immediately. + * Ignore any queued objects. + */ int gh_client__get_immediate(const struct object_id *oid, enum gh_client__created *p_ghc) { - gh_client__includes_immediate = 1; + struct gh_server__process *entry; + struct child_process *process; + int nr_loose = 0; + int nr_packfile = 0; + int err = 0; // TODO consider removing this trace2. it is useful for interactive // TODO debugging, but may generate way too much noise for a data // TODO event. trace2_printf("gh_client__get_immediate: %s", oid_to_hex(oid)); - if (!oidset_insert(&gh_client__oidset_queued, oid)) - gh_client__oidset_count++; + entry = gh_client__find_long_running_process(CAP_OBJECTS); + if (!entry) + return -1; + + trace2_region_enter("gh-client", "objects/get", the_repository); - return gh_client__drain_queue(p_ghc); + process = &entry->subprocess.process; + + sigchain_push(SIGPIPE, SIG_IGN); + + err = gh_client__send__objects_get(process, oid); + if (!err) + err = gh_client__objects__receive_response( + process, p_ghc, &nr_loose, &nr_packfile); + + sigchain_pop(SIGPIPE); + + if (err) { + subprocess_stop(&gh_server__subprocess_map, + (struct subprocess_entry *)entry); + FREE_AND_NULL(entry); + } + + trace2_region_leave("gh-client", "objects/get", the_repository); + + return err; } diff --git a/gvfs-helper.c b/gvfs-helper.c index f67c40a0a59aa8..8461adc2a2bd5e 100644 --- a/gvfs-helper.c +++ b/gvfs-helper.c @@ -46,7 +46,28 @@ // // get // -// Fetch 1 or more objects. If a cache-server is configured, +// Fetch 1 or more objects one at a time using a "/gvfs/objects" +// GET request. +// +// If a cache-server is configured, +// try it first. Optionally fallback to the main Git server. +// +// The set of objects is given on stdin and is assumed to be +// a list of , one per line. +// +// : +// +// --max-retries= // defaults to "6" +// +// Number of retries after transient network errors. +// Set to zero to disable such retries. +// +// post +// +// Fetch 1 or more objects in bulk using a "/gvfs/objects" POST +// request. +// +// If a cache-server is configured, // try it first. Optionally fallback to the main Git server. // // The set of objects is given on stdin and is assumed to be @@ -78,7 +99,8 @@ // --block-size= // defaults to "4000" // // Request objects from server in batches of at -// most n objects (not bytes). +// most n objects (not bytes) when using POST +// requests. 
// // --depth= // defaults to "1" // @@ -87,17 +109,27 @@ // Number of retries after transient network errors. // Set to zero to disable such retries. // -// Interactive verb: get +// Interactive verb: objects.get +// +// Fetch 1 or more objects, one at a time, using a +// "/gvfs/objects" GET requests. +// +// Each object will be created as a loose object in the ODB. +// +// Interactive verb: objects.post // -// Fetch 1 or more objects. If a cache-server is configured, -// try it first. Optionally fallback to the main Git server. +// Fetch 1 or more objects, in bulk, using one or more +// "/gvfs/objects" POST requests. +// +// For both verbs, if a cache-server is configured, try it first. +// Optionally fallback to the main Git server. // // Create 1 or more loose objects and/or packfiles in the // shared-cache ODB. (The pathname of the selected ODB is // reported at the beginning of the response; this should // match the pathname given on the command line). // -// git> get +// git> objects.get | objects.post // git> // git> // git> ... @@ -116,20 +148,6 @@ // [2] Documentation/technical/long-running-process-protocol.txt // [3] See GIT_TRACE_PACKET // -// Example: -// -// $ git -c core.virtualizeobjects=false -c core.usegvfshelper=false -// rev-list --objects --no-walk --missing=print HEAD -// | grep "^?" -// | sed 's/^?//' -// | git gvfs-helper get-missing -// -// Note: In this example, we need to turn off "core.virtualizeobjects" and -// "core.usegvfshelper" when building the list of objects. This prevents -// rev-list (in oid_object_info_extended() from automatically fetching -// them with read-object-hook or "gvfs-helper server" sub-process (and -// defeating the whole purpose of this example). -// ////////////////////////////////////////////////////////////////// #include "cache.h" @@ -162,15 +180,21 @@ static const char * const main_usage[] = { N_("git gvfs-helper [] config []"), N_("git gvfs-helper [] get []"), + N_("git gvfs-helper [] post []"), N_("git gvfs-helper [] server []"), NULL }; -static const char *const get_usage[] = { +static const char *const objects_get_usage[] = { N_("git gvfs-helper [] get []"), NULL }; +static const char *const objects_post_usage[] = { + N_("git gvfs-helper [] post []"), + NULL +}; + static const char *const server_usage[] = { N_("git gvfs-helper [] server []"), NULL @@ -179,12 +203,12 @@ static const char *const server_usage[] = { /* * "commitDepth" field in gvfs protocol */ -#define GH__DEFAULT_COMMIT_DEPTH 1 +#define GH__DEFAULT__OBJECTS_POST__COMMIT_DEPTH 1 /* * Chunk/block size in number of objects we request in each packfile */ -#define GH__DEFAULT_BLOCK_SIZE 4000 +#define GH__DEFAULT__OBJECTS_POST__BLOCK_SIZE 4000 /* * Retry attempts (after the initial request) for transient errors and 429s. @@ -278,6 +302,28 @@ static const char *gh__server_type_label[GH__SERVER_TYPE__NR] = { "(cs)" }; +enum gh__objects_mode { + /* + * Bulk fetch objects. + * + * But also, force the use of HTTP POST regardless of how many + * objects we are requesting. + * + * The GVFS Protocol treats requests for commit objects + * differently in GET and POST requests WRT whether it + * automatically also fetches the referenced trees. + */ + GH__OBJECTS_MODE__POST, + + /* + * Fetch objects one at a time using HTTP GET. + * + * Force the use of GET (primarily because of the commit + * object treatment). 
+ */ + GH__OBJECTS_MODE__GET, +}; + struct gh__azure_throttle { unsigned long tstu_limit; @@ -333,7 +379,20 @@ enum gh__progress_state { * Parameters to drive an HTTP request (with any necessary retries). */ struct gh__request_params { - int b_is_post; /* POST=1 or GET=0 */ + /* + * b_is_post indicates if the current HTTP request is a POST=1 or + * a GET=0. This is a lower level field used to setup CURL and + * the tempfile used to receive the content. + * + * It is related to, but different from the GH__OBJECTS_MODE__ + * field that we present to the gvfs-helper client or in the CLI + * (which only concerns the semantics of the /gvfs/objects protocol + * on the set of requested OIDs). + * + * For example, we use an HTTP GET to get the /gvfs/config data + * into a buffer. + */ + int b_is_post; int b_write_to_file; /* write to file=1 or strbuf=0 */ int b_permit_cache_server_if_defined; @@ -496,8 +555,6 @@ enum gh__retry_mode { struct gh__response_status { struct strbuf error_message; struct strbuf content_type; - long response_code; /* http response code */ - CURLcode curl_code; enum gh__error_code ec; enum gh__retry_mode retry; intmax_t bytes_received; @@ -507,8 +564,6 @@ struct gh__response_status { #define GH__RESPONSE_STATUS_INIT { \ .error_message = STRBUF_INIT, \ .content_type = STRBUF_INIT, \ - .response_code = 0, \ - .curl_code = CURLE_OK, \ .ec = GH__ERROR_CODE__OK, \ .retry = GH__RETRY_MODE__SUCCESS, \ .bytes_received = 0, \ @@ -519,8 +574,6 @@ static void gh__response_status__zero(struct gh__response_status *s) { strbuf_setlen(&s->error_message, 0); strbuf_setlen(&s->content_type, 0); - s->response_code = 0; - s->curl_code = CURLE_OK; s->ec = GH__ERROR_CODE__OK; s->retry = GH__RETRY_MODE__SUCCESS; s->bytes_received = 0; @@ -574,15 +627,14 @@ static void log_e2eid(struct gh__request_params *params, } /* - * Normalize a few error codes before we try to decide + * Normalize a few HTTP response codes before we try to decide * how to dispatch on them. */ -static void gh__response_status__normalize_odd_codes( - struct gh__request_params *params, - struct gh__response_status *status) +static long gh__normalize_odd_codes(struct gh__request_params *params, + long http_response_code) { if (params->server_type == GH__SERVER_TYPE__CACHE && - status->response_code == 400) { + http_response_code == 400) { /* * The cache-server sends a somewhat bogus 400 instead of * the normal 401 when AUTH is required. Fixup the status @@ -594,16 +646,18 @@ static void gh__response_status__normalize_odd_codes( * TODO 401 for now. We should confirm the expected * TODO error message in the response-body. */ - status->response_code = 401; + return 401; } - if (status->response_code == 203) { + if (http_response_code == 203) { /* * A proxy server transformed a 200 from the origin server * into a 203. We don't care about the subtle distinction. 
*/ - status->response_code = 200; + return 200; } + + return http_response_code; } /* @@ -613,9 +667,10 @@ static void gh__response_status__normalize_odd_codes( * https://docs.microsoft.com/en-us/azure/devops/integrate/concepts/rate-limits?view=azure-devops */ static void compute_retry_mode_from_http_response( - struct gh__response_status *status) + struct gh__response_status *status, + long http_response_code) { - switch (status->response_code) { + switch (http_response_code) { case 200: status->retry = GH__RETRY_MODE__SUCCESS; @@ -687,7 +742,7 @@ static void compute_retry_mode_from_http_response( hard_fail: strbuf_addf(&status->error_message, "(http:%d) Other [hard_fail]", - (int)status->response_code); + (int)http_response_code); status->retry = GH__RETRY_MODE__HARD_FAIL; status->ec = GH__ERROR_CODE__HTTP_OTHER; @@ -713,9 +768,10 @@ static void compute_retry_mode_from_http_response( * so I'm not going to fight that. */ static void compute_retry_mode_from_curl_error( - struct gh__response_status *status) + struct gh__response_status *status, + CURLcode curl_code) { - switch (status->curl_code) { + switch (curl_code) { case CURLE_OK: status->retry = GH__RETRY_MODE__SUCCESS; status->ec = GH__ERROR_CODE__OK; @@ -820,8 +876,7 @@ static void compute_retry_mode_from_curl_error( hard_fail: strbuf_addf(&status->error_message, "(curl:%d) %s [hard_fail]", - status->curl_code, - curl_easy_strerror(status->curl_code)); + curl_code, curl_easy_strerror(curl_code)); status->retry = GH__RETRY_MODE__HARD_FAIL; status->ec = GH__ERROR_CODE__CURL_ERROR; @@ -831,8 +886,7 @@ static void compute_retry_mode_from_curl_error( transient: strbuf_addf(&status->error_message, "(curl:%d) %s [transient]", - status->curl_code, - curl_easy_strerror(status->curl_code)); + curl_code, curl_easy_strerror(curl_code)); status->retry = GH__RETRY_MODE__TRANSIENT; status->ec = GH__ERROR_CODE__CURL_ERROR; @@ -851,26 +905,31 @@ static void gh__response_status__set_from_slot( struct gh__response_status *status, const struct active_request_slot *slot) { - status->curl_code = slot->results->curl_result; + long http_response_code; + CURLcode curl_code; + + curl_code = slot->results->curl_result; gh__curlinfo_strbuf(slot->curl, CURLINFO_CONTENT_TYPE, &status->content_type); curl_easy_getinfo(slot->curl, CURLINFO_RESPONSE_CODE, - &status->response_code); + &http_response_code); strbuf_setlen(&status->error_message, 0); - gh__response_status__normalize_odd_codes(params, status); + http_response_code = gh__normalize_odd_codes(params, + http_response_code); /* * Use normalized response/status codes form curl/http to decide * how to set the error-code we propagate *AND* to decide if we * we should retry because of transient network problems. 
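/*
 * [Editorial sketch; not part of the patch.]  With response_code and
 * curl_code no longer stored in gh__response_status, callers react to the
 * derived retry mode instead of raw codes.  A hypothetical caller,
 * following the pattern that do_req__to_main() and
 * do_req__to_cache_server() adopt later in this patch:
 */
static void react_to_status_sketch(struct gh__response_status *status)
{
	if (status->retry == GH__RETRY_MODE__HTTP_401) {
		/* was: status->response_code == 401 */
		refresh_main_creds();
	} else if (status->retry == GH__RETRY_MODE__SUCCESS) {
		/* was: status->response_code == 200 */
		approve_main_creds();
	}
}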
*/ - if (status->curl_code == CURLE_OK || - status->curl_code == CURLE_HTTP_RETURNED_ERROR) - compute_retry_mode_from_http_response(status); + if (curl_code == CURLE_OK || + curl_code == CURLE_HTTP_RETURNED_ERROR) + compute_retry_mode_from_http_response(status, + http_response_code); else - compute_retry_mode_from_curl_error(status); + compute_retry_mode_from_curl_error(status, curl_code); if (status->ec != GH__ERROR_CODE__OK) status->bytes_received = 0; @@ -1028,8 +1087,8 @@ static void gh__run_one_slot(struct active_request_slot *slot, trace2_region_enter("gvfs-helper", key.buf, NULL); if (!start_active_slot(slot)) { - status->curl_code = CURLE_FAILED_INIT; /* a bit of a lie */ - compute_retry_mode_from_curl_error(status); + compute_retry_mode_from_curl_error(status, + CURLE_FAILED_INIT); } else { run_active_slot(slot); if (params->b_write_to_file) @@ -1060,7 +1119,7 @@ static void gh__run_one_slot(struct active_request_slot *slot, stop_progress(¶ms->progress); if (status->ec == GH__ERROR_CODE__OK && params->b_write_to_file) { - if (params->b_is_post) + if (params->b_is_post && params->object_count > 1) install_packfile(params, status); else install_loose(params, status); @@ -1216,8 +1275,8 @@ static void lookup_main_url(void) trace2_data_string("gvfs-helper", NULL, "remote/url", gh__global.main_url); } -static void do__gvfs_config(struct gh__response_status *status, - struct strbuf *config_data); +static void do__http_get__gvfs_config(struct gh__response_status *status, + struct strbuf *config_data); /* * Find the URL of the cache-server, if we have one. @@ -1276,7 +1335,7 @@ static void select_cache_server(void) * well-known by the main Git server. */ - do__gvfs_config(&status, &config_data); + do__http_get__gvfs_config(&status, &config_data); if (status.ec == GH__ERROR_CODE__OK) { /* @@ -1334,13 +1393,10 @@ static void select_cache_server(void) * Read stdin until EOF (or a blank line) and add the desired OIDs * to the oidset. * - * Stdin should contain a list of OIDs. It may have additional - * decoration that we need to strip out. - * - * We expect: - * [] // present OIDs + * Stdin should contain a list of OIDs. Lines may have additional + * text following the OID that we ignore. */ -static unsigned long read_stdin_from_rev_list(struct oidset *oids) +static unsigned long read_stdin_for_oids(struct oidset *oids) { struct object_id oid; struct strbuf buf_stdin = STRBUF_INIT; @@ -1362,17 +1418,23 @@ static unsigned long read_stdin_from_rev_list(struct oidset *oids) /* * Build a complete JSON payload for a gvfs/objects POST request - * containing the first n OIDs in an OIDSET index by the iterator. + * containing the first `nr_in_block` OIDs found in the OIDSET + * indexed by the given iterator. * * https://github.com/microsoft/VFSForGit/blob/master/Protocol.md + * + * Return the number of OIDs we actually put into the payload. + * If only 1 OID was found, also return it. 
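/*
 * [Editorial sketch; not part of the patch.]  What the POST body built by
 * build_json_payload__gvfs_objects() boils down to, written with plain
 * strbuf calls instead of the json-writer API purely for illustration.
 * "commitDepth" is named in the #define comment earlier; "objectIds" is
 * assumed from the Protocol.md link above.
 */
static void format_objects_post_body_sketch(struct strbuf *body, int depth,
					    struct oidset_iter *iter,
					    unsigned long nr_in_block)
{
	const struct object_id *oid;
	unsigned long k = 0;

	strbuf_addf(body, "{\"commitDepth\":%d,\"objectIds\":[", depth);
	while (k < nr_in_block && (oid = oidset_iter_next(iter))) {
		strbuf_addf(body, "%s\"%s\"", k ? "," : "", oid_to_hex(oid));
		k++;
	}
	strbuf_addstr(body, "]}");
}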
*/ static unsigned long build_json_payload__gvfs_objects( struct json_writer *jw_req, struct oidset_iter *iter, - unsigned long nr_in_block) + unsigned long nr_in_block, + struct object_id *oid_out) { unsigned long k; const struct object_id *oid; + const struct object_id *oid_prev = NULL; k = 0; @@ -1383,10 +1445,18 @@ static unsigned long build_json_payload__gvfs_objects( while (k < nr_in_block && (oid = oidset_iter_next(iter))) { jw_array_string(jw_req, oid_to_hex(oid)); k++; + oid_prev = oid; } jw_end(jw_req); jw_end(jw_req); + if (oid_out) { + if (k == 1) + oidcpy(oid_out, oid_prev); + else + oidclr(oid_out); + } + return k; } @@ -1627,6 +1697,33 @@ static void create_tempfile_for_packfile( strbuf_release(&basename); } +/* + * Create a pathname to the loose object in the shared-cache ODB + * with the given OID. Try to "mkdir -p" to ensure the parent + * directories exist. + */ +static int create_loose_pathname_in_odb(struct strbuf *buf_path, + const struct object_id *oid) +{ + enum scld_error scld; + const char *hex; + + hex = oid_to_hex(oid); + + strbuf_setlen(buf_path, 0); + strbuf_addbuf(buf_path, &gh__global.buf_odb_path); + strbuf_complete(buf_path, '/'); + strbuf_add(buf_path, hex, 2); + strbuf_addch(buf_path, '/'); + strbuf_addstr(buf_path, hex+2); + + scld = safe_create_leading_directories(buf_path->buf); + if (scld != SCLD_OK && scld != SCLD_EXISTS) + return -1; + + return 0; +} + /* * Create a tempfile to stream a loose object into. * @@ -1641,19 +1738,10 @@ static void create_tempfile_for_loose( { static int nth = 0; struct strbuf buf_path = STRBUF_INIT; - const char *hex; gh__response_status__zero(status); - hex = oid_to_hex(¶ms->loose_oid); - - strbuf_addbuf(&buf_path, &gh__global.buf_odb_path); - strbuf_complete(&buf_path, '/'); - strbuf_add(&buf_path, hex, 2); - - if (!file_exists(buf_path.buf) && - mkdir(buf_path.buf, 0777) == -1 && - !file_exists(buf_path.buf)) { + if (create_loose_pathname_in_odb(&buf_path, ¶ms->loose_oid)) { strbuf_addf(&status->error_message, "cannot create directory for loose object '%s'", buf_path.buf); @@ -1661,9 +1749,6 @@ static void create_tempfile_for_loose( goto cleanup; } - strbuf_addch(&buf_path, '/'); - strbuf_addstr(&buf_path, hex+2); - /* Remember the full path of the final destination. */ strbuf_setlen(¶ms->loose_path, 0); strbuf_addbuf(¶ms->loose_path, &buf_path); @@ -1705,7 +1790,7 @@ static void install_packfile(struct gh__request_params *params, if (strcmp(status->content_type.buf, "application/x-git-packfile")) { strbuf_addf(&status->error_message, - "received unknown content-type '%s'", + "install_packfile: received unknown content-type '%s'", status->content_type.buf); status->ec = GH__ERROR_CODE__UNEXPECTED_CONTENT_TYPE; goto cleanup; @@ -1783,6 +1868,19 @@ static void install_loose(struct gh__request_params *params, { struct strbuf tmp_path = STRBUF_INIT; + /* + * We expect a loose object when we do a GET -or- when we + * do a POST with only 1 object. + */ + if (strcmp(status->content_type.buf, + "application/x-git-loose-object")) { + strbuf_addf(&status->error_message, + "install_loose: received unknown content-type '%s'", + status->content_type.buf); + status->ec = GH__ERROR_CODE__UNEXPECTED_CONTENT_TYPE; + return; + } + gh__response_status__zero(status); /* @@ -1879,10 +1977,8 @@ static size_t parse_resp_hdr(char *buffer, size_t size, size_t nitems, * The following X- headers are specific to AzureDevOps. * Other servers have similar sets of values, but I haven't * compared them in depth. - * - * TODO Remove this. 
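/*
 * [Editorial sketch; not part of the patch.]  The pathname layout that
 * create_loose_pathname_in_odb() produces: the first two hex digits of
 * the OID become a fan-out directory, the rest the filename, e.g.
 * "<odb>/01/23abcd..." (paths shown are hypothetical).  The real routine
 * also "mkdir -p"s the parent via safe_create_leading_directories().
 */
static void loose_path_example(struct strbuf *out, const char *odb_dir,
			       const struct object_id *oid)
{
	const char *hex = oid_to_hex(oid);

	strbuf_addf(out, "%s/%.2s/%s", odb_dir, hex, hex + 2);
}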
*/ - trace2_printf("Throttle: %s %s", key.buf, val.buf); + // trace2_printf("Throttle: %s %s", key.buf, val.buf); if (!strcmp(key.buf, "X-RateLimit-Resource")) { /* @@ -2182,10 +2278,11 @@ static void do_req(const char *url_base, if (params->tempfile) delete_tempfile(¶ms->tempfile); - if (params->b_is_post) + if (params->b_is_post && params->object_count > 1) create_tempfile_for_packfile(params, status); else create_tempfile_for_loose(params, status); + if (!params->tempfile || status->ec != GH__ERROR_CODE__OK) return; } else { @@ -2340,7 +2437,7 @@ static void do_req__to_main(const char *url_component, &gh__global.main_creds, params, status); - if (status->response_code == 401) { + if (status->retry == GH__RETRY_MODE__HTTP_401) { refresh_main_creds(); do_req__with_robust_retry(gh__global.main_url, url_component, @@ -2348,7 +2445,7 @@ static void do_req__to_main(const char *url_component, params, status); } - if (status->response_code == 200) + if (status->retry == GH__RETRY_MODE__SUCCESS) approve_main_creds(); } @@ -2368,7 +2465,7 @@ static void do_req__to_cache_server(const char *url_component, &gh__global.cache_creds, params, status); - if (status->response_code == 401) { + if (status->retry == GH__RETRY_MODE__HTTP_401) { refresh_cache_server_creds(); do_req__with_robust_retry(gh__global.cache_server_url, @@ -2377,7 +2474,7 @@ static void do_req__to_cache_server(const char *url_component, params, status); } - if (status->response_code == 200) + if (status->retry == GH__RETRY_MODE__SUCCESS) approve_cache_server_creds(); } @@ -2392,7 +2489,7 @@ static void do_req__with_fallback(const char *url_component, params->b_permit_cache_server_if_defined) { do_req__to_cache_server(url_component, params, status); - if (status->response_code == 200) + if (status->retry == GH__RETRY_MODE__SUCCESS) return; if (!gh__cmd_opts.try_fallback) @@ -2407,7 +2504,7 @@ static void do_req__with_fallback(const char *url_component, * Falling-back would likely just cause the 3rd (or maybe * 4th) cred prompt. */ - if (status->response_code == 401) + if (status->retry == GH__RETRY_MODE__HTTP_401) return; } @@ -2419,8 +2516,8 @@ static void do_req__with_fallback(const char *url_component, * * Return server's response buffer. This is probably a raw JSON string. */ -static void do__gvfs_config(struct gh__response_status *status, - struct strbuf *config_data) +static void do__http_get__gvfs_config(struct gh__response_status *status, + struct strbuf *config_data) { struct gh__request_params params = GH__REQUEST_PARAMS_INIT; @@ -2460,12 +2557,34 @@ static void do__gvfs_config(struct gh__response_status *status, gh__request_params__release(¶ms); } +static void setup_gvfs_objects_progress(struct gh__request_params *params, + unsigned long num, unsigned long den) +{ + if (!gh__cmd_opts.show_progress) + return; + + if (params->b_is_post && params->object_count > 1) { + strbuf_addf(¶ms->progress_base_phase2_msg, + "Requesting packfile %ld/%ld with %ld objects", + num, den, params->object_count); + strbuf_addf(¶ms->progress_base_phase3_msg, + "Receiving packfile %ld/%ld with %ld objects", + num, den, params->object_count); + } else { + strbuf_addf(¶ms->progress_base_phase3_msg, + "Receiving %ld/%ld loose object", + num, den); + } +} + /* * Call "gvfs/objects/" REST API to fetch a loose object * and write it to the ODB. 
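/*
 * [Editorial sketch; not part of the patch.]  The rule now shared by
 * gh__run_one_slot() and do_req(): only a POST naming more than one
 * object is received into a packfile tempfile; a GET, or a POST for
 * exactly one object, is received as a loose object.
 */
static int expect_packfile_sketch(const struct gh__request_params *params)
{
	return params->b_is_post && params->object_count > 1;
}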
*/ -static void do__loose__gvfs_object(struct gh__response_status *status, - const struct object_id *oid) +static void do__http_get__gvfs_object(struct gh__response_status *status, + const struct object_id *oid, + unsigned long l_num, unsigned long l_den, + struct string_list *result_list) { struct gh__request_params params = GH__REQUEST_PARAMS_INIT; struct strbuf component_url = STRBUF_INIT; @@ -2490,44 +2609,52 @@ static void do__loose__gvfs_object(struct gh__response_status *status, oidcpy(&params.loose_oid, oid); - if (gh__cmd_opts.show_progress) { - /* - * Likewise, a gvfs/objects/{oid} has a very small reqest - * payload, so I don't see any need to report progress on - * the upload side of the GET. So just report progress - * on the download side. - */ - strbuf_addstr(&params.progress_base_phase3_msg, - "Receiving 1 loose object"); - } + setup_gvfs_objects_progress(&params, l_num, l_den); do_req__with_fallback(component_url.buf, &params, status); + if (status->ec == GH__ERROR_CODE__OK) { + struct strbuf msg = STRBUF_INIT; + + strbuf_addf(&msg, "loose %s", + oid_to_hex(&params.loose_oid)); + + string_list_append(result_list, msg.buf); + strbuf_release(&msg); + } + gh__request_params__release(&params); strbuf_release(&component_url); } /* - * Call "gvfs/objects" POST REST API to fetch a packfile containing - * the objects in the requested OIDSET. Returns the filename (not - * pathname) to the new packfile. + * Call "gvfs/objects" POST REST API to fetch a batch of objects + * from the OIDSET. Normally, this results in a packfile containing + * `nr_wanted_in_block` objects. We return the number actually + * consumed (along with the filename of the resulting packfile). + * + * However, if we only have 1 OID (remaining) in the OIDSET, the + * server will respond to our POST with a loose object rather than + * a packfile with 1 object. + * + * Append a message to the result_list describing the result. + * + * Return the number of OIDs consumed from the OIDSET. */ -static void do__packfile__gvfs_objects(struct gh__response_status *status, - struct oidset_iter *iter, - unsigned long nr_wanted_in_block, - int j_pack_num, int j_pack_den, - struct strbuf *output_filename, - unsigned long *nr_oid_taken) +static void do__http_post__gvfs_objects(struct gh__response_status *status, + struct oidset_iter *iter, + unsigned long nr_wanted_in_block, + int j_pack_num, int j_pack_den, + struct string_list *result_list, + unsigned long *nr_oid_taken) { struct json_writer jw_req = JSON_WRITER_INIT; struct gh__request_params params = GH__REQUEST_PARAMS_INIT; - strbuf_setlen(output_filename, 0); - gh__response_status__zero(status); params.object_count = build_json_payload__gvfs_objects( - &jw_req, iter, nr_wanted_in_block); + &jw_req, iter, nr_wanted_in_block, &params.loose_oid); *nr_oid_taken = params.object_count; strbuf_addstr(&params.tr2_label, "POST/objects"); @@ -2547,7 +2674,7 @@ static void do__packfile__gvfs_objects(struct gh__response_status *status, "Content-Type: application/json"); /* * We really always want a packfile. But if the payload only - * requests 1 OID, the server will/may send us a single loose + * requests 1 OID, the server will send us a single loose * objects instead. (Apparently the server ignores us when we * only send application/x-git-packfile and does it anyway.)
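/*
 * [Editorial sketch; not part of the patch.]  Because a single-OID POST
 * comes back as a loose object (hence the pair of Accept headers added
 * just below), the installer keys off the content-type actually received,
 * mirroring the checks in install_packfile() and install_loose().
 */
static int response_is_packfile_sketch(const struct gh__response_status *status)
{
	return !strcmp(status->content_type.buf, "application/x-git-packfile");
}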
* @@ -2559,156 +2686,172 @@ static void do__packfile__gvfs_objects(struct gh__response_status *status, params.headers = curl_slist_append(params.headers, "Accept: application/x-git-loose-object"); - if (gh__cmd_opts.show_progress) { - strbuf_addf(¶ms.progress_base_phase2_msg, - "Requesting packfile %d/%d with %ld objects", - j_pack_num, j_pack_den, - params.object_count); - strbuf_addf(¶ms.progress_base_phase3_msg, - "Receiving packfile %d/%d with %ld objects", - j_pack_num, j_pack_den, - params.object_count); - } + setup_gvfs_objects_progress(¶ms, j_pack_num, j_pack_den); do_req__with_fallback("gvfs/objects", ¶ms, status); - if (status->ec == GH__ERROR_CODE__OK) - strbuf_addbuf(output_filename, ¶ms.final_packfile_filename); + + if (status->ec == GH__ERROR_CODE__OK) { + struct strbuf msg = STRBUF_INIT; + + if (params.object_count > 1) + strbuf_addf(&msg, "packfile %s", + params.final_packfile_filename.buf); + else + strbuf_addf(&msg, "loose %s", + oid_to_hex(¶ms.loose_oid)); + + string_list_append(result_list, msg.buf); + strbuf_release(&msg); + } gh__request_params__release(¶ms); jw_release(&jw_req); } /* - * Bulk or individually fetch a list of objects in one or more http requests. - * Create one or more packfiles and/or loose objects. + * Drive one or more HTTP GET requests to fetch the objects + * in the given OIDSET. These are received into loose objects. * - * We accumulate results for each request in `result_list` until we get a + * Accumulate results for each request in `result_list` until we get a * hard error and have to stop. */ -static void do_fetch_oidset(struct gh__response_status *status, - struct oidset *oids, - unsigned long nr_oid_total, - struct string_list *result_list) +static void do__http_get__fetch_oidset(struct gh__response_status *status, + struct oidset *oids, + unsigned long nr_oid_total, + struct string_list *result_list) { struct oidset_iter iter; - struct strbuf output_filename = STRBUF_INIT; - struct strbuf msg = STRBUF_INIT; struct strbuf err404 = STRBUF_INIT; const struct object_id *oid; unsigned long k; - unsigned long nr_oid_taken; int had_404 = 0; - int j_pack_den = 0; - int j_pack_num = 0; gh__response_status__zero(status); if (!nr_oid_total) return; - if (nr_oid_total > 1) - j_pack_den = ((nr_oid_total + gh__cmd_opts.block_size - 1) - / gh__cmd_opts.block_size); - oidset_iter_init(oids, &iter); - for (k = 0; k < nr_oid_total; k += nr_oid_taken) { - if (nr_oid_total - k == 1 || gh__cmd_opts.block_size == 1) { - oid = oidset_iter_next(&iter); - nr_oid_taken = 1; + for (k = 0; k < nr_oid_total; k++) { + oid = oidset_iter_next(&iter); - do__loose__gvfs_object(status, oid); + do__http_get__gvfs_object(status, oid, k+1, nr_oid_total, + result_list); + /* + * If we get a 404 for an individual object, ignore + * it and get the rest. We'll fixup the 'ec' later. + */ + if (status->ec == GH__ERROR_CODE__HTTP_404) { + if (!err404.len) + strbuf_addf(&err404, "%s: from GET %s", + status->error_message.buf, + oid_to_hex(oid)); /* - * If we get a 404 for an individual object, ignore - * it and get the rest. We'll fixup the 'ec' later. + * Mark the fetch as "incomplete", but don't + * stop trying to get other chunks. */ - if (status->ec == GH__ERROR_CODE__HTTP_404) { - if (!err404.len) - strbuf_addf(&err404, "%s: loose object %s", - status->error_message.buf, - oid_to_hex(oid)); - /* - * Mark the fetch as "incomplete", but don't - * stop trying to get other chunks. 
- */ - had_404 = 1; - continue; - } + had_404 = 1; + continue; + } - if (status->ec != GH__ERROR_CODE__OK) { - /* Stop at the first hard error. */ - strbuf_addf(&status->error_message, ": loose %s", - oid_to_hex(oid)); - goto cleanup; - } + if (status->ec != GH__ERROR_CODE__OK) { + /* Stop at the first hard error. */ + strbuf_addf(&status->error_message, ": from GET %s", + oid_to_hex(oid)); + goto cleanup; + } + } - strbuf_setlen(&msg, 0); - strbuf_addf(&msg, "loose %s", oid_to_hex(oid)); - string_list_append(result_list, msg.buf); +cleanup: + if (had_404 && status->ec == GH__ERROR_CODE__OK) { + strbuf_setlen(&status->error_message, 0); + strbuf_addbuf(&status->error_message, &err404); + status->ec = GH__ERROR_CODE__HTTP_404; + } - } else { - strbuf_setlen(&output_filename, 0); + strbuf_release(&err404); +} - j_pack_num++; +/* + * Drive one or more HTTP POST requests to bulk fetch the objects in + * the given OIDSET. Create one or more packfiles and/or loose objects. + * + * Accumulate results for each request in `result_list` until we get a + * hard error and have to stop. + */ +static void do__http_post__fetch_oidset(struct gh__response_status *status, + struct oidset *oids, + unsigned long nr_oid_total, + struct string_list *result_list) +{ + struct oidset_iter iter; + struct strbuf err404 = STRBUF_INIT; + unsigned long k; + unsigned long nr_oid_taken; + int j_pack_den = 0; + int j_pack_num = 0; + int had_404 = 0; + + gh__response_status__zero(status); + if (!nr_oid_total) + return; - do__packfile__gvfs_objects(status, &iter, - gh__cmd_opts.block_size, - j_pack_num, j_pack_den, - &output_filename, - &nr_oid_taken); + oidset_iter_init(oids, &iter); + + j_pack_den = ((nr_oid_total + gh__cmd_opts.block_size - 1) + / gh__cmd_opts.block_size); + + for (k = 0; k < nr_oid_total; k += nr_oid_taken) { + j_pack_num++; + do__http_post__gvfs_objects(status, &iter, + gh__cmd_opts.block_size, + j_pack_num, j_pack_den, + result_list, + &nr_oid_taken); + + /* + * Because the oidset iterator has random + * order, it does no good to say the k-th or + * n-th chunk was incomplete; the client + * cannot use that index for anything. + * + * We get a 404 when at least one object in + * the chunk was not found. + * + * For now, ignore the 404 and go on to the + * next chunk and then fixup the 'ec' later. + */ + if (status->ec == GH__ERROR_CODE__HTTP_404) { + if (!err404.len) + strbuf_addf(&err404, + "%s: from POST", + status->error_message.buf); /* - * Because the oidset iterator has random - * order, it does no good to say the k-th or - * n-th chunk was incomplete; the client - * cannot use that index for anything. - * - * We get a 404 when at least one object in - * the chunk was not found. - * - * TODO Consider various retry strategies (such as - * TODO loose or bisect) on the members within this - * TODO chunk to reduce the impact of the miss. - * - * For now, ignore the 404 and go on to the - * next chunk and then fixup the 'ec' later. + * Mark the fetch as "incomplete", but don't + * stop trying to get other chunks. */ - if (status->ec == GH__ERROR_CODE__HTTP_404) { - if (!err404.len) - strbuf_addf(&err404, - "%s: packfile object", - status->error_message.buf); - /* - * Mark the fetch as "incomplete", but don't - * stop trying to get other chunks. - */ - had_404 = 1; - continue; - } - - if (status->ec != GH__ERROR_CODE__OK) { - /* Stop at the first hard error. 
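/*
 * [Editorial sketch; not part of the patch.]  The ceiling division used
 * above for j_pack_den: with the default --block-size of 4000, 9000
 * queued OIDs (a hypothetical count) become three POST requests.
 */
static unsigned long count_post_blocks_sketch(unsigned long nr_oid_total,
					      unsigned long block_size)
{
	return (nr_oid_total + block_size - 1) / block_size;
}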
*/ - strbuf_addstr(&status->error_message, - ": in packfile"); - goto cleanup; - } + had_404 = 1; + continue; + } - strbuf_setlen(&msg, 0); - strbuf_addf(&msg, "packfile %s", output_filename.buf); - string_list_append(result_list, msg.buf); + if (status->ec != GH__ERROR_CODE__OK) { + /* Stop at the first hard error. */ + strbuf_addstr(&status->error_message, + ": from POST"); + goto cleanup; } } cleanup: - strbuf_release(&msg); - strbuf_release(&err404); - strbuf_release(&output_filename); - if (had_404 && status->ec == GH__ERROR_CODE__OK) { strbuf_setlen(&status->error_message, 0); - strbuf_addstr(&status->error_message, "404 Not Found"); + strbuf_addbuf(&status->error_message, &err404); status->ec = GH__ERROR_CODE__HTTP_404; } + + strbuf_release(&err404); } /* @@ -2746,7 +2889,7 @@ static enum gh__error_code do_sub_cmd__config(int argc, const char **argv) finish_init(0); - do__gvfs_config(&status, &config_data); + do__http_get__gvfs_config(&status, &config_data); ec = status.ec; if (ec == GH__ERROR_CODE__OK) @@ -2761,12 +2904,61 @@ static enum gh__error_code do_sub_cmd__config(int argc, const char **argv) } /* - * Read a list of objects from stdin and fetch them in a single request (or - * multiple block-size requests). + * Read a list of objects from stdin and fetch them as a series of + * single object HTTP GET requests. */ static enum gh__error_code do_sub_cmd__get(int argc, const char **argv) { static struct option get_options[] = { + OPT_INTEGER('r', "max-retries", &gh__cmd_opts.max_retries, + N_("retries for transient network errors")), + OPT_END(), + }; + + struct gh__response_status status = GH__RESPONSE_STATUS_INIT; + struct oidset oids = OIDSET_INIT; + struct string_list result_list = STRING_LIST_INIT_DUP; + enum gh__error_code ec = GH__ERROR_CODE__OK; + unsigned long nr_oid_total; + int k; + + trace2_cmd_mode("get"); + + if (argc > 1 && !strcmp(argv[1], "-h")) + usage_with_options(objects_get_usage, get_options); + + argc = parse_options(argc, argv, NULL, get_options, objects_get_usage, 0); + if (gh__cmd_opts.max_retries < 0) + gh__cmd_opts.max_retries = 0; + + finish_init(1); + + nr_oid_total = read_stdin_for_oids(&oids); + + do__http_get__fetch_oidset(&status, &oids, nr_oid_total, &result_list); + + ec = status.ec; + + for (k = 0; k < result_list.nr; k++) + printf("%s\n", result_list.items[k].string); + + if (ec != GH__ERROR_CODE__OK) + error("get: %s", status.error_message.buf); + + gh__response_status__release(&status); + oidset_clear(&oids); + string_list_clear(&result_list, 0); + + return ec; +} + +/* + * Read a list of objects from stdin and fetch them in a single request (or + * multiple block-size requests) using one or more HTTP POST requests. 
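/*
 * [Editorial sketch; not part of the patch.]  The partial-failure rule
 * shared by do__http_get__fetch_oidset() and do__http_post__fetch_oidset():
 * remember the first 404 in err404, keep fetching, and only surface the
 * 404 at cleanup time if nothing worse happened.
 */
static void finish_partial_fetch_sketch(struct gh__response_status *status,
					int had_404,
					const struct strbuf *err404)
{
	if (had_404 && status->ec == GH__ERROR_CODE__OK) {
		strbuf_setlen(&status->error_message, 0);
		strbuf_addbuf(&status->error_message, err404);
		status->ec = GH__ERROR_CODE__HTTP_404;
	}
}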
+ */ +static enum gh__error_code do_sub_cmd__post(int argc, const char **argv) +{ + static struct option post_options[] = { OPT_MAGNITUDE('b', "block-size", &gh__cmd_opts.block_size, N_("number of objects to request at a time")), OPT_INTEGER('d', "depth", &gh__cmd_opts.depth, @@ -2783,12 +2975,12 @@ static enum gh__error_code do_sub_cmd__get(int argc, const char **argv) unsigned long nr_oid_total; int k; - trace2_cmd_mode("get"); + trace2_cmd_mode("post"); if (argc > 1 && !strcmp(argv[1], "-h")) - usage_with_options(get_usage, get_options); + usage_with_options(objects_post_usage, post_options); - argc = parse_options(argc, argv, NULL, get_options, get_usage, 0); + argc = parse_options(argc, argv, NULL, post_options, objects_post_usage, 0); if (gh__cmd_opts.depth < 1) gh__cmd_opts.depth = 1; if (gh__cmd_opts.max_retries < 0) @@ -2796,12 +2988,9 @@ static enum gh__error_code do_sub_cmd__get(int argc, const char **argv) finish_init(1); - nr_oid_total = read_stdin_from_rev_list(&oids); + nr_oid_total = read_stdin_for_oids(&oids); - trace2_region_enter("gvfs-helper", "get", NULL); - trace2_data_intmax("gvfs-helper", NULL, "get/nr_objects", nr_oid_total); - do_fetch_oidset(&status, &oids, nr_oid_total, &result_list); - trace2_region_leave("gvfs-helper", "get", NULL); + do__http_post__fetch_oidset(&status, &oids, nr_oid_total, &result_list); ec = status.ec; @@ -2809,7 +2998,7 @@ static enum gh__error_code do_sub_cmd__get(int argc, const char **argv) printf("%s\n", result_list.items[k].string); if (ec != GH__ERROR_CODE__OK) - error("get: %s", status.error_message.buf); + error("post: %s", status.error_message.buf); gh__response_status__release(&status); oidset_clear(&oids); @@ -2819,12 +3008,14 @@ static enum gh__error_code do_sub_cmd__get(int argc, const char **argv) } /* - * Handle the 'get' command when in "server mode". Only call error() and set ec - * for hard errors where we cannot communicate correctly with the foreground - * client process. Pass any actual data errors (such as 404's or 401's from - * the fetch back to the client process. + * Handle the 'objects.get' and 'objects.post' verbs in "server mode". + * + * Only call error() and set ec for hard errors where we cannot + * communicate correctly with the foreground client process. Pass any + * actual data errors (such as 404's or 401's from the fetch) back to + * the client process. */ -static enum gh__error_code do_server_subprocess_get(void) +static enum gh__error_code do_server_subprocess__objects(const char *verb_line) { struct gh__response_status status = GH__RESPONSE_STATUS_INIT; struct oidset oids = OIDSET_INIT; @@ -2835,12 +3026,19 @@ static enum gh__error_code do_server_subprocess_get(void) int len; int err; int k; + enum gh__objects_mode objects_mode; unsigned long nr_oid_total = 0; - /* - * Inside the "get" command, we expect a list of OIDs - * and a flush. 
- */ + if (!strcmp(verb_line, "objects.get")) + objects_mode = GH__OBJECTS_MODE__GET; + else if (!strcmp(verb_line, "objects.post")) + objects_mode = GH__OBJECTS_MODE__POST; + else { + error("server: unexpected objects-mode verb '%s'", verb_line); + ec = GH__ERROR_CODE__SUBPROCESS_SYNTAX; + goto cleanup; + } + while (1) { len = packet_read_line_gently(0, NULL, &line); if (len < 0 || !line) @@ -2865,10 +3063,10 @@ static enum gh__error_code do_server_subprocess_get(void) goto cleanup; } - trace2_region_enter("gvfs-helper", "server/get", NULL); - trace2_data_intmax("gvfs-helper", NULL, "server/get/nr_objects", nr_oid_total); - do_fetch_oidset(&status, &oids, nr_oid_total, &result_list); - trace2_region_leave("gvfs-helper", "server/get", NULL); + if (objects_mode == GH__OBJECTS_MODE__GET) + do__http_get__fetch_oidset(&status, &oids, nr_oid_total, &result_list); + else + do__http_post__fetch_oidset(&status, &oids, nr_oid_total, &result_list); /* * Write pathname of the ODB where we wrote all of the objects @@ -2923,7 +3121,7 @@ static enum gh__error_code do_server_subprocess_get(void) return ec; } -typedef enum gh__error_code (fn_subprocess_cmd)(void); +typedef enum gh__error_code (fn_subprocess_cmd)(const char *verb_line); struct subprocess_capability { const char *name; @@ -2932,7 +3130,7 @@ struct subprocess_capability { }; static struct subprocess_capability caps[] = { - { "get", 0, do_server_subprocess_get }, + { "objects", 0, do_server_subprocess__objects }, { NULL, 0, NULL }, }; @@ -3063,8 +3261,9 @@ static enum gh__error_code do_sub_cmd__server(int argc, const char **argv) } for (k = 0; caps[k].name; k++) { - if (caps[k].client_has && !strcmp(line, caps[k].name)) { - ec = (caps[k].pfn)(); + if (caps[k].client_has && + starts_with(line, caps[k].name)) { + ec = (caps[k].pfn)(line); if (ec != GH__ERROR_CODE__OK) goto cleanup; goto top_of_loop; @@ -3085,6 +3284,9 @@ static enum gh__error_code do_sub_cmd(int argc, const char **argv) if (!strcmp(argv[0], "get")) return do_sub_cmd__get(argc, argv); + if (!strcmp(argv[0], "post")) + return do_sub_cmd__post(argc, argv); + if (!strcmp(argv[0], "config")) return do_sub_cmd__config(argc, argv); @@ -3135,8 +3337,8 @@ int cmd_main(int argc, const char **argv) setup_git_directory_gently(NULL); /* Set any non-zero initial values in gh__cmd_opts. */ - gh__cmd_opts.depth = GH__DEFAULT_COMMIT_DEPTH; - gh__cmd_opts.block_size = GH__DEFAULT_BLOCK_SIZE; + gh__cmd_opts.depth = GH__DEFAULT__OBJECTS_POST__COMMIT_DEPTH; + gh__cmd_opts.block_size = GH__DEFAULT__OBJECTS_POST__BLOCK_SIZE; gh__cmd_opts.max_retries = GH__DEFAULT_MAX_RETRIES; gh__cmd_opts.max_transient_backoff_sec = GH__DEFAULT_MAX_TRANSIENT_BACKOFF_SEC;
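/*
 * [Editorial sketch; not part of the patch.]  Why the capability dispatch
 * above switched from strcmp() to starts_with(): the client now sends a
 * capability-qualified verb ("objects.get" or "objects.post"), and both
 * must route to the single "objects" entry in caps[], whose handler
 * (do_server_subprocess__objects) then inspects the full verb line.
 */
static int verb_matches_capability_sketch(const char *line,
					  const char *cap_name)
{
	return starts_with(line, cap_name);
}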