diff --git a/.github/ISSUE_TEMPLATE.md b/.github/ISSUE_TEMPLATE.md
index 4017ed82ca4341..c19530b086311a 100644
--- a/.github/ISSUE_TEMPLATE.md
+++ b/.github/ISSUE_TEMPLATE.md
@@ -1,8 +1,10 @@
- - [ ] I was not able to find an [open](https://github.com/git-for-windows/git/issues?q=is%3Aopen) or [closed](https://github.com/git-for-windows/git/issues?q=is%3Aclosed) issue matching what I'm seeing
+ - [ ] I was not able to find an [open](https://github.com/microsoft/git/issues?q=is%3Aopen)
+ or [closed](https://github.com/microsoft/git/issues?q=is%3Aclosed) issue matching
+ what I'm seeing, including in [the `git-for-windows/git` tracker](https://github.com/git-for-windows/git/issues).
### Setup
- - Which version of Git for Windows are you using? Is it 32-bit or 64-bit?
+ - Which version of `microsoft/git` are you using? Is it 32-bit or 64-bit?
```
$ git --version --build-options
@@ -10,24 +12,22 @@ $ git --version --build-options
** insert your machine's response here **
```
- - Which version of Windows are you running? Vista, 7, 8, 10? Is it 32-bit or 64-bit?
+Are you using Scalar or VFS for Git?
+
+** insert your answer here **
+
+If VFS for Git, then what version?
```
-$ cmd.exe /c ver
+$ gvfs version
** insert your machine's response here **
```
- - What options did you set as part of the installation? Or did you choose the
- defaults?
+ - Which version of Windows are you running? Vista, 7, 8, 10? Is it 32-bit or 64-bit?
```
-# One of the following:
-> type "C:\Program Files\Git\etc\install-options.txt"
-> type "C:\Program Files (x86)\Git\etc\install-options.txt"
-> type "%USERPROFILE%\AppData\Local\Programs\Git\etc\install-options.txt"
-> type "$env:USERPROFILE\AppData\Local\Programs\Git\etc\install-options.txt"
-$ cat /etc/install-options.txt
+$ cmd.exe /c ver
** insert your machine's response here **
```
@@ -58,7 +58,11 @@ $ cat /etc/install-options.txt
** insert here **
- - If the problem was occurring with a specific repository, can you provide the
- URL to that repository to help us with testing?
+ - If the problem was occurring with a specific repository, can you specify
+ the repository?
-** insert URL here **
+ * [ ] Public repo: **insert URL here**
+ * [ ] Windows monorepo
+ * [ ] Office monorepo
+ * [ ] Other Microsoft-internal repo: **insert name here**
+ * [ ] Other internal repo.
diff --git a/.github/PULL_REQUEST_TEMPLATE.md b/.github/PULL_REQUEST_TEMPLATE.md
index 831ef6f19f1d11..3cb48d8582f31c 100644
--- a/.github/PULL_REQUEST_TEMPLATE.md
+++ b/.github/PULL_REQUEST_TEMPLATE.md
@@ -1,19 +1,10 @@
Thanks for taking the time to contribute to Git!
-Those seeking to contribute to the Git for Windows fork should see
-http://gitforwindows.org/#contribute on how to contribute Windows specific
-enhancements.
-
-If your contribution is for the core Git functions and documentation
-please be aware that the Git community does not use the github.com issues
-or pull request mechanism for their contributions.
-
-Instead, we use the Git mailing list (git@vger.kernel.org) for code and
-documentation submissions, code reviews, and bug reports. The
-mailing list is plain text only (anything with HTML is sent directly
-to the spam folder).
-
-Nevertheless, you can use GitGitGadget (https://gitgitgadget.github.io/)
-to conveniently send your Pull Requests commits to our mailing list.
-
-Please read the "guidelines for contributing" linked above!
+This fork contains changes specific to monorepo scenarios. If you are an
+external contributor, then please detail your reason for submitting to
+this fork:
+
+* [ ] This is an early version of work already under review upstream.
+* [ ] This change only applies to interactions with Azure DevOps and the
+ GVFS Protocol.
+* [ ] This change only applies to the virtualization hook and VFS for Git.
diff --git a/.github/macos-installer/Makefile b/.github/macos-installer/Makefile
new file mode 100644
index 00000000000000..df339bd921df23
--- /dev/null
+++ b/.github/macos-installer/Makefile
@@ -0,0 +1,116 @@
+SHELL := /bin/bash
+SUDO := sudo
+C_INCLUDE_PATH := /usr/include
+CPLUS_INCLUDE_PATH := /usr/include
+LD_LIBRARY_PATH := /usr/lib
+
+OSX_VERSION := $(shell sw_vers -productVersion)
+TARGET_FLAGS := -mmacosx-version-min=$(OSX_VERSION) -DMACOSX_DEPLOYMENT_TARGET=$(OSX_VERSION)
+
+ARCH := x86_64
+ARCH_CODE := x86_64
+ARCH_FLAGS_x86_64 := -arch x86_64
+
+CFLAGS := $(TARGET_FLAGS) $(ARCH_FLAGS_${ARCH_CODE})
+LDFLAGS := $(TARGET_FLAGS) $(ARCH_FLAGS_${ARCH_CODE})
+
+PREFIX := /usr/local
+GIT_PREFIX := $(PREFIX)/git
+
+BUILD_CODE := intel-$(ARCH_CODE)
+BUILD_DIR := $(GITHUB_WORKSPACE)/payload
+DESTDIR := $(PWD)/stage/git-$(BUILD_CODE)-$(VERSION)
+ARTIFACTDIR := build_artifacts
+SUBMAKE := $(MAKE) C_INCLUDE_PATH="$(C_INCLUDE_PATH)" CPLUS_INCLUDE_PATH="$(CPLUS_INCLUDE_PATH)" LD_LIBRARY_PATH="$(LD_LIBRARY_PATH)" TARGET_FLAGS="$(TARGET_FLAGS)" CFLAGS="$(CFLAGS)" LDFLAGS="$(LDFLAGS)" NO_GETTEXT=1 NO_DARWIN_PORTS=1 prefix=$(GIT_PREFIX) GIT_BUILT_FROM_COMMIT="$(GIT_BUILT_FROM_COMMIT)" DESTDIR=$(DESTDIR)
+CORES := $(shell bash -c "sysctl hw.ncpu | awk '{print \$$2}'")
+
+.PHONY: image pkg payload
+
+.SECONDARY:
+
+$(DESTDIR)$(GIT_PREFIX)/VERSION-$(VERSION)-$(BUILD_CODE):
+ rm -f $(BUILD_DIR)/git-$(VERSION)/osx-installed*
+ mkdir -p $(DESTDIR)$(GIT_PREFIX)
+ touch $@
+
+$(BUILD_DIR)/git-$(VERSION)/osx-built-keychain:
+ cd $(BUILD_DIR)/git-$(VERSION)/contrib/credential/osxkeychain; $(SUBMAKE) CFLAGS="$(CFLAGS) -g -O2 -Wall"
+ touch $@
+
+$(BUILD_DIR)/git-$(VERSION)/osx-built:
+ [ -d $(DESTDIR)$(GIT_PREFIX) ] && $(SUDO) rm -rf $(DESTDIR) || echo ok
+ cd $(BUILD_DIR)/git-$(VERSION); $(SUBMAKE) -j $(CORES) all strip
+ touch $@
+
+$(BUILD_DIR)/git-$(VERSION)/osx-installed-bin: $(BUILD_DIR)/git-$(VERSION)/osx-built $(BUILD_DIR)/git-$(VERSION)/osx-built-keychain
+ cd $(BUILD_DIR)/git-$(VERSION); $(SUBMAKE) install
+ cp $(BUILD_DIR)/git-$(VERSION)/contrib/credential/osxkeychain/git-credential-osxkeychain $(DESTDIR)$(GIT_PREFIX)/bin/git-credential-osxkeychain
+ mkdir -p $(DESTDIR)$(GIT_PREFIX)/contrib/completion
+ cp $(BUILD_DIR)/git-$(VERSION)/contrib/completion/git-completion.bash $(DESTDIR)$(GIT_PREFIX)/contrib/completion/
+ cp $(BUILD_DIR)/git-$(VERSION)/contrib/completion/git-completion.zsh $(DESTDIR)$(GIT_PREFIX)/contrib/completion/
+ cp $(BUILD_DIR)/git-$(VERSION)/contrib/completion/git-prompt.sh $(DESTDIR)$(GIT_PREFIX)/contrib/completion/
+ # This is needed for Git-Gui, GitK
+ mkdir -p $(DESTDIR)$(GIT_PREFIX)/lib/perl5/site_perl
+ [ ! -f $(DESTDIR)$(GIT_PREFIX)/lib/perl5/site_perl/Error.pm ] && cp $(BUILD_DIR)/git-$(VERSION)/perl/private-Error.pm $(DESTDIR)$(GIT_PREFIX)/lib/perl5/site_perl/Error.pm || echo done
+ touch $@
+
+$(BUILD_DIR)/git-$(VERSION)/osx-installed-man: $(BUILD_DIR)/git-$(VERSION)/osx-installed-bin
+ mkdir -p $(DESTDIR)$(GIT_PREFIX)/share/man
+ cp -R $(GITHUB_WORKSPACE)/manpages/ $(DESTDIR)$(GIT_PREFIX)/share/man
+ touch $@
+
+$(BUILD_DIR)/git-$(VERSION)/osx-built-subtree:
+ cd $(BUILD_DIR)/git-$(VERSION)/contrib/subtree; $(SUBMAKE) XML_CATALOG_FILES="$(XML_CATALOG_FILES)" all git-subtree.1
+ touch $@
+
+$(BUILD_DIR)/git-$(VERSION)/osx-installed-subtree: $(BUILD_DIR)/git-$(VERSION)/osx-built-subtree
+ mkdir -p $(DESTDIR)
+ cd $(BUILD_DIR)/git-$(VERSION)/contrib/subtree; $(SUBMAKE) XML_CATALOG_FILES="$(XML_CATALOG_FILES)" install install-man
+ touch $@
+
+$(BUILD_DIR)/git-$(VERSION)/osx-installed-assets: $(BUILD_DIR)/git-$(VERSION)/osx-installed-bin
+ mkdir -p $(DESTDIR)$(GIT_PREFIX)/etc
+ cat assets/etc/gitconfig.osxkeychain >> $(DESTDIR)$(GIT_PREFIX)/etc/gitconfig
+ cp assets/uninstall.sh $(DESTDIR)$(GIT_PREFIX)/uninstall.sh
+ sh -c "echo .DS_Store >> $(DESTDIR)$(GIT_PREFIX)/share/git-core/templates/info/exclude"
+
+symlinks:
+ mkdir -p $(ARTIFACTDIR)$(PREFIX)/bin
+ cd $(ARTIFACTDIR)$(PREFIX)/bin; find ../git/bin -type f -exec ln -sf {} \;
+ for man in man1 man3 man5 man7; do mkdir -p $(ARTIFACTDIR)$(PREFIX)/share/man/$$man; (cd $(ARTIFACTDIR)$(PREFIX)/share/man/$$man; ln -sf ../../../git/share/man/$$man/* ./); done
+ ruby ../scripts/symlink-git-hardlinks.rb $(ARTIFACTDIR)
+ touch $@
+
+$(BUILD_DIR)/git-$(VERSION)/osx-installed: $(DESTDIR)$(GIT_PREFIX)/VERSION-$(VERSION)-$(BUILD_CODE) $(BUILD_DIR)/git-$(VERSION)/osx-installed-man $(BUILD_DIR)/git-$(VERSION)/osx-installed-assets $(BUILD_DIR)/git-$(VERSION)/osx-installed-subtree
+ find $(DESTDIR)$(GIT_PREFIX) -type d -exec chmod ugo+rx {} \;
+ find $(DESTDIR)$(GIT_PREFIX) -type f -exec chmod ugo+r {} \;
+ touch $@
+
+$(BUILD_DIR)/git-$(VERSION)/osx-built-assert-$(ARCH_CODE): $(BUILD_DIR)/git-$(VERSION)/osx-built
+ifeq ("$(ARCH_CODE)", "universal")
+ File $(BUILD_DIR)/git-$(VERSION)/git
+ File $(BUILD_DIR)/git-$(VERSION)/contrib/credential/osxkeychain/git-credential-osxkeychain
+else
+ [ "$$(File $(BUILD_DIR)/git-$(VERSION)/git | cut -f 5 -d' ')" == "$(ARCH_CODE)" ]
+ [ "$$(File $(BUILD_DIR)/git-$(VERSION)/contrib/credential/osxkeychain/git-credential-osxkeychain | cut -f 5 -d' ')" == "$(ARCH_CODE)" ]
+endif
+ touch $@
+
+disk-image/VERSION-$(VERSION)-$(ARCH_CODE):
+ rm -f disk-image/*.pkg disk-image/VERSION-* disk-image/.DS_Store
+ mkdir disk-image
+ touch "$@"
+
+disk-image/git-$(VERSION)-$(BUILD_CODE).pkg: disk-image/VERSION-$(VERSION)-$(ARCH_CODE) symlinks
+ pkgbuild --identifier com.git.pkg --version $(VERSION) --root $(ARTIFACTDIR)$(PREFIX) --scripts assets/scripts --install-location $(PREFIX) --component-plist ./assets/git-components.plist disk-image/git-$(VERSION)-$(BUILD_CODE).pkg
+
+git-%-$(BUILD_CODE).dmg:
+ hdiutil create git-$(VERSION)-$(BUILD_CODE).uncompressed.dmg -fs HFS+ -srcfolder disk-image -volname "Git $(VERSION) Intel $(ARCH)" -ov
+ hdiutil convert -format UDZO -o $@ git-$(VERSION)-$(BUILD_CODE).uncompressed.dmg
+ rm -f git-$(VERSION)-$(BUILD_CODE).uncompressed.dmg
+
+payload: $(BUILD_DIR)/git-$(VERSION)/osx-installed $(BUILD_DIR)/git-$(VERSION)/osx-built-assert-$(ARCH_CODE)
+
+pkg: disk-image/git-$(VERSION)-$(BUILD_CODE).pkg
+
+image: git-$(VERSION)-$(BUILD_CODE).dmg
diff --git a/.github/macos-installer/assets/etc/gitconfig.osxkeychain b/.github/macos-installer/assets/etc/gitconfig.osxkeychain
new file mode 100644
index 00000000000000..788266b3a40a9d
--- /dev/null
+++ b/.github/macos-installer/assets/etc/gitconfig.osxkeychain
@@ -0,0 +1,2 @@
+[credential]
+ helper = osxkeychain
diff --git a/.github/macos-installer/assets/git-components.plist b/.github/macos-installer/assets/git-components.plist
new file mode 100644
index 00000000000000..78db36777df3ed
--- /dev/null
+++ b/.github/macos-installer/assets/git-components.plist
@@ -0,0 +1,18 @@
+
+
+
+
+
+ BundleHasStrictIdentifier
+
+ BundleIsRelocatable
+
+ BundleIsVersionChecked
+
+ BundleOverwriteAction
+ upgrade
+ RootRelativeBundlePath
+ git/share/git-gui/lib/Git Gui.app
+
+
+
diff --git a/.github/macos-installer/assets/scripts/postinstall b/.github/macos-installer/assets/scripts/postinstall
new file mode 100755
index 00000000000000..94056db9b7b864
--- /dev/null
+++ b/.github/macos-installer/assets/scripts/postinstall
@@ -0,0 +1,62 @@
+#!/bin/bash
+INSTALL_DST="$2"
+SCALAR_C_CMD="$INSTALL_DST/git/bin/scalar"
+SCALAR_DOTNET_CMD="/usr/local/scalar/scalar"
+SCALAR_UNINSTALL_SCRIPT="/usr/local/scalar/uninstall_scalar.sh"
+
+function cleanupScalar()
+{
+ echo "checking whether Scalar was installed"
+ if [ ! -f "$SCALAR_C_CMD" ]; then
+ echo "Scalar not installed; exiting..."
+ return 0
+ fi
+ echo "Scalar is installed!"
+
+ echo "looking for Scalar.NET"
+ if [ ! -f "$SCALAR_DOTNET_CMD" ]; then
+ echo "Scalar.NET not found; exiting..."
+ return 0
+ fi
+ echo "Scalar.NET found!"
+
+ currentUser=$(echo "show State:/Users/ConsoleUser" | scutil | awk '/Name :/ { print $3 }')
+
+ # Re-register Scalar.NET repositories with the newly-installed Scalar
+ for repo in $($SCALAR_DOTNET_CMD list); do
+ (
+ PATH="$INSTALL_DST/git/bin:$PATH"
+ sudo -u "$currentUser" scalar register $repo || \
+ echo "warning: skipping re-registration of $repo"
+ )
+ done
+
+ # Uninstall Scalar.NET
+ echo "removing Scalar.NET"
+
+ # Add /usr/local/bin to path - default install location of Homebrew
+ PATH="/usr/local/bin:$PATH"
+ if (sudo -u "$currentUser" brew list --cask scalar); then
+ # Remove from Homebrew
+ sudo -u "$currentUser" brew remove --cask scalar || echo "warning: Scalar.NET uninstall via Homebrew completed with code $?"
+ echo "Scalar.NET uninstalled via Homebrew!"
+ elif (sudo -u "$currentUser" brew list --cask scalar-azrepos); then
+ sudo -u "$currentUser" brew remove --cask scalar-azrepos || echo "warning: Scalar.NET with GVFS uninstall via Homebrew completed with code $?"
+ echo "Scalar.NET with GVFS uninstalled via Homebrew!"
+ elif [ -f $SCALAR_UNINSTALL_SCRIPT ]; then
+ # If not installed with Homebrew, manually remove package
+ sudo -S sh $SCALAR_UNINSTALL_SCRIPT || echo "warning: Scalar.NET uninstall completed with code $?"
+ echo "Scalar.NET uninstalled!"
+ else
+ echo "warning: Scalar.NET uninstall script not found"
+ fi
+
+ # Re-create the Scalar symlink, in case it was removed by the Scalar.NET uninstall operation
+ mkdir -p $INSTALL_DST/bin
+ /bin/ln -Fs "$SCALAR_C_CMD" "$INSTALL_DST/bin/scalar"
+}
+
+# Run Scalar cleanup (will exit if not applicable)
+cleanupScalar
+
+exit 0
\ No newline at end of file
diff --git a/.github/macos-installer/assets/uninstall.sh b/.github/macos-installer/assets/uninstall.sh
new file mode 100755
index 00000000000000..4fc79fbaa2e652
--- /dev/null
+++ b/.github/macos-installer/assets/uninstall.sh
@@ -0,0 +1,34 @@
+#!/bin/bash -e
+if [ ! -r "/usr/local/git" ]; then
+ echo "Git doesn't appear to be installed via this installer. Aborting"
+ exit 1
+fi
+
+if [ "$1" != "--yes" ]; then
+ echo "This will uninstall git by removing /usr/local/git/, and symlinks"
+ printf "Type 'yes' if you are sure you wish to continue: "
+ read response
+else
+ response="yes"
+fi
+
+if [ "$response" == "yes" ]; then
+ # remove all of the symlinks we've created
+ pkgutil --files com.git.pkg | grep bin | while read f; do
+ if [ -L /usr/local/$f ]; then
+ sudo rm /usr/local/$f
+ fi
+ done
+
+ # forget receipts.
+ pkgutil --packages | grep com.git.pkg | xargs -I {} sudo pkgutil --forget {}
+ echo "Uninstalled"
+
+ # The guts all go here.
+ sudo rm -rf /usr/local/git/
+else
+ echo "Aborted"
+ exit 1
+fi
+
+exit 0
diff --git a/.github/scripts/run-esrp-signing.py b/.github/scripts/run-esrp-signing.py
new file mode 100644
index 00000000000000..725bf4580f5f1b
--- /dev/null
+++ b/.github/scripts/run-esrp-signing.py
@@ -0,0 +1,135 @@
+import argparse
+import json
+import os
+import glob
+import pprint
+import subprocess
+import sys
+import re
+
+parser = argparse.ArgumentParser(description='Sign binaries for macOS')
+parser.add_argument('path', help='Path to file for signing')
+parser.add_argument('keycode', help='Platform-specific key code for signing')
+parser.add_argument('opcode', help='Platform-specific operation code for signing')
+# Setting nargs=argparse.REMAINDER allows us to pass in params that begin with `--`
+parser.add_argument('--params', nargs=argparse.REMAINDER, help='Parameters for signing')
+args = parser.parse_args()
+
+esrp_tool = os.path.join("esrp", "tools", "EsrpClient.exe")
+
+aad_id = os.environ['AZURE_AAD_ID'].strip()
+workspace = os.environ['GITHUB_WORKSPACE'].strip()
+
+source_location = args.path
+files = glob.glob(os.path.join(source_location, "*"))
+
+print("Found files:")
+pprint.pp(files)
+
+auth_json = {
+ "Version": "1.0.0",
+ "AuthenticationType": "AAD_CERT",
+ "TenantId": "72f988bf-86f1-41af-91ab-2d7cd011db47",
+ "ClientId": f"{aad_id}",
+ "AuthCert": {
+ "SubjectName": f"CN={aad_id}.microsoft.com",
+ "StoreLocation": "LocalMachine",
+ "StoreName": "My",
+ "SendX5c" : "true"
+ },
+ "RequestSigningCert": {
+ "SubjectName": f"CN={aad_id}",
+ "StoreLocation": "LocalMachine",
+ "StoreName": "My"
+ }
+}
+
+input_json = {
+ "Version": "1.0.0",
+ "SignBatches": [
+ {
+ "SourceLocationType": "UNC",
+ "SourceRootDirectory": source_location,
+ "DestinationLocationType": "UNC",
+ "DestinationRootDirectory": workspace,
+ "SignRequestFiles": [],
+ "SigningInfo": {
+ "Operations": [
+ {
+ "KeyCode": f"{args.keycode}",
+ "OperationCode": f"{args.opcode}",
+ "Parameters": {},
+ "ToolName": "sign",
+ "ToolVersion": "1.0",
+ }
+ ]
+ }
+ }
+ ]
+}
+
+# add files to sign
+for f in files:
+ name = os.path.basename(f)
+ input_json["SignBatches"][0]["SignRequestFiles"].append(
+ {
+ "SourceLocation": name,
+ "DestinationLocation": os.path.join("signed", name),
+ }
+ )
+
+# add parameters to input.json (e.g. enabling the hardened runtime for macOS)
+if args.params is not None:
+ i = 0
+ while i < len(args.params):
+ input_json["SignBatches"][0]["SigningInfo"]["Operations"][0]["Parameters"][args.params[i]] = args.params[i + 1]
+ i += 2
+
+policy_json = {
+ "Version": "1.0.0",
+ "Intent": "production release",
+ "ContentType": "binary",
+}
+
+configs = [
+ ("auth.json", auth_json),
+ ("input.json", input_json),
+ ("policy.json", policy_json),
+]
+
+for filename, data in configs:
+ with open(filename, 'w') as fp:
+ json.dump(data, fp)
+
+# Run ESRP Client
+esrp_out = "esrp_out.json"
+result = subprocess.run(
+ [esrp_tool, "sign",
+ "-a", "auth.json",
+ "-i", "input.json",
+ "-p", "policy.json",
+ "-o", esrp_out,
+ "-l", "Verbose"],
+ capture_output=True,
+ text=True,
+ cwd=workspace)
+
+# Scrub log before printing
+log = re.sub(r'^.+Uploading.*to\s*destinationUrl\s*(.+?),.+$',
+ '***',
+ result.stdout,
+ flags=re.IGNORECASE|re.MULTILINE)
+print(log)
+
+if result.returncode != 0:
+ print("Failed to run ESRPClient.exe")
+ sys.exit(1)
+
+if os.path.isfile(esrp_out):
+ print("ESRP output json:")
+ with open(esrp_out, 'r') as fp:
+ pprint.pp(json.load(fp))
+
+for file in files:
+ if os.path.isfile(os.path.join("signed", file)):
+ print(f"Success!\nSigned {file}")
\ No newline at end of file
diff --git a/.github/scripts/set-up-esrp.ps1 b/.github/scripts/set-up-esrp.ps1
new file mode 100644
index 00000000000000..ca56266e33f553
--- /dev/null
+++ b/.github/scripts/set-up-esrp.ps1
@@ -0,0 +1,12 @@
+# Install ESRP client
+az storage blob download --file esrp.zip --auth-mode login --account-name esrpsigningstorage --container signing-resources --name microsoft.esrpclient.1.2.76.nupkg
+Expand-Archive -Path esrp.zip -DestinationPath .\esrp
+
+# Install certificates
+az keyvault secret download --vault-name "$env:AZURE_VAULT" --name "$env:AUTH_CERT" --file out.pfx
+certutil -f -importpfx out.pfx
+Remove-Item out.pfx
+
+az keyvault secret download --vault-name "$env:AZURE_VAULT" --name "$env:REQUEST_SIGNING_CERT" --file out.pfx
+certutil -f -importpfx out.pfx
+Remove-Item out.pfx
\ No newline at end of file
diff --git a/.github/scripts/symlink-git-hardlinks.rb b/.github/scripts/symlink-git-hardlinks.rb
new file mode 100644
index 00000000000000..174802ccc85d93
--- /dev/null
+++ b/.github/scripts/symlink-git-hardlinks.rb
@@ -0,0 +1,19 @@
+#!/usr/bin/env ruby
+
+install_prefix = ARGV[0]
+puts install_prefix
+git_binary = File.join(install_prefix, '/usr/local/git/bin/git')
+
+[
+ ['git' , File.join(install_prefix, '/usr/local/git/bin')],
+ ['../../bin/git', File.join(install_prefix, '/usr/local/git/libexec/git-core')]
+].each do |link, path|
+ Dir.glob(File.join(path, '*')).each do |file|
+ next if file == git_binary
+ puts "#{file} #{File.size(file)} == #{File.size(git_binary)}"
+ next unless File.size(file) == File.size(git_binary)
+ puts "Symlinking #{file}"
+ puts `ln -sf #{link} #{file}`
+ exit $?.exitstatus if $?.exitstatus != 0
+ end
+end
\ No newline at end of file
diff --git a/.github/workflows/build-git-installers.yml b/.github/workflows/build-git-installers.yml
new file mode 100644
index 00000000000000..0fd5df2888540c
--- /dev/null
+++ b/.github/workflows/build-git-installers.yml
@@ -0,0 +1,840 @@
+name: build-git-installers
+
+on:
+ push:
+ tags:
+ - 'v[0-9]*vfs*' # matches release tags such as "v2.32.0.vfs.0.0"
+
+jobs:
+ # Check prerequisites for the workflow
+ prereqs:
+ runs-on: ubuntu-latest
+ env:
+ AZ_SUB: ${{ secrets.AZURE_SUBSCRIPTION }}
+ AZ_CREDS: ${{ secrets.AZURE_CREDENTIALS }}
+ outputs:
+ tag_name: ${{ steps.tag.outputs.name }} # The full name of the tag, e.g. v2.32.0.vfs.0.0
+ tag_version: ${{ steps.tag.outputs.version }} # The version number (without preceding "v"), e.g. 2.32.0.vfs.0.0
+ deb_signable: ${{ steps.deb.outputs.signable }} # Whether the credentials needed to sign the .deb package are available
+ steps:
+ - name: Determine tag to build
+ run: |
+ echo "::set-output name=name::${GITHUB_REF#refs/tags/}"
+ echo "::set-output name=version::${GITHUB_REF#refs/tags/v}"
+ id: tag
+ - name: Determine whether signing certificates are present
+ run: echo "::set-output name=signable::$([[ $AZ_SUB != '' && $AZ_CREDS != '' ]] && echo 'true' || echo 'false')"
+ id: deb
+ - name: Clone git
+ uses: actions/checkout@v2
+ - name: Validate the tag identified with trigger
+ run: |
+ die () {
+ echo "::error::$*" >&2
+ exit 1
+ }
+
+ # `actions/checkout` only downloads the peeled tag (i.e. the commit)
+ git fetch origin +$GITHUB_REF:$GITHUB_REF
+
+ # Verify that the tag is annotated
+ test $(git cat-file -t "$GITHUB_REF") == "tag" || die "Tag ${{ steps.tag.outputs.name }} is not annotated"
+
+ # Verify tag follows rules in GIT-VERSION-GEN (i.e., matches the specified "DEF_VER" in
+ # GIT-VERSION-FILE) and matches tag determined from trigger
+ make GIT-VERSION-FILE
+ test "${{ steps.tag.outputs.version }}" == "$(sed -n 's/^GIT_VERSION = //p'< GIT-VERSION-FILE)" || die "GIT-VERSION-FILE tag does not match ${{ steps.tag.outputs.name }}"
+ # End check prerequisites for the workflow
+
+ # Build Windows installers (x86_64 installer & portable)
+ windows_pkg:
+ runs-on: windows-2019
+ needs: prereqs
+ env:
+ GPG_OPTIONS: "--batch --yes --no-tty --list-options no-show-photos --verify-options no-show-photos --pinentry-mode loopback"
+ HOME: "${{github.workspace}}\\home"
+ USERPROFILE: "${{github.workspace}}\\home"
+ steps:
+ - name: Configure user
+ shell: bash
+ run:
+ USER_NAME="${{github.actor}}" &&
+ USER_EMAIL="${{github.actor}}@users.noreply.github.com" &&
+ mkdir -p "$HOME" &&
+ git config --global user.name "$USER_NAME" &&
+ git config --global user.email "$USER_EMAIL" &&
+ echo "PACKAGER=$USER_NAME <$USER_EMAIL>" >>$GITHUB_ENV
+ - uses: git-for-windows/setup-git-for-windows-sdk@v1
+ with:
+ flavor: build-installers
+ - name: Clone build-extra
+ shell: bash
+ run: |
+ git clone --filter=blob:none --single-branch -b main https://github.com/git-for-windows/build-extra /usr/src/build-extra
+ - name: Clone git
+ shell: bash
+ run: |
+ # Since we cannot directly clone a specified tag (as we would a branch with `git clone -b `),
+ # this clone has to be done manually (via init->fetch->reset).
+
+ tag_name="${{ needs.prereqs.outputs.tag_name }}" &&
+ git -c init.defaultBranch=main init &&
+ git remote add -f origin https://github.com/git-for-windows/git &&
+ git fetch "https://github.com/${{github.repository}}" refs/tags/${tag_name}:refs/tags/${tag_name} &&
+ git reset --hard ${tag_name}
+ - name: Prepare home directory for code-signing
+ env:
+ CODESIGN_P12: ${{secrets.CODESIGN_P12}}
+ CODESIGN_PASS: ${{secrets.CODESIGN_PASS}}
+ if: env.CODESIGN_P12 != '' && env.CODESIGN_PASS != ''
+ shell: bash
+ run: |
+ cd home &&
+ mkdir -p .sig &&
+ echo -n "$CODESIGN_P12" | tr % '\n' | base64 -d >.sig/codesign.p12 &&
+ echo -n "$CODESIGN_PASS" >.sig/codesign.pass
+ git config --global alias.signtool '!sh "/usr/src/build-extra/signtool.sh"'
+ - name: Prepare home directory for GPG signing
+ if: env.GPGKEY != ''
+ shell: bash
+ run: |
+ # This section ensures that the identity for the GPG key matches the git user identity, otherwise
+ # signing will fail
+
+ echo '${{secrets.PRIVGPGKEY}}' | tr % '\n' | gpg $GPG_OPTIONS --import &&
+ info="$(gpg --list-keys --with-colons "${GPGKEY%% *}" | cut -d : -f 1,10 | sed -n '/^uid/{s|uid:||p;q}')" &&
+ git config --global user.name "${info% <*}" &&
+ git config --global user.email "<${info#*<}"
+ env:
+ GPGKEY: ${{secrets.GPGKEY}}
+ - name: Build mingw-w64-x86_64-git
+ env:
+ GPGKEY: "${{secrets.GPGKEY}}"
+ shell: bash
+ run: |
+ set -x
+
+ # Make sure that there is a `/usr/bin/git` that can be used by `makepkg-mingw`
+ printf '#!/bin/sh\n\nexec /mingw64/bin/git.exe "$@"\n' >/usr/bin/git &&
+
+ # Restrict `PATH` to MSYS2 and to Visual Studio (to let `cv2pdb` find the relevant DLLs)
+ PATH="/mingw64/bin:/usr/bin:/C/Program Files (x86)/Microsoft Visual Studio 14.0/VC/bin/amd64:/C/Windows/system32"
+
+ type -p mspdb140.dll || exit 1
+
+ sh -x /usr/src/build-extra/please.sh build-mingw-w64-git --only-64-bit --build-src-pkg -o artifacts HEAD &&
+ if test -n "$GPGKEY"
+ then
+ for tar in artifacts/*.tar*
+ do
+ /usr/src/build-extra/gnupg-with-gpgkey.sh --detach-sign --no-armor $tar
+ done
+ fi &&
+
+ b=$PWD/artifacts &&
+ version=${{ needs.prereqs.outputs.tag_name }} &&
+ (cd /usr/src/MINGW-packages/mingw-w64-git &&
+ cp PKGBUILD.$version PKGBUILD &&
+ git commit -s -m "mingw-w64-git: new version ($version)" PKGBUILD &&
+ git bundle create "$b"/MINGW-packages.bundle origin/main..main)
+ - name: Publish mingw-w64-x86_64-git
+ uses: actions/upload-artifact@v2
+ with:
+ name: pkg-x86_64
+ path: artifacts
+ windows_artifacts:
+ runs-on: windows-2019
+ needs: [prereqs, windows_pkg]
+ env:
+ HOME: "${{github.workspace}}\\home"
+ strategy:
+ matrix:
+ artifact:
+ - name: installer
+ fileprefix: Git
+ - name: portable
+ fileprefix: PortableGit
+ fail-fast: false
+ steps:
+ - name: Download pkg-x86_64
+ uses: actions/download-artifact@v2
+ with:
+ name: pkg-x86_64
+ path: pkg-x86_64
+ - uses: git-for-windows/setup-git-for-windows-sdk@v1
+ with:
+ flavor: build-installers
+ - name: Clone build-extra
+ shell: bash
+ run: |
+ git clone --filter=blob:none --single-branch -b main https://github.com/git-for-windows/build-extra /usr/src/build-extra
+ - name: Prepare home directory for code-signing
+ env:
+ CODESIGN_P12: ${{secrets.CODESIGN_P12}}
+ CODESIGN_PASS: ${{secrets.CODESIGN_PASS}}
+ if: env.CODESIGN_P12 != '' && env.CODESIGN_PASS != ''
+ shell: bash
+ run: |
+ mkdir -p home/.sig &&
+ echo -n "$CODESIGN_P12" | tr % '\n' | base64 -d >home/.sig/codesign.p12 &&
+ echo -n "$CODESIGN_PASS" >home/.sig/codesign.pass &&
+ git config --global alias.signtool '!sh "/usr/src/build-extra/signtool.sh"'
+ - name: Retarget auto-update to microsoft/git
+ shell: bash
+ run: |
+ set -x
+
+ b=/usr/src/build-extra &&
+
+ filename=$b/git-update-git-for-windows.config
+ tr % '\t' >$filename <<-\EOF &&
+ [update]
+ %fromFork = microsoft/git
+ EOF
+
+ sed -i -e '/^#include "file-list.iss"/a\
+ Source: {#SourcePath}\\..\\git-update-git-for-windows.config; DestDir: {app}\\mingw64\\bin; Flags: replacesameversion; AfterInstall: DeleteFromVirtualStore' \
+ -e '/^Type: dirifempty; Name: {app}\\{#MINGW_BITNESS}$/i\
+ Type: files; Name: {app}\\{#MINGW_BITNESS}\\bin\\git-update-git-for-windows.config\
+ Type: dirifempty; Name: {app}\\{#MINGW_BITNESS}\\bin' \
+ $b/installer/install.iss
+ - name: Set alerts to continue until upgrade is taken
+ shell: bash
+ run: |
+ set -x
+
+ b=/mingw64/bin &&
+
+ sed -i -e '6 a use_recently_seen=no' \
+ $b/git-update-git-for-windows
+ - name: Set the installer Publisher to the Git Fundamentals team
+ shell: bash
+ run: |
+ b=/usr/src/build-extra &&
+ sed -i -e 's/^\(AppPublisher=\).*/\1The Git Fundamentals Team at GitHub/' $b/installer/install.iss
+ - name: Let the installer configure Visual Studio to use the installed Git
+ shell: bash
+ run: |
+ set -x
+
+ b=/usr/src/build-extra &&
+
+ sed -i -e '/^ *InstallAutoUpdater();$/a\
+ CustomPostInstall();' \
+ -e '/^ *UninstallAutoUpdater();$/a\
+ CustomPostUninstall();' \
+ $b/installer/install.iss &&
+
+ cat >>$b/installer/helpers.inc.iss <<\EOF
+
+ procedure CustomPostInstall();
+ begin
+ if not RegWriteStringValue(HKEY_CURRENT_USER,'Software\Microsoft\VSCommon\15.0\TeamFoundation\GitSourceControl','GitPath',ExpandConstant('{app}')) or
+ not RegWriteStringValue(HKEY_CURRENT_USER,'Software\Microsoft\VSCommon\16.0\TeamFoundation\GitSourceControl','GitPath',ExpandConstant('{app}')) or
+ not RegWriteStringValue(HKEY_CURRENT_USER,'Software\Microsoft\VSCommon\17.0\TeamFoundation\GitSourceControl','GitPath',ExpandConstant('{app}')) or
+ not RegWriteStringValue(HKEY_CURRENT_USER,'Software\Microsoft\VSCommon\18.0\TeamFoundation\GitSourceControl','GitPath',ExpandConstant('{app}')) or
+ not RegWriteStringValue(HKEY_CURRENT_USER,'Software\Microsoft\VSCommon\19.0\TeamFoundation\GitSourceControl','GitPath',ExpandConstant('{app}')) or
+ not RegWriteStringValue(HKEY_CURRENT_USER,'Software\Microsoft\VSCommon\20.0\TeamFoundation\GitSourceControl','GitPath',ExpandConstant('{app}')) then
+ LogError('Could not register TeamFoundation\GitSourceControl');
+ end;
+
+ procedure CustomPostUninstall();
+ begin
+ if not RegDeleteValue(HKEY_CURRENT_USER,'Software\Microsoft\VSCommon\15.0\TeamFoundation\GitSourceControl','GitPath') or
+ not RegDeleteValue(HKEY_CURRENT_USER,'Software\Microsoft\VSCommon\16.0\TeamFoundation\GitSourceControl','GitPath') or
+ not RegDeleteValue(HKEY_CURRENT_USER,'Software\Microsoft\VSCommon\17.0\TeamFoundation\GitSourceControl','GitPath') or
+ not RegDeleteValue(HKEY_CURRENT_USER,'Software\Microsoft\VSCommon\18.0\TeamFoundation\GitSourceControl','GitPath') or
+ not RegDeleteValue(HKEY_CURRENT_USER,'Software\Microsoft\VSCommon\19.0\TeamFoundation\GitSourceControl','GitPath') or
+ not RegDeleteValue(HKEY_CURRENT_USER,'Software\Microsoft\VSCommon\20.0\TeamFoundation\GitSourceControl','GitPath') then
+ LogError('Could not register TeamFoundation\GitSourceControl');
+ end;
+ EOF
+ - name: Enable Scalar/C and the auto-updater in the installer by default
+ shell: bash
+ run: |
+ set -x
+
+ b=/usr/src/build-extra &&
+
+ sed -i -e "/ChosenOptions:=''/a\\
+ if (ExpandConstant('{param:components|/}')='/') then begin\n\
+ WizardSelectComponents('autoupdate');\n\
+ #ifdef WITH_SCALAR\n\
+ WizardSelectComponents('scalar');\n\
+ #endif\n\
+ end;" $b/installer/install.iss
+ - name: Build 64-bit ${{matrix.artifact.name}}
+ shell: bash
+ run: |
+ set -x
+
+ # Copy the PDB archive to the directory where `--include-pdbs` expects it
+ b=/usr/src/build-extra &&
+ mkdir -p $b/cached-source-packages &&
+ cp pkg-x86_64/*-pdb* $b/cached-source-packages/ &&
+
+ # Build the installer, embedding PDBs
+ eval $b/please.sh make_installers_from_mingw_w64_git --include-pdbs \
+ --version=${{ needs.prereqs.outputs.tag_version }} \
+ -o artifacts --${{matrix.artifact.name}} \
+ --pkg=pkg-x86_64/mingw-w64-x86_64-git-[0-9]*.tar.xz \
+ --pkg=pkg-x86_64/mingw-w64-x86_64-git-doc-html-[0-9]*.tar.xz &&
+
+ if test portable = '${{matrix.artifact.name}}' && test -n "$(git config alias.signtool)"
+ then
+ git signtool artifacts/PortableGit-*.exe
+ fi &&
+ openssl dgst -sha256 artifacts/${{matrix.artifact.fileprefix}}-*.exe | sed "s/.* //" >artifacts/sha-256.txt
+ - name: Verify that .exe files are code-signed
+ if: env.CODESIGN_P12 != '' && env.CODESIGN_PASS != ''
+ shell: bash
+ run: |
+ PATH=$PATH:"/c/Program Files (x86)/Windows Kits/10/App Certification Kit/" \
+ signtool verify //pa artifacts/${{matrix.artifact.fileprefix}}-*.exe
+ - name: Publish ${{matrix.artifact.name}}-x86_64
+ uses: actions/upload-artifact@v2
+ with:
+ name: win-${{matrix.artifact.name}}-x86_64
+ path: artifacts
+ # End build Windows installers
+
+ # Build and sign Mac OSX installers & upload artifacts
+ osx_build:
+ runs-on: macos-latest
+ needs: prereqs
+ env:
+ # `gettext` is keg-only
+ LDFLAGS: -L/usr/local/opt/gettext/lib
+ CFLAGS: -I/usr/local/opt/gettext/include
+ # To make use of the catalogs...
+ XML_CATALOG_FILES: /usr/local/etc/xml/catalog
+ VERSION: "${{ needs.prereqs.outputs.tag_version }}"
+ steps:
+ - name: Check out repository
+ uses: actions/checkout@v3
+ with:
+ path: 'git'
+
+ - name: Install Git dependencies
+ run: |
+ set -x
+ brew install automake asciidoc xmlto docbook
+ brew link --force gettext
+
+ - name: Build payload
+ run: |
+ # Configure the environment
+ set -x
+ PATH=/usr/local/bin:$PATH
+ export CURL_LDFLAGS=$(curl-config --libs)
+
+ # Write to "version" file to force match with trigger payload version
+ echo "${{ needs.prereqs.outputs.tag_version }}" >>git/version
+ make -C git -j$(sysctl -n hw.physicalcpu) GIT-VERSION-FILE dist dist-doc
+
+ export GIT_BUILT_FROM_COMMIT=$(gunzip -c git/git-$VERSION.tar.gz | git get-tar-commit-id) ||
+ die "Could not determine commit for build"
+
+ # Extract tarballs
+ mkdir payload manpages
+ tar -xvf git/git-$VERSION.tar.gz -C payload
+ tar -xvf git/git-manpages-$VERSION.tar.gz -C manpages
+
+ # Lay out payload
+ make -C git/.github/macos-installer V=1 payload
+
+ # This step is necessary because we cannot use the $VERSION
+ # environment variable or the tag_version output from the prereqs
+ # job in the upload-artifact task.
+ mkdir -p build_artifacts
+ cp -R stage/git-intel-x86_64-$VERSION/ build_artifacts
+
+ # We keep a list of executable files because their executable bits are
+ # removed when they are zipped, and we need to re-add.
+ find build_artifacts -type f -a -perm -u=x >executable-files.txt
+
+ - name: Upload macOS artifacts
+ uses: actions/upload-artifact@v3
+ with:
+ name: tmp.osx-build
+ path: |
+ build_artifacts
+
+ - name: Upload list of executable files
+ uses: actions/upload-artifact@v3
+ with:
+ name: tmp.executable-files
+ path: |
+ executable-files.txt
+
+ osx_sign_payload:
+ # ESRP service requires signing to run on Windows
+ runs-on: windows-latest
+ needs: osx_build
+ steps:
+ - name: Check out repository
+ uses: actions/checkout@v3
+ with:
+ path: 'git'
+
+      - name: Download unsigned build artifacts
+ uses: actions/download-artifact@v3
+ with:
+ name: tmp.osx-build
+ path: build_artifacts
+
+ - name: Zip unsigned build artifacts
+ shell: pwsh
+ run: |
+ Compress-Archive -Path build_artifacts build_artifacts/build_artifacts.zip
+ cd build_artifacts
+ Get-ChildItem -Exclude build_artifacts.zip | Remove-Item -Recurse -Force
+
+ - uses: azure/login@v1
+ with:
+ creds: ${{ secrets.AZURE_CREDENTIALS }}
+
+ - name: Set up ESRP client
+ shell: pwsh
+ env:
+ AZURE_VAULT: ${{ secrets.AZURE_VAULT }}
+ AUTH_CERT: ${{ secrets.AZURE_VAULT_AUTH_CERT_NAME }}
+ REQUEST_SIGNING_CERT: ${{ secrets.AZURE_VAULT_REQUEST_SIGNING_CERT_NAME }}
+ run: |
+ git\.github\scripts\set-up-esrp.ps1
+
+ - name: Run ESRP client
+ shell: pwsh
+ env:
+ AZURE_AAD_ID: ${{ secrets.AZURE_AAD_ID }}
+ APPLE_KEY_CODE: ${{ secrets.APPLE_KEY_CODE }}
+ APPLE_SIGNING_OP_CODE: ${{ secrets.APPLE_SIGNING_OPERATION_CODE }}
+ run: |
+ python git\.github\scripts\run-esrp-signing.py build_artifacts `
+ $env:APPLE_KEY_CODE $env:APPLE_SIGNING_OP_CODE `
+ --params 'Hardening' '--options=runtime'
+
+ - name: Unzip signed build artifacts
+ shell: pwsh
+ run: |
+ Expand-Archive signed/build_artifacts.zip -DestinationPath signed
+ Remove-Item signed/build_artifacts.zip
+
+ - name: Upload signed payload
+ uses: actions/upload-artifact@v3
+ with:
+ name: osx-signed-payload
+ path: |
+ signed
+
+ osx_pack:
+ runs-on: macos-latest
+ needs: [prereqs, osx_sign_payload]
+ steps:
+ - name: Check out repository
+ uses: actions/checkout@v3
+ with:
+ path: 'git'
+
+ - name: Download signed artifacts
+ uses: actions/download-artifact@v3
+ with:
+ name: osx-signed-payload
+
+ - name: Download list of executable files
+ uses: actions/download-artifact@v3
+ with:
+ name: tmp.executable-files
+
+ - name: Build macOS pkg
+ env:
+ VERSION: "${{ needs.prereqs.outputs.tag_version }}"
+ run: |
+ # Install findutils to use gxargs below
+ brew install findutils
+
+ # Configure the environment
+ export CURL_LDFLAGS=$(curl-config --libs)
+
+ # Add executable bits and move build_artifacts into
+ # the same directory as Makefile (so that executable bits
+ # will be recognized).
+          # NOTE(review): this span was garbled by angle-bracket stripping during
+          # extraction; the lines below are a reconstruction — confirm the exact
+          # commands (and their order) against the original workflow file.
+          gxargs -r -d '\n' chmod a+x <executable-files.txt
+
+          die () {
+            echo "$*" >&2
+            exit 1
+          }
+
+ echo "${{ needs.prereqs.outputs.tag_version }}" >>git/version
+ make -C git GIT-VERSION-FILE
+
+ VERSION="${{ needs.prereqs.outputs.tag_version }}"
+
+ ARCH="$(dpkg-architecture -q DEB_HOST_ARCH)"
+ if test -z "$ARCH"; then
+ die "Could not determine host architecture!"
+ fi
+
+ PKGNAME="microsoft-git_$VERSION"
+ PKGDIR="$(dirname $(pwd))/$PKGNAME"
+
+ rm -rf "$PKGDIR"
+ mkdir -p "$PKGDIR"
+
+ DESTDIR="$PKGDIR" make -C git -j5 V=1 DEVELOPER=1 \
+ USE_LIBPCRE=1 \
+ NO_CROSS_DIRECTORY_HARDLINKS=1 \
+ ASCIIDOC8=1 ASCIIDOC_NO_ROFF=1 \
+ ASCIIDOC='TZ=UTC asciidoc' \
+ prefix=/usr/local \
+ gitexecdir=/usr/local/lib/git-core \
+ libexecdir=/usr/local/lib/git-core \
+ htmldir=/usr/local/share/doc/git/html \
+ install install-doc install-html
+
+ cd ..
+ mkdir "$PKGNAME/DEBIAN"
+
+ # Based on https://packages.ubuntu.com/xenial/vcs/git
+          # NOTE(review): this heredoc was truncated during extraction; required
+          # Debian control fields (Package:, Version:, Architecture:, Maintainer:,
+          # Depends:, …) appear to be lost — restore them from the original
+          # workflow before use, or dpkg-deb will reject the package.
+          cat >"$PKGNAME/DEBIAN/control" <<-EOF
+ Description: Git client built from the https://github.com/microsoft/git repository,
+ specialized in supporting monorepo scenarios. Includes the Scalar CLI.
+ EOF
+
+ dpkg-deb --build "$PKGNAME"
+
+ mkdir $GITHUB_WORKSPACE/artifacts
+ mv "$PKGNAME.deb" $GITHUB_WORKSPACE/artifacts/
+ - name: Publish unsigned .deb package
+ uses: actions/upload-artifact@v2
+ with:
+ name: deb-package-unsigned
+ path: artifacts/
+ ubuntu_sign-artifacts:
+ runs-on: windows-latest # Must be run on Windows due to ESRP executable OS compatibility
+ needs: [ubuntu_build, prereqs]
+ if: needs.prereqs.outputs.deb_signable == 'true'
+ env:
+ ARTIFACTS_DIR: artifacts
+ steps:
+ - name: Clone repository
+ uses: actions/checkout@v2
+ with:
+ path: 'git'
+ - name: Download unsigned packages
+ uses: actions/download-artifact@v2
+ with:
+ name: deb-package-unsigned
+ path: unsigned
+ - uses: azure/login@v1
+ with:
+ creds: ${{ secrets.AZURE_CREDENTIALS }}
+ - name: Set up ESRP client
+ shell: pwsh
+ env:
+ AZURE_VAULT: ${{ secrets.AZURE_VAULT }}
+ AUTH_CERT: ${{ secrets.AZURE_VAULT_AUTH_CERT_NAME }}
+ REQUEST_SIGNING_CERT: ${{ secrets.AZURE_VAULT_REQUEST_SIGNING_CERT_NAME }}
+ run: |
+ git\.github\scripts\set-up-esrp.ps1
+ - name: Sign package
+ shell: pwsh
+ env:
+ AZURE_AAD_ID: ${{ secrets.AZURE_AAD_ID }}
+ LINUX_KEY_CODE: ${{ secrets.LINUX_KEY_CODE }}
+ LINUX_OP_CODE: ${{ secrets.LINUX_OPERATION_CODE }}
+ run: |
+ python git\.github\scripts\run-esrp-signing.py unsigned $env:LINUX_KEY_CODE $env:LINUX_OP_CODE
+ - name: Upload signed artifact
+ uses: actions/upload-artifact@v2
+ with:
+ name: deb-package-signed
+ path: signed
+ # End build & sign Ubuntu package
+
+ # Validate installers
+ validate-installers:
+ name: Validate installers
+ strategy:
+ matrix:
+ component:
+ - os: ubuntu-latest
+ artifact: deb-package-signed
+ command: git
+ - os: macos-latest
+ artifact: osx-signed-pkg
+ command: git
+ - os: windows-latest
+ artifact: win-installer-x86_64
+ command: $PROGRAMFILES\Git\cmd\git.exe
+ runs-on: ${{ matrix.component.os }}
+ needs: [prereqs, windows_artifacts, osx_publish_dmg, ubuntu_sign-artifacts]
+ steps:
+ - name: Download artifacts
+ uses: actions/download-artifact@v2
+ with:
+ name: ${{ matrix.component.artifact }}
+
+ - name: Install Windows
+ if: contains(matrix.component.os, 'windows')
+ shell: pwsh
+ run: |
+ $exePath = Get-ChildItem -Path ./*.exe | %{$_.FullName}
+ Start-Process -Wait -FilePath "$exePath" -ArgumentList "/SILENT /VERYSILENT /NORESTART /SUPPRESSMSGBOXES /ALLOWDOWNGRADE=1"
+
+ - name: Install Linux
+ if: contains(matrix.component.os, 'ubuntu')
+ run: |
+ debpath=$(find ./*.deb)
+ sudo apt install $debpath
+
+ - name: Install macOS
+ if: contains(matrix.component.os, 'macos')
+ run: |
+ pkgpath=$(find ./*.pkg)
+ sudo installer -pkg $pkgpath -target /
+
+ - name: Validate
+ shell: bash
+ run: |
+ "${{ matrix.component.command }}" --version | sed 's/git version //' >actual
+ echo ${{ needs.prereqs.outputs.tag_version }} >expect
+ cmp expect actual || exit 1
+ # End validate installers
+
+ create-github-release:
+ runs-on: ubuntu-latest
+ needs: [validate-installers]
+ if: |
+ success() ||
+ (needs.ubuntu_sign-artifacts.result == 'skipped' &&
+ needs.osx_publish_dmg.result == 'success' &&
+ needs.windows_artifacts.result == 'success')
+ steps:
+ - name: Download Windows portable installer
+ uses: actions/download-artifact@v2
+ with:
+ name: win-portable-x86_64
+ path: win-portable-x86_64
+ - name: Download Windows x86_64 installer
+ uses: actions/download-artifact@v2
+ with:
+ name: win-installer-x86_64
+ path: win-installer-x86_64
+ - name: Download Mac dmg
+ uses: actions/download-artifact@v2
+ with:
+ name: osx-dmg
+ path: osx-dmg
+ - name: Download Mac pkg
+ uses: actions/download-artifact@v2
+ with:
+ name: osx-signed-pkg
+ path: osx-pkg
+ - name: Download Ubuntu package (signed)
+ if: needs.prereqs.outputs.deb_signable == 'true'
+ uses: actions/download-artifact@v2
+ with:
+ name: deb-package-signed
+ path: deb-package
+ - name: Download Ubuntu package (unsigned)
+ if: needs.prereqs.outputs.deb_signable != 'true'
+ uses: actions/download-artifact@v2
+ with:
+ name: deb-package-unsigned
+ path: deb-package
+ - uses: actions/github-script@v4
+ with:
+ script: |
+ const fs = require('fs');
+ const path = require('path');
+
+ var releaseMetadata = {
+ owner: context.repo.owner,
+ repo: context.repo.repo
+ };
+
+ // Create the release
+ var tagName = "${{ needs.prereqs.outputs.tag_name }}";
+ var createdRelease = await github.repos.createRelease({
+ ...releaseMetadata,
+ draft: true,
+ tag_name: tagName,
+ name: tagName
+ });
+ releaseMetadata.release_id = createdRelease.data.id;
+
+ // Uploads contents of directory to the release created above
+ async function uploadDirectoryToRelease(directory, includeExtensions=[]) {
+ return fs.promises.readdir(directory)
+ .then(async(files) => Promise.all(
+ files.filter(file => {
+ return includeExtensions.length==0 || includeExtensions.includes(path.extname(file).toLowerCase());
+ })
+ .map(async (file) => {
+ var filePath = path.join(directory, file);
+ github.repos.uploadReleaseAsset({
+ ...releaseMetadata,
+ name: file,
+ headers: {
+ "content-length": (await fs.promises.stat(filePath)).size
+ },
+ data: fs.createReadStream(filePath)
+ });
+ }))
+ );
+ }
+
+ await Promise.all([
+ // Upload Windows artifacts
+ uploadDirectoryToRelease('win-installer-x86_64', ['.exe']),
+ uploadDirectoryToRelease('win-portable-x86_64', ['.exe']),
+
+ // Upload Mac artifacts
+ uploadDirectoryToRelease('osx-dmg'),
+ uploadDirectoryToRelease('osx-pkg'),
+
+ // Upload Ubuntu artifacts
+ uploadDirectoryToRelease('deb-package')
+ ]);
diff --git a/.github/workflows/monitor-components.yml b/.github/workflows/monitor-components.yml
deleted file mode 100644
index 9c78d6d3e92445..00000000000000
--- a/.github/workflows/monitor-components.yml
+++ /dev/null
@@ -1,86 +0,0 @@
-name: Monitor component updates
-
-# Git for Windows is a slightly modified subset of MSYS2. Some of its
-# components are maintained by Git for Windows, others by MSYS2. To help
-# keeping the former up to date, this workflow monitors the Atom/RSS feeds
-# and opens new tickets for each new component version.
-
-on:
- schedule:
- - cron: "23 8,11,14,17 * * *"
- workflow_dispatch:
-
-env:
- CHARACTER_LIMIT: 5000
- MAX_AGE: 48h
-
-jobs:
- job:
- runs-on: ubuntu-latest
- environment: monitor-components
- strategy:
- matrix:
- component:
- - label: git
- feed: https://github.com/git/git/tags.atom
- - label: git-lfs
- feed: https://github.com/git-lfs/git-lfs/tags.atom
- - label: gcm-core
- feed: https://github.com/microsoft/git-credential-manager-core/tags.atom
- - label: tig
- feed: https://github.com/jonas/tig/tags.atom
- - label: cygwin
- feed: https://github.com/cygwin/cygwin/releases.atom
- title-pattern: ^(?!.*newlib)
- - label: msys2-runtime-package
- feed: https://github.com/msys2/MSYS2-packages/commits/master/msys2-runtime.atom
- - label: msys2-runtime
- feed: https://github.com/msys2/msys2-runtime/commits/HEAD.atom
- aggregate: true
- - label: openssh
- feed: https://github.com/openssh/openssh-portable/tags.atom
- - label: openssl
- feed: https://github.com/openssl/openssl/tags.atom
- title-pattern: ^(?!.*alpha)
- - label: gnutls
- feed: https://gnutls.org/news.atom
- - label: heimdal
- feed: https://github.com/heimdal/heimdal/tags.atom
- - label: git-sizer
- feed: https://github.com/github/git-sizer/tags.atom
- - label: gitflow
- feed: https://github.com/petervanderdoes/gitflow-avh/tags.atom
- - label: curl
- feed: https://github.com/curl/curl/tags.atom
- - label: libgpg-error
- feed: https://github.com/gpg/libgpg-error/releases.atom
- title-pattern: ^libgpg-error-[0-9\.]*$
- - label: libgcrypt
- feed: https://github.com/gpg/libgcrypt/releases.atom
- title-pattern: ^libgcrypt-[0-9\.]*$
- - label: gpg
- feed: https://github.com/gpg/gnupg/releases.atom
- - label: mintty
- feed: https://github.com/mintty/mintty/releases.atom
- - label: p7zip
- feed: https://sourceforge.net/projects/p7zip/rss?path=/p7zip
- - label: bash
- feed: https://git.savannah.gnu.org/cgit/bash.git/atom/?h=master
- aggregate: true
- - label: perl
- feed: https://github.com/Perl/perl5/tags.atom
- title-pattern: ^(?!.*(5\.[0-9]+[13579]|RC))
- - label: pcre2
- feed: https://github.com/PCRE2Project/pcre2/tags.atom
- fail-fast: false
- steps:
- - uses: git-for-windows/rss-to-issues@v0
- with:
- feed: ${{matrix.component.feed}}
- prefix: "[New ${{matrix.component.label}} version]"
- labels: component-update
- github-token: ${{ secrets.MONITOR_COMPONENTS_PAT }}
- character-limit: ${{ env.CHARACTER_LIMIT }}
- max-age: ${{ env.MAX_AGE }}
- aggregate: ${{matrix.component.aggregate}}
- title-pattern: ${{matrix.component.title-pattern}}
diff --git a/.github/workflows/release-apt-get.yml b/.github/workflows/release-apt-get.yml
new file mode 100644
index 00000000000000..454cdde2a6e50d
--- /dev/null
+++ b/.github/workflows/release-apt-get.yml
@@ -0,0 +1,92 @@
+name: "release-apt-get"
+on:
+ release:
+ types: [released]
+
+ workflow_dispatch:
+ inputs:
+ release:
+ description: 'Release Id'
+ required: true
+ default: 'latest'
+
+jobs:
+ release:
+ runs-on: ubuntu-latest
+ steps:
+ - uses: actions/checkout@v2
+
+ - uses: azure/login@v1
+ with:
+ creds: ${{ secrets.AZURE_CREDENTIALS }}
+
+ - name: "Download Repo Client"
+ env:
+ AZ_SUB: ${{ secrets.AZURE_SUBSCRIPTION }}
+ run: |
+ az storage blob download --subscription "$AZ_SUB" --account-name esrpsigningstorage -c signing-resources -n azure-repoapi-client_2.0.1_amd64.deb -f repoclient.deb --auth-mode login
+
+ - name: "Install Repo Client"
+ run: |
+ sudo apt-get install python3-adal --yes
+ sudo dpkg -i repoclient.deb
+ rm repoclient.deb
+
+ - name: "Configure Repo Client"
+ uses: actions/github-script@v3
+ env:
+ AZURE_AAD_ID: ${{ secrets.AZURE_AAD_ID }}
+ AAD_CLIENT_SECRET: ${{ secrets.AAD_CLIENT_SECRET }}
+ with:
+ script: |
+ for (const key of ['AZURE_AAD_ID', 'AAD_CLIENT_SECRET']) {
+ if (!process.env[key]) throw new Error(`Required env var ${key} is missing!`)
+ }
+ const config = {
+ AADResource: 'https://microsoft.onmicrosoft.com/945999e9-da09-4b5b-878f-b66c414602c0',
+ AADTenant: '72f988bf-86f1-41af-91ab-2d7cd011db47',
+ AADAuthorityUrl: 'https://login.microsoftonline.com',
+ server: 'azure-apt-cat.cloudapp.net',
+ port: '443',
+ AADClientId: process.env.AZURE_AAD_ID,
+ AADClientSecret: process.env.AAD_CLIENT_SECRET,
+ repositoryId: ''
+ }
+ const fs = require('fs')
+ fs.writeFileSync('config.json', JSON.stringify(config, null, 2))
+
+ - name: "Get Release Asset"
+ id: get-asset
+ env:
+ RELEASE: ${{ github.event.inputs.release }}
+ uses: actions/github-script@v3
+ with:
+ github-token: ${{secrets.GITHUB_TOKEN}}
+ script: |
+ const { data } = await github.repos.getRelease({
+ owner: context.repo.owner,
+ repo: context.repo.repo,
+ release_id: process.env.RELEASE || 'latest'
+ })
+ const assets = data.assets.filter(asset => asset.name.endsWith('.deb'))
+ if (assets.length !== 1) {
+ throw new Error(`Unexpected number of .deb assets: ${assets.length}`)
+ }
+ const fs = require('fs')
+ const buffer = await github.repos.getReleaseAsset({
+ headers: {
+ accept: 'application/octet-stream'
+ },
+ owner: context.repo.owner,
+ repo: context.repo.repo,
+ asset_id: assets[0].id
+ })
+ console.log(buffer)
+ fs.writeFileSync(assets[0].name, Buffer.from(buffer.data))
+ core.setOutput('name', assets[0].name)
+
+ - name: "Publish to apt feed"
+ env:
+ RELEASE: ${{ github.event.inputs.release }}
+ run: |
+ repoclient -v v3 -c config.json package add --check --wait 300 ${{steps.get-asset.outputs.name}} -r ${{ secrets.HIRSUTE_REPO_ID }}
diff --git a/.github/workflows/release-homebrew.yml b/.github/workflows/release-homebrew.yml
new file mode 100644
index 00000000000000..a41ff6127f592d
--- /dev/null
+++ b/.github/workflows/release-homebrew.yml
@@ -0,0 +1,30 @@
+name: Update Homebrew Tap
+on:
+ release:
+ types: [released]
+
+jobs:
+ release:
+ runs-on: ubuntu-latest
+ steps:
+ - id: version
+ name: Compute version number
+ run: |
+ echo "::set-output name=result::$(echo $GITHUB_REF | sed -e "s/^refs\/tags\/v//")"
+ - id: hash
+ name: Compute release asset hash
+ uses: mjcheetham/asset-hash@v1
+ with:
+ asset: /git-(.*)\.pkg/
+ hash: sha256
+ token: ${{ secrets.GITHUB_TOKEN }}
+ - name: Update scalar Cask
+ uses: mjcheetham/update-homebrew@v1.1
+ with:
+ token: ${{ secrets.HOMEBREW_TOKEN }}
+ tap: microsoft/git
+ name: microsoft-git
+ type: cask
+ version: ${{ steps.version.outputs.result }}
+ sha256: ${{ steps.hash.outputs.result }}
+ alwaysUsePullRequest: false
diff --git a/.github/workflows/release-winget.yml b/.github/workflows/release-winget.yml
new file mode 100644
index 00000000000000..c1f0c5ab282618
--- /dev/null
+++ b/.github/workflows/release-winget.yml
@@ -0,0 +1,23 @@
+name: "release-winget"
+on:
+ release:
+ types: [released]
+
+jobs:
+ release:
+ runs-on: windows-latest
+ steps:
+ - name: Publish manifest with winget-create
+ run: |
+ # Get correct release asset
+ $github = Get-Content '${{ github.event_path }}' | ConvertFrom-Json
+ $asset = $github.release.assets | Where-Object -Property name -match '64-bit.exe$'
+
+ # Remove 'v' and 'vfs' from the version
+ $github.release.tag_name -match '\d.*'
+ $version = $Matches[0] -replace ".vfs",""
+
+ # Download and run wingetcreate
+ Invoke-WebRequest https://aka.ms/wingetcreate/latest -OutFile wingetcreate.exe
+ .\wingetcreate.exe update Microsoft.Git -u $asset.browser_download_url -v $version -o manifests -t "${{ secrets.WINGET_TOKEN }}" -s
+ shell: powershell
diff --git a/.github/workflows/scalar-functional-tests.yml b/.github/workflows/scalar-functional-tests.yml
new file mode 100644
index 00000000000000..1defa314429186
--- /dev/null
+++ b/.github/workflows/scalar-functional-tests.yml
@@ -0,0 +1,220 @@
+name: Scalar Functional Tests
+
+env:
+ SCALAR_REPOSITORY: microsoft/scalar
+ SCALAR_REF: main
+ DEBUG_WITH_TMATE: false
+ SCALAR_TEST_SKIP_VSTS_INFO: true
+
+on:
+ push:
+ branches: [ vfs-*, tentative/vfs-* ]
+ pull_request:
+ branches: [ vfs-*, features/* ]
+
+jobs:
+ scalar:
+ name: "Scalar Functional Tests"
+
+ strategy:
+ fail-fast: false
+ matrix:
+ # Order by runtime (in descending order)
+ os: [windows-2019, macos-10.15, ubuntu-18.04, ubuntu-20.04]
+ # Scalar.NET used to be tested using `features: [false, experimental]`
+ # But currently, Scalar/C ignores `feature.scalar` altogether, so let's
+ # save some electrons and run only one of them...
+ features: [ignored]
+ exclude:
+ # The built-in FSMonitor is not (yet) supported on Linux
+ - os: ubuntu-18.04
+ features: experimental
+ - os: ubuntu-20.04
+ features: experimental
+ runs-on: ${{ matrix.os }}
+
+ env:
+ BUILD_FRAGMENT: bin/Release/netcoreapp3.1
+ GIT_FORCE_UNTRACKED_CACHE: 1
+
+ steps:
+ - name: Check out Git's source code
+ uses: actions/checkout@v2
+
+ - name: Setup build tools on Windows
+ if: runner.os == 'Windows'
+ uses: git-for-windows/setup-git-for-windows-sdk@v1
+
+ - name: Provide a minimal `install` on Windows
+ if: runner.os == 'Windows'
+ shell: bash
+ run: |
+ test -x /usr/bin/install ||
+ tr % '\t' >/usr/bin/install <<-\EOF
+ #!/bin/sh
+
+ cmd=cp
+ while test $# != 0
+ do
+ %case "$1" in
+ %-d) cmd="mkdir -p";;
+ %-m) shift;; # ignore mode
+ %*) break;;
+ %esac
+ %shift
+ done
+
+ exec $cmd "$@"
+ EOF
+
+ - name: Install build dependencies for Git (Linux)
+ if: runner.os == 'Linux'
+ run: |
+ sudo apt-get update
+ sudo apt-get -q -y install libssl-dev libcurl4-openssl-dev gettext
+
+ - name: Build and install Git
+ shell: bash
+ env:
+ NO_TCLTK: Yup
+ run: |
+ # We do require a VFS version
+ def_ver="$(sed -n 's/DEF_VER=\(.*vfs.*\)/\1/p' GIT-VERSION-GEN)"
+ test -n "$def_ver"
+
+ # Ensure that `git version` reflects DEF_VER
+ case "$(git describe --match "v[0-9]*vfs*" HEAD)" in
+ ${def_ver%%.vfs.*}.vfs.*) ;; # okay, we can use this
+ *) git -c user.name=ci -c user.email=ci@github tag -m for-testing ${def_ver}.NNN.g$(git rev-parse --short HEAD);;
+ esac
+
+ SUDO=
+ extra=
+ case "${{ runner.os }}" in
+ Windows)
+ extra=DESTDIR=/c/Progra~1/Git
+ cygpath -aw "/c/Program Files/Git/cmd" >>$GITHUB_PATH
+ ;;
+ Linux)
+ SUDO=sudo
+ extra=prefix=/usr
+ ;;
+ macOS)
+ SUDO=sudo
+ extra=prefix=/usr/local
+ ;;
+ esac
+
+ $SUDO make -j5 $extra install
+
+ - name: Ensure that we use the built Git and Scalar
+ shell: bash
+ run: |
+ type -p git
+ git version
+ case "$(git version)" in *.vfs.*) echo Good;; *) exit 1;; esac
+ type -p scalar
+ scalar version
+ case "$(scalar version 2>&1)" in *.vfs.*) echo Good;; *) exit 1;; esac
+
+ - name: Check out Scalar's source code
+ uses: actions/checkout@v2
+ with:
+ fetch-depth: 0 # Indicate full history so Nerdbank.GitVersioning works.
+ path: scalar
+ repository: ${{ env.SCALAR_REPOSITORY }}
+ ref: ${{ env.SCALAR_REF }}
+
+ - name: Setup .NET Core
+ uses: actions/setup-dotnet@v1
+ with:
+ dotnet-version: 3.1.302
+
+ - name: Install dependencies
+ run: dotnet restore
+ working-directory: scalar
+ env:
+ DOTNET_NOLOGO: 1
+
+ - name: Build
+ working-directory: scalar
+ run: dotnet build --configuration Release --no-restore -p:UseAppHost=true # Force generation of executable on macOS.
+
+ - name: Setup platform (Linux)
+ if: runner.os == 'Linux'
+ run: |
+ echo "BUILD_PLATFORM=${{ runner.os }}" >>$GITHUB_ENV
+ echo "TRACE2_BASENAME=Trace2.${{ github.run_id }}__${{ github.run_number }}__${{ matrix.os }}__${{ matrix.features }}" >>$GITHUB_ENV
+
+ - name: Setup platform (Mac)
+ if: runner.os == 'macOS'
+ run: |
+ echo 'BUILD_PLATFORM=Mac' >>$GITHUB_ENV
+ echo "TRACE2_BASENAME=Trace2.${{ github.run_id }}__${{ github.run_number }}__${{ matrix.os }}__${{ matrix.features }}" >>$GITHUB_ENV
+
+ - name: Setup platform (Windows)
+ if: runner.os == 'Windows'
+ run: |
+ echo "BUILD_PLATFORM=${{ runner.os }}" >>$env:GITHUB_ENV
+ echo 'BUILD_FILE_EXT=.exe' >>$env:GITHUB_ENV
+ echo "TRACE2_BASENAME=Trace2.${{ github.run_id }}__${{ github.run_number }}__${{ matrix.os }}__${{ matrix.features }}" >>$env:GITHUB_ENV
+
+ - name: Configure feature.scalar
+ run: git config --global feature.scalar ${{ matrix.features }}
+
+ - id: functional_test
+ name: Functional test
+ timeout-minutes: 60
+ working-directory: scalar
+ shell: bash
+ run: |
+ export GIT_TRACE2_EVENT="$PWD/$TRACE2_BASENAME/Event"
+ export GIT_TRACE2_PERF="$PWD/$TRACE2_BASENAME/Perf"
+ export GIT_TRACE2_EVENT_BRIEF=true
+ export GIT_TRACE2_PERF_BRIEF=true
+ mkdir -p "$TRACE2_BASENAME"
+ mkdir -p "$TRACE2_BASENAME/Event"
+ mkdir -p "$TRACE2_BASENAME/Perf"
+ git version --build-options
+ cd ../out
+ Scalar.FunctionalTests/$BUILD_FRAGMENT/Scalar.FunctionalTests$BUILD_FILE_EXT --test-scalar-on-path --test-git-on-path --timeout=300000 --full-suite
+
+ - name: Force-stop FSMonitor daemons and Git processes (Windows)
+ if: runner.os == 'Windows' && (success() || failure())
+ shell: bash
+ run: |
+ set -x
+ wmic process get CommandLine,ExecutablePath,HandleCount,Name,ParentProcessID,ProcessID
+ wmic process where "CommandLine Like '%fsmonitor--daemon %run'" delete
+ wmic process where "ExecutablePath Like '%git.exe'" delete
+
+ - id: trace2_zip_unix
+ if: runner.os != 'Windows' && ( success() || failure() ) && ( steps.functional_test.conclusion == 'success' || steps.functional_test.conclusion == 'failure' )
+ name: Zip Trace2 Logs (Unix)
+ shell: bash
+ working-directory: scalar
+ run: zip -q -r $TRACE2_BASENAME.zip $TRACE2_BASENAME/
+
+ - id: trace2_zip_windows
+ if: runner.os == 'Windows' && ( success() || failure() ) && ( steps.functional_test.conclusion == 'success' || steps.functional_test.conclusion == 'failure' )
+ name: Zip Trace2 Logs (Windows)
+ working-directory: scalar
+ run: Compress-Archive -DestinationPath ${{ env.TRACE2_BASENAME }}.zip -Path ${{ env.TRACE2_BASENAME }}
+
+ - name: Archive Trace2 Logs
+ if: ( success() || failure() ) && ( steps.trace2_zip_unix.conclusion == 'success' || steps.trace2_zip_windows.conclusion == 'success' )
+ uses: actions/upload-artifact@v2
+ with:
+ name: ${{ env.TRACE2_BASENAME }}.zip
+ path: scalar/${{ env.TRACE2_BASENAME }}.zip
+ retention-days: 3
+
+ # The GitHub Action `action-tmate` allows developers to connect to the running agent
+ # using SSH (it will be a `tmux` session; on Windows agents it will be inside the MSYS2
+ # environment in `C:\msys64`, therefore it can be slightly tricky to interact with
+ # Git for Windows, which runs a slightly incompatible MSYS2 runtime).
+ - name: action-tmate
+ if: env.DEBUG_WITH_TMATE == 'true' && failure()
+ uses: mxschmitt/action-tmate@v3
+ with:
+ limit-access-to-actor: true
diff --git a/.gitignore b/.gitignore
index 6ded10067a9e97..3fa9955cd31e0f 100644
--- a/.gitignore
+++ b/.gitignore
@@ -3,6 +3,7 @@
/fuzz-pack-headers
/fuzz-pack-idx
/GIT-BUILD-OPTIONS
+/GIT-BUILT-FROM-COMMIT
/GIT-CFLAGS
/GIT-LDFLAGS
/GIT-PREFIX
@@ -77,6 +78,7 @@
/git-gc
/git-get-tar-commit-id
/git-grep
+/git-gvfs-helper
/git-hash-object
/git-help
/git-hook
@@ -173,6 +175,7 @@
/git-unpack-file
/git-unpack-objects
/git-update-index
+/git-update-microsoft-git
/git-update-ref
/git-update-server-info
/git-upload-archive
diff --git a/BRANCHES.md b/BRANCHES.md
new file mode 100644
index 00000000000000..364158375e7d55
--- /dev/null
+++ b/BRANCHES.md
@@ -0,0 +1,59 @@
+Branches used in this repo
+==========================
+
+This document explains the branching structure that we use in the VFSForGit repository, as well as the forking strategy that we have adopted for contributing.
+
+Repo Branches
+-------------
+
+1. `vfs-#`
+
+   These branches are used to track the specific versions that match Git for Windows, with the VFSForGit-specific patches on top. When a new version of Git for Windows is released, the VFSForGit patches will be rebased onto that Windows version and a new vfs-# branch created to serve as the target for pull requests.
+
+ #### Examples
+
+ ```
+ vfs-2.27.0
+ vfs-2.30.0
+ ```
+
+ The versions of git for VFSForGit are based on the Git for Windows versions. v2.20.0.vfs.1 will correspond with the v2.20.0.windows.1 with the VFSForGit specific patches applied to the windows version.
+
+2. `vfs-#-exp`
+
+ These branches are for releasing experimental features to early adopters. They
+ should contain everything within the corresponding `vfs-#` branch; if the base
+ branch updates, then merge into the `vfs-#-exp` branch as well.
+
+Tags
+----
+
+We are using annotated tags to build the version number for git. The build will look back through the commit history to find the first tag matching `v[0-9]*vfs*` and build the git version number using that tag.
+
+Full releases are of the form `v2.XX.Y.vfs.Z.W` where `v2.XX.Y` comes from the
+upstream version and `Z.W` are custom updates within our fork. Specifically,
+the `.Z` value represents the "compatibility level" with VFS for Git. Only
+increase this version when making a breaking change with a released version
+of VFS for Git. The `.W` version is used for minor updates between major
+versions.
+
+Experimental releases are of the form `v2.XX.Y.vfs.Z.W.exp`. The `.exp`
+suffix indicates that experimental features are available. The rest of the
+version string comes from the full release tag. These versions will only
+be made available as pre-releases on the releases page, never a full release.
+
+Forking
+-------
+
+A personal fork of this repository and a branch in that repository should be used for development.
+
+These branches should be based on the latest vfs-# branch. If you have work-in-progress pull requests based on a previous version branch when a new version branch is created, you will need to rebase your patches onto the new branch to get them into the latest version.
+
+#### Example
+
+```
+git clone
+git remote add ms https://github.com/Microsoft/git.git
+git checkout -b my-changes ms/vfs-2.20.0 --no-track
+git push -fu origin HEAD
+```
diff --git a/Documentation/config.txt b/Documentation/config.txt
index 98f697b52d47ca..6e38ece0f82ebd 100644
--- a/Documentation/config.txt
+++ b/Documentation/config.txt
@@ -437,6 +437,8 @@ include::config/gui.txt[]
include::config/guitool.txt[]
+include::config/gvfs.txt[]
+
include::config/help.txt[]
include::config/http.txt[]
diff --git a/Documentation/config/core.txt b/Documentation/config/core.txt
index d52fc67af13efc..63418376f82031 100644
--- a/Documentation/config/core.txt
+++ b/Documentation/config/core.txt
@@ -111,6 +111,14 @@ Version 2 uses an opaque string so that the monitor can return
something that can be used to determine what files have changed
without race conditions.
+core.virtualFilesystem::
+ If set, the value of this variable is used as a command which
+ will identify all files and directories that are present in
+ the working directory. Git will only track and update files
+ listed in the virtual file system. Using the virtual file system
+ will supersede the sparse-checkout settings which will be ignored.
+ See the "virtual file system" section of linkgit:githooks[5].
+
core.trustctime::
If false, the ctime differences between the index and the
working tree are ignored; useful when the inode change time
@@ -728,6 +736,55 @@ core.multiPackIndex::
single index. See linkgit:git-multi-pack-index[1] for more
information. Defaults to true.
+core.gvfs::
+ Enable the features needed for GVFS. This value can be set to true
+ to indicate all features should be turned on or the bit values listed
+ below can be used to turn on specific features.
++
+--
+ GVFS_SKIP_SHA_ON_INDEX::
+ Bit value 1
+ Disables the calculation of the sha when writing the index
+ GVFS_MISSING_OK::
+ Bit value 4
+ Normally git write-tree ensures that the objects referenced by the
+ directory exist in the object database. This option disables this check.
+ GVFS_NO_DELETE_OUTSIDE_SPARSECHECKOUT::
+ Bit value 8
+ When marking entries to remove from the index and the working
+ directory this option will take into account what the
+ skip-worktree bit was set to so that if the entry has the
+ skip-worktree bit set it will not be removed from the working
+ directory. This will allow virtualized working directories to
+ detect the change to HEAD and use the new commit tree to show
+ the files that are in the working directory.
+ GVFS_FETCH_SKIP_REACHABILITY_AND_UPLOADPACK::
+ Bit value 16
+ While performing a fetch with a virtual file system we know
+ that there will be missing objects and we don't want to download
+ them just because of the reachability of the commits. We also
+ don't want to download a pack file with commits, trees, and blobs
+ since these will be downloaded on demand. This flag will skip the
+ checks on the reachability of objects during a fetch as well as
+ the upload pack so that extraneous objects don't get downloaded.
+ GVFS_BLOCK_FILTERS_AND_EOL_CONVERSIONS::
+ Bit value 64
+ With a virtual file system we only know the file size before any
+ CRLF or smudge/clean filters processing is done on the client.
+ To prevent file corruption due to truncation or expansion with
+ garbage at the end, these filters must not run when the file
+ is first accessed and brought down to the client. Git.exe can't
+ currently tell the first access vs subsequent accesses so this
+ flag just blocks them from occurring at all.
+ GVFS_PREFETCH_DURING_FETCH::
+ Bit value 128
+ While performing a `git fetch` command, use the gvfs-helper to
+ perform a "prefetch" of commits and trees.
+--
+
+core.useGvfsHelper::
+ TODO
+
core.sparseCheckout::
Enable "sparse checkout" feature. See linkgit:git-sparse-checkout[1]
for more information.
@@ -749,3 +806,12 @@ core.abbrev::
If set to "no", no abbreviation is made and the object names
are shown in their full length.
The minimum length is 4.
+
+core.configWriteLockTimeoutMS::
+ When processes try to write to the config concurrently, it is likely
+ that one process "wins" and the other process(es) fail to lock the
+ config file. By configuring a timeout larger than zero, Git can be
+ told to try to lock the config again a couple times within the
+	specified timeout. If the timeout is configured to zero (which is the
+ default), Git will fail immediately when the config is already
+ locked.
diff --git a/Documentation/config/gvfs.txt b/Documentation/config/gvfs.txt
new file mode 100644
index 00000000000000..6ab221ded36c91
--- /dev/null
+++ b/Documentation/config/gvfs.txt
@@ -0,0 +1,5 @@
+gvfs.cache-server::
+ TODO
+
+gvfs.sharedcache::
+ TODO
diff --git a/Documentation/config/index.txt b/Documentation/config/index.txt
index 75f3a2d1054146..c65da20a93136f 100644
--- a/Documentation/config/index.txt
+++ b/Documentation/config/index.txt
@@ -1,3 +1,9 @@
+index.deleteSparseDirectories::
+ When enabled, the cone mode sparse-checkout feature will delete
+ directories that are outside of the sparse-checkout cone, unless
+ such a directory contains an untracked, non-ignored file. Defaults
+ to true.
+
index.recordEndOfIndexEntries::
Specifies whether the index file should include an "End Of Index
Entry" section. This reduces index load time on multiprocessor
diff --git a/Documentation/config/status.txt b/Documentation/config/status.txt
index 0fc704ab80b223..af043d7e26f269 100644
--- a/Documentation/config/status.txt
+++ b/Documentation/config/status.txt
@@ -75,3 +75,25 @@ status.submoduleSummary::
the --ignore-submodules=dirty command-line option or the 'git
submodule summary' command, which shows a similar output but does
not honor these settings.
+
+status.deserializePath::
+ EXPERIMENTAL, Pathname to a file containing cached status results
+ generated by `--serialize`. This will be overridden by
+ `--deserialize=` on the command line. If the cache file is
+	invalid or stale, git will fall back and compute status normally.
+
+status.deserializeWait::
+ EXPERIMENTAL, Specifies what `git status --deserialize` should do
+ if the serialization cache file is stale and whether it should
+	fall back and compute status normally. This will be overridden by
+ `--deserialize-wait=` on the command line.
++
+--
+* `fail` - cause git to exit with an error when the status cache file
+is stale; this is intended for testing and debugging.
+* `block` - cause git to spin and periodically retry the cache file
+every 100 ms; this is intended to help coordinate with another git
+instance concurrently computing the cache file.
+* `no` - to immediately fall back if cache file is stale. This is the default.
+* `` - time (in tenths of a second) to spin and retry.
+--
diff --git a/Documentation/git-status.txt b/Documentation/git-status.txt
index 54a4b29b473cc4..f6196b47bb4bf4 100644
--- a/Documentation/git-status.txt
+++ b/Documentation/git-status.txt
@@ -149,6 +149,21 @@ ignored, then the directory is not shown, but all contents are shown.
threshold.
See also linkgit:git-diff[1] `--find-renames`.
+--serialize[=]::
+ (EXPERIMENTAL) Serialize raw status results to a file or stdout
+ in a format suitable for use by `--deserialize`. If a path is
+ given, serialize data will be written to that path *and* normal
+ status output will be written to stdout. If path is omitted,
+ only binary serialization data will be written to stdout.
+
+--deserialize[=]::
+ (EXPERIMENTAL) Deserialize raw status results from a file or
+ stdin rather than scanning the worktree. If `` is omitted
+ and `status.deserializePath` is unset, input is read from stdin.
+--no-deserialize::
+ (EXPERIMENTAL) Disable implicit deserialization of status results
+ from the value of `status.deserializePath`.
+
...::
See the 'pathspec' entry in linkgit:gitglossary[7].
@@ -421,6 +436,26 @@ quoted as explained for the configuration variable `core.quotePath`
(see linkgit:git-config[1]).
+SERIALIZATION and DESERIALIZATION (EXPERIMENTAL)
+------------------------------------------------
+
+The `--serialize` option allows git to cache the result of a
+possibly time-consuming status scan to a binary file. A local
+service/daemon watching file system events could use this to
+periodically pre-compute a fresh status result.
+
+Interactive users could then use `--deserialize` to simply
+(and immediately) print the last-known-good result without
+waiting for the status scan.
+
+The binary serialization file format includes some worktree state
+information allowing `--deserialize` to reject the cached data
+and force a normal status scan if, for example, the commit, branch,
+or status modes/options change. The format cannot, however, indicate
+when the cached data is otherwise stale -- that coordination belongs
+to the task driving the serializations.
+
+
CONFIGURATION
-------------
diff --git a/Documentation/git-update-microsoft-git.txt b/Documentation/git-update-microsoft-git.txt
new file mode 100644
index 00000000000000..724bfc172f8ab7
--- /dev/null
+++ b/Documentation/git-update-microsoft-git.txt
@@ -0,0 +1,24 @@
+git-update-microsoft-git(1)
+===========================
+
+NAME
+----
+git-update-microsoft-git - Update the installed version of Git
+
+
+SYNOPSIS
+--------
+[verse]
+'git update-microsoft-git'
+
+DESCRIPTION
+-----------
+This version of Git is based on the Microsoft fork of Git, which
+has custom capabilities focused on supporting monorepos. This
+command checks for the latest release of that fork and installs
+it on your machine.
+
+
+GIT
+---
+Part of the linkgit:git[1] suite
diff --git a/Documentation/githooks.txt b/Documentation/githooks.txt
index a16e62bc8c8ea7..135f0bd34f232b 100644
--- a/Documentation/githooks.txt
+++ b/Documentation/githooks.txt
@@ -698,6 +698,26 @@ and "0" meaning they were not.
Only one parameter should be set to "1" when the hook runs. The hook
running passing "1", "1" should not be possible.
+virtualFilesystem
+~~~~~~~~~~~~~~~~~~
+
+"Virtual File System" allows populating the working directory sparsely.
+The projection data is typically automatically generated by an external
+process. Git will limit what files it checks for changes as well as which
+directories are checked for untracked files based on the path names given.
+Git will also only update those files listed in the projection.
+
+The hook is invoked when the configuration option core.virtualFilesystem
+is set. It takes one argument, a version (currently 1).
+
+The hook should output to stdout the list of all files in the working
+directory that git should track. The paths are relative to the root
+of the working directory and are separated by a single NUL. Full paths
+('dir1/a.txt') as well as directories are supported (i.e. 'dir1/').
+
+The exit status determines whether git will use the data from the
+hook. On error, git will abort the command with an error message.
+
SEE ALSO
--------
linkgit:git-hook[1]
diff --git a/Documentation/scalar.txt b/Documentation/scalar.txt
index f33436c7f65ff9..0424d020d9b9f4 100644
--- a/Documentation/scalar.txt
+++ b/Documentation/scalar.txt
@@ -8,7 +8,9 @@ scalar - A tool for managing large Git repositories
SYNOPSIS
--------
[verse]
-scalar clone [--single-branch] [--branch ] [--full-clone] []
+scalar clone [--single-branch] [--branch ] [--full-clone]
+ [--local-cache-path ] [--cache-server-url ]
+ []
scalar list
scalar register []
scalar unregister []
@@ -16,6 +18,7 @@ scalar run ( all | config | commit-graph | fetch | loose-objects | pack-files )
scalar reconfigure [ --all | ]
scalar diagnose []
scalar delete
+scalar cache-server ( --get | --set | --list [] ) []
DESCRIPTION
-----------
@@ -84,6 +87,17 @@ cloning. If the HEAD at the remote did not point at any branch when
A sparse-checkout is initialized by default. This behavior can be
turned off via `--full-clone`.
+--local-cache-path ::
+	Override the path to the local cache root directory; pre-fetched objects
+ are stored into a repository-dependent subdirectory of that path.
++
+The default is `:\.scalarCache` on Windows (on the same drive as the
+clone), and `~/.scalarCache` on macOS.
+
+--cache-server-url ::
+ Retrieve missing objects from the specified remote, which is expected to
+ understand the GVFS protocol.
+
List
~~~~
@@ -157,6 +171,27 @@ delete ::
This subcommand lets you delete an existing Scalar enlistment from your
local file system, unregistering the repository.
+Cache-server
+~~~~~~~~~~~~
+
+cache-server ( --get | --set | --list [] ) []::
+ This command lets you query or set the GVFS-enabled cache server used
+ to fetch missing objects.
+
+--get::
+ This is the default command mode: query the currently-configured cache
+ server URL, if any.
+
+--list::
+ Access the `gvfs/info` endpoint of the specified remote (default:
+ `origin`) to figure out which cache servers are available, if any.
++
+In contrast to the `--get` command mode (which only accesses the local
+repository), this command mode triggers a request via the network that
+potentially requires authentication. If authentication is required, the
+configured credential helper is employed (see linkgit:git-credential[1]
+for details).
+
SEE ALSO
--------
linkgit:git-clone[1], linkgit:git-maintenance[1].
diff --git a/Documentation/technical/read-object-protocol.txt b/Documentation/technical/read-object-protocol.txt
new file mode 100644
index 00000000000000..a893b46e7c28a9
--- /dev/null
+++ b/Documentation/technical/read-object-protocol.txt
@@ -0,0 +1,102 @@
+Read Object Process
+^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+The read-object process enables Git to read all missing blobs with a
+single process invocation for the entire life of a single Git command.
+This is achieved by using a packet format (pkt-line, see technical/
+protocol-common.txt) based protocol over standard input and standard
+output as follows. All packets, except for the "*CONTENT" packets and
+the "0000" flush packet, are considered text and therefore are
+terminated by a LF.
+
+Git starts the process when it encounters the first missing object that
+needs to be retrieved. After the process is started, Git sends a welcome
+message ("git-read-object-client"), a list of supported protocol version
+numbers, and a flush packet. Git expects to read a welcome response
+message ("git-read-object-server"), exactly one protocol version number
+from the previously sent list, and a flush packet. All further
+communication will be based on the selected version.
+
+The remaining protocol description below documents "version=1". Please
+note that "version=42" in the example below does not exist and is only
+there to illustrate how the protocol would look with more than one
+version.
+
+After the version negotiation Git sends a list of all capabilities that
+it supports and a flush packet. Git expects to read a list of desired
+capabilities, which must be a subset of the supported capabilities list,
+and a flush packet as response:
+------------------------
+packet: git> git-read-object-client
+packet: git> version=1
+packet: git> version=42
+packet: git> 0000
+packet: git< git-read-object-server
+packet: git< version=1
+packet: git< 0000
+packet: git> capability=get
+packet: git> capability=have
+packet: git> capability=put
+packet: git> capability=not-yet-invented
+packet: git> 0000
+packet: git< capability=get
+packet: git< 0000
+------------------------
+The only supported capability in version 1 is "get".
+
+Afterwards Git sends a list of "key=value" pairs terminated with a flush
+packet. The list will contain at least the command (based on the
+supported capabilities) and the sha1 of the object to retrieve. Please
+note that the process must not send any response before it has received the
+final flush packet.
+
+When the process receives the "get" command, it should make the requested
+object available in the git object store and then return success. Git will
+then check the object store again and this time find it and proceed.
+------------------------
+packet: git> command=get
+packet: git> sha1=0a214a649e1b3d5011e14a3dc227753f2bd2be05
+packet: git> 0000
+------------------------
+
+The process is expected to respond with a list of "key=value" pairs
+terminated with a flush packet. If the process does not experience
+problems then the list must contain a "success" status.
+------------------------
+packet: git< status=success
+packet: git< 0000
+------------------------
+
+In case the process cannot or does not want to process the content, it
+is expected to respond with an "error" status.
+------------------------
+packet: git< status=error
+packet: git< 0000
+------------------------
+
+In case the process cannot or does not want to process the content as
+well as any future content for the lifetime of the Git process, then it
+is expected to respond with an "abort" status at any point in the
+protocol.
+------------------------
+packet: git< status=abort
+packet: git< 0000
+------------------------
+
+Git neither stops nor restarts the process in case the "error"/"abort"
+status is set.
+
+If the process dies during the communication or does not adhere to the
+protocol then Git will stop the process and restart it with the next
+object that needs to be processed.
+
+After the read-object process has processed an object it is expected to
+wait for the next "key=value" list containing a command. Git will close
+the command pipe on exit. The process is expected to detect EOF and exit
+gracefully on its own. Git will wait until the process has stopped.
+
+A long running read-object process demo implementation can be found in
+`contrib/long-running-read-object/example.pl` located in the Git core
+repository. If you develop your own long running process then the
+`GIT_TRACE_PACKET` environment variable can be very helpful for
+debugging (see linkgit:git[1]).
diff --git a/Documentation/technical/status-serialization-format.txt b/Documentation/technical/status-serialization-format.txt
new file mode 100644
index 00000000000000..475ae814495581
--- /dev/null
+++ b/Documentation/technical/status-serialization-format.txt
@@ -0,0 +1,107 @@
+Git status serialization format
+===============================
+
+Git status serialization enables git to dump the results of a status scan
+to a binary file. This file can then be loaded by later status invocations
+to print the cached status results.
+
+The file contains the essential fields from:
+() the index
+() the "struct wt_status" for the overall results
+() the contents of "struct wt_status_change_data" for tracked changed files
+() the list of untracked and ignored files
+
+Version 1 Format:
+=================
+
+The V1 file begins with a required header section followed by optional
+sections for each type of item (changed, untracked, ignored). Individual
+item sections are only present if necessary. Each item section begins
+with an item-type header with the number of items in the section.
+
+Each "line" in the format is encoded using pkt-line with a final LF.
+Flush packets are used to terminate sections.
+
+-----------------
+PKT-LINE("version" SP "1")
+
+[]
+[]
+[]
+-----------------
+
+
+V1 Header
+---------
+
+The v1-header-section fields are taken directly from "struct wt_status".
+Each field is printed on a separate pkt-line. Lines for NULL string
+values are omitted. All integers are printed with "%d". OIDs are
+printed in hex.
+
+v1-header-section =
+
+ PKT-LINE()
+
+v1-index-headers = PKT-LINE("index_mtime" SP SP LF)
+
+v1-wt-status-headers = PKT-LINE("is_initial" SP LF)
+ [ PKT-LINE("branch" SP LF) ]
+ [ PKT-LINE("reference" SP LF) ]
+ PKT-LINE("show_ignored_files" SP LF)
+ PKT-LINE("show_untracked_files" SP LF)
+ PKT-LINE("show_ignored_directory" SP LF)
+ [ PKT-LINE("ignore_submodule_arg" SP LF) ]
+ PKT-LINE("detect_rename" SP LF)
+ PKT-LINE("rename_score" SP LF)
+ PKT-LINE("rename_limit" SP LF)
+ PKT-LINE("detect_break" SP LF)
+ PKT-LINE("sha1_commit" SP LF)
+ PKT-LINE("committable" SP LF)
+ PKT-LINE("workdir_dirty" SP LF)
+
+
+V1 Changed Items
+----------------
+
+The v1-changed-item-section lists all of the changed items with one
+item per pkt-line. Each pkt-line contains: a binary block of data
+from "struct wt_status_serialize_data_fixed" in a fixed header where
+integers are in network byte order and OIDs are in raw (non-hex) form.
+This is followed by one or two raw pathnames (not c-quoted) with NUL
+terminators (both NULs are always present even if there is no rename).
+
+v1-changed-item-section = PKT-LINE("changed" SP LF)
+ [ PKT-LINE( LF) ]+
+ PKT-LINE()
+
+changed_item =
+
+
+
+
+
+
+
+
+
+
+
+ NUL
+ [ ]
+ NUL
+
+
+V1 Untracked and Ignored Items
+------------------------------
+
+These sections are simple lists of pathnames. They ARE NOT
+c-quoted.
+
+v1-untracked-item-section = PKT-LINE("untracked" SP LF)
+ [ PKT-LINE( LF) ]+
+ PKT-LINE()
+
+v1-ignored-item-section = PKT-LINE("ignored" SP LF)
+ [ PKT-LINE( LF) ]+
+ PKT-LINE()
diff --git a/GIT-VERSION-GEN b/GIT-VERSION-GEN
index a3eb6eca7abfb5..902d11c2821a87 100755
--- a/GIT-VERSION-GEN
+++ b/GIT-VERSION-GEN
@@ -1,7 +1,7 @@
#!/bin/sh
GVF=GIT-VERSION-FILE
-DEF_VER=v2.38.0
+DEF_VER=v2.38.0.vfs.0.0
LF='
'
@@ -12,10 +12,15 @@ if test -f version
then
VN=$(cat version) || VN="$DEF_VER"
elif test -d ${GIT_DIR:-.git} -o -f .git &&
- VN=$(git describe --match "v[0-9]*" HEAD 2>/dev/null) &&
+ VN=$(git describe --match "v[0-9]*vfs*" HEAD 2>/dev/null) &&
case "$VN" in
*$LF*) (exit 1) ;;
v[0-9]*)
+ if test "${VN%%.vfs.*}" != "${DEF_VER%%.vfs.*}"
+ then
+ echo "Found version $VN, which is not based on $DEF_VER" >&2
+ exit 1
+ fi
git update-index -q --refresh
test -z "$(git diff-index --name-only HEAD --)" ||
VN="$VN-dirty" ;;
diff --git a/Makefile b/Makefile
index 5d0de405ae0ebc..b8548e77d1cf78 100644
--- a/Makefile
+++ b/Makefile
@@ -402,6 +402,10 @@ include shared.mak
# Define GIT_USER_AGENT if you want to change how git identifies itself during
# network interactions. The default is "git/$(GIT_VERSION)".
#
+# Define GIT_BUILT_FROM_COMMIT if you want to force the commit hash identified
+# in 'git version --build-options' to a specific value. The default is the
+# commit hash of the current HEAD.
+#
# Define DEFAULT_HELP_FORMAT to "man", "info" or "html"
# (defaults to "man") if you want to have a different default when
# "git help" is called without a parameter specifying the format.
@@ -973,6 +977,8 @@ LIB_OBJS += gettext.o
LIB_OBJS += gpg-interface.o
LIB_OBJS += graph.o
LIB_OBJS += grep.o
+LIB_OBJS += gvfs.o
+LIB_OBJS += gvfs-helper-client.o
LIB_OBJS += hash-lookup.o
LIB_OBJS += hashmap.o
LIB_OBJS += help.o
@@ -1120,6 +1126,7 @@ LIB_OBJS += utf8.o
LIB_OBJS += varint.o
LIB_OBJS += version.o
LIB_OBJS += versioncmp.o
+LIB_OBJS += virtualfilesystem.o
LIB_OBJS += walker.o
LIB_OBJS += wildmatch.o
LIB_OBJS += worktree.o
@@ -1127,6 +1134,8 @@ LIB_OBJS += wrapper.o
LIB_OBJS += write-or-die.o
LIB_OBJS += ws.o
LIB_OBJS += wt-status.o
+LIB_OBJS += wt-status-deserialize.o
+LIB_OBJS += wt-status-serialize.o
LIB_OBJS += xdiff-interface.o
LIB_OBJS += zlib.o
@@ -1243,6 +1252,7 @@ BUILTIN_OBJS += builtin/tag.o
BUILTIN_OBJS += builtin/unpack-file.o
BUILTIN_OBJS += builtin/unpack-objects.o
BUILTIN_OBJS += builtin/update-index.o
+BUILTIN_OBJS += builtin/update-microsoft-git.o
BUILTIN_OBJS += builtin/update-ref.o
BUILTIN_OBJS += builtin/update-server-info.o
BUILTIN_OBJS += builtin/upload-archive.o
@@ -1503,6 +1513,9 @@ else
endif
BASIC_CFLAGS += $(CURL_CFLAGS)
+ PROGRAM_OBJS += gvfs-helper.o
+ TEST_PROGRAMS_NEED_X += test-gvfs-protocol
+
REMOTE_CURL_PRIMARY = git-remote-http$X
REMOTE_CURL_ALIASES = git-remote-https$X git-remote-ftp$X git-remote-ftps$X
REMOTE_CURL_NAMES = $(REMOTE_CURL_PRIMARY) $(REMOTE_CURL_ALIASES)
@@ -2223,6 +2236,15 @@ GIT-USER-AGENT: FORCE
echo '$(GIT_USER_AGENT_SQ)' >GIT-USER-AGENT; \
fi
+GIT_BUILT_FROM_COMMIT = $(eval GIT_BUILT_FROM_COMMIT := $$(shell \
+ GIT_CEILING_DIRECTORIES="$$(CURDIR)/.." \
+ git rev-parse -q --verify HEAD 2>/dev/null))$(GIT_BUILT_FROM_COMMIT)
+GIT-BUILT-FROM-COMMIT: FORCE
+ @if test x'$(GIT_BUILT_FROM_COMMIT)' != x"`cat GIT-BUILT-FROM-COMMIT 2>/dev/null`" ; then \
+ echo >&2 " * new built-from commit"; \
+ echo '$(GIT_BUILT_FROM_COMMIT)' >GIT-BUILT-FROM-COMMIT; \
+ fi
+
ifdef DEFAULT_HELP_FORMAT
BASIC_CFLAGS += -DDEFAULT_HELP_FORMAT='"$(DEFAULT_HELP_FORMAT)"'
endif
@@ -2337,13 +2359,11 @@ PAGER_ENV_CQ_SQ = $(subst ','\'',$(PAGER_ENV_CQ))
pager.sp pager.s pager.o: EXTRA_CPPFLAGS = \
-DPAGER_ENV='$(PAGER_ENV_CQ_SQ)'
-version.sp version.s version.o: GIT-VERSION-FILE GIT-USER-AGENT
+version.sp version.s version.o: GIT-VERSION-FILE GIT-USER-AGENT GIT-BUILT-FROM-COMMIT
version.sp version.s version.o: EXTRA_CPPFLAGS = \
'-DGIT_VERSION="$(GIT_VERSION)"' \
'-DGIT_USER_AGENT=$(GIT_USER_AGENT_CQ_SQ)' \
- '-DGIT_BUILT_FROM_COMMIT="$(shell \
- GIT_CEILING_DIRECTORIES="$(CURDIR)/.." \
- git rev-parse -q --verify HEAD 2>/dev/null)"'
+ '-DGIT_BUILT_FROM_COMMIT="$(GIT_BUILT_FROM_COMMIT)"'
$(BUILT_INS): git$X
$(QUIET_BUILT_IN)$(RM) $@ && \
@@ -2584,6 +2604,7 @@ GIT_OBJS += git.o
.PHONY: git-objs
git-objs: $(GIT_OBJS)
+SCALAR_OBJS := json-parser.o
SCALAR_OBJS += scalar.o
.PHONY: scalar-objs
scalar-objs: $(SCALAR_OBJS)
@@ -2681,7 +2702,7 @@ gettext.sp gettext.s gettext.o: GIT-PREFIX
gettext.sp gettext.s gettext.o: EXTRA_CPPFLAGS = \
-DGIT_LOCALE_PATH='"$(localedir_relative_SQ)"'
-http-push.sp http.sp http-walker.sp remote-curl.sp imap-send.sp: SP_EXTRA_FLAGS += \
+http-push.sp http.sp http-walker.sp remote-curl.sp imap-send.sp gvfs-helper.sp: SP_EXTRA_FLAGS += \
-DCURL_DISABLE_TYPECHECK
pack-revindex.sp: SP_EXTRA_FLAGS += -Wno-memcpy-max-count
@@ -2732,10 +2753,14 @@ $(REMOTE_CURL_PRIMARY): remote-curl.o http.o http-walker.o GIT-LDFLAGS $(GITLIBS
$(QUIET_LINK)$(CC) $(ALL_CFLAGS) -o $@ $(ALL_LDFLAGS) $(filter %.o,$^) \
$(CURL_LIBCURL) $(EXPAT_LIBEXPAT) $(LIBS)
-scalar$X: scalar.o GIT-LDFLAGS $(GITLIBS)
+scalar$X: $(SCALAR_OBJS) GIT-LDFLAGS $(GITLIBS)
$(QUIET_LINK)$(CC) $(ALL_CFLAGS) -o $@ $(ALL_LDFLAGS) \
$(filter %.o,$^) $(LIBS)
+git-gvfs-helper$X: gvfs-helper.o http.o GIT-LDFLAGS $(GITLIBS)
+ $(QUIET_LINK)$(CC) $(ALL_CFLAGS) -o $@ $(ALL_LDFLAGS) $(filter %.o,$^) \
+ $(CURL_LIBCURL) $(EXPAT_LIBEXPAT) $(LIBS)
+
$(LIB_FILE): $(LIB_OBJS)
$(QUIET_AR)$(RM) $@ && $(AR) $(ARFLAGS) $@ $^
@@ -3424,7 +3449,7 @@ dist: git-archive$(X) configure
@$(MAKE) -C git-gui TARDIR=../.dist-tmp-dir/git-gui dist-version
./git-archive --format=tar \
$(GIT_ARCHIVE_EXTRA_FILES) \
- --prefix=$(GIT_TARNAME)/ HEAD^{tree} > $(GIT_TARNAME).tar
+ --prefix=$(GIT_TARNAME)/ HEAD > $(GIT_TARNAME).tar
@$(RM) -r .dist-tmp-dir
gzip -f -9 $(GIT_TARNAME).tar
diff --git a/README.md b/README.md
index e2314c5a313c08..f92e1e08ddc99d 100644
--- a/README.md
+++ b/README.md
@@ -1,147 +1,169 @@
-Git for Windows
-===============
-
-[![Open in Visual Studio Code](https://img.shields.io/static/v1?logo=visualstudiocode&label=&message=Open%20in%20Visual%20Studio%20Code&labelColor=2c2c32&color=007acc&logoColor=007acc)](https://open.vscode.dev/git-for-windows/git)
-[![Build status](https://github.com/git-for-windows/git/workflows/CI/badge.svg)](https://github.com/git-for-windows/git/actions?query=branch%3Amain+event%3Apush)
-[![Join the chat at https://gitter.im/git-for-windows/git](https://badges.gitter.im/Join%20Chat.svg)](https://gitter.im/git-for-windows/git?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge)
-
-This is [Git for Windows](http://git-for-windows.github.io/), the Windows port
-of [Git](http://git-scm.com/).
-
-The Git for Windows project is run using a [governance
-model](http://git-for-windows.github.io/governance-model.html). If you
-encounter problems, you can report them as [GitHub
-issues](https://github.com/git-for-windows/git/issues), discuss them on Git
-for Windows' [Google Group](http://groups.google.com/group/git-for-windows),
-and [contribute bug
-fixes](https://github.com/git-for-windows/git/wiki/How-to-participate).
-
-To build Git for Windows, please either install [Git for Windows'
-SDK](https://gitforwindows.org/#download-sdk), start its `git-bash.exe`, `cd`
-to your Git worktree and run `make`, or open the Git worktree as a folder in
-Visual Studio.
-
-To verify that your build works, use one of the following methods:
-
-- If you want to test the built executables within Git for Windows' SDK,
- prepend `/bin-wrappers` to the `PATH`.
-- Alternatively, run `make install` in the Git worktree.
-- If you need to test this in a full installer, run `sdk build
- git-and-installer`.
-- You can also "install" Git into an existing portable Git via `make install
- DESTDIR=` where `` refers to the top-level directory of the
- portable Git. In this instance, you will want to prepend that portable Git's
- `/cmd` directory to the `PATH`, or test by running that portable Git's
- `git-bash.exe` or `git-cmd.exe`.
-- If you built using a recent Visual Studio, you can use the menu item
- `Build>Install git` (you will want to click on `Project>CMake Settings for
- Git` first, then click on `Edit JSON` and then point `installRoot` to the
- `mingw64` directory of an already-unpacked portable Git).
-
- As in the previous bullet point, you will then prepend `/cmd` to the `PATH`
- or run using the portable Git's `git-bash.exe` or `git-cmd.exe`.
-- If you want to run the built executables in-place, but in a CMD instead of
- inside a Bash, you can run a snippet like this in the `git-bash.exe` window
- where Git was built (ensure that the `EOF` line has no leading spaces), and
- then paste into the CMD window what was put in the clipboard:
-
- ```sh
- clip.exe <
-including full documentation and Git related tools.
-
-See [Documentation/gittutorial.txt][] to get started, then see
-[Documentation/giteveryday.txt][] for a useful minimum set of commands, and
-`Documentation/git-.txt` for documentation of each command.
-If git has been correctly installed, then the tutorial can also be
-read with `man gittutorial` or `git help tutorial`, and the
-documentation of each command with `man git-` or `git help
-`.
-
-CVS users may also want to read [Documentation/gitcvs-migration.txt][]
-(`man gitcvs-migration` or `git help cvs-migration` if git is
-installed).
-
-The user discussion and development of core Git take place on the Git
-mailing list -- everyone is welcome to post bug reports, feature
-requests, comments and patches to git@vger.kernel.org (read
-[Documentation/SubmittingPatches][] for instructions on patch submission
-and [Documentation/CodingGuidelines][]).
-
-Those wishing to help with error message, usage and informational message
-string translations (localization l10) should see [po/README.md][]
-(a `po` file is a Portable Object file that holds the translations).
-
-To subscribe to the list, send an email with just "subscribe git" in
-the body to majordomo@vger.kernel.org (not the Git list). The mailing
-list archives are available at ,
- and other archival sites.
-The core git mailing list is plain text (no HTML!).
-
-Issues which are security relevant should be disclosed privately to
-the Git Security mailing list .
-
-The maintainer frequently sends the "What's cooking" reports that
-list the current status of various development topics to the mailing
-list. The discussion following them give a good reference for
-project status, development direction and remaining tasks.
-
-The name "git" was given by Linus Torvalds when he wrote the very
-first version. He described the tool as "the stupid content tracker"
-and the name as (depending on your mood):
-
- - random three-letter combination that is pronounceable, and not
- actually used by any common UNIX command. The fact that it is a
- mispronunciation of "get" may or may not be relevant.
- - stupid. contemptible and despicable. simple. Take your pick from the
- dictionary of slang.
- - "global information tracker": you're in a good mood, and it actually
- works for you. Angels sing, and a light suddenly fills the room.
- - "goddamn idiotic truckload of sh*t": when it breaks
-
-[INSTALL]: INSTALL
-[Documentation/gittutorial.txt]: Documentation/gittutorial.txt
-[Documentation/giteveryday.txt]: Documentation/giteveryday.txt
-[Documentation/gitcvs-migration.txt]: Documentation/gitcvs-migration.txt
-[Documentation/SubmittingPatches]: Documentation/SubmittingPatches
-[Documentation/CodingGuidelines]: Documentation/CodingGuidelines
-[po/README.md]: po/README.md
+This project welcomes contributions and suggestions. Most contributions require you to agree to a
+Contributor License Agreement (CLA) declaring that you have the right to, and actually do, grant us
+the rights to use your contribution. For details, visit .
+
+When you submit a pull request, a CLA-bot will automatically determine whether you need to provide
+a CLA and decorate the PR appropriately (e.g., label, comment). Simply follow the instructions
+provided by the bot. You will only need to do this once across all repos using our CLA.
+
+This project has adopted the [Microsoft Open Source Code of Conduct](https://opensource.microsoft.com/codeofconduct/).
+For more information see the [Code of Conduct FAQ](https://opensource.microsoft.com/codeofconduct/faq/) or
+contact [opencode@microsoft.com](mailto:opencode@microsoft.com) with any additional questions or comments.
diff --git a/apply.c b/apply.c
index f86a01b26080dd..b0453ac2ab7d93 100644
--- a/apply.c
+++ b/apply.c
@@ -3353,6 +3353,24 @@ static int checkout_target(struct index_state *istate,
{
struct checkout costate = CHECKOUT_INIT;
+ /*
+	 * Do not checkout the entry if the skip-worktree bit is set
+ *
+ * Both callers of this method (check_preimage and load_current)
+	 * check for the existence of the file before calling this
+ * method so we know that the file doesn't exist at this point
+ * and we don't need to perform that check again here.
+ * We just need to check the skip-worktree and return.
+ *
+ * This is to prevent git from creating a file in the
+ * working directory that has the skip-worktree bit on,
+ * then updating the index from the patch and not keeping
+ * the working directory version up to date with what it
+ * changed the index version to be.
+ */
+ if (ce_skip_worktree(ce))
+ return 0;
+
costate.refresh_cache = 1;
costate.istate = istate;
if (checkout_entry(ce, &costate, NULL, NULL) ||
diff --git a/builtin.h b/builtin.h
index 8901a34d6bf424..88597024d369eb 100644
--- a/builtin.h
+++ b/builtin.h
@@ -231,6 +231,7 @@ int cmd_tag(int argc, const char **argv, const char *prefix);
int cmd_unpack_file(int argc, const char **argv, const char *prefix);
int cmd_unpack_objects(int argc, const char **argv, const char *prefix);
int cmd_update_index(int argc, const char **argv, const char *prefix);
+int cmd_update_microsoft_git(int argc, const char **argv, const char *prefix);
int cmd_update_ref(int argc, const char **argv, const char *prefix);
int cmd_update_server_info(int argc, const char **argv, const char *prefix);
int cmd_upload_archive(int argc, const char **argv, const char *prefix);
diff --git a/builtin/add.c b/builtin/add.c
index 76d5ad1f5da190..9a10501558ac79 100644
--- a/builtin/add.c
+++ b/builtin/add.c
@@ -47,6 +47,7 @@ static int chmod_pathspec(struct pathspec *pathspec, char flip, int show_only)
int err;
if (!include_sparse &&
+ !core_virtualfilesystem &&
(ce_skip_worktree(ce) ||
!path_in_sparse_checkout(ce->name, &the_index)))
continue;
@@ -97,7 +98,8 @@ static void update_callback(struct diff_queue_struct *q,
struct diff_filepair *p = q->queue[i];
const char *path = p->one->path;
- if (!include_sparse && !path_in_sparse_checkout(path, &the_index))
+ if (!include_sparse && !core_virtualfilesystem &&
+ !path_in_sparse_checkout(path, &the_index))
continue;
switch (fix_unmerged_status(p, data)) {
@@ -215,8 +217,9 @@ static int refresh(int verbose, const struct pathspec *pathspec)
if (!seen[i]) {
const char *path = pathspec->items[i].original;
- if (matches_skip_worktree(pathspec, i, &skip_worktree_seen) ||
- !path_in_sparse_checkout(path, &the_index)) {
+ if (!core_virtualfilesystem &&
+ (matches_skip_worktree(pathspec, i, &skip_worktree_seen) ||
+ !path_in_sparse_checkout(path, &the_index))) {
string_list_append(&only_match_skip_worktree,
pathspec->items[i].original);
} else {
@@ -226,7 +229,11 @@ static int refresh(int verbose, const struct pathspec *pathspec)
}
}
- if (only_match_skip_worktree.nr) {
+ /*
+ * When using a virtual filesystem, we might re-add a path
+ * that is currently virtual and we want that to succeed.
+ */
+ if (!core_virtualfilesystem && only_match_skip_worktree.nr) {
advise_on_updating_sparse_paths(&only_match_skip_worktree);
ret = 1;
}
@@ -644,7 +651,11 @@ int cmd_add(int argc, const char **argv, const char *prefix)
if (seen[i])
continue;
- if (!include_sparse &&
+ /*
+ * When using a virtual filesystem, we might re-add a path
+ * that is currently virtual and we want that to succeed.
+ */
+ if (!include_sparse && !core_virtualfilesystem &&
matches_skip_worktree(&pathspec, i, &skip_worktree_seen)) {
string_list_append(&only_match_skip_worktree,
pathspec.items[i].original);
@@ -668,7 +679,6 @@ int cmd_add(int argc, const char **argv, const char *prefix)
}
}
-
if (only_match_skip_worktree.nr) {
advise_on_updating_sparse_paths(&only_match_skip_worktree);
exit_status = 1;
diff --git a/builtin/checkout.c b/builtin/checkout.c
index 0490a19fdab2b1..25b0eb91346faa 100644
--- a/builtin/checkout.c
+++ b/builtin/checkout.c
@@ -14,6 +14,7 @@
#include "lockfile.h"
#include "merge-recursive.h"
#include "object-store.h"
+#include "packfile.h"
#include "parse-options.h"
#include "refs.h"
#include "remote.h"
@@ -986,8 +987,16 @@ static void update_refs_for_switch(const struct checkout_opts *opts,
remove_branch_state(the_repository, !opts->quiet);
strbuf_release(&msg);
if (!opts->quiet &&
- (new_branch_info->path || (!opts->force_detach && !strcmp(new_branch_info->name, "HEAD"))))
+ (new_branch_info->path || (!opts->force_detach && !strcmp(new_branch_info->name, "HEAD")))) {
+ unsigned long nr_unpack_entry_at_start;
+
+ trace2_region_enter("tracking", "report_tracking", the_repository);
+ nr_unpack_entry_at_start = get_nr_unpack_entry();
report_tracking(new_branch_info);
+ trace2_data_intmax("tracking", NULL, "report_tracking/nr_unpack_entries",
+ (intmax_t)(get_nr_unpack_entry() - nr_unpack_entry_at_start));
+ trace2_region_leave("tracking", "report_tracking", the_repository);
+ }
}
static int add_pending_uninteresting_ref(const char *refname,
diff --git a/builtin/commit.c b/builtin/commit.c
index cf0e24d3abf25c..59064d2178f207 100644
--- a/builtin/commit.c
+++ b/builtin/commit.c
@@ -160,6 +160,122 @@ static int opt_parse_porcelain(const struct option *opt, const char *arg, int un
return 0;
}
+static int do_serialize = 0;
+static char *serialize_path = NULL;
+
+static int reject_implicit = 0;
+static int do_implicit_deserialize = 0;
+static int do_explicit_deserialize = 0;
+static char *deserialize_path = NULL;
+
+static enum wt_status_deserialize_wait implicit_deserialize_wait = DESERIALIZE_WAIT__UNSET;
+static enum wt_status_deserialize_wait explicit_deserialize_wait = DESERIALIZE_WAIT__UNSET;
+
+/*
+ * --serialize | --serialize=<path>
+ *
+ * Request that we serialize status output rather than or in addition to
+ * printing in any of the established formats.
+ *
+ * Without a path, we write binary serialization data to stdout (and omit
+ * the normal status output).
+ *
+ * With a path, we write binary serialization data to the given path and then
+ * write normal status output.
+ */
+static int opt_parse_serialize(const struct option *opt, const char *arg, int unset)
+{
+ enum wt_status_format *value = (enum wt_status_format *)opt->value;
+ if (unset || !arg)
+ *value = STATUS_FORMAT_SERIALIZE_V1;
+
+ if (arg) {
+ free(serialize_path);
+ serialize_path = xstrdup(arg);
+ }
+
+ if (do_explicit_deserialize)
+ die("cannot mix --serialize and --deserialize");
+ do_implicit_deserialize = 0;
+
+ do_serialize = 1;
+ return 0;
+}
+
+/*
+ * --deserialize | --deserialize=<path> |
+ * --no-deserialize
+ *
+ * Request that we deserialize status data from some existing resource
+ * rather than performing a status scan.
+ *
+ * The input source can come from stdin or a path given here -- or be
+ * inherited from the config settings.
+ */
+static int opt_parse_deserialize(const struct option *opt, const char *arg, int unset)
+{
+ if (unset) {
+ do_implicit_deserialize = 0;
+ do_explicit_deserialize = 0;
+ } else {
+ if (do_serialize)
+ die("cannot mix --serialize and --deserialize");
+ if (arg) {
+ /* override config or stdin */
+ free(deserialize_path);
+ deserialize_path = xstrdup(arg);
+ }
+ if (!deserialize_path || !*deserialize_path)
+ do_explicit_deserialize = 1; /* read stdin */
+ else if (wt_status_deserialize_access(deserialize_path, R_OK) == 0)
+ do_explicit_deserialize = 1; /* can read from this file */
+ else {
+ /*
+ * otherwise, silently fallback to the normal
+ * collection scan
+ */
+ do_implicit_deserialize = 0;
+ do_explicit_deserialize = 0;
+ }
+ }
+
+ return 0;
+}
+
+static enum wt_status_deserialize_wait parse_dw(const char *arg)
+{
+ int tenths;
+
+ if (!strcmp(arg, "fail"))
+ return DESERIALIZE_WAIT__FAIL;
+ else if (!strcmp(arg, "block"))
+ return DESERIALIZE_WAIT__BLOCK;
+ else if (!strcmp(arg, "no"))
+ return DESERIALIZE_WAIT__NO;
+
+ /*
+ * Otherwise, assume it is a timeout in tenths of a second.
+ * If it contains a bogus value, atol() will return zero
+ * which is OK.
+ */
+ tenths = atol(arg);
+ if (tenths < 0)
+ tenths = DESERIALIZE_WAIT__NO;
+ return tenths;
+}
+
+static int opt_parse_deserialize_wait(const struct option *opt,
+ const char *arg,
+ int unset)
+{
+ if (unset)
+ explicit_deserialize_wait = DESERIALIZE_WAIT__UNSET;
+ else
+ explicit_deserialize_wait = parse_dw(arg);
+
+ return 0;
+}
+
static int opt_parse_m(const struct option *opt, const char *arg, int unset)
{
struct strbuf *buf = opt->value;
@@ -1152,6 +1268,8 @@ static void handle_untracked_files_arg(struct wt_status *s)
s->show_untracked_files = SHOW_NORMAL_UNTRACKED_FILES;
else if (!strcmp(untracked_files_arg, "all"))
s->show_untracked_files = SHOW_ALL_UNTRACKED_FILES;
+ else if (!strcmp(untracked_files_arg,"complete"))
+ s->show_untracked_files = SHOW_COMPLETE_UNTRACKED_FILES;
/*
* Please update $__git_untracked_file_modes in
* git-completion.bash when you add new options
@@ -1437,6 +1555,28 @@ static int git_status_config(const char *k, const char *v, void *cb)
s->relative_paths = git_config_bool(k, v);
return 0;
}
+ if (!strcmp(k, "status.deserializepath")) {
+ /*
+ * Automatically assume deserialization if this is
+ * set in the config and the file exists. Do not
+ * complain if the file does not exist, because we
+ * silently fall back to normal mode.
+ */
+ if (v && *v && access(v, R_OK) == 0) {
+ do_implicit_deserialize = 1;
+ deserialize_path = xstrdup(v);
+ } else {
+ reject_implicit = 1;
+ }
+ return 0;
+ }
+ if (!strcmp(k, "status.deserializewait")) {
+ if (!v || !*v)
+ implicit_deserialize_wait = DESERIALIZE_WAIT__UNSET;
+ else
+ implicit_deserialize_wait = parse_dw(v);
+ return 0;
+ }
if (!strcmp(k, "status.showuntrackedfiles")) {
if (!v)
return config_error_nonbool(k);
@@ -1477,7 +1617,8 @@ int cmd_status(int argc, const char **argv, const char *prefix)
static const char *rename_score_arg = (const char *)-1;
static struct wt_status s;
unsigned int progress_flag = 0;
- int fd;
+ int try_deserialize;
+ int fd = -1;
struct object_id oid;
static struct option builtin_status_options[] = {
OPT__VERBOSE(&verbose, N_("be verbose")),
@@ -1492,6 +1633,15 @@ int cmd_status(int argc, const char **argv, const char *prefix)
OPT_CALLBACK_F(0, "porcelain", &status_format,
N_("version"), N_("machine-readable output"),
PARSE_OPT_OPTARG, opt_parse_porcelain),
+ { OPTION_CALLBACK, 0, "serialize", &status_format,
+ N_("path"), N_("serialize raw status data to path or stdout"),
+ PARSE_OPT_OPTARG | PARSE_OPT_NONEG, opt_parse_serialize },
+ { OPTION_CALLBACK, 0, "deserialize", NULL,
+ N_("path"), N_("deserialize raw status data from file"),
+ PARSE_OPT_OPTARG, opt_parse_deserialize },
+ { OPTION_CALLBACK, 0, "deserialize-wait", NULL,
+ N_("fail|block|no"), N_("how to wait if status cache file is invalid"),
+ PARSE_OPT_OPTARG, opt_parse_deserialize_wait },
OPT_SET_INT(0, "long", &status_format,
N_("show status in long format (default)"),
STATUS_FORMAT_LONG),
@@ -1536,10 +1686,53 @@ int cmd_status(int argc, const char **argv, const char *prefix)
s.show_untracked_files == SHOW_NO_UNTRACKED_FILES)
die(_("Unsupported combination of ignored and untracked-files arguments"));
+ if (s.show_untracked_files == SHOW_COMPLETE_UNTRACKED_FILES &&
+ s.show_ignored_mode == SHOW_NO_IGNORED)
+ die(_("Complete Untracked only supported with ignored files"));
+
parse_pathspec(&s.pathspec, 0,
PATHSPEC_PREFER_FULL,
prefix, argv);
+ /*
+ * If we want to try to deserialize status data from a cache file,
+ * we need to re-order the initialization code. The problem is that
+ * this makes for a very nasty diff and causes merge conflicts as we
+ * carry it forward. And it is easy to mess up the merge, so we
+ * duplicate some code here to hopefully reduce conflicts.
+ */
+ try_deserialize = (!do_serialize &&
+ (do_implicit_deserialize || do_explicit_deserialize));
+
+ /*
+ * Disable deserialize when verbose is set because it causes us to
+ * print diffs for each modified file, but that requires us to have
+ * the index loaded and we don't want to do that (at least not now for
+ * this seldom used feature). My fear is that would further tangle
+ * the merge conflict with upstream.
+ *
+ * TODO Reconsider this in the future.
+ */
+ if (try_deserialize && verbose) {
+ trace2_data_string("status", the_repository, "deserialize/reject",
+ "args/verbose");
+ try_deserialize = 0;
+ }
+
+ if (try_deserialize)
+ goto skip_init;
+ /*
+ * If we implicitly received a status cache pathname from the config
+ * and the file does not exist, we silently reject it and do the normal
+ * status "collect". Fake up some trace2 messages to reflect this and
+ * assist post-processors know this case is different.
+ */
+ if (!do_serialize && reject_implicit) {
+ trace2_cmd_mode("implicit-deserialize");
+ trace2_data_string("status", the_repository, "deserialize/reject",
+ "status-cache/access");
+ }
+
enable_fscache(0);
if (status_format != STATUS_FORMAT_PORCELAIN &&
status_format != STATUS_FORMAT_PORCELAIN_V2)
@@ -1554,6 +1747,7 @@ int cmd_status(int argc, const char **argv, const char *prefix)
else
fd = -1;
+skip_init:
s.is_initial = get_oid(s.reference, &oid) ? 1 : 0;
if (!s.is_initial)
oidcpy(&s.oid_commit, &oid);
@@ -1570,6 +1764,36 @@ int cmd_status(int argc, const char **argv, const char *prefix)
s.rename_score = parse_rename_score(&rename_score_arg);
}
+ if (try_deserialize) {
+ int result;
+ enum wt_status_deserialize_wait dw = implicit_deserialize_wait;
+ if (explicit_deserialize_wait != DESERIALIZE_WAIT__UNSET)
+ dw = explicit_deserialize_wait;
+ if (dw == DESERIALIZE_WAIT__UNSET)
+ dw = DESERIALIZE_WAIT__NO;
+
+ if (s.relative_paths)
+ s.prefix = prefix;
+
+ trace2_cmd_mode("deserialize");
+ result = wt_status_deserialize(&s, deserialize_path, dw);
+ if (result == DESERIALIZE_OK)
+ return 0;
+ if (dw == DESERIALIZE_WAIT__FAIL)
+ die(_("Rejected status serialization cache"));
+
+ /* deserialize failed, so force the initialization we skipped above. */
+ enable_fscache(1);
+ read_cache_preload(&s.pathspec);
+ refresh_index(&the_index, REFRESH_QUIET|REFRESH_UNMERGED, &s.pathspec, NULL, NULL);
+
+ if (use_optional_locks())
+ fd = hold_locked_index(&index_lock, 0);
+ else
+ fd = -1;
+ }
+
+ trace2_cmd_mode("collect");
wt_status_collect(&s);
if (0 <= fd)
@@ -1578,6 +1802,17 @@ int cmd_status(int argc, const char **argv, const char *prefix)
if (s.relative_paths)
s.prefix = prefix;
+ if (serialize_path) {
+ int fd_serialize = xopen(serialize_path,
+ O_WRONLY | O_CREAT | O_TRUNC, 0666);
+ if (fd_serialize < 0)
+ die_errno(_("could not serialize to '%s'"),
+ serialize_path);
+ trace2_cmd_mode("serialize");
+ wt_status_serialize_v1(fd_serialize, &s);
+ close(fd_serialize);
+ }
+
wt_status_print(&s);
wt_status_collect_free_buffers(&s);
diff --git a/builtin/fetch.c b/builtin/fetch.c
index a0fca93bb6a63e..442452702d4df4 100644
--- a/builtin/fetch.c
+++ b/builtin/fetch.c
@@ -13,6 +13,8 @@
#include "string-list.h"
#include "remote.h"
#include "transport.h"
+#include "gvfs.h"
+#include "gvfs-helper-client.h"
#include "run-command.h"
#include "parse-options.h"
#include "sigchain.h"
@@ -1130,6 +1132,13 @@ static int store_updated_refs(const char *raw_url, const char *remote_name,
struct check_connected_options opt = CHECK_CONNECTED_INIT;
rm = ref_map;
+
+ /*
+ * Before checking connectivity, be really sure we have the
+ * latest pack-files loaded into memory.
+ */
+ reprepare_packed_git(the_repository);
+
if (check_connected(iterate_ref_map, &rm, &opt)) {
rc = error(_("%s did not send all necessary objects\n"), url);
goto abort;
@@ -2219,6 +2228,9 @@ int cmd_fetch(int argc, const char **argv, const char *prefix)
}
}
+ if (core_gvfs & GVFS_PREFETCH_DURING_FETCH)
+ gh_client__prefetch(0, NULL);
+
if (negotiate_only) {
struct oidset acked_commits = OIDSET_INIT;
struct oidset_iter iter;
diff --git a/builtin/gc.c b/builtin/gc.c
index effe7ac15661ce..e08d6b44b7aebe 100644
--- a/builtin/gc.c
+++ b/builtin/gc.c
@@ -12,6 +12,7 @@
#include "builtin.h"
#include "repository.h"
+#include "gvfs.h"
#include "config.h"
#include "tempfile.h"
#include "lockfile.h"
@@ -610,6 +611,9 @@ int cmd_gc(int argc, const char **argv, const char *prefix)
if (quiet)
strvec_push(&repack, "-q");
+ if ((!auto_gc || (auto_gc && gc_auto_threshold > 0)) && gvfs_config_is_set(GVFS_BLOCK_COMMANDS))
+ die(_("'git gc' is not supported on a GVFS repo"));
+
if (auto_gc) {
/*
* Auto-gc should be least intrusive as possible.
@@ -1002,6 +1006,8 @@ static int write_loose_object_to_stdin(const struct object_id *oid,
return ++(d->count) > d->batch_size;
}
+static const char *object_dir = NULL;
+
static int pack_loose(struct maintenance_run_opts *opts)
{
struct repository *r = the_repository;
@@ -1009,11 +1015,14 @@ static int pack_loose(struct maintenance_run_opts *opts)
struct write_loose_object_data data;
struct child_process pack_proc = CHILD_PROCESS_INIT;
+ if (!object_dir)
+ object_dir = r->objects->odb->path;
+
/*
* Do not start pack-objects process
* if there are no loose objects.
*/
- if (!for_each_loose_file_in_objdir(r->objects->odb->path,
+ if (!for_each_loose_file_in_objdir(object_dir,
bail_on_loose,
NULL, NULL, NULL))
return 0;
@@ -1023,7 +1032,7 @@ static int pack_loose(struct maintenance_run_opts *opts)
strvec_push(&pack_proc.args, "pack-objects");
if (opts->quiet)
strvec_push(&pack_proc.args, "--quiet");
- strvec_pushf(&pack_proc.args, "%s/pack/loose", r->objects->odb->path);
+ strvec_pushf(&pack_proc.args, "%s/pack/loose", object_dir);
pack_proc.in = -1;
@@ -1036,7 +1045,7 @@ static int pack_loose(struct maintenance_run_opts *opts)
data.count = 0;
data.batch_size = 50000;
- for_each_loose_file_in_objdir(r->objects->odb->path,
+ for_each_loose_file_in_objdir(object_dir,
write_loose_object_to_stdin,
NULL,
NULL,
@@ -1276,6 +1285,8 @@ static int maintenance_run_tasks(struct maintenance_run_opts *opts)
char *lock_path = xstrfmt("%s/maintenance", r->objects->odb->path);
if (hold_lock_file_for_update(&lk, lock_path, LOCK_NO_DEREF) < 0) {
+ struct stat st;
+ struct strbuf lock_dot_lock = STRBUF_INIT;
/*
* Another maintenance command is running.
*
@@ -1286,6 +1297,25 @@ static int maintenance_run_tasks(struct maintenance_run_opts *opts)
if (!opts->auto_flag && !opts->quiet)
warning(_("lock file '%s' exists, skipping maintenance"),
lock_path);
+
+ /*
+ * Check timestamp on .lock file to see if we should
+ * delete it to recover from a fail state.
+ */
+ strbuf_addstr(&lock_dot_lock, lock_path);
+ strbuf_addstr(&lock_dot_lock, ".lock");
+ if (lstat(lock_dot_lock.buf, &st))
+ warning_errno(_("unable to stat '%s'"), lock_dot_lock.buf);
+ else {
+ if (st.st_mtime < time(NULL) - (6 * 60 * 60)) {
+ if (unlink(lock_dot_lock.buf))
+ warning_errno(_("unable to delete stale lock file"));
+ else
+ warning(_("deleted stale lock file"));
+ }
+ }
+
+ strbuf_release(&lock_dot_lock);
free(lock_path);
return 0;
}
@@ -1414,6 +1444,7 @@ static int maintenance_run(int argc, const char **argv, const char *prefix)
{
int i;
struct maintenance_run_opts opts;
+ const char *tmp_obj_dir = NULL;
struct option builtin_maintenance_run_options[] = {
OPT_BOOL(0, "auto", &opts.auto_flag,
N_("run tasks based on the state of the repository")),
@@ -1447,6 +1478,18 @@ static int maintenance_run(int argc, const char **argv, const char *prefix)
if (argc != 0)
usage_with_options(builtin_maintenance_run_usage,
builtin_maintenance_run_options);
+
+ /*
+ * To enable the VFS for Git/Scalar shared object cache, use
+ * the gvfs.sharedcache config option to redirect the
+ * maintenance to that location.
+ */
+ if (!git_config_get_value("gvfs.sharedcache", &tmp_obj_dir) &&
+ tmp_obj_dir) {
+ object_dir = xstrdup(tmp_obj_dir);
+ setenv(DB_ENVIRONMENT, object_dir, 1);
+ }
+
return maintenance_run_tasks(&opts);
}
@@ -1528,9 +1571,13 @@ static int maintenance_unregister(int argc, const char **argv, const char *prefi
struct option options[] = {
OPT_END(),
};
- int rc;
+ const char *key = "maintenance.repo";
+ int rc = 0;
struct child_process config_unset = CHILD_PROCESS_INIT;
char *maintpath = get_maintpath();
+ int found = 0;
+ struct string_list_item *item;
+ const struct string_list *list = git_config_get_value_multi(key);
argc = parse_options(argc, argv, prefix, options,
builtin_maintenance_unregister_usage, 0);
@@ -1538,11 +1585,21 @@ static int maintenance_unregister(int argc, const char **argv, const char *prefi
usage_with_options(builtin_maintenance_unregister_usage,
options);
- config_unset.git_cmd = 1;
- strvec_pushl(&config_unset.args, "config", "--global", "--unset",
- "--fixed-value", "maintenance.repo", maintpath, NULL);
+ for_each_string_list_item(item, list) {
+ if (!strcmp(maintpath, item->string)) {
+ found = 1;
+ break;
+ }
+ }
+
+ if (found) {
+ config_unset.git_cmd = 1;
+ strvec_pushl(&config_unset.args, "config", "--global", "--unset",
+ "--fixed-value", key, maintpath, NULL);
+
+ rc = run_command(&config_unset);
+ }
- rc = run_command(&config_unset);
free(maintpath);
return rc;
}
diff --git a/builtin/index-pack.c b/builtin/index-pack.c
index 6648f2daef5cef..46a61b7cba8bf9 100644
--- a/builtin/index-pack.c
+++ b/builtin/index-pack.c
@@ -801,7 +801,7 @@ static void sha1_object(const void *data, struct object_entry *obj_entry,
if (startup_info->have_repository) {
read_lock();
collision_test_needed =
- has_object_file_with_flags(oid, OBJECT_INFO_QUICK);
+ has_object_file_with_flags(oid, OBJECT_INFO_FOR_PREFETCH);
read_unlock();
}
diff --git a/builtin/reset.c b/builtin/reset.c
index 0d471411ffe398..8eb5cfb22d91b6 100644
--- a/builtin/reset.c
+++ b/builtin/reset.c
@@ -28,6 +28,8 @@
#include "dir.h"
#include "strbuf.h"
#include "quote.h"
+#include "dir.h"
+#include "entry.h"
#define REFRESH_INDEX_DELAY_WARNING_IN_MS (2 * 1000)
@@ -141,9 +143,47 @@ static void update_index_from_diff(struct diff_queue_struct *q,
for (i = 0; i < q->nr; i++) {
int pos;
+ int respect_skip_worktree = 1;
struct diff_filespec *one = q->queue[i]->one;
+ struct diff_filespec *two = q->queue[i]->two;
int is_in_reset_tree = one->mode && !is_null_oid(&one->oid);
+ int is_missing = !(one->mode && !is_null_oid(&one->oid));
+ int was_missing = !two->mode && is_null_oid(&two->oid);
struct cache_entry *ce;
+ struct cache_entry *ceBefore;
+ struct checkout state = CHECKOUT_INIT;
+
+ /*
+ * When using the virtual filesystem feature, the cache entries that are
+ * added here will not have the skip-worktree bit set.
+ *
+ * Without this code there is data that is lost because the files that
+ * would normally be in the working directory are not there and show as
+ * deleted for the next status or in the case of added files just disappear.
+ * We need to create the previous version of the files in the working
+ * directory so that they will have the right content and the next
+ * status call will show modified or untracked files correctly.
+ */
+ if (core_virtualfilesystem && !file_exists(two->path))
+ {
+ pos = cache_name_pos(two->path, strlen(two->path));
+ if ((pos >= 0 && ce_skip_worktree(active_cache[pos])) &&
+ (is_missing || !was_missing))
+ {
+ state.force = 1;
+ state.refresh_cache = 1;
+ state.istate = &the_index;
+ ceBefore = make_cache_entry(&the_index, two->mode,
+ &two->oid, two->path,
+ 0, 0);
+ if (!ceBefore)
+ die(_("make_cache_entry failed for path '%s'"),
+ two->path);
+
+ checkout_entry(ceBefore, &state, NULL, NULL);
+ respect_skip_worktree = 0;
+ }
+ }
if (!is_in_reset_tree && !intent_to_add) {
remove_file_from_cache(one->path);
@@ -162,8 +202,14 @@ static void update_index_from_diff(struct diff_queue_struct *q,
* to properly construct the reset sparse directory.
*/
pos = cache_name_pos(one->path, strlen(one->path));
- if ((pos >= 0 && ce_skip_worktree(active_cache[pos])) ||
- (pos < 0 && !path_in_sparse_checkout(one->path, &the_index)))
+
+ /*
+ * Do not add the SKIP_WORKTREE bit back if we populated the
+ * file on purpose in a virtual filesystem scenario.
+ */
+ if (respect_skip_worktree &&
+ ((pos >= 0 && ce_skip_worktree(active_cache[pos])) ||
+ (pos < 0 && !path_in_sparse_checkout(one->path, &the_index))))
ce->ce_flags |= CE_SKIP_WORKTREE;
if (!ce)
diff --git a/builtin/rm.c b/builtin/rm.c
index b6ba859fe42571..bbf219baf0a7e0 100644
--- a/builtin/rm.c
+++ b/builtin/rm.c
@@ -304,7 +304,7 @@ int cmd_rm(int argc, const char **argv, const char *prefix)
for (i = 0; i < active_nr; i++) {
const struct cache_entry *ce = active_cache[i];
- if (!include_sparse &&
+ if (!include_sparse && !core_virtualfilesystem &&
(ce_skip_worktree(ce) ||
!path_in_sparse_checkout(ce->name, &the_index)))
continue;
@@ -341,7 +341,11 @@ int cmd_rm(int argc, const char **argv, const char *prefix)
*original ? original : ".");
}
- if (only_match_skip_worktree.nr) {
+ /*
+ * When using a virtual filesystem, we might re-add a path
+ * that is currently virtual and we want that to succeed.
+ */
+ if (!core_virtualfilesystem && only_match_skip_worktree.nr) {
advise_on_updating_sparse_paths(&only_match_skip_worktree);
ret = 1;
}
diff --git a/builtin/sparse-checkout.c b/builtin/sparse-checkout.c
index 287716db68e419..d82f02597ace98 100644
--- a/builtin/sparse-checkout.c
+++ b/builtin/sparse-checkout.c
@@ -106,7 +106,7 @@ static int sparse_checkout_list(int argc, const char **argv, const char *prefix)
static void clean_tracked_sparse_directories(struct repository *r)
{
- int i, was_full = 0;
+ int i, value, was_full = 0;
struct strbuf path = STRBUF_INIT;
size_t pathlen;
struct string_list_item *item;
@@ -122,6 +122,13 @@ static void clean_tracked_sparse_directories(struct repository *r)
!r->index->sparse_checkout_patterns->use_cone_patterns)
return;
+ /*
+ * Users can disable this behavior.
+ */
+ if (!repo_config_get_bool(r, "index.deletesparsedirectories", &value) &&
+ !value)
+ return;
+
/*
* Use the sparse index as a data structure to assist finding
* directories that are safe to delete. This conversion to a
diff --git a/builtin/update-index.c b/builtin/update-index.c
index b62249905f1b80..7c01696690f254 100644
--- a/builtin/update-index.c
+++ b/builtin/update-index.c
@@ -5,6 +5,7 @@
*/
#define USE_THE_INDEX_COMPATIBILITY_MACROS
#include "cache.h"
+#include "gvfs.h"
#include "bulk-checkin.h"
#include "config.h"
#include "lockfile.h"
@@ -1170,7 +1171,13 @@ int cmd_update_index(int argc, const char **argv, const char *prefix)
argc = parse_options_end(&ctx);
getline_fn = nul_term_line ? strbuf_getline_nul : strbuf_getline_lf;
+ if (mark_skip_worktree_only && gvfs_config_is_set(GVFS_BLOCK_COMMANDS))
+ die(_("modifying the skip worktree bit is not supported on a GVFS repo"));
+
if (preferred_index_format) {
+ if (preferred_index_format != 4 && gvfs_config_is_set(GVFS_BLOCK_COMMANDS))
+ die(_("changing the index version is not supported on a GVFS repo"));
+
if (preferred_index_format < INDEX_FORMAT_LB ||
INDEX_FORMAT_UB < preferred_index_format)
die("index-version %d not in range: %d..%d",
@@ -1211,6 +1218,9 @@ int cmd_update_index(int argc, const char **argv, const char *prefix)
end_odb_transaction();
if (split_index > 0) {
+ if (gvfs_config_is_set(GVFS_BLOCK_COMMANDS))
+ die(_("split index is not supported on a GVFS repo"));
+
if (git_config_get_split_index() == 0)
warning(_("core.splitIndex is set to false; "
"remove or change it, if you really want to "
diff --git a/builtin/update-microsoft-git.c b/builtin/update-microsoft-git.c
new file mode 100644
index 00000000000000..f943b808615fd7
--- /dev/null
+++ b/builtin/update-microsoft-git.c
@@ -0,0 +1,73 @@
+#include "builtin.h"
+#include "repository.h"
+#include "parse-options.h"
+#include "run-command.h"
+#include "strvec.h"
+
+#if defined(GIT_WINDOWS_NATIVE)
+/*
+ * On Windows, run 'git update-git-for-windows' which
+ * is installed by the installer, based on the script
+ * in git-for-windows/build-extra.
+ */
+static int platform_specific_upgrade(void)
+{
+ int res;
+ struct strvec args = STRVEC_INIT;
+
+ strvec_push(&args, "git-update-git-for-windows");
+ res = run_command_v_opt(args.v, 0);
+ strvec_clear(&args);
+ return res;
+}
+#elif defined(__APPLE__)
+/*
+ * On macOS, we expect the user to have the microsoft-git
+ * cask installed via Homebrew. We check using these
+ * commands:
+ *
+ * 1. 'brew update' to get latest versions.
+ * 2. 'brew upgrade --cask microsoft-git' to get the
+ * latest version.
+ */
+static int platform_specific_upgrade(void)
+{
+ int res;
+ struct strvec args = STRVEC_INIT;
+
+ printf("Updating Homebrew with 'brew update'\n");
+
+ strvec_pushl(&args, "brew", "update", NULL);
+ res = run_command_v_opt(args.v, 0);
+ strvec_clear(&args);
+
+ if (res) {
+ error(_("'brew update' failed; is brew installed?"));
+ return 1;
+ }
+
+ printf("Upgrading microsoft-git with 'brew upgrade --cask microsoft-git'\n");
+ strvec_pushl(&args, "brew", "upgrade", "--cask", "microsoft-git", NULL);
+ res = run_command_v_opt(args.v, 0);
+ strvec_clear(&args);
+
+ return res;
+}
+#else
+static int platform_specific_upgrade(void)
+{
+ error(_("update-microsoft-git is not supported on this platform"));
+ return 1;
+}
+#endif
+
+static const char builtin_update_microsoft_git_usage[] =
+ N_("git update-microsoft-git");
+
+int cmd_update_microsoft_git(int argc, const char **argv, const char *prefix)
+{
+ if (argc == 2 && !strcmp(argv[1], "-h"))
+ usage(builtin_update_microsoft_git_usage);
+
+ return platform_specific_upgrade();
+}
diff --git a/cache-tree.c b/cache-tree.c
index c97111cccf2eda..af11919fdd7c91 100644
--- a/cache-tree.c
+++ b/cache-tree.c
@@ -1,4 +1,5 @@
#include "cache.h"
+#include "gvfs.h"
#include "lockfile.h"
#include "tree.h"
#include "tree-walk.h"
@@ -224,7 +225,7 @@ static void discard_unused_subtrees(struct cache_tree *it)
}
}
-int cache_tree_fully_valid(struct cache_tree *it)
+static int cache_tree_fully_valid_1(struct cache_tree *it)
{
int i;
if (!it)
@@ -232,7 +233,7 @@ int cache_tree_fully_valid(struct cache_tree *it)
if (it->entry_count < 0 || !has_object_file(&it->oid))
return 0;
for (i = 0; i < it->subtree_nr; i++) {
- if (!cache_tree_fully_valid(it->down[i]->cache_tree))
+ if (!cache_tree_fully_valid_1(it->down[i]->cache_tree))
return 0;
}
return 1;
@@ -243,6 +244,17 @@ static int must_check_existence(const struct cache_entry *ce)
return !(has_promisor_remote() && ce_skip_worktree(ce));
}
+int cache_tree_fully_valid(struct cache_tree *it)
+{
+ int result;
+
+ trace2_region_enter("cache_tree", "fully_valid", NULL);
+ result = cache_tree_fully_valid_1(it);
+ trace2_region_leave("cache_tree", "fully_valid", NULL);
+
+ return result;
+}
+
static int update_one(struct cache_tree *it,
struct cache_entry **cache,
int entries,
@@ -252,7 +264,8 @@ static int update_one(struct cache_tree *it,
int flags)
{
struct strbuf buffer;
- int missing_ok = flags & WRITE_TREE_MISSING_OK;
+ int missing_ok = gvfs_config_is_set(GVFS_MISSING_OK) ?
+ WRITE_TREE_MISSING_OK : (flags & WRITE_TREE_MISSING_OK);
int dryrun = flags & WRITE_TREE_DRY_RUN;
int repair = flags & WRITE_TREE_REPAIR;
int to_invalidate = 0;
@@ -421,7 +434,29 @@ static int update_one(struct cache_tree *it,
continue;
strbuf_grow(&buffer, entlen + 100);
- strbuf_addf(&buffer, "%o %.*s%c", mode, entlen, path + baselen, '\0');
+
+ switch (mode) {
+ case 0100644:
+ strbuf_add(&buffer, "100644 ", 7);
+ break;
+ case 0100664:
+ strbuf_add(&buffer, "100664 ", 7);
+ break;
+ case 0100755:
+ strbuf_add(&buffer, "100755 ", 7);
+ break;
+ case 0120000:
+ strbuf_add(&buffer, "120000 ", 7);
+ break;
+ case 0160000:
+ strbuf_add(&buffer, "160000 ", 7);
+ break;
+ default:
+ strbuf_addf(&buffer, "%o ", mode);
+ break;
+ }
+ strbuf_add(&buffer, path + baselen, entlen);
+ strbuf_addch(&buffer, '\0');
strbuf_add(&buffer, oid->hash, the_hash_algo->rawsz);
#if DEBUG_CACHE_TREE
@@ -815,14 +850,14 @@ void prime_cache_tree(struct repository *r,
{
struct strbuf tree_path = STRBUF_INIT;
- trace2_region_enter("cache-tree", "prime_cache_tree", the_repository);
+ trace2_region_enter("cache-tree", "prime_cache_tree", r);
cache_tree_free(&istate->cache_tree);
istate->cache_tree = cache_tree();
prime_cache_tree_rec(r, istate->cache_tree, tree, &tree_path);
strbuf_release(&tree_path);
istate->cache_changed |= CACHE_TREE_CHANGED;
- trace2_region_leave("cache-tree", "prime_cache_tree", the_repository);
+ trace2_region_leave("cache-tree", "prime_cache_tree", r);
}
/*
diff --git a/cache.h b/cache.h
index e7344004f5c7e0..94d4726d424e3b 100644
--- a/cache.h
+++ b/cache.h
@@ -240,7 +240,7 @@ static inline unsigned create_ce_flags(unsigned stage)
#define ce_namelen(ce) ((ce)->ce_namelen)
#define ce_size(ce) cache_entry_size(ce_namelen(ce))
#define ce_stage(ce) ((CE_STAGEMASK & (ce)->ce_flags) >> CE_STAGESHIFT)
-#define ce_uptodate(ce) ((ce)->ce_flags & CE_UPTODATE)
+#define ce_uptodate(ce) (((ce)->ce_flags & CE_UPTODATE) || ((ce)->ce_flags & CE_FSMONITOR_VALID))
#define ce_skip_worktree(ce) ((ce)->ce_flags & CE_SKIP_WORKTREE)
#define ce_mark_uptodate(ce) ((ce)->ce_flags |= CE_UPTODATE)
#define ce_intent_to_add(ce) ((ce)->ce_flags & CE_INTENT_TO_ADD)
@@ -631,6 +631,30 @@ void set_git_work_tree(const char *tree);
#define ALTERNATE_DB_ENVIRONMENT "GIT_ALTERNATE_OBJECT_DIRECTORIES"
void setup_work_tree(void);
+
+/*
+ * discover_git_directory_reason() is similar to discover_git_directory(),
+ * except it returns an enum value instead. It is important to note that
+ * a zero-valued return here is actually GIT_DIR_NONE, which is different
+ * from discover_git_directory.
+ */
+enum discovery_result {
+ GIT_DIR_NONE = 0,
+ GIT_DIR_EXPLICIT,
+ GIT_DIR_DISCOVERED,
+ GIT_DIR_BARE,
+ /* these are errors */
+ GIT_DIR_HIT_CEILING = -1,
+ GIT_DIR_HIT_MOUNT_POINT = -2,
+ GIT_DIR_INVALID_GITFILE = -3,
+ GIT_DIR_INVALID_OWNERSHIP = -4,
+ GIT_DIR_DISALLOWED_BARE = -5,
+ GIT_DIR_INVALID_FORMAT = -6,
+ GIT_DIR_CWD_FAILURE = -7,
+};
+enum discovery_result discover_git_directory_reason(struct strbuf *commondir,
+ struct strbuf *gitdir);
+
/*
* Find the commondir and gitdir of the repository that contains the current
* working directory, without changing the working directory or other global
@@ -639,8 +663,12 @@ void setup_work_tree(void);
* both have the same result appended to the buffer. The return value is
* either 0 upon success and non-zero if no repository was found.
*/
-int discover_git_directory(struct strbuf *commondir,
- struct strbuf *gitdir);
+static inline int discover_git_directory(struct strbuf *commondir,
+ struct strbuf *gitdir)
+{
+ return discover_git_directory_reason(commondir, gitdir) <= 0;
+}
+
const char *setup_git_directory_gently(int *);
const char *setup_git_directory(void);
char *prefix_path(const char *prefix, int len, const char *path);
@@ -809,6 +837,7 @@ int strcmp_offset(const char *s1, const char *s2, size_t *first_change);
int index_dir_exists(struct index_state *istate, const char *name, int namelen);
void adjust_dirname_case(struct index_state *istate, char *name);
struct cache_entry *index_file_exists(struct index_state *istate, const char *name, int namelen, int igncase);
+struct cache_entry *index_file_next_match(struct index_state *istate, struct cache_entry *ce, int igncase);
/*
* Searches for an entry defined by name and namelen in the given index.
@@ -1074,9 +1103,14 @@ enum fsync_method {
extern enum fsync_method fsync_method;
extern int core_preload_index;
+extern const char *core_virtualfilesystem;
+extern int core_gvfs;
extern int precomposed_unicode;
extern int protect_hfs;
extern int protect_ntfs;
+extern int core_use_gvfs_helper;
+extern const char *gvfs_cache_server_url;
+extern struct strbuf gvfs_shared_cache_pathname;
extern int core_apply_sparse_checkout;
extern int core_sparse_checkout_cone;
@@ -1094,6 +1128,8 @@ int use_optional_locks(void);
extern char comment_line_char;
extern int auto_comment_line_char;
+extern int core_virtualize_objects;
+
enum log_refs_config {
LOG_REFS_UNSET = -1,
LOG_REFS_NONE = 0,
diff --git a/compat/mingw.c b/compat/mingw.c
index 969534681a5c6e..d63bbabf9b6017 100644
--- a/compat/mingw.c
+++ b/compat/mingw.c
@@ -3969,6 +3969,8 @@ int wmain(int argc, const wchar_t **wargv)
SetConsoleCtrlHandler(handle_ctrl_c, TRUE);
+ trace2_initialize_clock();
+
maybe_redirect_std_handles();
adjust_symlink_flags();
fsync_object_files = 1;
diff --git a/config.c b/config.c
index 7176f800e1607b..2ec76f6ada59d0 100644
--- a/config.c
+++ b/config.c
@@ -7,6 +7,7 @@
*/
#include "cache.h"
#include "date.h"
+#include "gvfs.h"
#include "branch.h"
#include "config.h"
#include "environment.h"
@@ -23,6 +24,7 @@
#include "color.h"
#include "refs.h"
#include "worktree.h"
+#include "transport.h"
struct config_source {
struct config_source *prev;
@@ -1729,8 +1731,22 @@ int git_default_core_config(const char *var, const char *value, void *cb)
return 0;
}
+ if (!strcmp(var, "core.gvfs")) {
+ gvfs_load_config_value(value);
+ return 0;
+ }
+
+ if (!strcmp(var, "core.usegvfshelper")) {
+ core_use_gvfs_helper = git_config_bool(var, value);
+ return 0;
+ }
+
if (!strcmp(var, "core.sparsecheckout")) {
- core_apply_sparse_checkout = git_config_bool(var, value);
+ /* virtual file system relies on the sparse checkout logic so force it on */
+ if (core_virtualfilesystem)
+ core_apply_sparse_checkout = 1;
+ else
+ core_apply_sparse_checkout = git_config_bool(var, value);
return 0;
}
@@ -1759,6 +1775,11 @@ int git_default_core_config(const char *var, const char *value, void *cb)
return 0;
}
+ if (!strcmp(var, "core.virtualizeobjects")) {
+ core_virtualize_objects = git_config_bool(var, value);
+ return 0;
+ }
+
/* Add other config variables here and to Documentation/config.txt. */
return platform_core_config(var, value, cb);
}
@@ -1862,6 +1883,35 @@ static int git_default_mailmap_config(const char *var, const char *value)
return 0;
}
+static int git_default_gvfs_config(const char *var, const char *value)
+{
+ if (!strcmp(var, "gvfs.cache-server")) {
+ const char *v2 = NULL;
+
+ if (!git_config_string(&v2, var, value) && v2 && *v2)
+ gvfs_cache_server_url = transport_anonymize_url(v2);
+ free((char*)v2);
+ return 0;
+ }
+
+ if (!strcmp(var, "gvfs.sharedcache") && value && *value) {
+ strbuf_setlen(&gvfs_shared_cache_pathname, 0);
+ strbuf_addstr(&gvfs_shared_cache_pathname, value);
+ if (strbuf_normalize_path(&gvfs_shared_cache_pathname) < 0) {
+ /*
+ * Pretend it wasn't set. This will cause us to
+ * fallback to ".git/objects" effectively.
+ */
+ strbuf_release(&gvfs_shared_cache_pathname);
+ return 0;
+ }
+ strbuf_trim_trailing_dir_sep(&gvfs_shared_cache_pathname);
+ return 0;
+ }
+
+ return 0;
+}
+
int git_default_config(const char *var, const char *value, void *cb)
{
if (starts_with(var, "core."))
@@ -1911,6 +1961,9 @@ int git_default_config(const char *var, const char *value, void *cb)
if (starts_with(var, "sparse."))
return git_default_sparse_config(var, value);
+ if (starts_with(var, "gvfs."))
+ return git_default_gvfs_config(var, value);
+
/* Add other config variables here and to Documentation/config.txt. */
return 0;
}
@@ -2789,6 +2842,44 @@ int git_config_get_max_percent_split_change(void)
return -1; /* default value */
}
+int git_config_get_virtualfilesystem(void)
+{
+ /* Run only once. */
+ static int virtual_filesystem_result = -1;
+ if (virtual_filesystem_result >= 0)
+ return virtual_filesystem_result;
+
+ if (git_config_get_pathname("core.virtualfilesystem", &core_virtualfilesystem))
+ core_virtualfilesystem = getenv("GIT_VIRTUALFILESYSTEM_TEST");
+
+ if (core_virtualfilesystem && !*core_virtualfilesystem)
+ core_virtualfilesystem = NULL;
+
+ if (core_virtualfilesystem) {
+ /*
+ * Some git commands spawn helpers and redirect the index to a different
+ * location. These include "difftool -d" and the sequencer
+ * (i.e. `git rebase -i`, `git cherry-pick` and `git revert`) and others.
+ * In those instances we don't want to update their temporary index with
+ * our virtualization data.
+ */
+ char *default_index_file = xstrfmt("%s/%s", the_repository->gitdir, "index");
+ int should_run_hook = !strcmp(default_index_file, the_repository->index_file);
+
+ free(default_index_file);
+ if (should_run_hook) {
+ /* virtual file system relies on the sparse checkout logic so force it on */
+ core_apply_sparse_checkout = 1;
+ virtual_filesystem_result = 1;
+ return 1;
+ }
+ core_virtualfilesystem = NULL;
+ }
+
+ virtual_filesystem_result = 0;
+ return 0;
+}
+
int git_config_get_index_threads(int *dest)
{
int is_bool, val;
@@ -3202,6 +3293,7 @@ int git_config_set_multivar_in_file_gently(const char *config_filename,
const char *value_pattern,
unsigned flags)
{
+ static unsigned long timeout_ms = ULONG_MAX;
int fd = -1, in_fd = -1;
int ret;
struct lock_file lock = LOCK_INIT;
@@ -3222,11 +3314,16 @@ int git_config_set_multivar_in_file_gently(const char *config_filename,
if (!config_filename)
config_filename = filename_buf = git_pathdup("config");
+ if ((long)timeout_ms < 0 &&
+ git_config_get_ulong("core.configWriteLockTimeoutMS", &timeout_ms))
+ timeout_ms = 0;
+
/*
* The lock serves a purpose in addition to locking: the new
* contents of .git/config will be written into it.
*/
- fd = hold_lock_file_for_update(&lock, config_filename, 0);
+ fd = hold_lock_file_for_update_timeout(&lock, config_filename, 0,
+ timeout_ms);
if (fd < 0) {
error_errno(_("could not lock config file %s"), config_filename);
ret = CONFIG_NO_LOCK;
diff --git a/config.h b/config.h
index fe5b7a1ae639b5..e4ebca2970ef3b 100644
--- a/config.h
+++ b/config.h
@@ -614,6 +614,7 @@ int git_config_get_pathname(const char *key, const char **dest);
int git_config_get_index_threads(int *dest);
int git_config_get_split_index(void);
int git_config_get_max_percent_split_change(void);
+int git_config_get_virtualfilesystem(void);
/* This dies if the configured or default date is in the future */
int git_config_get_expiry(const char *key, const char **output);
diff --git a/connected.c b/connected.c
index 74a20cb32e7c7b..9cde030ece78e3 100644
--- a/connected.c
+++ b/connected.c
@@ -1,4 +1,5 @@
#include "cache.h"
+#include "gvfs.h"
#include "object-store.h"
#include "run-command.h"
#include "sigchain.h"
@@ -30,6 +31,26 @@ int check_connected(oid_iterate_fn fn, void *cb_data,
struct transport *transport;
size_t base_len;
+ /*
+ * Running a virtual file system there will be objects that are
+ * missing locally and we don't want to download a bunch of
+ * commits, trees, and blobs just to make sure everything is
+ * reachable locally so this option will skip reachability
+ * checks below that use rev-list. This will stop the check
+ * before uploadpack runs to determine if there is anything to
+ * fetch. Returning zero for the first check will also prevent the
+ * uploadpack from happening. It will also skip the check after
+ * the fetch is finished to make sure all the objects were
+ * downloaded in the pack file. This will allow the fetch to
+ * run and get all the latest tip commit ids for all the branches
+ * in the fetch but not pull down commits, trees, or blobs via
+ * upload pack.
+ */
+ if (gvfs_config_is_set(GVFS_FETCH_SKIP_REACHABILITY_AND_UPLOADPACK))
+ return 0;
+ if (core_virtualize_objects)
+ return 0;
+
if (!opt)
opt = &defaults;
transport = opt->transport;
diff --git a/contrib/buildsystems/CMakeLists.txt b/contrib/buildsystems/CMakeLists.txt
index 4f366765f75f19..59a13278331a26 100644
--- a/contrib/buildsystems/CMakeLists.txt
+++ b/contrib/buildsystems/CMakeLists.txt
@@ -643,7 +643,7 @@ if(NOT CURL_FOUND)
add_compile_definitions(NO_CURL)
message(WARNING "git-http-push and git-http-fetch will not be built")
else()
- list(APPEND PROGRAMS_BUILT git-http-fetch git-http-push git-imap-send git-remote-http)
+ list(APPEND PROGRAMS_BUILT git-http-fetch git-http-push git-imap-send git-remote-http git-gvfs-helper)
if(CURL_VERSION_STRING VERSION_GREATER_EQUAL 7.34.0)
add_compile_definitions(USE_CURL_FOR_IMAP_SEND)
endif()
@@ -792,7 +792,7 @@ target_link_libraries(git-sh-i18n--envsubst common-main)
add_executable(git-shell ${CMAKE_SOURCE_DIR}/shell.c)
target_link_libraries(git-shell common-main)
-add_executable(scalar ${CMAKE_SOURCE_DIR}/scalar.c)
+add_executable(scalar ${CMAKE_SOURCE_DIR}/scalar.c ${CMAKE_SOURCE_DIR}/json-parser.c)
target_link_libraries(scalar common-main)
if(CURL_FOUND)
@@ -811,6 +811,9 @@ if(CURL_FOUND)
add_executable(git-http-push ${CMAKE_SOURCE_DIR}/http-push.c)
target_link_libraries(git-http-push http_obj common-main ${CURL_LIBRARIES} ${EXPAT_LIBRARIES})
endif()
+
+ add_executable(git-gvfs-helper ${CMAKE_SOURCE_DIR}/gvfs-helper.c)
+ target_link_libraries(git-gvfs-helper http_obj common-main ${CURL_LIBRARIES} )
endif()
parse_makefile_for_executables(git_builtin_extra "BUILT_INS")
@@ -1020,6 +1023,20 @@ set(wrapper_scripts
set(wrapper_test_scripts
test-fake-ssh test-tool)
+if(CURL_FOUND)
+ list(APPEND wrapper_test_scripts test-gvfs-protocol)
+
+ add_executable(test-gvfs-protocol ${CMAKE_SOURCE_DIR}/t/helper/test-gvfs-protocol.c)
+ target_link_libraries(test-gvfs-protocol common-main)
+
+ if(MSVC)
+ set_target_properties(test-gvfs-protocol
+ PROPERTIES RUNTIME_OUTPUT_DIRECTORY_DEBUG ${CMAKE_BINARY_DIR}/t/helper)
+ set_target_properties(test-gvfs-protocol
+ PROPERTIES RUNTIME_OUTPUT_DIRECTORY_RELEASE ${CMAKE_BINARY_DIR}/t/helper)
+ endif()
+endif()
+
foreach(script ${wrapper_scripts})
file(STRINGS ${CMAKE_SOURCE_DIR}/wrap-for-bin.sh content NEWLINE_CONSUME)
diff --git a/contrib/completion/git-completion.bash b/contrib/completion/git-completion.bash
index ba5c395d2d804f..4bb5b28fd266d3 100644
--- a/contrib/completion/git-completion.bash
+++ b/contrib/completion/git-completion.bash
@@ -1655,7 +1655,7 @@ _git_clone ()
esac
}
-__git_untracked_file_modes="all no normal"
+__git_untracked_file_modes="all no normal complete"
_git_commit ()
{
diff --git a/contrib/long-running-read-object/example.pl b/contrib/long-running-read-object/example.pl
new file mode 100644
index 00000000000000..b8f37f836a813c
--- /dev/null
+++ b/contrib/long-running-read-object/example.pl
@@ -0,0 +1,114 @@
+#!/usr/bin/perl
+#
+# Example implementation for the Git read-object protocol version 1
+# See Documentation/technical/read-object-protocol.txt
+#
+# Allows you to test the ability for blobs to be pulled from a host git repo
+# "on demand." Called when git needs a blob it couldn't find locally due to
+# a lazy clone that only cloned the commits and trees.
+#
+# A lazy clone can be simulated via the following commands from the host repo
+# you wish to create a lazy clone of:
+#
+# cd /host_repo
+# git rev-parse HEAD
+# git init /guest_repo
+# git cat-file --batch-check --batch-all-objects | grep -v 'blob' |
+# cut -d' ' -f1 | git pack-objects /guest_repo/.git/objects/pack/noblobs
+# cd /guest_repo
+# git config core.virtualizeobjects true
+# git reset --hard
+#
+# Please note, this sample is a minimal skeleton. No proper error handling
+# was implemented.
+#
+
+use strict;
+use warnings;
+
+#
+# Point $DIR to the folder where your host git repo is located so we can pull
+# missing objects from it
+#
+my $DIR = "/host_repo/.git/";
+
+sub packet_bin_read {
+ my $buffer;
+ my $bytes_read = read STDIN, $buffer, 4;
+ if ( $bytes_read == 0 ) {
+
+ # EOF - Git stopped talking to us!
+ exit();
+ }
+ elsif ( $bytes_read != 4 ) {
+ die "invalid packet: '$buffer'";
+ }
+ my $pkt_size = hex($buffer);
+ if ( $pkt_size == 0 ) {
+ return ( 1, "" );
+ }
+ elsif ( $pkt_size > 4 ) {
+ my $content_size = $pkt_size - 4;
+ $bytes_read = read STDIN, $buffer, $content_size;
+ if ( $bytes_read != $content_size ) {
+ die "invalid packet ($content_size bytes expected; $bytes_read bytes read)";
+ }
+ return ( 0, $buffer );
+ }
+ else {
+ die "invalid packet size: $pkt_size";
+ }
+}
+
+sub packet_txt_read {
+ my ( $res, $buf ) = packet_bin_read();
+ unless ( $buf =~ s/\n$// ) {
+ die "A non-binary line MUST be terminated by an LF.";
+ }
+ return ( $res, $buf );
+}
+
+sub packet_bin_write {
+ my $buf = shift;
+ print STDOUT sprintf( "%04x", length($buf) + 4 );
+ print STDOUT $buf;
+ STDOUT->flush();
+}
+
+sub packet_txt_write {
+ packet_bin_write( $_[0] . "\n" );
+}
+
+sub packet_flush {
+ print STDOUT sprintf( "%04x", 0 );
+ STDOUT->flush();
+}
+
+( packet_txt_read() eq ( 0, "git-read-object-client" ) ) || die "bad initialize";
+( packet_txt_read() eq ( 0, "version=1" ) ) || die "bad version";
+( packet_bin_read() eq ( 1, "" ) ) || die "bad version end";
+
+packet_txt_write("git-read-object-server");
+packet_txt_write("version=1");
+packet_flush();
+
+( packet_txt_read() eq ( 0, "capability=get" ) ) || die "bad capability";
+( packet_bin_read() eq ( 1, "" ) ) || die "bad capability end";
+
+packet_txt_write("capability=get");
+packet_flush();
+
+while (1) {
+ my ($command) = packet_txt_read() =~ /^command=([^=]+)$/;
+
+ if ( $command eq "get" ) {
+ my ($sha1) = packet_txt_read() =~ /^sha1=([0-9a-f]{40})$/;
+ packet_bin_read();
+
+ system ('git --git-dir="' . $DIR . '" cat-file blob ' . $sha1 . ' | git -c core.virtualizeobjects=false hash-object -w --stdin >/dev/null 2>&1');
+ packet_txt_write(($?) ? "status=error" : "status=success");
+ packet_flush();
+ } else {
+ die "bad command '$command'";
+ }
+}
diff --git a/contrib/scalar/docs/faq.md b/contrib/scalar/docs/faq.md
new file mode 100644
index 00000000000000..a14f78a996d5d5
--- /dev/null
+++ b/contrib/scalar/docs/faq.md
@@ -0,0 +1,51 @@
+Frequently Asked Questions
+==========================
+
+Using Scalar
+------------
+
+### I don't want a sparse clone, I want every file after I clone!
+
+Run `scalar clone --full-clone <url>` to initialize your repo to include
+every file. You can switch to a sparse-checkout later by running
+`git sparse-checkout init --cone`.
+
+### I already cloned without `--full-clone`. How do I get everything?
+
+Run `git sparse-checkout disable`.
+
+Scalar Design Decisions
+-----------------------
+
+There may be many design decisions within Scalar that are confusing at first
+glance. Some of them may cause friction when you use Scalar with your existing
+repos and existing habits.
+
+> Scalar has the most benefit when users design repositories
+> with efficient patterns.
+
+For example: Scalar uses the sparse-checkout feature to limit the size of the
+working directory within a large monorepo. It is designed to work efficiently
+with monorepos that are highly componentized, allowing most developers to
+need many fewer files in their daily work.
+
+### Why does `scalar clone` create a `<repo>/src` folder?
+
+Scalar uses a file system watcher to keep track of changes under this `src` folder.
+Any activity in this folder is assumed to be important to Git operations. By
+creating the `src` folder, we are making it easy for your build system to
+create output folders outside the `src` directory. We commonly see systems
+create folders for build outputs and package downloads. Scalar itself creates
+these folders during its builds.
+
+Your build system may create build artifacts such as `.obj` or `.lib` files
+next to your source code. These are commonly "hidden" from Git using
+`.gitignore` files. Having such artifacts in your source tree creates
+additional work for Git because it needs to look at these files and match them
+against the `.gitignore` patterns.
+
+By following the `src` pattern Scalar tries to establish and placing your build
+intermediates and outputs parallel with the `src` folder and not inside it,
+you can help optimize Git command performance for developers in the repository
+by limiting the number of files Git needs to consider for many common
+operations.
diff --git a/contrib/scalar/docs/getting-started.md b/contrib/scalar/docs/getting-started.md
new file mode 100644
index 00000000000000..d5125330320d2c
--- /dev/null
+++ b/contrib/scalar/docs/getting-started.md
@@ -0,0 +1,109 @@
+Getting Started
+===============
+
+Registering existing Git repos
+------------------------------
+
+To add a repository to the list of registered repos, run `scalar register [<path>]`.
+If `<path>` is not provided, then the "current repository" is discovered from
+the working directory by scanning the parent paths for a path containing a `.git`
+folder, possibly inside a `src` folder.
+
+To see which repositories are currently tracked by the service, run
+`scalar list`.
+
+Run `scalar unregister [<path>]` to remove the repo from this list.
+
+Creating a new Scalar clone
+---------------------------------------------------
+
+The `clone` verb creates a local enlistment of a remote repository using the
+partial clone feature available e.g. on GitHub, or using the
+[GVFS protocol](https://github.com/microsoft/VFSForGit/blob/HEAD/Protocol.md),
+such as Azure Repos.
+
+```
+scalar clone [options] <url> [<dir>]
+```
+
+Create a local copy of the repository at `<url>`. If specified, create the `<dir>`
+directory and place the repository there. Otherwise, the last section of the `<url>`
+will be used for `<dir>`.
+
+At the end, the repo is located at `<dir>/src`. By default, the sparse-checkout
+feature is enabled and the only files present are those in the root of your
+Git repository. Use `git sparse-checkout set` to expand the set of directories
+you want to see, or `git sparse-checkout disable` to expand to all files. You
+can explore the subdirectories outside your sparse-checkout specification using
+`git ls-tree HEAD`.
+
+### Sparse Repo Mode
+
+By default, Scalar reduces your working directory to only the files at the
+root of the repository. You need to add the folders you care about to build up
+to your working set.
+
+* `scalar clone <url>`
+ * Please choose the **Clone with HTTPS** option in the `Clone Repository` dialog in Azure Repos, not **Clone with SSH**.
+* `cd <dir>\src`
+* At this point, your `src` directory only contains files that appear in your root
+ tree. No folders are populated.
+* Set the directory list for your sparse-checkout using:
+ 1. `git sparse-checkout set <dir1> <dir2> ...`
+ 2. `git sparse-checkout set --stdin < dir-list.txt`
+* Run git commands as you normally would.
+* To fully populate your working directory, run `git sparse-checkout disable`.
+
+If instead you want to start with all files on-disk, you can clone with the
+`--full-clone` option. To enable sparse-checkout after the fact, run
+`git sparse-checkout init --cone`. This will initialize your sparse-checkout
+patterns to only match the files at root.
+
+If you are unfamiliar with what directories are available in the repository,
+then you can run `git ls-tree -d --name-only HEAD` to discover the directories
+at root, or `git ls-tree -d --name-only HEAD <path>` to discover the directories
+in `<path>`.
+
+### Options
+
+These options allow a user to customize their initial enlistment.
+
+* `--full-clone`: If specified, do not initialize the sparse-checkout feature.
+ All files will be present in your `src` directory. This behaves very similar
+ to a Git partial clone in that blobs are downloaded on demand. However, it
+ will use the GVFS protocol to download all Git objects.
+
+* `--cache-server-url=<url>`: If specified, set the intended cache server to
+ the specified `<url>`. All object queries will use the GVFS protocol to this
+ `<url>` instead of the origin remote. If the remote supplies a list of
+ cache servers via the `/gvfs/config` endpoint, then the `clone` command
+ will select a nearby cache server from that list.
+
+* `--branch=<ref>`: Specify the branch to checkout after clone.
+
+* `--local-cache-path=<path>`: Use this option to override the path for the
+ local Scalar cache. If not specified, then Scalar will select a default
+ path to share objects with your other enlistments. On Windows, this path
+ is a subdirectory of `<Volume>:\.scalarCache\`. On Mac, this path is a
+ subdirectory of `~/.scalarCache/`. The default cache path is recommended so
+ multiple enlistments of the same remote repository share objects on the
+ same device.
+
+### Advanced Options
+
+The options below are not intended for use by a typical user. These are
+usually used by build machines to create a temporary enlistment that
+operates on a single commit.
+
+* `--single-branch`: Use this option to only download metadata for the branch
+ that will be checked out. This is helpful for build machines that target
+ a remote with many branches. Any `git fetch` commands after the clone will
+ still ask for all branches.
+
+Removing a Scalar Clone
+-----------------------
+
+Since the `scalar clone` command sets up a file-system watcher (when available),
+that watcher could prevent deleting the enlistment. Run `scalar delete <enlistment>`
+from outside of your enlistment to unregister the enlistment from the filesystem
+watcher and delete the enlistment at `<enlistment>`.
diff --git a/contrib/scalar/docs/index.md b/contrib/scalar/docs/index.md
new file mode 100644
index 00000000000000..4f56e2b0ebbac6
--- /dev/null
+++ b/contrib/scalar/docs/index.md
@@ -0,0 +1,54 @@
+Scalar: Enabling Git at Scale
+=============================
+
+Scalar is a tool that helps Git scale to some of the largest Git repositories.
+It achieves this by enabling some advanced Git features, such as:
+
+* *Partial clone:* reduces time to get a working repository by not
+ downloading all Git objects right away.
+
+* *Background prefetch:* downloads Git object data from all remotes every
+ hour, reducing the amount of time for foreground `git fetch` calls.
+
+* *Sparse-checkout:* limits the size of your working directory.
+
+* *File system monitor:* tracks the recently modified files and eliminates
+ the need for Git to scan the entire worktree.
+
+* *Commit-graph:* accelerates commit walks and reachability calculations,
+ speeding up commands like `git log`.
+
+* *Multi-pack-index:* enables fast object lookups across many pack-files.
+
+* *Incremental repack:* Repacks the packed Git data into fewer pack-file
+ without disrupting concurrent commands by using the multi-pack-index.
+
+By running `scalar register` in any Git repo, Scalar will automatically enable
+these features for that repo (except partial clone) and start running suggested
+maintenance in the background using
+[the `git maintenance` feature](https://git-scm.com/docs/git-maintenance).
+
+Repos cloned with the `scalar clone` command use partial clone or the
+[GVFS protocol](https://github.com/microsoft/VFSForGit/blob/HEAD/Protocol.md)
+to significantly reduce the amount of data required to get started
+using a repository. By delaying all blob downloads until they are required,
+Scalar allows you to work with very large repositories quickly. The GVFS
+protocol allows a network of _cache servers_ to serve objects with lower
+latency and higher throughput. The cache servers also reduce load on the
+central server.
+
+Documentation
+-------------
+
+* [Getting Started](getting-started.md): Get started with Scalar.
+ Includes `scalar register`, `scalar unregister`, `scalar clone`, and
+ `scalar delete`.
+
+* [Troubleshooting](troubleshooting.md):
+ Collect diagnostic information or update custom settings. Includes
+ `scalar diagnose` and `scalar cache-server`.
+
+* [The Philosophy of Scalar](philosophy.md): Why does Scalar work the way
+ it does, and how do we make decisions about its future?
+
+* [Frequently Asked Questions](faq.md)
diff --git a/contrib/scalar/docs/philosophy.md b/contrib/scalar/docs/philosophy.md
new file mode 100644
index 00000000000000..e3dfa025a2504c
--- /dev/null
+++ b/contrib/scalar/docs/philosophy.md
@@ -0,0 +1,71 @@
+The Philosophy of Scalar
+========================
+
+The team building Scalar has **opinions** about Git performance. Scalar
+takes out the guesswork by automatically configuring your Git repositories
+to take advantage of the latest and greatest features. It is difficult to
+say that these are the absolute best settings for every repository, but
+these settings do work for some of the largest repositories in the world.
+
+Scalar intends to do very little more than the standard Git client. We
+actively implement new features into Git instead of Scalar, then update
+Scalar only to configure those new settings. In particular, we ported
+features like background maintenance to Git to make Scalar simpler and
+make Git more powerful.
+
+Scalar ships inside [a custom version of Git][microsoft-git], but we are
+working to make it available in other forks of Git. The only feature
+that is not intended to ever reach the standard Git client is Scalar's use
+of [the GVFS Protocol][gvfs-protocol], which is essentially an older
+version of [Git's partial clone feature](https://github.blog/2020-12-21-get-up-to-speed-with-partial-clone-and-shallow-clone/)
+that was available first in Azure Repos. Services such as GitHub support
+only partial clone instead of the GVFS protocol because that is the
+standard adopted by the Git project. If your hosting service supports
+partial clone, then we absolutely recommend it as a way to greatly speed
+up your clone and fetch times and to reduce how much disk space your Git
+repository requires. Scalar will help with this!
+
+If you don't use the GVFS Protocol, then most of the value of Scalar can
+be found in the core Git client. However, most of the advanced features
+that really optimize Git's performance are off by default for compatibility
+reasons. To really take advantage of Git's latest and greatest features,
+you need to study the [`git config` documentation](https://git-scm.com/docs/git-config)
+and regularly read [the Git release notes](https://github.com/git/git/tree/master/Documentation/RelNotes).
+Even if you do all that work and customize your Git settings on your machines,
+you likely will want to share those settings with other team members.
+Or, you can just use Scalar!
+
+Using `scalar register` on an existing Git repository will give you these
+benefits:
+
+* Additional compression of your `.git/index` file.
+* Hourly background `git fetch` operations, keeping you in-sync with your
+ remotes.
+* Advanced data structures, such as the `commit-graph` and `multi-pack-index`
+ are updated automatically in the background.
+* If using macOS or Windows, then Scalar configures Git's builtin File System
+ Monitor, providing faster commands such as `git status` or `git add`.
+
+Additionally, if you use `scalar clone` to create a new repository, then
+you will automatically get these benefits:
+
+* Use Git's partial clone feature to only download the files you need for
+ your current checkout.
+* Use Git's [sparse-checkout feature][sparse-checkout] to minimize the
+ number of files required in your working directory.
+ [Read more about sparse-checkout here.][sparse-checkout-blog]
+* Create the Git repository inside `<enlistment>/src` to make it easy to
+ place build artifacts outside of the Git repository, such as in
+ `<enlistment>/bin` or `<enlistment>/packages`.
+
+We also admit that these **opinions** can always be improved! If you have
+an idea of how to improve our setup, consider
+[creating an issue](https://github.com/microsoft/scalar/issues/new) or
+contributing a pull request! Some [existing](https://github.com/microsoft/scalar/issues/382)
+[issues](https://github.com/microsoft/scalar/issues/388) have already
+improved our configuration settings and roadmap!
+
+[gvfs-protocol]: https://github.com/microsoft/VFSForGit/blob/HEAD/Protocol.md
+[microsoft-git]: https://github.com/microsoft/git
+[sparse-checkout]: https://git-scm.com/docs/git-sparse-checkout
+[sparse-checkout-blog]: https://github.blog/2020-01-17-bring-your-monorepo-down-to-size-with-sparse-checkout/
diff --git a/contrib/scalar/docs/troubleshooting.md b/contrib/scalar/docs/troubleshooting.md
new file mode 100644
index 00000000000000..c54d2438f22523
--- /dev/null
+++ b/contrib/scalar/docs/troubleshooting.md
@@ -0,0 +1,40 @@
+Troubleshooting
+===============
+
+Diagnosing Issues
+-----------------
+
+The `scalar diagnose` command collects logs and config details for the current
+repository. The resulting zip file helps root-cause issues.
+
+When run inside your repository, this command creates a zip file containing several important
+files for that repository. This includes:
+
+* Configuration files from your `.git` folder, such as the `config` file,
+ `index`, `hooks`, and `refs`.
+
+* A summary of your Git object database, including the number of loose objects
+ and the names and sizes of pack-files.
+
+As the `diagnose` command completes, it provides the path of the resulting
+zip file. This zip can be attached to bug reports to make the analysis easier.
+
+Modifying Configuration Values
+------------------------------
+
+The Scalar-specific configuration is only available for repos using the
+GVFS protocol.
+
+### Cache Server URL
+
+When using an enlistment cloned with `scalar clone` and the GVFS protocol,
+you will have a value called the cache server URL. Cache servers are a feature
+of the GVFS protocol to provide low-latency access to the on-demand object
+requests. This modifies the `gvfs.cache-server` setting in your local Git config
+file.
+
+Run `scalar cache-server --get` to see the current cache server.
+
+Run `scalar cache-server --list` to see the available cache server URLs.
+
+Run `scalar cache-server --set=<url>` to set your cache server to `<url>`.
diff --git a/convert.c b/convert.c
index 95e6a5244fc26c..7707038b1580b3 100644
--- a/convert.c
+++ b/convert.c
@@ -1,4 +1,5 @@
#include "cache.h"
+#include "gvfs.h"
#include "config.h"
#include "object-store.h"
#include "attr.h"
@@ -548,6 +549,9 @@ static int crlf_to_git(struct index_state *istate,
if (!buf)
return 1;
+ if (gvfs_config_is_set(GVFS_BLOCK_FILTERS_AND_EOL_CONVERSIONS))
+ die("CRLF conversions not supported when running under GVFS");
+
/* only grow if not in place */
if (strbuf_avail(buf) + buf->len < len)
strbuf_grow(buf, len - buf->len);
@@ -587,6 +591,9 @@ static int crlf_to_worktree(const char *src, size_t len, struct strbuf *buf,
if (!will_convert_lf_to_crlf(&stats, crlf_action))
return 0;
+ if (gvfs_config_is_set(GVFS_BLOCK_FILTERS_AND_EOL_CONVERSIONS))
+ die("CRLF conversions not supported when running under GVFS");
+
/* are we "faking" in place editing ? */
if (src == buf->buf)
to_free = strbuf_detach(buf, NULL);
@@ -698,6 +705,9 @@ static int apply_single_file_filter(const char *path, const char *src, size_t le
struct async async;
struct filter_params params;
+ if (gvfs_config_is_set(GVFS_BLOCK_FILTERS_AND_EOL_CONVERSIONS))
+ die("Filter \"%s\" not supported when running under GVFS", cmd);
+
memset(&async, 0, sizeof(async));
async.proc = filter_buffer_or_fd;
async.data = ¶ms;
@@ -1109,6 +1119,9 @@ static int ident_to_git(const char *src, size_t len,
if (!buf)
return 1;
+ if (gvfs_config_is_set(GVFS_BLOCK_FILTERS_AND_EOL_CONVERSIONS))
+ die("ident conversions not supported when running under GVFS");
+
/* only grow if not in place */
if (strbuf_avail(buf) + buf->len < len)
strbuf_grow(buf, len - buf->len);
@@ -1156,6 +1169,9 @@ static int ident_to_worktree(const char *src, size_t len,
if (!cnt)
return 0;
+ if (gvfs_config_is_set(GVFS_BLOCK_FILTERS_AND_EOL_CONVERSIONS))
+ die("ident conversions not supported when running under GVFS");
+
/* are we "faking" in place editing ? */
if (src == buf->buf)
to_free = strbuf_detach(buf, NULL);
@@ -1605,6 +1621,9 @@ static int lf_to_crlf_filter_fn(struct stream_filter *filter,
size_t count, o = 0;
struct lf_to_crlf_filter *lf_to_crlf = (struct lf_to_crlf_filter *)filter;
+ if (gvfs_config_is_set(GVFS_BLOCK_FILTERS_AND_EOL_CONVERSIONS))
+ die("CRLF conversions not supported when running under GVFS");
+
/*
* We may be holding onto the CR to see if it is followed by a
* LF, in which case we would need to go to the main loop.
@@ -1849,6 +1868,9 @@ static int ident_filter_fn(struct stream_filter *filter,
struct ident_filter *ident = (struct ident_filter *)filter;
static const char head[] = "$Id";
+ if (gvfs_config_is_set(GVFS_BLOCK_FILTERS_AND_EOL_CONVERSIONS))
+ die("ident conversions not supported when running under GVFS");
+
if (!input) {
/* drain upon eof */
switch (ident->state) {
diff --git a/credential.c b/credential.c
index f6389a50684a6e..2c981321ca2b12 100644
--- a/credential.c
+++ b/credential.c
@@ -286,6 +286,8 @@ static int run_credential_helper(struct credential *c,
else
helper.no_stdout = 1;
+ helper.trace2_child_class = "cred";
+
if (start_command(&helper) < 0)
return -1;
diff --git a/csum-file.c b/csum-file.c
index 59ef3398ca2b01..36e581588764b6 100644
--- a/csum-file.c
+++ b/csum-file.c
@@ -45,7 +45,8 @@ void hashflush(struct hashfile *f)
unsigned offset = f->offset;
if (offset) {
- the_hash_algo->update_fn(&f->ctx, f->buffer, offset);
+ if (!f->skip_hash)
+ the_hash_algo->update_fn(&f->ctx, f->buffer, offset);
flush(f, f->buffer, offset);
f->offset = 0;
}
@@ -64,7 +65,16 @@ int finalize_hashfile(struct hashfile *f, unsigned char *result,
int fd;
hashflush(f);
- the_hash_algo->final_fn(f->buffer, &f->ctx);
+
+ /*
+ * If we skip the hash function, be sure to create an empty hash
+ * for the results.
+ */
+ if (f->skip_hash)
+ memset(f->buffer, 0, the_hash_algo->rawsz);
+ else
+ the_hash_algo->final_fn(f->buffer, &f->ctx);
+
if (result)
hashcpy(result, f->buffer);
if (flags & CSUM_HASH_IN_STREAM)
@@ -158,6 +168,7 @@ static struct hashfile *hashfd_internal(int fd, const char *name,
f->buffer_len = buffer_len;
f->buffer = xmalloc(buffer_len);
f->check_buffer = NULL;
+ f->skip_hash = 0;
return f;
}
diff --git a/csum-file.h b/csum-file.h
index 0d29f528fbcb51..1dfd21d72fdd93 100644
--- a/csum-file.h
+++ b/csum-file.h
@@ -20,6 +20,13 @@ struct hashfile {
size_t buffer_len;
unsigned char *buffer;
unsigned char *check_buffer;
+
+ /*
+ * If set to 1, skip_hash indicates that we should
+ * not actually compute the hash for this hashfile and
+ * instead only use it as a buffered write.
+ */
+ int skip_hash;
};
/* Checkpoint */
diff --git a/diagnose.c b/diagnose.c
index 8f2656989666b6..52c4df114d5d04 100644
--- a/diagnose.c
+++ b/diagnose.c
@@ -7,6 +7,7 @@
#include "strvec.h"
#include "object-store.h"
#include "packfile.h"
+#include "config.h"
struct archive_dir {
const char *path;
@@ -102,6 +103,39 @@ static unsigned char get_dtype(struct dirent *e, struct strbuf *path)
return dtype;
}
+static void dir_stats(struct strbuf *buf, const char *path)
+{
+ DIR *dir = opendir(path);
+ struct dirent *e;
+ struct stat e_stat;
+ struct strbuf file_path = STRBUF_INIT;
+ size_t base_path_len;
+
+ if (!dir)
+ return;
+
+ strbuf_addstr(buf, "Contents of ");
+ strbuf_add_absolute_path(buf, path);
+ strbuf_addstr(buf, ":\n");
+
+ strbuf_add_absolute_path(&file_path, path);
+ strbuf_addch(&file_path, '/');
+ base_path_len = file_path.len;
+
+ while ((e = readdir(dir)) != NULL)
+ if (!is_dot_or_dotdot(e->d_name) && e->d_type == DT_REG) {
+ strbuf_setlen(&file_path, base_path_len);
+ strbuf_addstr(&file_path, e->d_name);
+ if (!stat(file_path.buf, &e_stat))
+ strbuf_addf(buf, "%-70s %16"PRIuMAX"\n",
+ e->d_name,
+ (uintmax_t)e_stat.st_size);
+ }
+
+ strbuf_release(&file_path);
+ closedir(dir);
+}
+
static int count_files(struct strbuf *path)
{
DIR *dir = opendir(path->buf);
@@ -214,7 +248,8 @@ int create_diagnostics_archive(struct strbuf *zip_path, enum diagnose_mode mode)
struct strvec archiver_args = STRVEC_INIT;
char **argv_copy = NULL;
int stdout_fd = -1, archiver_fd = -1;
- struct strbuf buf = STRBUF_INIT;
+ char *cache_server_url = NULL, *shared_cache = NULL;
+ struct strbuf buf = STRBUF_INIT, path = STRBUF_INIT;
int res, i;
struct archive_dir archive_dirs[] = {
{ ".git", 0 },
@@ -249,6 +284,13 @@ int create_diagnostics_archive(struct strbuf *zip_path, enum diagnose_mode mode)
get_version_info(&buf, 1);
strbuf_addf(&buf, "Repository root: %s\n", the_repository->worktree);
+
+ git_config_get_string("gvfs.cache-server", &cache_server_url);
+ git_config_get_string("gvfs.sharedCache", &shared_cache);
+ strbuf_addf(&buf, "Cache Server: %s\nLocal Cache: %s\n\n",
+ cache_server_url ? cache_server_url : "None",
+ shared_cache ? shared_cache : "None");
+
get_disk_info(&buf);
write_or_die(stdout_fd, buf.buf, buf.len);
strvec_pushf(&archiver_args,
@@ -279,6 +321,41 @@ int create_diagnostics_archive(struct strbuf *zip_path, enum diagnose_mode mode)
}
}
+ if (shared_cache) {
+ strbuf_reset(&buf);
+ strbuf_addf(&path, "%s/pack", shared_cache);
+ strbuf_reset(&buf);
+ strbuf_addstr(&buf, "--add-virtual-file=packs-cached.txt:");
+ dir_stats(&buf, path.buf);
+ strvec_push(&archiver_args, buf.buf);
+
+ strbuf_reset(&buf);
+ strbuf_addstr(&buf, "--add-virtual-file=objects-cached.txt:");
+ loose_objs_stats(&buf, shared_cache);
+ strvec_push(&archiver_args, buf.buf);
+
+ strbuf_reset(&path);
+ strbuf_addf(&path, "%s/info", shared_cache);
+ if (is_directory(path.buf)) {
+ DIR *dir = opendir(path.buf);
+ struct dirent *e;
+
+ while ((e = readdir(dir))) {
+ if (!strcmp(".", e->d_name) || !strcmp("..", e->d_name))
+ continue;
+
+ strbuf_reset(&buf);
+ strbuf_addf(&buf, "--add-virtual-file=info/%s:", e->d_name);
+ if (strbuf_read_file(&buf, path.buf, 0) < 0) {
+ res = error_errno(_("could not read '%s'"), path.buf);
+ goto diagnose_cleanup;
+ }
+ strvec_push(&archiver_args, buf.buf);
+ }
+ closedir(dir);
+ }
+ }
+
strvec_pushl(&archiver_args, "--prefix=",
oid_to_hex(the_hash_algo->empty_tree), "--", NULL);
@@ -292,10 +369,13 @@ int create_diagnostics_archive(struct strbuf *zip_path, enum diagnose_mode mode)
goto diagnose_cleanup;
}
- fprintf(stderr, "\n"
- "Diagnostics complete.\n"
- "All of the gathered info is captured in '%s'\n",
- zip_path->buf);
+ strbuf_reset(&buf);
+ strbuf_addf(&buf, "\n"
+ "Diagnostics complete.\n"
+ "All of the gathered info is captured in '%s'\n",
+ zip_path->buf);
+ write_or_die(stdout_fd, buf.buf, buf.len);
+ write_or_die(2, buf.buf, buf.len);
diagnose_cleanup:
if (archiver_fd >= 0) {
@@ -306,6 +386,8 @@ int create_diagnostics_archive(struct strbuf *zip_path, enum diagnose_mode mode)
free(argv_copy);
strvec_clear(&archiver_args);
strbuf_release(&buf);
+ free(cache_server_url);
+ free(shared_cache);
return res;
}
diff --git a/diff.c b/diff.c
index 648f6717a5597c..56d552bc8363b5 100644
--- a/diff.c
+++ b/diff.c
@@ -3922,6 +3922,13 @@ static int reuse_worktree_file(struct index_state *istate,
if (!FAST_WORKING_DIRECTORY && !want_file && has_object_pack(oid))
return 0;
+ /*
+ * If this path does not match our sparse-checkout definition,
+ * then the file will not be in the working directory.
+ */
+ if (!path_in_sparse_checkout(name, istate))
+ return 0;
+
/*
* Similarly, if we'd have to convert the file contents anyway, that
* makes the optimization not worthwhile.
diff --git a/dir.c b/dir.c
index 08d560657486b0..c4c00c2ad03c74 100644
--- a/dir.c
+++ b/dir.c
@@ -6,6 +6,7 @@
* Junio Hamano, 2005-2006
*/
#include "cache.h"
+#include "virtualfilesystem.h"
#include "config.h"
#include "dir.h"
#include "object-store.h"
@@ -1414,6 +1415,17 @@ enum pattern_match_result path_matches_pattern_list(
int result = NOT_MATCHED;
size_t slash_pos;
+ /*
+ * The virtual file system data is used to prevent git from traversing
+ * any part of the tree that is not in the virtual file system. Return
+ * 1 to exclude the entry if it is not found in the virtual file system,
+ * else fall through to the regular excludes logic as it may further exclude.
+ */
+ if (*dtype == DT_UNKNOWN)
+ *dtype = resolve_dtype(DT_UNKNOWN, istate, pathname, pathlen);
+ if (is_excluded_from_virtualfilesystem(pathname, pathlen, *dtype) > 0)
+ return 1;
+
if (!pl->use_cone_patterns) {
pattern = last_matching_pattern_from_list(pathname, pathlen, basename,
dtype, pl, istate);
@@ -1503,6 +1515,13 @@ static int path_in_sparse_checkout_1(const char *path,
enum pattern_match_result match = UNDECIDED;
const char *end, *slash;
+ /*
+ * When using a virtual filesystem, there aren't really patterns
+ * to follow, but be extra careful to skip this check.
+ */
+ if (core_virtualfilesystem)
+ return 1;
+
/*
* We default to accepting a path if the path is empty, there are no
* patterns, or the patterns are of the wrong type.
@@ -1757,8 +1776,20 @@ struct path_pattern *last_matching_pattern(struct dir_struct *dir,
int is_excluded(struct dir_struct *dir, struct index_state *istate,
const char *pathname, int *dtype_p)
{
- struct path_pattern *pattern =
- last_matching_pattern(dir, istate, pathname, dtype_p);
+ struct path_pattern *pattern;
+
+ /*
+ * The virtual file system data is used to prevent git from traversing
+ * any part of the tree that is not in the virtual file system. Return
+ * 1 to exclude the entry if it is not found in the virtual file system,
+ * else fall through to the regular excludes logic as it may further exclude.
+ */
+ if (*dtype_p == DT_UNKNOWN)
+ *dtype_p = resolve_dtype(DT_UNKNOWN, istate, pathname, strlen(pathname));
+ if (is_excluded_from_virtualfilesystem(pathname, strlen(pathname), *dtype_p) > 0)
+ return 1;
+
+ pattern = last_matching_pattern(dir, istate, pathname, dtype_p);
if (pattern)
return pattern->flags & PATTERN_FLAG_NEGATIVE ? 0 : 1;
return 0;
@@ -2344,6 +2375,8 @@ static enum path_treatment treat_path(struct dir_struct *dir,
ignore_case);
if (dtype != DT_DIR && has_path_in_index)
return path_none;
+ if (is_excluded_from_virtualfilesystem(path->buf, path->len, dtype) > 0)
+ return path_excluded;
/*
* When we are looking at a directory P in the working tree,
@@ -2548,6 +2581,8 @@ static void add_path_to_appropriate_result_list(struct dir_struct *dir,
/* add the path to the appropriate result list */
switch (state) {
case path_excluded:
+ if (is_excluded_from_virtualfilesystem(path->buf, path->len, DT_DIR) > 0)
+ break;
if (dir->flags & DIR_SHOW_IGNORED)
dir_add_name(dir, istate, path->buf, path->len);
else if ((dir->flags & DIR_SHOW_IGNORED_TOO) ||
@@ -3094,6 +3129,8 @@ static int cmp_icase(char a, char b)
{
if (a == b)
return 0;
+ if (is_dir_sep(a))
+ return is_dir_sep(b) ? 0 : -1;
if (ignore_case)
return toupper(a) - toupper(b);
return a - b;
diff --git a/environment.c b/environment.c
index 18d042b467d26a..2289175605be76 100644
--- a/environment.c
+++ b/environment.c
@@ -72,9 +72,12 @@ int grafts_replace_parents = 1;
int core_apply_sparse_checkout;
int core_sparse_checkout_cone;
int sparse_expect_files_outside_of_patterns;
+int core_gvfs;
+const char *core_virtualfilesystem;
int merge_log_config = -1;
int precomposed_unicode = -1; /* see probe_utf8_pathname_composition() */
unsigned long pack_size_limit_cfg;
+int core_virtualize_objects;
enum log_refs_config log_all_ref_updates = LOG_REFS_UNSET;
#ifndef PROTECT_HFS_DEFAULT
@@ -86,6 +89,9 @@ int protect_hfs = PROTECT_HFS_DEFAULT;
#define PROTECT_NTFS_DEFAULT 1
#endif
int protect_ntfs = PROTECT_NTFS_DEFAULT;
+int core_use_gvfs_helper;
+const char *gvfs_cache_server_url;
+struct strbuf gvfs_shared_cache_pathname = STRBUF_INIT;
/*
* The character that begins a commented line in user-editable file
diff --git a/git.c b/git.c
index da411c53822a18..f591a6ef8be5db 100644
--- a/git.c
+++ b/git.c
@@ -1,10 +1,13 @@
#include "builtin.h"
+#include "gvfs.h"
#include "config.h"
#include "exec-cmd.h"
#include "help.h"
#include "run-command.h"
#include "alias.h"
#include "shallow.h"
+#include "dir.h"
+#include "hook.h"
#define RUN_SETUP (1<<0)
#define RUN_SETUP_GENTLY (1<<1)
@@ -17,6 +20,7 @@
#define SUPPORT_SUPER_PREFIX (1<<4)
#define DELAY_PAGER_CONFIG (1<<5)
#define NO_PARSEOPT (1<<6) /* parse-options is not used */
+#define BLOCK_ON_GVFS_REPO (1<<7) /* command not allowed in GVFS repos */
struct cmd_struct {
const char *cmd;
@@ -417,6 +421,68 @@ static int handle_alias(int *argcp, const char ***argv)
return ret;
}
+/* Runs pre/post-command hook */
+static struct strvec sargv = STRVEC_INIT;
+static int run_post_hook = 0;
+static int exit_code = -1;
+
+static int run_pre_command_hook(const char **argv)
+{
+ char *lock;
+ int ret = 0;
+ struct run_hooks_opt opt = RUN_HOOKS_OPT_INIT;
+
+ /*
+ * Ensure the global pre/post command hook is only called for
+ * the outer command and not when git is called recursively
+ * or spawns multiple commands (like with the alias command)
+ */
+ lock = getenv("COMMAND_HOOK_LOCK");
+ if (lock && !strcmp(lock, "true"))
+ return 0;
+ setenv("COMMAND_HOOK_LOCK", "true", 1);
+
+ /* call the hook proc */
+ strvec_pushv(&sargv, argv);
+ strvec_pushf(&sargv, "--git-pid=%"PRIuMAX, (uintmax_t)getpid());
+ strvec_pushv(&opt.args, sargv.v);
+ ret = run_hooks_opt("pre-command", &opt);
+
+ if (!ret)
+ run_post_hook = 1;
+ return ret;
+}
+
+static int run_post_command_hook(void)
+{
+ char *lock;
+ int ret = 0;
+ struct run_hooks_opt opt = RUN_HOOKS_OPT_INIT;
+
+ /*
+ * Only run post_command if pre_command succeeded in this process
+ */
+ if (!run_post_hook)
+ return 0;
+ lock = getenv("COMMAND_HOOK_LOCK");
+ if (!lock || strcmp(lock, "true"))
+ return 0;
+
+ strvec_pushv(&opt.args, sargv.v);
+ strvec_pushf(&opt.args, "--exit_code=%u", exit_code);
+ ret = run_hooks_opt("post-command", &opt);
+
+ run_post_hook = 0;
+ strvec_clear(&sargv);
+ setenv("COMMAND_HOOK_LOCK", "false", 1);
+ return ret;
+}
+
+static void post_command_hook_atexit(void)
+{
+ run_post_command_hook();
+}
+
static int run_builtin(struct cmd_struct *p, int argc, const char **argv)
{
int status, help;
@@ -457,18 +523,26 @@ static int run_builtin(struct cmd_struct *p, int argc, const char **argv)
if (!help && p->option & NEED_WORK_TREE)
setup_work_tree();
+ if (!help && p->option & BLOCK_ON_GVFS_REPO && gvfs_config_is_set(GVFS_BLOCK_COMMANDS))
+ die("'git %s' is not supported on a GVFS repo", p->cmd);
+
+ if (run_pre_command_hook(argv))
+ die("pre-command hook aborted command");
+
trace_argv_printf(argv, "trace: built-in: git");
trace2_cmd_name(p->cmd);
trace2_cmd_list_config();
trace2_cmd_list_env_vars();
validate_cache_entries(the_repository->index);
- status = p->fn(argc, argv, prefix);
+ exit_code = status = p->fn(argc, argv, prefix);
validate_cache_entries(the_repository->index);
if (status)
return status;
+ run_post_command_hook();
+
/* Somebody closed stdout? */
if (fstat(fileno(stdout), &st))
return 0;
@@ -537,7 +611,7 @@ static struct cmd_struct commands[] = {
{ "for-each-ref", cmd_for_each_ref, RUN_SETUP },
{ "for-each-repo", cmd_for_each_repo, RUN_SETUP_GENTLY },
{ "format-patch", cmd_format_patch, RUN_SETUP },
- { "fsck", cmd_fsck, RUN_SETUP },
+ { "fsck", cmd_fsck, RUN_SETUP | BLOCK_ON_GVFS_REPO},
{ "fsck-objects", cmd_fsck, RUN_SETUP },
{ "fsmonitor--daemon", cmd_fsmonitor__daemon, SUPPORT_SUPER_PREFIX | RUN_SETUP },
{ "gc", cmd_gc, RUN_SETUP },
@@ -578,7 +652,7 @@ static struct cmd_struct commands[] = {
{ "pack-refs", cmd_pack_refs, RUN_SETUP },
{ "patch-id", cmd_patch_id, RUN_SETUP_GENTLY | NO_PARSEOPT },
{ "pickaxe", cmd_blame, RUN_SETUP },
- { "prune", cmd_prune, RUN_SETUP },
+ { "prune", cmd_prune, RUN_SETUP | BLOCK_ON_GVFS_REPO},
{ "prune-packed", cmd_prune_packed, RUN_SETUP },
{ "pull", cmd_pull, RUN_SETUP | NEED_WORK_TREE },
{ "push", cmd_push, RUN_SETUP },
@@ -590,7 +664,7 @@ static struct cmd_struct commands[] = {
{ "remote", cmd_remote, RUN_SETUP },
{ "remote-ext", cmd_remote_ext, NO_PARSEOPT },
{ "remote-fd", cmd_remote_fd, NO_PARSEOPT },
- { "repack", cmd_repack, RUN_SETUP },
+ { "repack", cmd_repack, RUN_SETUP | BLOCK_ON_GVFS_REPO },
{ "replace", cmd_replace, RUN_SETUP },
{ "rerere", cmd_rerere, RUN_SETUP },
{ "reset", cmd_reset, RUN_SETUP },
@@ -610,13 +684,14 @@ static struct cmd_struct commands[] = {
{ "stash", cmd_stash, RUN_SETUP | NEED_WORK_TREE },
{ "status", cmd_status, RUN_SETUP | NEED_WORK_TREE },
{ "stripspace", cmd_stripspace },
- { "submodule--helper", cmd_submodule__helper, RUN_SETUP | SUPPORT_SUPER_PREFIX | NO_PARSEOPT },
+ { "submodule--helper", cmd_submodule__helper, RUN_SETUP | SUPPORT_SUPER_PREFIX | NO_PARSEOPT | BLOCK_ON_GVFS_REPO },
{ "switch", cmd_switch, RUN_SETUP | NEED_WORK_TREE },
{ "symbolic-ref", cmd_symbolic_ref, RUN_SETUP },
{ "tag", cmd_tag, RUN_SETUP | DELAY_PAGER_CONFIG },
{ "unpack-file", cmd_unpack_file, RUN_SETUP | NO_PARSEOPT },
{ "unpack-objects", cmd_unpack_objects, RUN_SETUP | NO_PARSEOPT },
{ "update-index", cmd_update_index, RUN_SETUP },
+ { "update-microsoft-git", cmd_update_microsoft_git },
{ "update-ref", cmd_update_ref, RUN_SETUP },
{ "update-server-info", cmd_update_server_info, RUN_SETUP },
{ "upload-archive", cmd_upload_archive, NO_PARSEOPT },
@@ -628,7 +703,7 @@ static struct cmd_struct commands[] = {
{ "verify-tag", cmd_verify_tag, RUN_SETUP },
{ "version", cmd_version },
{ "whatchanged", cmd_whatchanged, RUN_SETUP },
- { "worktree", cmd_worktree, RUN_SETUP },
+ { "worktree", cmd_worktree, RUN_SETUP | BLOCK_ON_GVFS_REPO },
{ "write-tree", cmd_write_tree, RUN_SETUP },
};
@@ -749,13 +824,16 @@ static void execv_dashed_external(const char **argv)
*/
trace_argv_printf(cmd.args.v, "trace: exec:");
+ if (run_pre_command_hook(cmd.args.v))
+ die("pre-command hook aborted command");
+
/*
* If we fail because the command is not found, it is
* OK to return. Otherwise, we just pass along the status code,
* or our usual generic code if we were not even able to exec
* the program.
*/
- status = run_command(&cmd);
+ exit_code = status = run_command(&cmd);
/*
* If the child process ran and we are now going to exit, emit a
@@ -766,6 +844,8 @@ static void execv_dashed_external(const char **argv)
exit(status);
else if (errno != ENOENT)
exit(128);
+
+ run_post_command_hook();
}
static int run_argv(int *argcp, const char ***argv)
@@ -873,6 +953,7 @@ int cmd_main(int argc, const char **argv)
}
trace_command_performance(argv);
+ atexit(post_command_hook_atexit);
/*
* "git-xxxx" is the same as "git xxxx", but we obviously:
@@ -902,10 +983,14 @@ int cmd_main(int argc, const char **argv)
} else {
/* The user didn't specify a command; give them help */
commit_pager_choice();
+ if (run_pre_command_hook(argv))
+ die("pre-command hook aborted command");
printf(_("usage: %s\n\n"), git_usage_string);
list_common_cmds_help();
printf("\n%s\n", _(git_more_info_string));
- exit(1);
+ exit_code = 1;
+ run_post_command_hook();
+ exit(exit_code);
}
cmd = argv[0];
diff --git a/gvfs-helper-client.c b/gvfs-helper-client.c
new file mode 100644
index 00000000000000..e6fbb6ed1a43c3
--- /dev/null
+++ b/gvfs-helper-client.c
@@ -0,0 +1,568 @@
+#include "cache.h"
+#include "strvec.h"
+#include "trace2.h"
+#include "oidset.h"
+#include "object.h"
+#include "object-store.h"
+#include "gvfs-helper-client.h"
+#include "sub-process.h"
+#include "sigchain.h"
+#include "pkt-line.h"
+#include "quote.h"
+#include "packfile.h"
+
+static struct oidset gh_client__oidset_queued = OIDSET_INIT;
+static unsigned long gh_client__oidset_count;
+
+struct gh_server__process {
+ struct subprocess_entry subprocess; /* must be first */
+ unsigned int supported_capabilities;
+};
+
+static int gh_server__subprocess_map_initialized;
+static struct hashmap gh_server__subprocess_map;
+static struct object_directory *gh_client__chosen_odb;
+
+/*
+ * The "objects" capability has verbs: "get" and "post" and "prefetch".
+ */
+#define CAP_OBJECTS (1u<<1)
+#define CAP_OBJECTS_NAME "objects"
+
+#define CAP_OBJECTS__VERB_GET1_NAME "get"
+#define CAP_OBJECTS__VERB_POST_NAME "post"
+#define CAP_OBJECTS__VERB_PREFETCH_NAME "prefetch"
+
+static int gh_client__start_fn(struct subprocess_entry *subprocess)
+{
+ static int versions[] = {1, 0};
+ static struct subprocess_capability capabilities[] = {
+ { CAP_OBJECTS_NAME, CAP_OBJECTS },
+ { NULL, 0 }
+ };
+
+ struct gh_server__process *entry = (struct gh_server__process *)subprocess;
+
+ return subprocess_handshake(subprocess, "gvfs-helper", versions,
+ NULL, capabilities,
+ &entry->supported_capabilities);
+}
+
+/*
+ * Send the queued OIDs in the OIDSET to gvfs-helper for it to
+ * fetch from the cache-server or main Git server using "/gvfs/objects"
+ * POST semantics.
+ *
+ * objects.post LF
+ * ( LF)*
+ *
+ *
+ */
+static int gh_client__send__objects_post(struct child_process *process)
+{
+ struct oidset_iter iter;
+ struct object_id *oid;
+ int err;
+
+ /*
+ * We assume that all of the packet_ routines call error()
+ * so that we don't have to.
+ */
+
+ err = packet_write_fmt_gently(
+ process->in,
+ (CAP_OBJECTS_NAME "." CAP_OBJECTS__VERB_POST_NAME "\n"));
+ if (err)
+ return err;
+
+ oidset_iter_init(&gh_client__oidset_queued, &iter);
+ while ((oid = oidset_iter_next(&iter))) {
+ err = packet_write_fmt_gently(process->in, "%s\n",
+ oid_to_hex(oid));
+ if (err)
+ return err;
+ }
+
+ err = packet_flush_gently(process->in);
+ if (err)
+ return err;
+
+ return 0;
+}
+
+/*
+ * Send the given OID to gvfs-helper for it to fetch from the
+ * cache-server or main Git server using "/gvfs/objects" GET
+ * semantics.
+ *
+ * This ignores any queued OIDs.
+ *
+ * objects.get LF
+ * LF
+ *
+ *
+ */
+static int gh_client__send__objects_get(struct child_process *process,
+ const struct object_id *oid)
+{
+ int err;
+
+ /*
+ * We assume that all of the packet_ routines call error()
+ * so that we don't have to.
+ */
+
+ err = packet_write_fmt_gently(
+ process->in,
+ (CAP_OBJECTS_NAME "." CAP_OBJECTS__VERB_GET1_NAME "\n"));
+ if (err)
+ return err;
+
+ err = packet_write_fmt_gently(process->in, "%s\n",
+ oid_to_hex(oid));
+ if (err)
+ return err;
+
+ err = packet_flush_gently(process->in);
+ if (err)
+ return err;
+
+ return 0;
+}
+
+/*
+ * Send a request to gvfs-helper to prefetch packfiles from either the
+ * cache-server or the main Git server using "/gvfs/prefetch".
+ *
+ * objects.prefetch LF
+ * [ LF]
+ *
+ */
+static int gh_client__send__objects_prefetch(struct child_process *process,
+ timestamp_t seconds_since_epoch)
+{
+ int err;
+
+ /*
+ * We assume that all of the packet_ routines call error()
+ * so that we don't have to.
+ */
+
+ err = packet_write_fmt_gently(
+ process->in,
+ (CAP_OBJECTS_NAME "." CAP_OBJECTS__VERB_PREFETCH_NAME "\n"));
+ if (err)
+ return err;
+
+ if (seconds_since_epoch) {
+ err = packet_write_fmt_gently(process->in, "%" PRItime "\n",
+ seconds_since_epoch);
+ if (err)
+ return err;
+ }
+
+ err = packet_flush_gently(process->in);
+ if (err)
+ return err;
+
+ return 0;
+}
+
+/*
+ * Update the loose object cache to include the newly created
+ * object.
+ */
+static void gh_client__update_loose_cache(const char *line)
+{
+ const char *v1_oid;
+ struct object_id oid;
+
+ if (!skip_prefix(line, "loose ", &v1_oid))
+ BUG("update_loose_cache: invalid line '%s'", line);
+
+ if (get_oid_hex(v1_oid, &oid))
+ BUG("update_loose_cache: invalid line '%s'", line);
+
+ odb_loose_cache_add_new_oid(gh_client__chosen_odb, &oid);
+}
+
+/*
+ * Update the packed-git list to include the newly created packfile.
+ */
+static void gh_client__update_packed_git(const char *line)
+{
+ struct strbuf path = STRBUF_INIT;
+ const char *v1_filename;
+ struct packed_git *p;
+ int is_local;
+
+ if (!skip_prefix(line, "packfile ", &v1_filename))
+ BUG("update_packed_git: invalid line '%s'", line);
+
+ /*
+ * ODB[0] is the local .git/objects. All others are alternates.
+ */
+ is_local = (gh_client__chosen_odb == the_repository->objects->odb);
+
+ strbuf_addf(&path, "%s/pack/%s",
+ gh_client__chosen_odb->path, v1_filename);
+ strbuf_strip_suffix(&path, ".pack");
+ strbuf_addstr(&path, ".idx");
+
+ p = add_packed_git(path.buf, path.len, is_local);
+ if (p)
+ install_packed_git_and_mru(the_repository, p);
+}
+
+/*
+ * CAP_OBJECTS verbs return the same format response:
+ *
+ *
+ * *
+ *
+ *
+ *
+ * Where:
+ *
+ * ::= odb SP LF
+ *
+ * ::= /
+ *
+ * ::= packfile SP LF
+ *
+ * ::= loose SP LF
+ *
+ * ::= ok LF
+ * / partial LF
+ * / error SP LF
+ *
+ * Note that `gvfs-helper` controls how/if it chunks the request when
+ * it talks to the cache-server and/or main Git server. So it is
+ * possible for us to receive many packfiles and/or loose objects *AND
+ * THEN* get a hard network error or a 404 on an individual object.
+ *
+ * If we get a partial result, we can let the caller try to continue
+ * -- for example, maybe an immediate request for a tree object was
+ * grouped with a queued request for a blob. The tree-walk *might* be
+ * able to continue and let the 404 blob be handled later.
+ */
+static int gh_client__objects__receive_response(
+ struct child_process *process,
+ enum gh_client__created *p_ghc,
+ int *p_nr_loose, int *p_nr_packfile)
+{
+ enum gh_client__created ghc = GHC__CREATED__NOTHING;
+ const char *v1;
+ char *line;
+ int len;
+ int nr_loose = 0;
+ int nr_packfile = 0;
+ int err = 0;
+
+ while (1) {
+ /*
+ * Warning: packet_read_line_gently() calls die()
+ * despite the _gently moniker.
+ */
+ len = packet_read_line_gently(process->out, NULL, &line);
+ if ((len < 0) || !line)
+ break;
+
+ if (starts_with(line, "odb")) {
+ /* trust that this matches what we expect */
+ }
+
+ else if (starts_with(line, "packfile")) {
+ gh_client__update_packed_git(line);
+ ghc |= GHC__CREATED__PACKFILE;
+ nr_packfile++;
+ }
+
+ else if (starts_with(line, "loose")) {
+ gh_client__update_loose_cache(line);
+ ghc |= GHC__CREATED__LOOSE;
+ nr_loose++;
+ }
+
+ else if (starts_with(line, "ok"))
+ ;
+ else if (starts_with(line, "partial"))
+ ;
+ else if (skip_prefix(line, "error ", &v1)) {
+ error("gvfs-helper error: '%s'", v1);
+ err = -1;
+ }
+ }
+
+ *p_ghc = ghc;
+ *p_nr_loose = nr_loose;
+ *p_nr_packfile = nr_packfile;
+
+ return err;
+}
+
+/*
+ * Select the preferred ODB for fetching missing objects.
+ * This should be the alternate with the same directory
+ * name as set in `gvfs.sharedCache`.
+ *
+ * Fallback to .git/objects if necessary.
+ */
+static void gh_client__choose_odb(void)
+{
+ struct object_directory *odb;
+
+ if (gh_client__chosen_odb)
+ return;
+
+ prepare_alt_odb(the_repository);
+ gh_client__chosen_odb = the_repository->objects->odb;
+
+ if (!gvfs_shared_cache_pathname.len)
+ return;
+
+ for (odb = the_repository->objects->odb->next; odb; odb = odb->next) {
+ if (!strcmp(odb->path, gvfs_shared_cache_pathname.buf)) {
+ gh_client__chosen_odb = odb;
+ return;
+ }
+ }
+}
+
+static struct gh_server__process *gh_client__find_long_running_process(
+ unsigned int cap_needed)
+{
+ struct gh_server__process *entry;
+ struct strvec argv = STRVEC_INIT;
+ struct strbuf quoted = STRBUF_INIT;
+
+ gh_client__choose_odb();
+
+ /*
+ * TODO decide what defaults we want.
+ */
+ strvec_push(&argv, "gvfs-helper");
+ strvec_push(&argv, "--fallback");
+ strvec_push(&argv, "--cache-server=trust");
+ strvec_pushf(&argv, "--shared-cache=%s",
+ gh_client__chosen_odb->path);
+ strvec_push(&argv, "server");
+
+ sq_quote_argv_pretty("ed, argv.v);
+
+ /*
+ * Find an existing long-running process with the above command
+ * line -or- create a new long-running process for this and
+ * subsequent requests.
+ */
+ if (!gh_server__subprocess_map_initialized) {
+ gh_server__subprocess_map_initialized = 1;
+ hashmap_init(&gh_server__subprocess_map,
+ (hashmap_cmp_fn)cmd2process_cmp, NULL, 0);
+ entry = NULL;
+ } else
+ entry = (struct gh_server__process *)subprocess_find_entry(
+ &gh_server__subprocess_map, quoted.buf);
+
+ if (!entry) {
+ entry = xmalloc(sizeof(*entry));
+ entry->supported_capabilities = 0;
+
+ if (subprocess_start_strvec(&gh_server__subprocess_map,
+ &entry->subprocess, 1,
+ &argv, gh_client__start_fn))
+ FREE_AND_NULL(entry);
+ }
+
+ if (entry &&
+ (entry->supported_capabilities & cap_needed) != cap_needed) {
+ error("gvfs-helper: does not support needed capabilities");
+ subprocess_stop(&gh_server__subprocess_map,
+ (struct subprocess_entry *)entry);
+ FREE_AND_NULL(entry);
+ }
+
+ strvec_clear(&argv);
+ strbuf_release("ed);
+
+ return entry;
+}
+
+void gh_client__queue_oid(const struct object_id *oid)
+{
+ /*
+ * Keep this trace as a printf only, so that it goes to the
+ * perf log, but not the event log. It is useful for interactive
+ * debugging, but generates way too much (unuseful) noise for the
+ * database.
+ */
+ if (trace2_is_enabled())
+ trace2_printf("gh_client__queue_oid: %s", oid_to_hex(oid));
+
+ if (!oidset_insert(&gh_client__oidset_queued, oid))
+ gh_client__oidset_count++;
+}
+
+/*
+ * This routine should actually take a "const struct oid_array *"
+ * rather than the component parts, but fetch_objects() uses
+ * this model (because of the call in sha1-file.c).
+ */
+void gh_client__queue_oid_array(const struct object_id *oids, int oid_nr)
+{
+ int k;
+
+ for (k = 0; k < oid_nr; k++)
+ gh_client__queue_oid(&oids[k]);
+}
+
+/*
+ * Bulk fetch all of the queued OIDs in the OIDSET.
+ */
+int gh_client__drain_queue(enum gh_client__created *p_ghc)
+{
+ struct gh_server__process *entry;
+ struct child_process *process;
+ int nr_loose = 0;
+ int nr_packfile = 0;
+ int err = 0;
+
+ *p_ghc = GHC__CREATED__NOTHING;
+
+ if (!gh_client__oidset_count)
+ return 0;
+
+ entry = gh_client__find_long_running_process(CAP_OBJECTS);
+ if (!entry)
+ return -1;
+
+ trace2_region_enter("gh-client", "objects/post", the_repository);
+
+ process = &entry->subprocess.process;
+
+ sigchain_push(SIGPIPE, SIG_IGN);
+
+ err = gh_client__send__objects_post(process);
+ if (!err)
+ err = gh_client__objects__receive_response(
+ process, p_ghc, &nr_loose, &nr_packfile);
+
+ sigchain_pop(SIGPIPE);
+
+ if (err) {
+ subprocess_stop(&gh_server__subprocess_map,
+ (struct subprocess_entry *)entry);
+ FREE_AND_NULL(entry);
+ }
+
+ trace2_data_intmax("gh-client", the_repository,
+ "objects/post/nr_objects", gh_client__oidset_count);
+ trace2_region_leave("gh-client", "objects/post", the_repository);
+
+ oidset_clear(&gh_client__oidset_queued);
+ gh_client__oidset_count = 0;
+
+ return err;
+}
+
+/*
+ * Get exactly 1 object immediately.
+ * Ignore any queued objects.
+ */
+int gh_client__get_immediate(const struct object_id *oid,
+ enum gh_client__created *p_ghc)
+{
+ struct gh_server__process *entry;
+ struct child_process *process;
+ int nr_loose = 0;
+ int nr_packfile = 0;
+ int err = 0;
+
+ /*
+ * Keep this trace as a printf only, so that it goes to the
+ * perf log, but not the event log. It is useful for interactive
+ * debugging, but generates way too much (unuseful) noise for the
+ * database.
+ */
+ if (trace2_is_enabled())
+ trace2_printf("gh_client__get_immediate: %s", oid_to_hex(oid));
+
+ entry = gh_client__find_long_running_process(CAP_OBJECTS);
+ if (!entry)
+ return -1;
+
+ trace2_region_enter("gh-client", "objects/get", the_repository);
+
+ process = &entry->subprocess.process;
+
+ sigchain_push(SIGPIPE, SIG_IGN);
+
+ err = gh_client__send__objects_get(process, oid);
+ if (!err)
+ err = gh_client__objects__receive_response(
+ process, p_ghc, &nr_loose, &nr_packfile);
+
+ sigchain_pop(SIGPIPE);
+
+ if (err) {
+ subprocess_stop(&gh_server__subprocess_map,
+ (struct subprocess_entry *)entry);
+ FREE_AND_NULL(entry);
+ }
+
+ trace2_region_leave("gh-client", "objects/get", the_repository);
+
+ return err;
+}
+
+/*
+ * Ask gvfs-helper to prefetch commits-and-trees packfiles since a
+ * given timestamp.
+ *
+ * If seconds_since_epoch is zero, gvfs-helper will scan the ODB for
+ * the last received prefetch and ask for ones newer than that.
+ */
+int gh_client__prefetch(timestamp_t seconds_since_epoch,
+ int *nr_packfiles_received)
+{
+ struct gh_server__process *entry;
+ struct child_process *process;
+ enum gh_client__created ghc;
+ int nr_loose = 0;
+ int nr_packfile = 0;
+ int err = 0;
+
+ entry = gh_client__find_long_running_process(CAP_OBJECTS);
+ if (!entry)
+ return -1;
+
+ trace2_region_enter("gh-client", "objects/prefetch", the_repository);
+ trace2_data_intmax("gh-client", the_repository, "prefetch/since",
+ seconds_since_epoch);
+
+ process = &entry->subprocess.process;
+
+ sigchain_push(SIGPIPE, SIG_IGN);
+
+ err = gh_client__send__objects_prefetch(process, seconds_since_epoch);
+ if (!err)
+ err = gh_client__objects__receive_response(
+ process, &ghc, &nr_loose, &nr_packfile);
+
+ sigchain_pop(SIGPIPE);
+
+ if (err) {
+ subprocess_stop(&gh_server__subprocess_map,
+ (struct subprocess_entry *)entry);
+ FREE_AND_NULL(entry);
+ }
+
+ trace2_data_intmax("gh-client", the_repository,
+ "prefetch/packfile_count", nr_packfile);
+ trace2_region_leave("gh-client", "objects/prefetch", the_repository);
+
+ if (nr_packfiles_received)
+ *nr_packfiles_received = nr_packfile;
+
+ return err;
+}
diff --git a/gvfs-helper-client.h b/gvfs-helper-client.h
new file mode 100644
index 00000000000000..7692534ecda54c
--- /dev/null
+++ b/gvfs-helper-client.h
@@ -0,0 +1,87 @@
+#ifndef GVFS_HELPER_CLIENT_H
+#define GVFS_HELPER_CLIENT_H
+
+struct repository;
+struct commit;
+struct object_id;
+
+enum gh_client__created {
+ /*
+	 * The _get_ operation did not create anything. It doesn't
+ * matter if `gvfs-helper` had errors or not -- just that
+ * nothing was created.
+ */
+ GHC__CREATED__NOTHING = 0,
+
+ /*
+ * The _get_ operation created one or more packfiles.
+ */
+ GHC__CREATED__PACKFILE = 1<<1,
+
+ /*
+ * The _get_ operation created one or more loose objects.
+	 * (Not necessarily for the individual OID you requested.)
+ */
+ GHC__CREATED__LOOSE = 1<<2,
+
+ /*
+	 * The _get_ operation created one or more packfiles *and*
+ * one or more loose objects.
+ */
+ GHC__CREATED__PACKFILE_AND_LOOSE = (GHC__CREATED__PACKFILE |
+ GHC__CREATED__LOOSE),
+};
+
+/*
+ * Ask `gvfs-helper server` to immediately fetch a single object
+ * using "/gvfs/objects" GET semantics.
+ *
+ * A long-running background process is used to make subsequent
+ * requests more efficient.
+ *
+ * A loose object will be created in the shared-cache ODB and
+ * in-memory cache updated.
+ */
+int gh_client__get_immediate(const struct object_id *oid,
+ enum gh_client__created *p_ghc);
+
+/*
+ * Queue this OID for a future fetch using `gvfs-helper service`.
+ * It does not wait.
+ *
+ * Callers should not rely on the queued object being on disk until
+ * the queue has been drained.
+ */
+void gh_client__queue_oid(const struct object_id *oid);
+void gh_client__queue_oid_array(const struct object_id *oids, int oid_nr);
+
+/*
+ * Ask `gvfs-helper server` to fetch the set of queued OIDs using
+ * "/gvfs/objects" POST semantics.
+ *
+ * A long-running background process is used to make subsequent requests
+ * more efficient.
+ *
+ * One or more packfiles will be created in the shared-cache ODB.
+ */
+int gh_client__drain_queue(enum gh_client__created *p_ghc);
+
+/*
+ * Ask `gvfs-helper server` to fetch any "prefetch packs"
+ * available on the server more recent than the requested time.
+ *
+ * If seconds_since_epoch is zero, gvfs-helper will scan the ODB for
+ * the last received prefetch and ask for ones newer than that.
+ *
+ * A long-running background process is used to make subsequent requests
+ * (either prefetch or regular immediate/queued requests) more efficient.
+ *
+ * One or more packfiles will be created in the shared-cache ODB.
+ *
+ * Returns 0 on success, -1 on error. Optionally also returns the
+ * number of prefetch packs received.
+ */
+int gh_client__prefetch(timestamp_t seconds_since_epoch,
+ int *nr_packfiles_received);
+
+#endif /* GVFS_HELPER_CLIENT_H */
diff --git a/gvfs-helper.c b/gvfs-helper.c
new file mode 100644
index 00000000000000..e8e6fbbdd9f361
--- /dev/null
+++ b/gvfs-helper.c
@@ -0,0 +1,4217 @@
+// TODO Write a man page. Here are some notes for dogfooding.
+// TODO
+//
+// Usage: git gvfs-helper [<main_options>] <sub-command> [<sub-command_options>]
+//
+// <main_options>:
+//
+// --remote=<remote-name> // defaults to "origin"
+//
+// --fallback // boolean. defaults to off
+//
+// When a fetch from the cache-server fails, automatically
+// fallback to the main Git server. This option has no effect
+// if no cache-server is defined.
+//
+// --cache-server=<use>