From f78e3511b1722f02c0647a379dee7623c9b3bf03 Mon Sep 17 00:00:00 2001 From: DisposaBoy Date: Wed, 1 Jan 2020 11:24:57 +0000 Subject: [PATCH] Next (#956) 20.01.01 --- CHANGELOG.md | 27 + gosubl/about.py | 2 +- src/margo.sh/.github/workflows/margo-ci.yml | 22 + src/margo.sh/.travis.yml | 18 - src/margo.sh/Gopkg.lock | 47 +- src/margo.sh/Gopkg.toml | 4 + src/margo.sh/cmdpkg/margosublime/main.go | 2 +- src/margo.sh/golang/cursor/curctx.go | 19 + src/margo.sh/golang/gocode_suggest.go | 5 + src/margo.sh/golang/gopkg/import.go | 63 +- src/margo.sh/golang/goutil/goutil_test.go | 2 + src/margo.sh/golang/margocode.go | 15 +- src/margo.sh/golang/snippets/http-snippet.go | 60 + src/margo.sh/golang/snippets/snippets.go | 1 + src/margo.sh/golang/typecheck.go | 75 +- src/margo.sh/kimporter/kimporter.go | 186 +- src/margo.sh/kimporter/kimporter_test.go | 34 - src/margo.sh/kimporter/parse.go | 2 + src/margo.sh/memo/memo.go | 158 + src/margo.sh/mg/action.go | 64 + src/margo.sh/mg/agent.go | 3 + src/margo.sh/mg/ctx.go | 15 +- src/margo.sh/mg/issue.go | 67 +- src/margo.sh/mg/issue_test.go | 2 + src/margo.sh/mg/oom.go | 5 + src/margo.sh/mg/oom_nix.go | 26 + src/margo.sh/mg/oom_win.go | 9 + src/margo.sh/mg/restart.go | 22 +- src/margo.sh/mg/store.go | 45 +- src/margo.sh/mg/vfs.go | 18 +- src/margo.sh/mgpf/mgpf.go | 23 +- src/margo.sh/mgutil/memo.go | 104 +- src/margo.sh/mgutil/path_test.go | 2 + src/margo.sh/mgutil/sync.go | 30 + .../github.com/cpuguy83/go-md2man/LICENSE.md | 21 + .../cpuguy83/go-md2man/md2man/md2man.go | 20 + .../cpuguy83/go-md2man/md2man/roff.go | 285 + .../github.com/karrick/godirwalk/README.md | 56 +- .../karrick/godirwalk/azure-pipelines.yml | 6 +- .../karrick/godirwalk/debug_development.go | 14 + .../karrick/godirwalk/debug_release.go | 6 + .../github.com/karrick/godirwalk/dirent.go | 22 +- .../github.com/karrick/godirwalk/go.mod | 2 - .../github.com/karrick/godirwalk/modeType.go | 22 + .../karrick/godirwalk/modeTypeWithType.go | 22 +- .../karrick/godirwalk/modeTypeWithoutType.go | 22 +- .../github.com/karrick/godirwalk/readdir.go | 46 +- .../karrick/godirwalk/readdir_unix.go | 115 - .../karrick/godirwalk/readdir_windows.go | 51 - .../karrick/godirwalk/scandir_unix.go | 140 + .../karrick/godirwalk/scandir_windows.go | 92 + .../github.com/karrick/godirwalk/scanner.go | 44 + .../github.com/karrick/godirwalk/walk.go | 110 +- .../russross/blackfriday/.gitignore | 8 + .../russross/blackfriday/.travis.yml | 17 + .../russross/blackfriday/LICENSE.txt | 29 + .../github.com/russross/blackfriday/README.md | 369 + .../github.com/russross/blackfriday/block.go | 1474 ++ .../github.com/russross/blackfriday/doc.go | 32 + .../github.com/russross/blackfriday/go.mod | 1 + .../github.com/russross/blackfriday/html.go | 938 + .../github.com/russross/blackfriday/inline.go | 1154 + .../github.com/russross/blackfriday/latex.go | 334 + .../russross/blackfriday/markdown.go | 941 + .../russross/blackfriday/smartypants.go | 430 + .../vendor/github.com/urfave/cli/.gitignore | 1 + .../vendor/github.com/urfave/cli/.travis.yml | 36 +- .../vendor/github.com/urfave/cli/CHANGELOG.md | 59 +- .../github.com/urfave/cli/CODE_OF_CONDUCT.md | 74 + .../github.com/urfave/cli/CONTRIBUTING.md | 18 + .../vendor/github.com/urfave/cli/README.md | 316 +- .../vendor/github.com/urfave/cli/app.go | 113 +- .../vendor/github.com/urfave/cli/appveyor.yml | 24 +- .../vendor/github.com/urfave/cli/build.go | 187 + .../vendor/github.com/urfave/cli/category.go | 2 +- .../vendor/github.com/urfave/cli/cli.go | 2 +- 
.../vendor/github.com/urfave/cli/command.go | 149 +- .../vendor/github.com/urfave/cli/context.go | 82 +- .../vendor/github.com/urfave/cli/docs.go | 145 + .../vendor/github.com/urfave/cli/fish.go | 190 + .../github.com/urfave/cli/flag-types.json | 93 - .../vendor/github.com/urfave/cli/flag.go | 399 +- .../github.com/urfave/cli/flag_generated.go | 464 +- .../vendor/github.com/urfave/cli/funcs.go | 16 + .../github.com/urfave/cli/generate-flag-types | 255 - .../vendor/github.com/urfave/cli/go.mod | 9 + .../vendor/github.com/urfave/cli/go.sum | 10 + .../vendor/github.com/urfave/cli/help.go | 182 +- .../vendor/github.com/urfave/cli/parse.go | 80 + .../vendor/github.com/urfave/cli/runtests | 13 - .../vendor/github.com/urfave/cli/sort.go | 29 + .../vendor/github.com/urfave/cli/template.go | 121 + .../vendor/golang.org/x/sys/cpu/byteorder.go | 38 +- .../vendor/golang.org/x/sys/cpu/cpu.go | 36 + .../vendor/golang.org/x/sys/cpu/cpu_arm.go | 33 +- .../vendor/golang.org/x/sys/cpu/cpu_linux.go | 2 +- .../golang.org/x/sys/cpu/cpu_linux_arm.go | 39 + .../golang.org/x/sys/cpu/cpu_linux_noinit.go | 9 + .../golang.org/x/sys/cpu/cpu_mips64x.go | 2 - .../vendor/golang.org/x/sys/cpu/cpu_mipsx.go | 2 - .../golang.org/x/sys/cpu/cpu_other_arm64.go | 2 - .../golang.org/x/sys/cpu/cpu_riscv64.go | 9 + .../vendor/golang.org/x/sys/cpu/cpu_wasm.go | 2 - .../golang.org/x/sys/unix/affinity_linux.go | 42 +- .../golang.org/x/sys/unix/bluetooth_linux.go | 1 + .../vendor/golang.org/x/sys/unix/dirent.go | 2 +- .../golang.org/x/sys/unix/endian_little.go | 2 +- .../vendor/golang.org/x/sys/unix/fdset.go | 29 + .../vendor/golang.org/x/sys/unix/ioctl.go | 41 +- .../vendor/golang.org/x/sys/unix/mkall.sh | 4 +- .../golang.org/x/sys/unix/mkasm_darwin.go | 55 +- .../vendor/golang.org/x/sys/unix/mkerrors.sh | 58 +- .../vendor/golang.org/x/sys/unix/mksyscall.go | 7 +- .../x/sys/unix/sockcmsg_dragonfly.go | 16 + .../golang.org/x/sys/unix/sockcmsg_linux.go | 2 +- .../golang.org/x/sys/unix/sockcmsg_unix.go | 36 +- .../x/sys/unix/sockcmsg_unix_other.go | 38 + .../golang.org/x/sys/unix/syscall_aix.go | 39 +- .../golang.org/x/sys/unix/syscall_aix_ppc.go | 4 + .../x/sys/unix/syscall_aix_ppc64.go | 4 + .../golang.org/x/sys/unix/syscall_bsd.go | 4 +- .../x/sys/unix/syscall_darwin.1_12.go | 29 + .../x/sys/unix/syscall_darwin.1_13.go | 101 + .../golang.org/x/sys/unix/syscall_darwin.go | 40 +- .../x/sys/unix/syscall_darwin_386.1_11.go | 9 + .../x/sys/unix/syscall_darwin_386.go | 7 +- .../x/sys/unix/syscall_darwin_amd64.1_11.go | 9 + .../x/sys/unix/syscall_darwin_amd64.go | 7 +- .../x/sys/unix/syscall_darwin_arm.1_11.go | 11 + .../x/sys/unix/syscall_darwin_arm.go | 12 +- .../x/sys/unix/syscall_darwin_arm64.1_11.go | 11 + .../x/sys/unix/syscall_darwin_arm64.go | 12 +- .../x/sys/unix/syscall_darwin_libSystem.go | 2 + .../x/sys/unix/syscall_dragonfly.go | 59 +- .../x/sys/unix/syscall_dragonfly_amd64.go | 4 + .../golang.org/x/sys/unix/syscall_freebsd.go | 48 +- .../x/sys/unix/syscall_freebsd_386.go | 4 + .../x/sys/unix/syscall_freebsd_amd64.go | 4 + .../x/sys/unix/syscall_freebsd_arm.go | 4 + .../x/sys/unix/syscall_freebsd_arm64.go | 4 + .../golang.org/x/sys/unix/syscall_linux.go | 175 +- .../x/sys/unix/syscall_linux_386.go | 4 + .../x/sys/unix/syscall_linux_amd64.go | 4 + .../x/sys/unix/syscall_linux_arm.go | 4 + .../x/sys/unix/syscall_linux_arm64.go | 4 + .../x/sys/unix/syscall_linux_mips64x.go | 4 + .../x/sys/unix/syscall_linux_mipsx.go | 4 + .../x/sys/unix/syscall_linux_ppc64x.go | 4 + .../x/sys/unix/syscall_linux_riscv64.go | 4 + 
.../x/sys/unix/syscall_linux_s390x.go | 4 + .../x/sys/unix/syscall_linux_sparc64.go | 4 + .../golang.org/x/sys/unix/syscall_netbsd.go | 39 +- .../x/sys/unix/syscall_netbsd_386.go | 4 + .../x/sys/unix/syscall_netbsd_amd64.go | 4 + .../x/sys/unix/syscall_netbsd_arm.go | 4 + .../x/sys/unix/syscall_netbsd_arm64.go | 4 + .../golang.org/x/sys/unix/syscall_openbsd.go | 39 +- .../x/sys/unix/syscall_openbsd_386.go | 4 + .../x/sys/unix/syscall_openbsd_amd64.go | 4 + .../x/sys/unix/syscall_openbsd_arm.go | 4 + .../x/sys/unix/syscall_openbsd_arm64.go | 4 + .../golang.org/x/sys/unix/syscall_solaris.go | 34 +- .../x/sys/unix/syscall_solaris_amd64.go | 4 + .../golang.org/x/sys/unix/zerrors_aix_ppc.go | 12 +- .../x/sys/unix/zerrors_aix_ppc64.go | 12 +- .../x/sys/unix/zerrors_darwin_386.go | 3 +- .../x/sys/unix/zerrors_darwin_amd64.go | 3 +- .../x/sys/unix/zerrors_darwin_arm.go | 3 +- .../x/sys/unix/zerrors_darwin_arm64.go | 3 +- .../x/sys/unix/zerrors_dragonfly_amd64.go | 1 + .../x/sys/unix/zerrors_freebsd_386.go | 3 +- .../x/sys/unix/zerrors_freebsd_amd64.go | 3 +- .../x/sys/unix/zerrors_freebsd_arm.go | 3 +- .../x/sys/unix/zerrors_freebsd_arm64.go | 3 +- .../x/sys/unix/zerrors_linux_386.go | 5458 ++-- .../x/sys/unix/zerrors_linux_amd64.go | 5458 ++-- .../x/sys/unix/zerrors_linux_arm.go | 5470 ++-- .../x/sys/unix/zerrors_linux_arm64.go | 5442 ++-- .../x/sys/unix/zerrors_linux_mips.go | 5462 ++-- .../x/sys/unix/zerrors_linux_mips64.go | 5462 ++-- .../x/sys/unix/zerrors_linux_mips64le.go | 5462 ++-- .../x/sys/unix/zerrors_linux_mipsle.go | 5462 ++-- .../x/sys/unix/zerrors_linux_ppc64.go | 5580 +++-- .../x/sys/unix/zerrors_linux_ppc64le.go | 5580 +++-- .../x/sys/unix/zerrors_linux_riscv64.go | 5432 ++-- .../x/sys/unix/zerrors_linux_s390x.go | 5578 +++-- .../x/sys/unix/zerrors_linux_sparc64.go | 5558 +++-- .../x/sys/unix/zerrors_netbsd_386.go | 3 +- .../x/sys/unix/zerrors_netbsd_amd64.go | 3 +- .../x/sys/unix/zerrors_netbsd_arm.go | 3 +- .../x/sys/unix/zerrors_netbsd_arm64.go | 3 +- .../x/sys/unix/zerrors_openbsd_386.go | 17 +- .../x/sys/unix/zerrors_openbsd_amd64.go | 6 +- .../x/sys/unix/zerrors_openbsd_arm.go | 11 +- .../x/sys/unix/zerrors_openbsd_arm64.go | 1 + .../x/sys/unix/zerrors_solaris_amd64.go | 3 +- .../x/sys/unix/zsyscall_darwin_386.1_11.go | 93 +- .../x/sys/unix/zsyscall_darwin_386.1_13.go | 41 + .../x/sys/unix/zsyscall_darwin_386.1_13.s | 12 + .../x/sys/unix/zsyscall_darwin_386.go | 114 +- .../x/sys/unix/zsyscall_darwin_386.s | 10 +- .../x/sys/unix/zsyscall_darwin_amd64.1_11.go | 93 +- .../x/sys/unix/zsyscall_darwin_amd64.1_13.go | 41 + .../x/sys/unix/zsyscall_darwin_amd64.1_13.s | 12 + .../x/sys/unix/zsyscall_darwin_amd64.go | 99 +- .../x/sys/unix/zsyscall_darwin_amd64.s | 10 +- .../x/sys/unix/zsyscall_darwin_arm.1_11.go | 49 +- .../x/sys/unix/zsyscall_darwin_arm.1_13.go | 41 + .../x/sys/unix/zsyscall_darwin_arm.1_13.s | 12 + .../x/sys/unix/zsyscall_darwin_arm.go | 77 +- .../x/sys/unix/zsyscall_darwin_arm.s | 6 +- .../x/sys/unix/zsyscall_darwin_arm64.1_11.go | 49 +- .../x/sys/unix/zsyscall_darwin_arm64.1_13.go | 41 + .../x/sys/unix/zsyscall_darwin_arm64.1_13.s | 12 + .../x/sys/unix/zsyscall_darwin_arm64.go | 77 +- .../x/sys/unix/zsyscall_darwin_arm64.s | 6 +- .../x/sys/unix/zsyscall_dragonfly_amd64.go | 5 +- .../x/sys/unix/zsyscall_freebsd_386.go | 5 +- .../x/sys/unix/zsyscall_freebsd_amd64.go | 45 +- .../x/sys/unix/zsyscall_freebsd_arm.go | 45 +- .../x/sys/unix/zsyscall_freebsd_arm64.go | 45 +- .../x/sys/unix/zsyscall_linux_386.go | 30 + .../x/sys/unix/zsyscall_linux_amd64.go | 30 + 
.../x/sys/unix/zsyscall_linux_arm.go | 30 + .../x/sys/unix/zsyscall_linux_arm64.go | 30 + .../x/sys/unix/zsyscall_linux_mips.go | 30 + .../x/sys/unix/zsyscall_linux_mips64.go | 30 + .../x/sys/unix/zsyscall_linux_mips64le.go | 30 + .../x/sys/unix/zsyscall_linux_mipsle.go | 30 + .../x/sys/unix/zsyscall_linux_ppc64.go | 30 + .../x/sys/unix/zsyscall_linux_ppc64le.go | 30 + .../x/sys/unix/zsyscall_linux_riscv64.go | 30 + .../x/sys/unix/zsyscall_linux_s390x.go | 30 + .../x/sys/unix/zsyscall_linux_sparc64.go | 30 + .../x/sys/unix/zsyscall_netbsd_386.go | 37 +- .../x/sys/unix/zsyscall_netbsd_amd64.go | 37 +- .../x/sys/unix/zsyscall_netbsd_arm.go | 37 +- .../x/sys/unix/zsyscall_netbsd_arm64.go | 37 +- .../x/sys/unix/zsyscall_openbsd_386.go | 37 +- .../x/sys/unix/zsyscall_openbsd_amd64.go | 37 +- .../x/sys/unix/zsyscall_openbsd_arm.go | 37 +- .../x/sys/unix/zsyscall_openbsd_arm64.go | 37 +- .../x/sys/unix/zsyscall_solaris_amd64.go | 5 +- .../x/sys/unix/zsysnum_linux_386.go | 2 + .../x/sys/unix/zsysnum_linux_amd64.go | 2 + .../x/sys/unix/zsysnum_linux_arm.go | 2 + .../x/sys/unix/zsysnum_linux_arm64.go | 1 + .../x/sys/unix/zsysnum_linux_mips.go | 2 + .../x/sys/unix/zsysnum_linux_mips64.go | 2 + .../x/sys/unix/zsysnum_linux_mips64le.go | 2 + .../x/sys/unix/zsysnum_linux_mipsle.go | 2 + .../x/sys/unix/zsysnum_linux_ppc64.go | 2 + .../x/sys/unix/zsysnum_linux_ppc64le.go | 2 + .../x/sys/unix/zsysnum_linux_riscv64.go | 2 + .../x/sys/unix/zsysnum_linux_s390x.go | 2 + .../x/sys/unix/zsysnum_linux_sparc64.go | 1 + .../x/sys/unix/ztypes_freebsd_arm64.go | 2 +- .../golang.org/x/sys/unix/ztypes_linux_386.go | 321 +- .../x/sys/unix/ztypes_linux_amd64.go | 322 +- .../golang.org/x/sys/unix/ztypes_linux_arm.go | 322 +- .../x/sys/unix/ztypes_linux_arm64.go | 322 +- .../x/sys/unix/ztypes_linux_mips.go | 322 +- .../x/sys/unix/ztypes_linux_mips64.go | 323 +- .../x/sys/unix/ztypes_linux_mips64le.go | 323 +- .../x/sys/unix/ztypes_linux_mipsle.go | 322 +- .../x/sys/unix/ztypes_linux_ppc64.go | 322 +- .../x/sys/unix/ztypes_linux_ppc64le.go | 322 +- .../x/sys/unix/ztypes_linux_riscv64.go | 323 +- .../x/sys/unix/ztypes_linux_s390x.go | 322 +- .../x/sys/unix/ztypes_linux_sparc64.go | 322 +- .../x/tools/go/buildutil/overlay.go | 2 +- .../x/tools/go/gcexportdata/gcexportdata.go | 2 +- .../x/tools/go/internal/gcimporter/bimport.go | 11 +- .../x/tools/go/internal/gcimporter/iexport.go | 34 +- .../x/tools/go/internal/gcimporter/iimport.go | 68 +- .../tools/go/internal/packagesdriver/sizes.go | 40 +- .../x/tools/go/packages/external.go | 8 +- .../golang.org/x/tools/go/packages/golist.go | 347 +- .../x/tools/go/packages/golist_overlay.go | 61 +- .../x/tools/go/packages/loadmode_string.go | 57 + .../x/tools/go/packages/packages.go | 108 +- .../golang.org/x/tools/go/pointer/gen.go | 2 +- .../golang.org/x/tools/go/pointer/hvn.go | 2 +- .../vendor/golang.org/x/tools/go/ssa/func.go | 8 +- .../golang.org/x/tools/go/ssa/ssautil/load.go | 2 +- .../x/tools/internal/gopathwalk/walk.go | 42 +- .../x/tools/internal/imports/fix.go | 537 +- .../x/tools/internal/imports/imports.go | 112 +- .../x/tools/internal/imports/mkstdlib.go | 14 +- .../x/tools/internal/imports/mod.go | 538 +- .../x/tools/internal/imports/mod_cache.go | 165 + .../x/tools/internal/imports/sortimports.go | 47 + .../x/tools/internal/imports/zstdlib.go | 20690 ++++++++-------- .../golang.org/x/tools/internal/span/parse.go | 100 + .../golang.org/x/tools/internal/span/span.go | 285 + .../golang.org/x/tools/internal/span/token.go | 179 + .../x/tools/internal/span/token111.go | 39 + 
.../x/tools/internal/span/token112.go | 16 + .../golang.org/x/tools/internal/span/uri.go | 152 + .../golang.org/x/tools/internal/span/utf16.go | 94 + src/margo.sh/vfs/meta.go | 25 +- src/margo.sh/vfs/vfs.go | 132 +- 302 files changed, 65335 insertions(+), 48399 deletions(-) create mode 100644 src/margo.sh/.github/workflows/margo-ci.yml delete mode 100644 src/margo.sh/.travis.yml create mode 100644 src/margo.sh/golang/snippets/http-snippet.go delete mode 100644 src/margo.sh/kimporter/kimporter_test.go create mode 100644 src/margo.sh/memo/memo.go create mode 100644 src/margo.sh/mg/oom.go create mode 100644 src/margo.sh/mg/oom_nix.go create mode 100644 src/margo.sh/mg/oom_win.go create mode 100644 src/margo.sh/vendor/github.com/cpuguy83/go-md2man/LICENSE.md create mode 100644 src/margo.sh/vendor/github.com/cpuguy83/go-md2man/md2man/md2man.go create mode 100644 src/margo.sh/vendor/github.com/cpuguy83/go-md2man/md2man/roff.go create mode 100644 src/margo.sh/vendor/github.com/karrick/godirwalk/debug_development.go create mode 100644 src/margo.sh/vendor/github.com/karrick/godirwalk/debug_release.go create mode 100644 src/margo.sh/vendor/github.com/karrick/godirwalk/modeType.go delete mode 100644 src/margo.sh/vendor/github.com/karrick/godirwalk/readdir_unix.go delete mode 100644 src/margo.sh/vendor/github.com/karrick/godirwalk/readdir_windows.go create mode 100644 src/margo.sh/vendor/github.com/karrick/godirwalk/scandir_unix.go create mode 100644 src/margo.sh/vendor/github.com/karrick/godirwalk/scandir_windows.go create mode 100644 src/margo.sh/vendor/github.com/karrick/godirwalk/scanner.go create mode 100644 src/margo.sh/vendor/github.com/russross/blackfriday/.gitignore create mode 100644 src/margo.sh/vendor/github.com/russross/blackfriday/.travis.yml create mode 100644 src/margo.sh/vendor/github.com/russross/blackfriday/LICENSE.txt create mode 100644 src/margo.sh/vendor/github.com/russross/blackfriday/README.md create mode 100644 src/margo.sh/vendor/github.com/russross/blackfriday/block.go create mode 100644 src/margo.sh/vendor/github.com/russross/blackfriday/doc.go create mode 100644 src/margo.sh/vendor/github.com/russross/blackfriday/go.mod create mode 100644 src/margo.sh/vendor/github.com/russross/blackfriday/html.go create mode 100644 src/margo.sh/vendor/github.com/russross/blackfriday/inline.go create mode 100644 src/margo.sh/vendor/github.com/russross/blackfriday/latex.go create mode 100644 src/margo.sh/vendor/github.com/russross/blackfriday/markdown.go create mode 100644 src/margo.sh/vendor/github.com/russross/blackfriday/smartypants.go create mode 100644 src/margo.sh/vendor/github.com/urfave/cli/CODE_OF_CONDUCT.md create mode 100644 src/margo.sh/vendor/github.com/urfave/cli/CONTRIBUTING.md create mode 100644 src/margo.sh/vendor/github.com/urfave/cli/build.go create mode 100644 src/margo.sh/vendor/github.com/urfave/cli/docs.go create mode 100644 src/margo.sh/vendor/github.com/urfave/cli/fish.go delete mode 100644 src/margo.sh/vendor/github.com/urfave/cli/flag-types.json delete mode 100755 src/margo.sh/vendor/github.com/urfave/cli/generate-flag-types create mode 100644 src/margo.sh/vendor/github.com/urfave/cli/go.mod create mode 100644 src/margo.sh/vendor/github.com/urfave/cli/go.sum create mode 100644 src/margo.sh/vendor/github.com/urfave/cli/parse.go create mode 100644 src/margo.sh/vendor/github.com/urfave/cli/sort.go create mode 100644 src/margo.sh/vendor/github.com/urfave/cli/template.go create mode 100644 src/margo.sh/vendor/golang.org/x/sys/cpu/cpu_linux_arm.go create mode 100644 
src/margo.sh/vendor/golang.org/x/sys/cpu/cpu_linux_noinit.go create mode 100644 src/margo.sh/vendor/golang.org/x/sys/cpu/cpu_riscv64.go create mode 100644 src/margo.sh/vendor/golang.org/x/sys/unix/fdset.go create mode 100644 src/margo.sh/vendor/golang.org/x/sys/unix/sockcmsg_dragonfly.go create mode 100644 src/margo.sh/vendor/golang.org/x/sys/unix/sockcmsg_unix_other.go create mode 100644 src/margo.sh/vendor/golang.org/x/sys/unix/syscall_darwin.1_12.go create mode 100644 src/margo.sh/vendor/golang.org/x/sys/unix/syscall_darwin.1_13.go create mode 100644 src/margo.sh/vendor/golang.org/x/sys/unix/syscall_darwin_386.1_11.go create mode 100644 src/margo.sh/vendor/golang.org/x/sys/unix/syscall_darwin_amd64.1_11.go create mode 100644 src/margo.sh/vendor/golang.org/x/sys/unix/syscall_darwin_arm.1_11.go create mode 100644 src/margo.sh/vendor/golang.org/x/sys/unix/syscall_darwin_arm64.1_11.go create mode 100644 src/margo.sh/vendor/golang.org/x/sys/unix/zsyscall_darwin_386.1_13.go create mode 100644 src/margo.sh/vendor/golang.org/x/sys/unix/zsyscall_darwin_386.1_13.s create mode 100644 src/margo.sh/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.1_13.go create mode 100644 src/margo.sh/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.1_13.s create mode 100644 src/margo.sh/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm.1_13.go create mode 100644 src/margo.sh/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm.1_13.s create mode 100644 src/margo.sh/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.1_13.go create mode 100644 src/margo.sh/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.1_13.s create mode 100644 src/margo.sh/vendor/golang.org/x/tools/go/packages/loadmode_string.go create mode 100644 src/margo.sh/vendor/golang.org/x/tools/internal/imports/mod_cache.go create mode 100644 src/margo.sh/vendor/golang.org/x/tools/internal/span/parse.go create mode 100644 src/margo.sh/vendor/golang.org/x/tools/internal/span/span.go create mode 100644 src/margo.sh/vendor/golang.org/x/tools/internal/span/token.go create mode 100644 src/margo.sh/vendor/golang.org/x/tools/internal/span/token111.go create mode 100644 src/margo.sh/vendor/golang.org/x/tools/internal/span/token112.go create mode 100644 src/margo.sh/vendor/golang.org/x/tools/internal/span/uri.go create mode 100644 src/margo.sh/vendor/golang.org/x/tools/internal/span/utf16.go diff --git a/CHANGELOG.md b/CHANGELOG.md index 23bd89ea..2f5ee4cb 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -10,6 +10,33 @@ https://margo.sh/b/motd - Get notified when GoSublime has a new release. ## Changes +## 20.01.01 + +This release mainly focuses on under-the-hood improvements for module support. + +- The default auto-completion importer mode has been changed to `Kim-Porter`, our solution for auto-completion and package/module support going forward. + + One side-effect of this change is that unimported-packages support is less reliable, but we feel this is a small drawback when compared to the much improved auto-completion support. + + We plan to remove support for switching importer modes in the future, but if you would like to revert to the previous default (bearing in mind auto-completion might stop working), configure the `MarGocodeCtl` reducer as follows: + + ```go + &golang.MarGocodeCtl{ + ImporterMode: golang.SrcImporterWithFallback, + } + ``` + +- The Go/TypeCheck linter is now more complete and should be able to type-check (without failure) all packages for which auto-completion is available.
+ This linter offers type-checking (like the gotype tool), but it works on unsaved files, checks as you type, and is faster than a full `go install` lint. + + To enable it, add the following reducer to your `margo.go` file: + + ```go + &golang.TypeCheck{}, + ``` + +- Some HTTP handler snippets have been added and are offered in files that `import "net/http"`. + ## 19.10.22 - API BREAKAGE: diff --git a/gosubl/about.py b/gosubl/about.py index 3398cd53..96ea0fa8 100644 --- a/gosubl/about.py +++ b/gosubl/about.py @@ -1,7 +1,7 @@ import re import sublime -TAG = '19.10.22-1' +TAG = '20.01.01-1' ANN = 'a'+TAG VERSION = 'r'+TAG VERSION_PAT = re.compile(r'\d{2}[.]\d{2}[.]\d{2}-\d+', re.IGNORECASE) diff --git a/src/margo.sh/.github/workflows/margo-ci.yml b/src/margo.sh/.github/workflows/margo-ci.yml new file mode 100644 index 00000000..10f12b9e --- /dev/null +++ b/src/margo.sh/.github/workflows/margo-ci.yml @@ -0,0 +1,22 @@ +on: [push, pull_request] +name: margo-ci +jobs: + margo-ci: + strategy: + matrix: + go-version: [1.12.x, 1.13.x] + platform: [ubuntu-latest, macos-latest, windows-latest] + runs-on: ${{ matrix.platform }} + steps: + - name: Setup + uses: actions/setup-go@v1 + with: + go-version: ${{ matrix.go-version }} + - name: Checkout + uses: actions/checkout@v2 + with: + path: src/margo.sh + - name: CI + env: + GOPATH: ${{ github.workspace }} + run: go run margo.sh ci diff --git a/src/margo.sh/.travis.yml b/src/margo.sh/.travis.yml deleted file mode 100644 index ea42ff23..00000000 --- a/src/margo.sh/.travis.yml +++ /dev/null @@ -1,18 +0,0 @@ -sudo: false - -language: go - -go: - - 1.11.x - - 1.12.x - - tip - -matrix: - fast_finish: true - allow_failures: - - go: tip - -go_import_path: margo.sh - -script: - - margo.sh ci diff --git a/src/margo.sh/Gopkg.lock b/src/margo.sh/Gopkg.lock index 29cabfde..aa86931c 100644 --- a/src/margo.sh/Gopkg.lock +++ b/src/margo.sh/Gopkg.lock @@ -10,12 +10,20 @@ version = "v1.3.3" [[projects]] - digest = "1:77857b3205f936bdc6928ef347b682ab549cf99454d6c0ca04a49f8df9e418f3" + digest = "1:7cb4fdca4c251b3ef8027c90ea35f70c7b661a593b9eeae34753c65499098bb1" + name = "github.com/cpuguy83/go-md2man" + packages = ["md2man"] + pruneopts = "UT" + revision = "7762f7e404f8416dfa1d9bb6a8c192aa9acb4d19" + version = "v1.0.10" + +[[projects]] + digest = "1:9b702f63b82233857933e900478bbced86c863c43f4cbec0ff76f0f94346230f" name = "github.com/karrick/godirwalk" packages = ["."] pruneopts = "UT" - revision = "73c17a9b9528eb3ce857b782a2816c0cda581e62" - version = "v1.10.12" + revision = "c5347ca0aa3e4cee84147b4da5341df3de70af6c" + version = "v1.13.0" [[projects]] digest = "1:e09ada96a5a41deda4748b1659cc8953961799e798aea557257b56baee4ecaf3" @@ -26,8 +34,16 @@ "semver", ] pruneopts = "UT" - revision = "438578804ca6f31be148c27683afc419ce47c06e" - version = "v1.3.0" + revision = "d89504fbbf2c313df24867a5ffafcc9b847961ff" + version = "v1.5.0" + +[[projects]] + digest = "1:b36a0ede02c4c2aef7df7f91cbbb7bb88a98b5d253509d4f997dda526e50c88c" + name = "github.com/russross/blackfriday" + packages = ["."] + pruneopts = "UT" + revision = "05f3235734ad95d0016f6a23902f06461fcf567a" + version = "v1.5.2" [[projects]] digest = "1:5a1cf4e370bc86137b58da2ae065e76526d32b11f62a7665f36dbd5f41fa95ff" @@ -38,12 +54,12 @@ version = "v1.1.7" [[projects]] - digest = "1:b24d38b282bacf9791408a080f606370efa3d364e4b5fd9ba0f7b87786d3b679" + digest = "1:1d3ef3dd057d2eb1819e945f88cc83835296c9b7fb13ad3194c937c4e2891fee" name = "github.com/urfave/cli" packages = ["."] pruneopts = "UT" - revision =
"cfb38830724cc34fedffe9a2a29fb54fa9169cd1" - version = "v1.20.0" + revision = "bfe2e925cfb6d44b40ad3a779165ea7e8aff9212" + version = "v1.22.0" [[projects]] branch = "master" @@ -51,7 +67,7 @@ name = "golang.org/x/crypto" packages = ["blake2b"] pruneopts = "UT" - revision = "4def268fd1a49955bfb3dda92fe3db4f924f2285" + revision = "b544559bb6d1b5c62fba4af5e843ff542174f079" [[projects]] branch = "master" @@ -59,7 +75,7 @@ name = "golang.org/x/net" packages = ["context"] pruneopts = "UT" - revision = "da137c7871d730100384dbcf36e6f8fa493aef5b" + revision = "ef20fe5d793301b553005db740f730d87993f778" [[projects]] branch = "master" @@ -67,22 +83,22 @@ name = "golang.org/x/sync" packages = ["errgroup"] pruneopts = "UT" - revision = "112230192c580c3556b8cee6403af37a4fc5f28c" + revision = "cd5d95a43a6e21273425c7ae415d3df9ea832eeb" [[projects]] branch = "master" - digest = "1:a0e4b2275e7939970d395eb6d03f409378eb586aab72e0a461d95e3435c6c5c4" + digest = "1:607da0d7cf677985496d5911647383193cce4c67ba4554c4a54607a6a6062c79" name = "golang.org/x/sys" packages = [ "cpu", "unix", ] pruneopts = "UT" - revision = "fae7ac547cb717d141c433a2a173315e216b64c4" + revision = "6d18c012aee9febd81bbf9806760c8c4480e870d" [[projects]] branch = "master" - digest = "1:bced9cb3cc232146d31c9c0cc4595acfbd892c99c35b1371544fe35f4abaeca5" + digest = "1:1d5cac22c44ed0228c63765477b98c87cb1ac7b7305c76ba21e754cdf60aed29" name = "golang.org/x/tools" packages = [ "cmd/guru", @@ -108,10 +124,11 @@ "internal/imports", "internal/module", "internal/semver", + "internal/span", "refactor/importgraph", ] pruneopts = "UT" - revision = "e377ae9d63860e3b8f606b92a1a7c7df5b14ca67" + revision = "6e064ea0cf2dcfaca8da2da9f42da458bab38756" [[projects]] branch = "master" diff --git a/src/margo.sh/Gopkg.toml b/src/margo.sh/Gopkg.toml index b476c073..619fe14f 100644 --- a/src/margo.sh/Gopkg.toml +++ b/src/margo.sh/Gopkg.toml @@ -13,3 +13,7 @@ required = [ [[constraint]] name = "github.com/rogpeppe/go-internal" version = "1.3.0" + +[[override]] + name = "github.com/russross/blackfriday" + version = "1.5.2" diff --git a/src/margo.sh/cmdpkg/margosublime/main.go b/src/margo.sh/cmdpkg/margosublime/main.go index e4c1592e..8486e0be 100644 --- a/src/margo.sh/cmdpkg/margosublime/main.go +++ b/src/margo.sh/cmdpkg/margosublime/main.go @@ -32,7 +32,7 @@ func Main() { if err != nil { return mgcli.Error("agent creation failed:", err) } - + mg.SetMemoryLimit(ag.Log, mg.DefaultMemoryLimit) ag.Store.SetBaseConfig(sublime.DefaultConfig) if margoExt != nil { margoExt(ag.Args()) diff --git a/src/margo.sh/golang/cursor/curctx.go b/src/margo.sh/golang/cursor/curctx.go index f2f2a6c0..2fd8dfb4 100644 --- a/src/margo.sh/golang/cursor/curctx.go +++ b/src/margo.sh/golang/cursor/curctx.go @@ -295,6 +295,25 @@ func (cx *CurCtx) Contains(typ ast.Node) bool { }) } +func (cx *CurCtx) ImportsMatch(match func(importPath string) bool) bool { + for _, spec := range cx.AstFile.Imports { + p := spec.Path.Value + if len(p) < 3 { + continue + } + if c := p[0]; c == '"' || c == '`' { + p = p[1:] + } + if c := p[len(p)-1]; c == '"' || c == '`' { + p = p[:len(p)-1] + } + if match(p) { + return true + } + } + return false +} + func (cx *CurCtx) Print(x ast.Node) (string, error) { p := &cx.printer p.Lock() diff --git a/src/margo.sh/golang/gocode_suggest.go b/src/margo.sh/golang/gocode_suggest.go index 2bcc5f6c..2efe66e3 100644 --- a/src/margo.sh/golang/gocode_suggest.go +++ b/src/margo.sh/golang/gocode_suggest.go @@ -6,6 +6,7 @@ import ( "go/types" "kuroku.io/margocode/suggest" 
"margo.sh/golang/gopkg" + "margo.sh/kimporter" "margo.sh/mg" "margo.sh/mgutil" "runtime/debug" @@ -144,6 +145,10 @@ func (gi *gsuImporter) importFromName(pkgName, srcDir string) (pkg *types.Packag } func (gi *gsuImporter) ImportFrom(impPath, srcDir string, mode types.ImportMode) (pkg *types.Package, err error) { + if mctl.cfg().ImporterMode == KimPorter { + return kimporter.New(gi.mx, nil).ImportFrom(impPath, srcDir, mode) + } + // TODO: add mode to the key somehow? // mode is reserved, but currently not used so it's not a problem // but if it's used in the future, the importer result could depend on it diff --git a/src/margo.sh/golang/gopkg/import.go b/src/margo.sh/golang/gopkg/import.go index 4504172b..7935eec3 100644 --- a/src/margo.sh/golang/gopkg/import.go +++ b/src/margo.sh/golang/gopkg/import.go @@ -32,19 +32,30 @@ func ScanFilter(de *vfs.Dirent) bool { } func ImportDir(mx *mg.Ctx, dir string) (*Pkg, error) { + if !filepath.IsAbs(dir) { + return nil, fmt.Errorf("ImportDir: %s is not an absolute path", dir) + } return ImportDirNd(mx, mx.VFS.Poke(dir)) } -func ImportDirNd(mx *mg.Ctx, nd *vfs.Node) (*Pkg, error) { - ls := nd.Ls().Filter(pkgNdFilter).Nodes() +func ImportDirNd(mx *mg.Ctx, dir *vfs.Node) (*Pkg, error) { + return importDirNd(mx, dir, true) +} + +func importDirNd(mx *mg.Ctx, nd *vfs.Node, poke bool) (*Pkg, error) { + var cl *vfs.NodeList + if poke { + cl = nd.Ls() + } else { + cl = nd.Children() + } + ls := cl.Filter(pkgNdFilter).Nodes() if len(ls) == 0 { - return nil, &build.NoGoError{Dir: nd.Path()} - } - memo, err := nd.Memo() - if err != nil { - return nil, err + if poke { + return nil, &build.NoGoError{Dir: nd.Path()} + } + return nil, nil } - bctx := goutil.BuildContext(mx) type K struct{ GOROOT, GOPATH string } type V struct { @@ -52,13 +63,26 @@ func ImportDirNd(mx *mg.Ctx, nd *vfs.Node) (*Pkg, error) { e error } k := K{GOROOT: bctx.GOROOT, GOPATH: bctx.GOPATH} - v := memo.Read(k, func() interface{} { - p, err := importDirNd(mx, nd, bctx, ls) + if !poke { + v, _ := nd.PeekMemo(k).(V) + return v.p, v.e + } + v := nd.ReadMemo(k, func() interface{} { + p, err := importDir(mx, nd, bctx, ls) return V{p: p, e: err} }).(V) return v.p, v.e } +func PeekDir(mx *mg.Ctx, dir string) *Pkg { + return PeekDirNd(mx, mx.VFS.Peek(dir)) +} + +func PeekDirNd(mx *mg.Ctx, dir *vfs.Node) *Pkg { + p, _ := importDirNd(mx, dir, false) + return p +} + func pkgNdFilter(nd *vfs.Node) bool { nm := nd.Name() return nm[0] != '.' 
&& nm[0] != '_' && @@ -67,7 +91,7 @@ func pkgNdFilter(nd *vfs.Node) bool { !strings.HasSuffix(nm, "_test.go") } -func importDirNd(mx *mg.Ctx, nd *vfs.Node, bctx *build.Context, ls []*vfs.Node) (*Pkg, error) { +func importDir(mx *mg.Ctx, nd *vfs.Node, bctx *build.Context, ls []*vfs.Node) (*Pkg, error) { dir := nd.Path() var errNoGo error = &build.NoGoError{Dir: dir} bctx.IsDir = func(p string) bool { @@ -242,6 +266,8 @@ type modDep struct { ModPath string SubPkg string Version string + + oldPath string } func (mf *modFile) requireMD(modPath string) (_ modDep, found bool) { @@ -259,7 +285,11 @@ func (mf *modFile) require(importPath string) (modDep, error) { if !found { return modDep{}, fmt.Errorf("require(%s) not found in %s", importPath, mf.Path) } - md.SubPkg = strings.TrimPrefix(importPath, md.ModPath) + modPath := md.ModPath + if md.oldPath != "" { + modPath = md.oldPath + } + md.SubPkg = strings.TrimPrefix(importPath, modPath) md.SubPkg = strings.TrimLeft(md.SubPkg, "/") return md, nil } @@ -338,7 +368,7 @@ func (mf *modFile) find(mx *mg.Ctx, bctx *build.Context, importPath string, mp * searchOtherVendors, searchGrVendor, } - if !strings.Contains(strings.Split(importPath, "/")[0], ".") { + if !strings.Contains(strings.SplitN(importPath, "/", 2)[0], ".") { // apparently import paths without dots are reserved for the stdlib // checking first also avoids the many misses for each stdlib pkg search = []func() *PkgPath{ @@ -353,6 +383,9 @@ func (mf *modFile) find(mx *mg.Ctx, bctx *build.Context, importPath string, mp * return p, nil } } + if md.oldPath != "" { + return nil, fmt.Errorf("cannot find `%s` replacement `%s` using `%s`", importPath, md.ModPath, mf.Path) + } return nil, fmt.Errorf("cannot find `%s` using `%s`", importPath, mf.Path) } @@ -395,11 +428,11 @@ func loadModSum(mx *mg.Ctx, dir string) (*modFile, error) { for _, r := range mf.File.Replace { md := modDep{ + oldPath: r.Old.Path, ModPath: r.New.Path, Version: r.New.Version, } - // if dir := r.New.Path; modfile.IsDirectoryPath(dir) { - if dir := r.New.Path; dir[0] == '/' || dir[0] == '.' 
{ + if dir := r.New.Path; modfile.IsDirectoryPath(dir) { if !filepath.IsAbs(dir) { dir = filepath.Join(mf.Dir, dir) } diff --git a/src/margo.sh/golang/goutil/goutil_test.go b/src/margo.sh/golang/goutil/goutil_test.go index acfc8321..1edf2c78 100644 --- a/src/margo.sh/golang/goutil/goutil_test.go +++ b/src/margo.sh/golang/goutil/goutil_test.go @@ -1,3 +1,5 @@ +// +build !windows + package goutil import ( diff --git a/src/margo.sh/golang/margocode.go b/src/margo.sh/golang/margocode.go index ac1b83ff..b5aa207d 100644 --- a/src/margo.sh/golang/margocode.go +++ b/src/margo.sh/golang/margocode.go @@ -32,8 +32,8 @@ import ( ) const ( - // SrcImporterWithFallback tells the importer use source code, then fall-back to a binary package - SrcImporterWithFallback ImporterMode = iota + // KimPorter tells the importer to use Kim-Porter to import packages + KimPorter ImporterMode = iota // SrcImporterOnly tells the importer use source only, with no fall-back SrcImporterOnly @@ -41,8 +41,8 @@ const ( // BinImporterOnly tells the importer use binary packages only, with no fall-back BinImporterOnly - // KimPorter tells the importer to use Kim-Porter to import packages - KimPorter + // SrcImporterWithFallback tells the importer use source code, then fall-back to a binary package + SrcImporterWithFallback ) var ( @@ -77,9 +77,6 @@ func (mgc *marGocodeCtl) importerFactories() (newDefaultImporter, newFallbackImp s := mgc.newSrcImporter b := mgc.newBinImporter switch mgc.cfg().ImporterMode { - case KimPorter: - // kp doesn't yet support cgo, so fall back to the binary importer - return mgc.newKimPorter, b, true case SrcImporterWithFallback: return s, b, true case SrcImporterOnly: @@ -91,10 +88,6 @@ func (mgc *marGocodeCtl) importerFactories() (newDefaultImporter, newFallbackImp } } -func (mgc *marGocodeCtl) newKimPorter(mx *mg.Ctx, overlay types.ImporterFrom) types.ImporterFrom { - return kimporter.New(mx, nil) -} - // importPathByName returns an import path whose pkg's name is pkgName func (mgc *marGocodeCtl) importPathByName(pkgName, srcDir string) string { pkl := mgc.plst.View().ByName[pkgName] diff --git a/src/margo.sh/golang/snippets/http-snippet.go b/src/margo.sh/golang/snippets/http-snippet.go new file mode 100644 index 00000000..e06c37ff --- /dev/null +++ b/src/margo.sh/golang/snippets/http-snippet.go @@ -0,0 +1,60 @@ +package snippets + +import ( + "margo.sh/golang/cursor" + "margo.sh/mg" +) + +func HTTPSnippet(cx *cursor.CurCtx) []mg.Completion { + switch { + case !cx.ImportsMatch(func(p string) bool { return p == "net/http" }): + return nil + case cx.Scope.Is(cursor.BlockScope): + return []mg.Completion{ + mg.Completion{ + Query: `http.HandleFunc`, + Title: `http.HandleFunc("...", func(w, r))`, + Src: ` + http.HandleFunc("/${1}", func(w http.ResponseWriter, r *http.Request) { + $0 + }) + `, + }, + } + case cx.Scope.Is(cursor.ExprScope): + return []mg.Completion{ + mg.Completion{ + Query: `http.HandlerFunc`, + Title: `http.HandlerFunc(func(w, r))`, + Src: ` + http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + $0 + }) + `, + }, + mg.Completion{ + Query: `func http handler`, + Title: `func(w, r)`, + Src: ` + func(w http.ResponseWriter, r *http.Request) { + $0 + } + `, + }, + } + case cx.Scope.Is(cursor.FileScope): + return []mg.Completion{ + mg.Completion{ + Query: `func http handler`, + Title: `func(w, r)`, + Src: ` + func ${1:name}(w http.ResponseWriter, r *http.Request) { + $0 + } + `, + }, + } + default: + return nil + } +} diff --git a/src/margo.sh/golang/snippets/snippets.go 
b/src/margo.sh/golang/snippets/snippets.go index 043072dc..d2af43a5 100644 --- a/src/margo.sh/golang/snippets/snippets.go +++ b/src/margo.sh/golang/snippets/snippets.go @@ -20,6 +20,7 @@ var ( DeferSnippet, MutexSnippet, ReturnSnippet, + HTTPSnippet, } ) diff --git a/src/margo.sh/golang/typecheck.go b/src/margo.sh/golang/typecheck.go index 2cfee93d..68c1cdb7 100644 --- a/src/margo.sh/golang/typecheck.go +++ b/src/margo.sh/golang/typecheck.go @@ -6,10 +6,10 @@ import ( "go/scanner" "go/token" "go/types" - "margo.sh/golang/gopkg" "margo.sh/golang/goutil" "margo.sh/kimporter" "margo.sh/mg" + "margo.sh/mgpf" "margo.sh/mgutil" "path/filepath" "strings" @@ -51,36 +51,43 @@ func (tc *TypeCheck) checker() { func (tc *TypeCheck) check(mx *mg.Ctx) { defer mx.Begin(mg.Task{Title: "Go/TypeCheck"}).Done() - - start := time.Now() + pf := mgpf.NewProfile("Go/TypeCheck") defer func() { - if d := time.Since(start); d > 200*time.Millisecond { - mx.Log.Dbg.Println("T/C") + if pf.Dur().Duration < 100*time.Millisecond { + return } + mx.Profile.Fprint(mx.Log.Dbg.Writer(), &mgpf.PrintOpts{ + MinDuration: 10 * time.Millisecond, + }) }() - + mx = mx.Copy(func(mx *mg.Ctx) { mx.Profile = pf }) v := mx.View - dir := v.Dir() - importPath := "_" - if p, err := gopkg.ImportDir(mx, dir); err == nil { - importPath = p.ImportPath - } - kp := kimporter.New(mx, nil) - fset, files, err := tc.parseFiles(mx) - issues := tc.errToIssues(err) - if err == nil && len(files) != 0 { - cfg := types.Config{ - FakeImportC: true, - Error: func(err error) { - issues = append(issues, tc.errToIssues(err)...) - }, - Importer: kp, + + src, _ := v.ReadAll() + issues := []mg.Issue{} + if v.Path == "" { + pf := goutil.ParseFile(mx, v.Name, src) + issues = append(issues, tc.errToIssues(v, pf.Error)...) + if pf.Error == nil { + tcfg := types.Config{ + IgnoreFuncBodies: true, + FakeImportC: true, + Error: func(err error) { + issues = append(issues, tc.errToIssues(v, err)...) + }, + Importer: kimporter.New(mx, nil), + } + tcfg.Check("_", pf.Fset, []*ast.File{pf.AstFile}, nil) } - _, err = cfg.Check(importPath, fset, files, nil) - issues = append(issues, tc.errToIssues(err)...) - } - if err != nil && len(issues) == 0 { - issues = append(issues, mg.Issue{Message: err.Error()}) + } else { + kp := kimporter.New(mx, &kimporter.Config{ + CheckFuncs: true, + CheckImports: true, + Tests: strings.HasSuffix(v.Filename(), "_test.go"), + SrcMap: map[string][]byte{v.Filename(): src}, + }) + _, err := kp.ImportFrom(".", v.Dir(), 0) + issues = append(issues, tc.errToIssues(v, err)...) } for i, isu := range issues { if isu.Path == "" { @@ -101,10 +108,9 @@ func (tc *TypeCheck) check(mx *mg.Ctx) { func (tc *TypeCheck) parseFiles(mx *mg.Ctx) (*token.FileSet, []*ast.File, error) { v := mx.View - fn := v.Filename() src, _ := v.ReadAll() if v.Path == "" { - pf := goutil.ParseFile(mx, fn, src) + pf := goutil.ParseFile(mx, v.Name, src) files := []*ast.File{pf.AstFile} if files[0] == nil { files = nil @@ -120,6 +126,7 @@ func (tc *TypeCheck) parseFiles(mx *mg.Ctx) (*token.FileSet, []*ast.File, error) } fset := token.NewFileSet() // TODO: caching... 
+ fn := v.Filename() af, err := parser.ParseFile(fset, fn, src, parser.ParseComments) if err != nil { return nil, nil, err @@ -143,13 +150,16 @@ func (tc *TypeCheck) parseFiles(mx *mg.Ctx) (*token.FileSet, []*ast.File, error) return fset, files, nil } -func (tc *TypeCheck) errToIssues(err error) mg.IssueSet { +func (tc *TypeCheck) errToIssues(v *mg.View, err error) mg.IssueSet { var issues mg.IssueSet switch e := err.(type) { + case nil: case scanner.ErrorList: for _, err := range e { - issues = append(issues, tc.errToIssues(err)...) + issues = append(issues, tc.errToIssues(v, err)...) } + case mg.Issue: + issues = append(issues, e) case scanner.Error: issues = append(issues, mg.Issue{ Row: e.Pos.Line - 1, @@ -164,6 +174,11 @@ func (tc *TypeCheck) errToIssues(err error) mg.IssueSet { Col: p.Column - 1, Message: e.Msg, }) + default: + issues = append(issues, mg.Issue{ + Name: v.Name, + Message: err.Error(), + }) } return issues } diff --git a/src/margo.sh/kimporter/kimporter.go b/src/margo.sh/kimporter/kimporter.go index 4122c8d3..233340bc 100644 --- a/src/margo.sh/kimporter/kimporter.go +++ b/src/margo.sh/kimporter/kimporter.go @@ -4,6 +4,7 @@ import ( "bytes" "encoding/hex" "fmt" + "go/ast" "go/build" "go/token" "go/types" @@ -13,19 +14,20 @@ import ( "golang.org/x/tools/go/gcexportdata" "margo.sh/golang/gopkg" "margo.sh/golang/goutil" + "margo.sh/memo" "margo.sh/mg" "margo.sh/mgutil" "os" "os/exec" "path/filepath" "sort" + "strconv" "strings" "sync" ) var ( - sharedCache = &stateCache{m: map[stateKey]*state{}} - pkgC = func() *types.Package { + pkgC = func() *types.Package { p := types.NewPackage("C", "C") p.MarkComplete() return p @@ -43,49 +45,68 @@ type stateKey struct { GOOS string GOROOT string GOPATH string - SrcMapHash string + NoHash bool } -type stateCache struct { - mu sync.Mutex - m map[stateKey]*state +func globalState(mx *mg.Ctx, k stateKey) *state { + type K struct{ stateKey } + return mx.VFS.ReadMemo(k.Dir, K{k}, func() memo.V { + return &state{stateKey: k} + }).(*state) } -func (sc *stateCache) state(mx *mg.Ctx, k stateKey) *state { - // TODO: support vfs invalidation. 
- // we can't (currently) make use of .Memo because it deletes the data - sc.mu.Lock() - defer sc.mu.Unlock() +type state struct { + stateKey + chkAt mgutil.AtomicInt + invAt mgutil.AtomicInt + imby struct { + sync.Mutex + l []*state + } + mu sync.Mutex + err error + pkg *types.Package + hash string +} - if v, ok := sc.m[k]; ok { - return v +func (ks *state) invalidate(invAt int64) { + ks.invAt.Set(invAt) + ks.imby.Lock() + l := ks.imby.l + ks.imby.Unlock() + for _, p := range l { + p.invalidate(invAt) } - v := &state{stateKey: k} - sc.m[k] = v - return v } -type state struct { - stateKey +func (ks *state) InvalidateMemo(invAt int64) { + ks.invalidate(invAt) +} - mu sync.Mutex - err error - pkg *types.Package - checked bool +func (ks *stateKey) targets(pp *gopkg.PkgPath) bool { + return ks.ImportPath == pp.ImportPath || ks.Dir == pp.Dir } -func (ks *state) reset() { - ks.pkg = nil - ks.err = nil - ks.checked = false +func (ks *state) importedBy(p *state) { + ks.imby.Lock() + defer ks.imby.Unlock() + + for _, q := range ks.imby.l { + if p == q { + return + } + } + ks.imby.l = append(ks.imby.l[:len(ks.imby.l):len(ks.imby.l)], p) +} + +func (ks *state) valid(hash string) bool { + return ks.chkAt.N() > ks.invAt.N() && (ks.NoHash || ks.hash == hash) } func (ks *state) result() (*types.Package, error) { switch { - case !ks.checked: - return nil, fmt.Errorf("import cycle via %s", ks.ImportPath) case ks.err != nil: - return ks.pkg, ks.err + return nil, ks.err case !ks.pkg.Complete(): // Package exists but is not complete - we cannot handle this // at the moment since the source importer replaces the package @@ -125,11 +146,12 @@ func (kp *Importer) ImportFrom(ipath, srcDir string, mode types.ImportMode) (*ty if mode != 0 { panic("non-zero import mode") } - if ipath == "C" { - return pkgC, nil - } - if ipath == "unsafe" { - return types.Unsafe, nil + return kp.importFrom(ipath, srcDir) +} + +func (kp *Importer) importFrom(ipath, srcDir string) (*types.Package, error) { + if pkg := kp.importFakePkg(ipath); pkg != nil { + return pkg, nil } if p, err := filepath.Abs(srcDir); err == nil { srcDir = p @@ -145,7 +167,9 @@ func (kp *Importer) ImportFrom(ipath, srcDir string, mode types.ImportMode) (*ty } func (kp *Importer) findPkg(ipath, srcDir string) (*gopkg.PkgPath, error) { - return kp.mp.FindPkg(kp.mx, ipath, srcDir) + kp.mx.Profile.Push(`Kim-Porter: findPkg(` + ipath + `)`).Pop() + pp, err := kp.mp.FindPkg(kp.mx, ipath, srcDir) + return pp, err } func (kp *Importer) stateKey(pp *gopkg.PkgPath) stateKey { @@ -160,50 +184,56 @@ func (kp *Importer) stateKey(pp *gopkg.PkgPath) stateKey { GOOS: kp.bld.GOOS, GOARCH: kp.bld.GOARCH, GOROOT: kp.bld.GOROOT, - SrcMapHash: kp.hash, GOPATH: strings.Join(mgutil.PathList(kp.bld.GOPATH), string(filepath.ListSeparator)), + NoHash: kp.hash == "", } } func (kp *Importer) state(pp *gopkg.PkgPath) *state { - return sharedCache.state(kp.mx, kp.stateKey(pp)) + return globalState(kp.mx, kp.stateKey(pp)) } -func (kp *Importer) detectCycle(ks *state) error { - l := []string{ks.ImportPath} - for p := kp.par; p != nil; p = p.par { - if p.ks == nil { +func (kp *Importer) detectCycle(pp *gopkg.PkgPath) error { + defer kp.mx.Profile.Start(`Kim-Porter: detectCycle()`).Stop() + + for p := kp; p != nil; p = p.par { + if p.ks == nil || !p.ks.targets(pp) { continue } - if p.ks.ImportPath != "" { - l = append(l, p.ks.ImportPath) - } - if p.ks.Dir == ks.Dir { - return fmt.Errorf("import cycle: %s", strings.Join(l, " <~ ")) + l := []string{pp.ImportPath + "(" + pp.Dir + ")"} + for p := 
kp; ; p = p.par { + if p.ks == nil { + continue + } + l = append(l, p.ks.ImportPath+"("+p.ks.Dir+")") + if p.ks.targets(pp) { + return fmt.Errorf("import cycle: %s", strings.Join(l, " <~ ")) + } } } return nil } -func (kp *Importer) importPkg(pp *gopkg.PkgPath) (*types.Package, error) { - title := "Kim-Porter: import(" + pp.Dir + ")" +func (kp *Importer) importPkg(pp *gopkg.PkgPath) (pkg *types.Package, err error) { + title := `Kim-Porter: import(` + pp.ImportPath + `)` defer kp.mx.Profile.Push(title).Pop() defer kp.mx.Begin(mg.Task{Title: title}).Done() - ks := kp.state(pp) - kx := kp.branch(ks, pp) - if err := kx.detectCycle(ks); err != nil { + if err := kp.detectCycle(pp); err != nil { return nil, err } + ks := kp.state(pp) + kx := kp.branch(ks, pp) ks.mu.Lock() defer ks.mu.Unlock() - if ks.checked { + if ks.valid(kp.hash) { return ks.result() } - ks.reset() - ks.checked = true + chkAt := memo.InvAt() ks.pkg, ks.err = kx.check(ks, pp) + ks.hash = kp.hash + ks.chkAt.Set(chkAt) return ks.result() } @@ -214,7 +244,7 @@ func (kp *Importer) check(ks *state, pp *gopkg.PkgPath) (*types.Package, error) return nil, err } - imports, err := kp.loadImports(ks, bp) + imports, err := kp.importDeps(ks, bp, fset, astFiles) if err != nil { return nil, err } @@ -226,6 +256,7 @@ func (kp *Importer) check(ks *state, pp *gopkg.PkgPath) (*types.Package, error) } } + defer kp.mx.Profile.Push(`Kim-Porter: typecheck(` + ks.ImportPath + `)`).Pop() var hardErr error tc := types.Config{ FakeImportC: true, @@ -250,7 +281,7 @@ func (kp *Importer) importCgoPkg(pp *gopkg.PkgPath, imports map[string]*types.Pa name := `go` args := []string{`list`, `-e`, `-export`, `-f={{.Export}}`, pp.Dir} ctx, cancel := context.WithCancel(context.Background()) - title := mgutil.QuoteCmd(name, args...) + title := `Kim-Porter: importCgoPkg` + mgutil.QuoteCmd(name, args...) + `)` defer kp.mx.Profile.Push(title).Pop() defer kp.mx.Begin(mg.Task{Title: title, Cancel: cancel}).Done() @@ -279,19 +310,49 @@ func (kp *Importer) importCgoPkg(pp *gopkg.PkgPath, imports map[string]*types.Pa return pkg, nil } -func (kp *Importer) loadImports(ks *state, bp *build.Package) (map[string]*types.Package, error) { +func (kp *Importer) importFakePkg(ipath string) *types.Package { + switch ipath { + case "unsafe": + return types.Unsafe + case "C": + return pkgC + } + return nil +} + +func (kp *Importer) importDeps(ks *state, bp *build.Package, fset *token.FileSet, astFiles []*ast.File) (map[string]*types.Package, error) { + defer kp.mx.Profile.Push(`Kim-Porter: importDeps(` + ks.ImportPath + `)`).Pop() + paths := mgutil.StrSet(bp.Imports) if ks.Tests { paths = paths.Add(bp.TestImports...) 
} - imports := make(map[string]*types.Package, len(paths)) mu := sync.Mutex{} + imports := make(map[string]*types.Package, len(paths)) doImport := func(ipath string) error { - pkg, err := kp.ImportFrom(ipath, bp.Dir, 0) + pkg, err := kp.importFrom(ipath, bp.Dir) if err == nil { mu.Lock() imports[ipath] = pkg mu.Unlock() + return nil + } + for _, af := range astFiles { + for _, spec := range af.Imports { + if spec.Path == nil { + continue + } + s, _ := strconv.Unquote(spec.Path.Value) + if ipath != s { + continue + } + tp := fset.Position(spec.Pos()) + return mg.Issue{ + Row: tp.Line - 1, + Col: tp.Column - 1, + Message: err.Error(), + } + } } return err } @@ -317,7 +378,6 @@ func (kp *Importer) loadImports(ks *state, bp *build.Package) (map[string]*types } } return nil - }) } return imports, errg.Wait() @@ -340,10 +400,15 @@ func (kp *Importer) branch(ks *state, pp *gopkg.PkgPath) *Importer { if pp.Mod != nil { kx.mp = pp.Mod } + if kp.ks != nil { + // TODO: we need clear this if it's no longer true + ks.importedBy(kp.ks) + } // user settings don't apply when checking deps kx.cfg.CheckFuncs = false kx.cfg.CheckImports = false kx.cfg.Tests = false + kx.hash = "" kx.ks = ks kx.par = kp kx.setupJs(pp) @@ -352,6 +417,7 @@ func (kp *Importer) branch(ks *state, pp *gopkg.PkgPath) *Importer { func New(mx *mg.Ctx, cfg *Config) *Importer { bld := goutil.BuildContext(mx) + bld.BuildTags = append(bld.BuildTags, "netgo", "osusergo") kp := &Importer{ mx: mx, bld: bld, diff --git a/src/margo.sh/kimporter/kimporter_test.go b/src/margo.sh/kimporter/kimporter_test.go deleted file mode 100644 index 3d7a6212..00000000 --- a/src/margo.sh/kimporter/kimporter_test.go +++ /dev/null @@ -1,34 +0,0 @@ -package kimporter - -import ( - "go/build" - "margo.sh/mg" - "os" - "testing" - "time" -) - -func init() { - build.Default.GOPATH = "/user/gp/" - os.Setenv("GOPATH", build.Default.GOPATH) -} - -func Test(t *testing.T) { - mx := mg.NewTestingAgent(nil, nil, os.Stderr).Store.NewCtx(nil) - mx.Log.Dbg.SetFlags(0) - mx.Log.Dbg.SetPrefix("") - - ipath := "." 
- srcDirs := []string{ - "/user/gp/src/github.com/faiface/pixel/pixelgl/", - "/krku/src/oya.to/fabricant/", - } - for _, srcDir := range srcDirs { - for i := 0; i < 1; i++ { - kp := New(mx, &Config{Tests: true, NoConcurrency: false}) - start := time.Now() - pkg, err := kp.ImportFrom(ipath, srcDir, 0) - mx.Log.Dbg.Println("pk:", srcDir, "\n> ", pkg, "\n> ", time.Since(start), pkg != nil && pkg.Complete(), err) - } - } -} diff --git a/src/margo.sh/kimporter/parse.go b/src/margo.sh/kimporter/parse.go index b6c20819..f9d4b2a3 100644 --- a/src/margo.sh/kimporter/parse.go +++ b/src/margo.sh/kimporter/parse.go @@ -45,6 +45,8 @@ func (kf *kpFile) init() { } func parseDir(mx *mg.Ctx, bcx *build.Context, fset *token.FileSet, dir string, srcMap map[string][]byte, ks *state) (*build.Package, []*kpFile, []*ast.File, error) { + defer mx.Profile.Push(`Kim-Porter: parseDir(` + dir + `)`).Pop() + bp, err := bcx.ImportDir(dir, 0) if err != nil { return nil, nil, nil, err diff --git a/src/margo.sh/memo/memo.go b/src/margo.sh/memo/memo.go new file mode 100644 index 00000000..edb4e327 --- /dev/null +++ b/src/margo.sh/memo/memo.go @@ -0,0 +1,158 @@ +package memo + +import ( + "sync" + "sync/atomic" +) + +var ( + invAtState int64 +) + +type K = interface{} + +type V = interface{} + +type Sticky interface { + InvalidateMemo(invAt int64) +} + +func InvAt() int64 { + return atomic.AddInt64(&invAtState, 1) +} + +type memo struct { + k K + sync.Mutex + v V +} + +func (m *memo) value() V { + if m == nil { + return nil + } + m.Lock() + defer m.Unlock() + + return m.v +} + +type M struct { + mu sync.Mutex + ml []*memo +} + +func (m *M) index(k K) (int, *memo) { + for i, p := range m.ml { + if p.k == k { + return i, p + } + } + return -1, nil +} + +func (m *M) memo(k K) *memo { + m.mu.Lock() + defer m.mu.Unlock() + + _, p := m.index(k) + if p == nil { + p = &memo{k: k} + m.ml = append(m.ml, p) + } + return p +} + +func (m *M) Peek(k K) V { + if m == nil { + return nil + } + + m.mu.Lock() + _, p := m.index(k) + m.mu.Unlock() + + return p.value() +} + +func (m *M) Read(k K, new func() V) V { + if m == nil { + return new() + } + + p := m.memo(k) + p.Lock() + defer p.Unlock() + + if p.v != nil { + return p.v + } + p.v = new() + if p.v != nil { + return p.v + } + m.Del(k) + return nil +} + +func (m *M) Del(k K) { + if m == nil { + return + } + + m.mu.Lock() + defer m.mu.Unlock() + + i, _ := m.index(k) + if i < 0 { + return + } + + m.ml[i] = m.ml[len(m.ml)-1] + m.ml[len(m.ml)-1] = nil + m.ml = m.ml[:len(m.ml)-1] +} + +func (m *M) Clear() { + if m == nil { + return + } + invAt := InvAt() + stkl := m.clear() + for _, stk := range stkl { + stk.InvalidateMemo(invAt) + } +} + +func (m *M) clear() []Sticky { + m.mu.Lock() + defer m.mu.Unlock() + + ml := m.ml + m.ml = nil + stkl := []Sticky{} + for _, p := range ml { + if stk, ok := p.value().(Sticky); ok { + m.ml = append(m.ml, p) + stkl = append(stkl, stk) + } + } + return stkl +} + +func (m *M) Values() map[K]V { + if m == nil { + return nil + } + + m.mu.Lock() + defer m.mu.Unlock() + + vals := make(map[K]V, len(m.ml)) + for k, p := range m.ml { + if v := p.value(); v != nil { + vals[k] = v + } + } + return vals +} diff --git a/src/margo.sh/mg/action.go b/src/margo.sh/mg/action.go index 3bd3ecc6..8109dbe6 100644 --- a/src/margo.sh/mg/action.go +++ b/src/margo.sh/mg/action.go @@ -2,6 +2,7 @@ package mg import ( "margo.sh/mg/actions" + "reflect" ) var ( @@ -103,3 +104,66 @@ type ViewSaved struct{ ActionType } type ViewLoaded struct{ ActionType } type unmount struct{ ActionType } + 
+type ctxActs struct { + l []Action + i int +} + +func (a *ctxActs) Len() int { + return len(a.List()) +} + +func (a *ctxActs) Index() int { + if a.Len() == 0 { + return -1 + } + return a.i +} + +func (a *ctxActs) Current() Action { + i := a.Index() + if i < 0 || i >= a.Len() { + return nil + } + return a.l[i] +} + +func (a *ctxActs) First() bool { + return a.Index() == 0 +} + +func (a *ctxActs) Last() bool { + return a.Index() == a.Len()-1 +} + +func (a *ctxActs) List() []Action { + if a == nil { + return nil + } + return a.l +} + +func (a *ctxActs) Include(actions ...Action) bool { + for _, p := range a.List() { + pt := reflect.TypeOf(p) + for _, q := range actions { + if reflect.TypeOf(q) == pt { + return true + } + } + } + return false +} + +func (a *ctxActs) Set(actPtr interface{}) bool { + p := reflect.ValueOf(actPtr).Elem() + for _, v := range a.List() { + q := reflect.ValueOf(v) + if p.Type() == q.Type() { + p.Set(q) + return true + } + } + return false +} diff --git a/src/margo.sh/mg/agent.go b/src/margo.sh/mg/agent.go index 43aa5483..6ad1b510 100644 --- a/src/margo.sh/mg/agent.go +++ b/src/margo.sh/mg/agent.go @@ -124,6 +124,7 @@ func (rs agentRes) finalize() interface{} { Config interface{} ClientActions []actions.ClientData Status []string + Issues IssueSet } }{} @@ -136,6 +137,8 @@ func (rs agentRes) finalize() interface{} { inSt := &out.State.State outSt := &out.State + outSt.Issues = outSt.Issues.merge(outSt.View, inSt.Issues...) + outSt.Status = make([]string, len(inSt.Status)) for i, s := range inSt.Status { outSt.Status[i] = StatusPrefix + s diff --git a/src/margo.sh/mg/ctx.go b/src/margo.sh/mg/ctx.go index e8c8b3f7..b6bb6d64 100644 --- a/src/margo.sh/mg/ctx.go +++ b/src/margo.sh/mg/ctx.go @@ -37,6 +37,8 @@ type Ctx struct { // e.g. that the view is about to be saved or that it was changed. Action Action `mg.Nillable:"true"` + Acts *ctxActs + // KVMap is an in-memory cache of data with for the lifetime of the Ctx. 
*KVMap @@ -61,7 +63,7 @@ type Ctx struct { // newCtx creates a new Ctx // if st is nil, the state will be set to the equivalent of Store.state.new() // if p is nil a new Profile will be created with cookie as its name -func newCtx(sto *Store, st *State, act Action, cookie string, p *mgpf.Profile) *Ctx { +func newCtx(sto *Store, st *State, acts *ctxActs, cookie string, p *mgpf.Profile, kv *KVMap) *Ctx { if st == nil { st = sto.state.new() } @@ -71,10 +73,14 @@ func newCtx(sto *Store, st *State, act Action, cookie string, p *mgpf.Profile) * if p == nil { p = mgpf.NewProfile(cookie) } + if kv == nil { + kv = &KVMap{} + } return &Ctx{ State: st, - Action: act, - KVMap: &KVMap{}, + Action: acts.Current(), + Acts: acts, + KVMap: kv, Store: sto, Log: sto.ag.Log, Cookie: cookie, @@ -183,8 +189,9 @@ func (mx *Ctx) Begin(t Task) *TaskTicket { return mx.Store.Begin(t) } -func (mx *Ctx) Defer(f ReduceFn) { +func (mx *Ctx) Defer(f ReduceFn) *State { mx.defr.prepend(f) + return mx.State } type redFns struct { diff --git a/src/margo.sh/mg/issue.go b/src/margo.sh/mg/issue.go index 4fd5f33f..5be91991 100644 --- a/src/margo.sh/mg/issue.go +++ b/src/margo.sh/mg/issue.go @@ -2,6 +2,7 @@ package mg import ( "bytes" + "fmt" "margo.sh/htm" "margo.sh/mgutil" "os" @@ -79,11 +80,34 @@ type Issue struct { Message string } -func (isu *Issue) finalize() Issue { +func (isu Issue) Error() string { + msg := isu.Message + pfx := "" + if isu.Tag != "" { + pfx = "[" + string(isu.Tag) + "]" + pfx + } + if isu.Label != "" { + pfx = "[" + isu.Label + "]" + pfx + } + if pfx != "" { + pfx = pfx + ": " + } + fn := isu.Path + if fn == "" { + fn = isu.Name + } + return fmt.Sprintf("%s:%d:%d: %s%s", fn, isu.Row+1, isu.Col+1, pfx, msg) +} + +func (isu *Issue) finalize(view *View) Issue { v := *isu if v.Tag == "" { v.Tag = Error } + if isu.InView(view) { + v.Path = "" + v.Name = view.Name + } return v } @@ -111,13 +135,9 @@ func (isu *Issue) SameFile(p Issue) bool { } func (isu *Issue) InView(v *View) bool { - if isu.Path != "" && isu.Path == v.Path { - return true - } - if isu.Name != "" && isu.Name == v.Name { - return true - } - return false + return (isu.Name != "" && isu.Name == v.Name) || + (isu.Path != "" && isu.Path == v.Path) || + (isu.Path != "" && filepath.Base(isu.Path) == v.Name) } func (isu *Issue) Valid() bool { @@ -139,18 +159,29 @@ func (s IssueSet) Equal(issues IssueSet) bool { } func (s IssueSet) Add(l ...Issue) IssueSet { - m := make(map[issueHash]*Issue, len(s)+len(l)) - for _, lst := range []IssueSet{s, IssueSet(l)} { - for i, _ := range lst { - isu := &lst[i] - m[isu.hash()] = isu - } + return s.merge(nil, l...) 
+} + +func (s IssueSet) merge(view *View, l ...Issue) IssueSet { + if len(l) == 0 { + return s } - s = make(IssueSet, 0, len(m)) - for _, isu := range m { - s = append(s, isu.finalize()) + res := make(IssueSet, 0, len(s)+len(l)) + seen := make(map[issueHash]bool, cap(res)) + for _, isus := range [][]Issue{s, l} { + for _, isu := range isus { + if view != nil { + isu = isu.finalize(view) + } + ish := isu.hash() + if seen[ish] { + continue + } + seen[ish] = true + res = append(res, isu) + } } - return s + return res } func (s IssueSet) Remove(l ...Issue) IssueSet { diff --git a/src/margo.sh/mg/issue_test.go b/src/margo.sh/mg/issue_test.go index cce98a5a..5d7c07eb 100644 --- a/src/margo.sh/mg/issue_test.go +++ b/src/margo.sh/mg/issue_test.go @@ -1,3 +1,5 @@ +// +build !windows + package mg import ( diff --git a/src/margo.sh/mg/oom.go b/src/margo.sh/mg/oom.go new file mode 100644 index 00000000..a232a7bb --- /dev/null +++ b/src/margo.sh/mg/oom.go @@ -0,0 +1,5 @@ +package mg + +const ( + DefaultMemoryLimit = 2 << 30 +) diff --git a/src/margo.sh/mg/oom_nix.go b/src/margo.sh/mg/oom_nix.go new file mode 100644 index 00000000..1d0ed414 --- /dev/null +++ b/src/margo.sh/mg/oom_nix.go @@ -0,0 +1,26 @@ +//+build !windows + +package mg + +import ( + "syscall" +) + +func SetMemoryLimit(logs interface { + Printf(string, ...interface{}) +}, b uint64) { + rlim := &syscall.Rlimit{Cur: b, Max: b} + if err := syscall.Getrlimit(syscall.RLIMIT_DATA, rlim); err != nil { + logs.Printf("SetMemoryLimit: cannot get RLIMIT_DATA: %s\n", err) + return + } + rlim.Cur = b + mib := b / (1 << 20) + if err := syscall.Setrlimit(syscall.RLIMIT_DATA, rlim); err != nil { + logs.Printf("SetMemoryLimit: limit=%dMiB, cannot set RLIMIT_DATA: %s\n", rlim.Cur/(1<<20), err) + return + } + // re-read it so we see what it was actually set to + syscall.Getrlimit(syscall.RLIMIT_DATA, rlim) + logs.Printf("SetMemoryLimit: limit=%dMiB, RLIMIT_DATA={Cur: %dMiB, Max:%dMiB}\n", mib, rlim.Cur/(1<<20), rlim.Max/(1<<20)) +} diff --git a/src/margo.sh/mg/oom_win.go b/src/margo.sh/mg/oom_win.go new file mode 100644 index 00000000..f3eb175f --- /dev/null +++ b/src/margo.sh/mg/oom_win.go @@ -0,0 +1,9 @@ +//+build windows + +package mg + +func SetMemoryLimit(logs interface { + Printf(string, ...interface{}) +}, b uint64) { + logs.Printf("SetMemoryLimit: not supported on Windows") +} diff --git a/src/margo.sh/mg/restart.go b/src/margo.sh/mg/restart.go index e9dbc1ed..b5ad7614 100644 --- a/src/margo.sh/mg/restart.go +++ b/src/margo.sh/mg/restart.go @@ -65,16 +65,6 @@ func (rs *restartSupport) loop() { } func (rs *restartSupport) mgPkg(mx *Ctx) *build.Package { - v := mx.View - if !strings.HasSuffix(v.Path, ".go") || strings.HasSuffix(v.Path, "_test.go") { - return nil - } - - src, _ := mx.View.ReadAll() - if bytes.Contains(src, []byte(`//margo:no-restart`)) { - return nil - } - pkg, _ := youtsuba.AgentBuildContext.ImportDir(mx.View.Dir(), 0) if pkg == nil || pkg.ImportPath == "" { return nil @@ -101,13 +91,23 @@ func (rs *restartSupport) onInit(mx *Ctx) { } func (rs *restartSupport) onSave(mx *Ctx) { + v := mx.View + if !strings.HasSuffix(v.Path, ".go") || strings.HasSuffix(v.Path, "_test.go") { + return + } pkg := rs.mgPkg(mx) if pkg == nil { return } - res := rsIssues{issues: rs.slowLint(mx, pkg)} mx.Store.Dispatch(res) + if mx.Env.Get("MARGO_NO_RESTART", "") == "1" { + return + } + src, _ := mx.View.ReadAll() + if s := []byte(`//margo:no-restart`); bytes.Contains(src, s) { + return + } if len(res.issues) == 0 { mx.Log.Println(pkg.ImportPath, "saved with 
no issues, restarting") mx.Store.Dispatch(Restart{}) diff --git a/src/margo.sh/mg/store.go b/src/margo.sh/mg/store.go index 7bbb554f..9c74b7c8 100644 --- a/src/margo.sh/mg/store.go +++ b/src/margo.sh/mg/store.go @@ -24,7 +24,7 @@ type storeReducers struct { after reducerList } -func (sr storeReducers) Reduce(mx *Ctx) *Ctx { +func (sr storeReducers) reduction(mx *Ctx) *Ctx { mx.Profile.Do("Before", func() { mx = sr.before.reduction(mx) }) @@ -146,10 +146,16 @@ func (sto *Store) dispatcher() { } } -func (sto *Store) handleReduce(mx *Ctx) *Ctx { - defer mx.Profile.Push("action|" + ActionLabel(mx.Action)).Pop() - - return sto.reducers.Reduce(mx) +func (sto *Store) handleReduction(mx *Ctx, cookie string, pf *mgpf.Profile) *Ctx { + for mx.Acts.i = 0; mx.Acts.i < len(mx.Acts.l); mx.Acts.i++ { + st := mx.State.new() + st.Errors = mx.State.Errors + mx = newCtx(sto, st, mx.Acts, cookie, pf, mx.KVMap) + mx.Profile.Do("action|"+ActionLabel(mx.Action), func() { + mx = sto.reducers.reduction(mx) + }) + } + return mx } func (sto *Store) handle(h func() *Ctx, p *mgpf.Profile) { @@ -173,37 +179,30 @@ func (sto *Store) handleAct(act Action, p *mgpf.Profile) { p = mgpf.NewProfile("") } sto.handle(func() *Ctx { - mx := newCtx(sto, nil, act, "", p) - return sto.handleReduce(mx) + mx := newCtx(sto, nil, &ctxActs{l: []Action{act}}, "", p, nil) + return sto.handleReduction(mx, "", p) }, p) } func (sto *Store) handleReq(rq *agentReq) { sto.handle(func() *Ctx { - newMx := func(st *State, act Action) *Ctx { - return newCtx(sto, st, act, rq.Cookie, rq.Profile) - } - mx, acts := sto.handleReqInit(rq, newMx(nil, nil)) - for _, act := range acts { - st := mx.State.new() - st.Errors = mx.State.Errors - mx = newMx(st, act) - mx = sto.handleReduce(mx) - } - return mx + mx := sto.handleReqInit(rq, newCtx(sto, nil, nil, rq.Cookie, rq.Profile, nil)) + return sto.handleReduction(mx, rq.Cookie, rq.Profile) }, rq.Profile) } -func (sto *Store) handleReqInit(rq *agentReq, mx *Ctx) (*Ctx, []Action) { +func (sto *Store) handleReqInit(rq *agentReq, mx *Ctx) *Ctx { defer mx.Profile.Push("init").Pop() - acts := make([]Action, 0, len(rq.Actions)) + if mx.Acts == nil { + mx.Acts = &ctxActs{l: make([]Action, 0, len(rq.Actions))} + } for _, ra := range rq.Actions { act, err := sto.ag.createAction(ra) if err != nil { mx.State = mx.AddErrorf("createAction(%s): %s", ra.Name, err) } else { - acts = append(acts, act) + mx.Acts.l = append(mx.Acts.l, act) } } @@ -223,7 +222,7 @@ func (sto *Store) handleReqInit(rq *agentReq, mx *Ctx) (*Ctx, []Action) { mx.Env = props.Env } mx.Env = sto.autoSwitchInternalGOPATH(mx) - return mx, acts + return mx } // autoSwitchInternalGOPATH returns mx.Env with GOPATH set to the agent's GOPATH @@ -248,7 +247,7 @@ func (sto *Store) NewCtx(act Action) *Ctx { sto.mu.Lock() defer sto.mu.Unlock() - return newCtx(sto, nil, act, "", nil) + return newCtx(sto, nil, &ctxActs{l: []Action{act}}, "", nil, nil) } func newStore(ag *Agent, sub Subscriber) *Store { diff --git a/src/margo.sh/mg/vfs.go b/src/margo.sh/mg/vfs.go index 64ca49ca..70220f46 100644 --- a/src/margo.sh/mg/vfs.go +++ b/src/margo.sh/mg/vfs.go @@ -12,9 +12,13 @@ var ( type vfsCmd struct{ ReducerType } func (vc *vfsCmd) Reduce(mx *Ctx) *State { + v := mx.View switch mx.Action.(type) { + case ViewModified, ViewLoaded: + mx.VFS.Invalidate(v.Name) case ViewSaved: - go vc.sync(mx) + mx.VFS.Invalidate(v.Name) + mx.VFS.Invalidate(v.Filename()) case RunCmd: return mx.AddBuiltinCmds(BuiltinCmd{ Name: ".vfs", @@ -34,14 +38,14 @@ func (vc *vfsCmd) cmd(cx *CmdCtx) { defer 
cx.Output.Close() if len(cx.Args) == 0 { - vFS.Print(cx.Output) + cx.VFS.Print(cx.Output) return } for _, p := range cx.Args { - nd, pat := &vFS.Node, p + nd, pat := &cx.VFS.Node, p if filepath.IsAbs(p) { - nd, pat = vFS.Peek(filepath.Dir(p)), filepath.Base(p) + nd, pat = cx.VFS.Peek(filepath.Dir(p)), filepath.Base(p) } nd.PrintWithFilter(cx.Output, func(nd *vfs.Node) string { if nd.IsBranch() { @@ -55,12 +59,6 @@ func (vc *vfsCmd) cmd(cx *CmdCtx) { } } -func (vc *vfsCmd) sync(mx *Ctx) { - v := mx.View - vFS.Invalidate(v.Filename()) - vFS.Invalidate(v.Dir()) -} - func init() { DefaultReducers.Before(&vfsCmd{}) } diff --git a/src/margo.sh/mgpf/mgpf.go b/src/margo.sh/mgpf/mgpf.go index f1dcd924..a38e2ba7 100644 --- a/src/margo.sh/mgpf/mgpf.go +++ b/src/margo.sh/mgpf/mgpf.go @@ -12,7 +12,7 @@ var ( enabled = &mgutil.AtomicBool{} DefaultPrintOpts = PrintOpts{ - Indent: "\t", + Indent: " ", } ) @@ -141,6 +141,14 @@ func (p *Profile) Pop() { }) } +func (p *Profile) Start(name string) *Sample { + s := &Sample{t: time.Now(), p: p} + p.update(func() { + s.n = p.stack[len(p.stack)-1].child(name) + }) + return s +} + func (p *Profile) Sample(name string, d time.Duration) { p.update(func() { n := p.stack[len(p.stack)-1].child(name) @@ -189,3 +197,16 @@ func NewProfile(name string) *Profile { func Since(t time.Time) Dur { return D(time.Since(t)) } + +type Sample struct { + t time.Time + p *Profile + n *Node +} + +func (s *Sample) Stop() { + s.p.update(func() { + s.n.Duration += time.Since(s.t) + s.n.Samples++ + }) +} diff --git a/src/margo.sh/mgutil/memo.go b/src/margo.sh/mgutil/memo.go index 4bf0fa46..d9b8e724 100644 --- a/src/margo.sh/mgutil/memo.go +++ b/src/margo.sh/mgutil/memo.go @@ -1,107 +1,7 @@ package mgutil import ( - "sync" + "margo.sh/memo" ) -type memo struct { - sync.Mutex - k, v interface{} -} - -func (m *memo) value() interface{} { - m.Lock() - defer m.Unlock() - - return m.v -} - -func (m *memo) read(new func() interface{}) interface{} { - m.Lock() - defer m.Unlock() - - if m.v != nil { - return m.v - } - m.v = new() - return m.v -} - -type Memo struct { - mu sync.Mutex - ml []*memo -} - -func (m *Memo) index(k interface{}) (int, *memo) { - for i, p := range m.ml { - if p.k == k { - return i, p - } - } - return -1, nil -} - -func (m *Memo) memo(k interface{}) *memo { - m.mu.Lock() - defer m.mu.Unlock() - - _, p := m.index(k) - if p == nil { - p = &memo{k: k} - m.ml = append(m.ml, p) - } - return p -} - -func (m *Memo) Read(k interface{}, new func() interface{}) interface{} { - if m == nil { - return new() - } - return m.memo(k).read(new) -} - -func (m *Memo) Del(k interface{}) { - if m == nil { - return - } - - m.mu.Lock() - defer m.mu.Unlock() - - i, _ := m.index(k) - if i < 0 { - return - } - - m.ml[i] = m.ml[len(m.ml)-1] - m.ml[len(m.ml)-1] = nil - m.ml = m.ml[:len(m.ml)-1] -} - -func (m *Memo) Clear() { - if m == nil { - return - } - - m.mu.Lock() - defer m.mu.Unlock() - - m.ml = nil -} - -func (m *Memo) Values() map[interface{}]interface{} { - if m == nil { - return nil - } - - m.mu.Lock() - defer m.mu.Unlock() - - vals := make(map[interface{}]interface{}, len(m.ml)) - for k, p := range m.ml { - if v := p.value(); v != nil { - vals[k] = v - } - } - return vals -} +type Memo = memo.M diff --git a/src/margo.sh/mgutil/path_test.go b/src/margo.sh/mgutil/path_test.go index 50ee85a1..08674bcf 100644 --- a/src/margo.sh/mgutil/path_test.go +++ b/src/margo.sh/mgutil/path_test.go @@ -1,3 +1,5 @@ +// +build !windows + package mgutil import ( diff --git a/src/margo.sh/mgutil/sync.go 
b/src/margo.sh/mgutil/sync.go index 62a97024..75c4b3a1 100644 --- a/src/margo.sh/mgutil/sync.go +++ b/src/margo.sh/mgutil/sync.go @@ -17,3 +17,33 @@ func (a *AtomicBool) Set(v bool) { func (a *AtomicBool) IsSet() bool { return atomic.LoadInt32(&a.n) != 0 } + +type AtomicInt int64 + +func (i *AtomicInt) N() int64 { + return atomic.LoadInt64((*int64)(i)) +} + +func (i *AtomicInt) Set(n int64) { + atomic.StoreInt64((*int64)(i), n) +} + +func (i *AtomicInt) Swap(old, new int64) { + atomic.CompareAndSwapInt64((*int64)(i), old, new) +} + +func (i *AtomicInt) Inc() int64 { + return atomic.AddInt64((*int64)(i), 1) +} + +func (i *AtomicInt) Dec() int64 { + return atomic.AddInt64((*int64)(i), -1) +} + +func (i *AtomicInt) Add(n int64) int64 { + return atomic.AddInt64((*int64)(i), n) +} + +func (i *AtomicInt) Sub(n int64) int64 { + return atomic.AddInt64((*int64)(i), -n) +} diff --git a/src/margo.sh/vendor/github.com/cpuguy83/go-md2man/LICENSE.md b/src/margo.sh/vendor/github.com/cpuguy83/go-md2man/LICENSE.md new file mode 100644 index 00000000..1cade6ce --- /dev/null +++ b/src/margo.sh/vendor/github.com/cpuguy83/go-md2man/LICENSE.md @@ -0,0 +1,21 @@ +The MIT License (MIT) + +Copyright (c) 2014 Brian Goff + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/src/margo.sh/vendor/github.com/cpuguy83/go-md2man/md2man/md2man.go b/src/margo.sh/vendor/github.com/cpuguy83/go-md2man/md2man/md2man.go new file mode 100644 index 00000000..af62279a --- /dev/null +++ b/src/margo.sh/vendor/github.com/cpuguy83/go-md2man/md2man/md2man.go @@ -0,0 +1,20 @@ +package md2man + +import ( + "github.com/russross/blackfriday" +) + +// Render converts a markdown document into a roff formatted document. 
+func Render(doc []byte) []byte { + renderer := RoffRenderer(0) + extensions := 0 + extensions |= blackfriday.EXTENSION_NO_INTRA_EMPHASIS + extensions |= blackfriday.EXTENSION_TABLES + extensions |= blackfriday.EXTENSION_FENCED_CODE + extensions |= blackfriday.EXTENSION_AUTOLINK + extensions |= blackfriday.EXTENSION_SPACE_HEADERS + extensions |= blackfriday.EXTENSION_FOOTNOTES + extensions |= blackfriday.EXTENSION_TITLEBLOCK + + return blackfriday.Markdown(doc, renderer, extensions) +} diff --git a/src/margo.sh/vendor/github.com/cpuguy83/go-md2man/md2man/roff.go b/src/margo.sh/vendor/github.com/cpuguy83/go-md2man/md2man/roff.go new file mode 100644 index 00000000..8c29ec68 --- /dev/null +++ b/src/margo.sh/vendor/github.com/cpuguy83/go-md2man/md2man/roff.go @@ -0,0 +1,285 @@ +package md2man + +import ( + "bytes" + "fmt" + "html" + "strings" + + "github.com/russross/blackfriday" +) + +type roffRenderer struct { + ListCounters []int +} + +// RoffRenderer creates a new blackfriday Renderer for generating roff documents +// from markdown +func RoffRenderer(flags int) blackfriday.Renderer { + return &roffRenderer{} +} + +func (r *roffRenderer) GetFlags() int { + return 0 +} + +func (r *roffRenderer) TitleBlock(out *bytes.Buffer, text []byte) { + out.WriteString(".TH ") + + splitText := bytes.Split(text, []byte("\n")) + for i, line := range splitText { + line = bytes.TrimPrefix(line, []byte("% ")) + if i == 0 { + line = bytes.Replace(line, []byte("("), []byte("\" \""), 1) + line = bytes.Replace(line, []byte(")"), []byte("\" \""), 1) + } + line = append([]byte("\""), line...) + line = append(line, []byte("\" ")...) + out.Write(line) + } + out.WriteString("\n") + + // disable hyphenation + out.WriteString(".nh\n") + // disable justification (adjust text to left margin only) + out.WriteString(".ad l\n") +} + +func (r *roffRenderer) BlockCode(out *bytes.Buffer, text []byte, lang string) { + out.WriteString("\n.PP\n.RS\n\n.nf\n") + escapeSpecialChars(out, text) + out.WriteString("\n.fi\n.RE\n") +} + +func (r *roffRenderer) BlockQuote(out *bytes.Buffer, text []byte) { + out.WriteString("\n.PP\n.RS\n") + out.Write(text) + out.WriteString("\n.RE\n") +} + +func (r *roffRenderer) BlockHtml(out *bytes.Buffer, text []byte) { // nolint: golint + out.Write(text) +} + +func (r *roffRenderer) Header(out *bytes.Buffer, text func() bool, level int, id string) { + marker := out.Len() + + switch { + case marker == 0: + // This is the doc header + out.WriteString(".TH ") + case level == 1: + out.WriteString("\n\n.SH ") + case level == 2: + out.WriteString("\n.SH ") + default: + out.WriteString("\n.SS ") + } + + if !text() { + out.Truncate(marker) + return + } +} + +func (r *roffRenderer) HRule(out *bytes.Buffer) { + out.WriteString("\n.ti 0\n\\l'\\n(.lu'\n") +} + +func (r *roffRenderer) List(out *bytes.Buffer, text func() bool, flags int) { + marker := out.Len() + r.ListCounters = append(r.ListCounters, 1) + out.WriteString("\n.RS\n") + if !text() { + out.Truncate(marker) + return + } + r.ListCounters = r.ListCounters[:len(r.ListCounters)-1] + out.WriteString("\n.RE\n") +} + +func (r *roffRenderer) ListItem(out *bytes.Buffer, text []byte, flags int) { + if flags&blackfriday.LIST_TYPE_ORDERED != 0 { + out.WriteString(fmt.Sprintf(".IP \"%3d.\" 5\n", r.ListCounters[len(r.ListCounters)-1])) + r.ListCounters[len(r.ListCounters)-1]++ + } else { + out.WriteString(".IP \\(bu 2\n") + } + out.Write(text) + out.WriteString("\n") +} + +func (r *roffRenderer) Paragraph(out *bytes.Buffer, text func() bool) { + marker := out.Len() 
+ out.WriteString("\n.PP\n") + if !text() { + out.Truncate(marker) + return + } + if marker != 0 { + out.WriteString("\n") + } +} + +func (r *roffRenderer) Table(out *bytes.Buffer, header []byte, body []byte, columnData []int) { + out.WriteString("\n.TS\nallbox;\n") + + maxDelims := 0 + lines := strings.Split(strings.TrimRight(string(header), "\n")+"\n"+strings.TrimRight(string(body), "\n"), "\n") + for _, w := range lines { + curDelims := strings.Count(w, "\t") + if curDelims > maxDelims { + maxDelims = curDelims + } + } + out.Write([]byte(strings.Repeat("l ", maxDelims+1) + "\n")) + out.Write([]byte(strings.Repeat("l ", maxDelims+1) + ".\n")) + out.Write(header) + if len(header) > 0 { + out.Write([]byte("\n")) + } + + out.Write(body) + out.WriteString("\n.TE\n") +} + +func (r *roffRenderer) TableRow(out *bytes.Buffer, text []byte) { + if out.Len() > 0 { + out.WriteString("\n") + } + out.Write(text) +} + +func (r *roffRenderer) TableHeaderCell(out *bytes.Buffer, text []byte, align int) { + if out.Len() > 0 { + out.WriteString("\t") + } + if len(text) == 0 { + text = []byte{' '} + } + out.Write([]byte("\\fB\\fC" + string(text) + "\\fR")) +} + +func (r *roffRenderer) TableCell(out *bytes.Buffer, text []byte, align int) { + if out.Len() > 0 { + out.WriteString("\t") + } + if len(text) > 30 { + text = append([]byte("T{\n"), text...) + text = append(text, []byte("\nT}")...) + } + if len(text) == 0 { + text = []byte{' '} + } + out.Write(text) +} + +func (r *roffRenderer) Footnotes(out *bytes.Buffer, text func() bool) { + +} + +func (r *roffRenderer) FootnoteItem(out *bytes.Buffer, name, text []byte, flags int) { + +} + +func (r *roffRenderer) AutoLink(out *bytes.Buffer, link []byte, kind int) { + out.WriteString("\n\\[la]") + out.Write(link) + out.WriteString("\\[ra]") +} + +func (r *roffRenderer) CodeSpan(out *bytes.Buffer, text []byte) { + out.WriteString("\\fB\\fC") + escapeSpecialChars(out, text) + out.WriteString("\\fR") +} + +func (r *roffRenderer) DoubleEmphasis(out *bytes.Buffer, text []byte) { + out.WriteString("\\fB") + out.Write(text) + out.WriteString("\\fP") +} + +func (r *roffRenderer) Emphasis(out *bytes.Buffer, text []byte) { + out.WriteString("\\fI") + out.Write(text) + out.WriteString("\\fP") +} + +func (r *roffRenderer) Image(out *bytes.Buffer, link []byte, title []byte, alt []byte) { +} + +func (r *roffRenderer) LineBreak(out *bytes.Buffer) { + out.WriteString("\n.br\n") +} + +func (r *roffRenderer) Link(out *bytes.Buffer, link []byte, title []byte, content []byte) { + out.Write(content) + r.AutoLink(out, link, 0) +} + +func (r *roffRenderer) RawHtmlTag(out *bytes.Buffer, tag []byte) { // nolint: golint + out.Write(tag) +} + +func (r *roffRenderer) TripleEmphasis(out *bytes.Buffer, text []byte) { + out.WriteString("\\s+2") + out.Write(text) + out.WriteString("\\s-2") +} + +func (r *roffRenderer) StrikeThrough(out *bytes.Buffer, text []byte) { +} + +func (r *roffRenderer) FootnoteRef(out *bytes.Buffer, ref []byte, id int) { + +} + +func (r *roffRenderer) Entity(out *bytes.Buffer, entity []byte) { + out.WriteString(html.UnescapeString(string(entity))) +} + +func (r *roffRenderer) NormalText(out *bytes.Buffer, text []byte) { + escapeSpecialChars(out, text) +} + +func (r *roffRenderer) DocumentHeader(out *bytes.Buffer) { +} + +func (r *roffRenderer) DocumentFooter(out *bytes.Buffer) { +} + +func needsBackslash(c byte) bool { + for _, r := range []byte("-_&\\~") { + if c == r { + return true + } + } + return false +} + +func escapeSpecialChars(out *bytes.Buffer, text []byte) { + 
for i := 0; i < len(text); i++ { + // escape initial apostrophe or period + if len(text) >= 1 && (text[0] == '\'' || text[0] == '.') { + out.WriteString("\\&") + } + + // directly copy normal characters + org := i + + for i < len(text) && !needsBackslash(text[i]) { + i++ + } + if i > org { + out.Write(text[org:i]) + } + + // escape a character + if i >= len(text) { + break + } + out.WriteByte('\\') + out.WriteByte(text[i]) + } +} diff --git a/src/margo.sh/vendor/github.com/karrick/godirwalk/README.md b/src/margo.sh/vendor/github.com/karrick/godirwalk/README.md index dfe54066..72c51a5e 100644 --- a/src/margo.sh/vendor/github.com/karrick/godirwalk/README.md +++ b/src/margo.sh/vendor/github.com/karrick/godirwalk/README.md @@ -54,9 +54,9 @@ How does it obtain this performance boost? It does less work to give you nearly the same output. This library calls the same `syscall` functions to do the work, but it makes fewer calls, does not throw away information that it might need, and creates less memory churn -along the way by reusing the same scratch buffer rather than -reallocating a new buffer every time it reads data from the operating -system. +along the way by reusing the same scratch buffer for reading from a +directory rather than reallocating a new buffer every time it reads +file system entry data from the operating system. While traversing a file system directory tree, `filepath.Walk` obtains the list of immediate descendants of a directory, and throws away the @@ -85,31 +85,37 @@ entire `os.FileInfo` data structure, the callback can easiy invoke ##### macOS ```Bash -go test -bench=. +$ go test -bench=. -benchmem goos: darwin goarch: amd64 pkg: github.com/karrick/godirwalk -BenchmarkFilepathWalk-8 1 3001274570 ns/op -BenchmarkGoDirWalk-8 3 465573172 ns/op -BenchmarkFlameGraphFilepathWalk-8 1 6957916936 ns/op -BenchmarkFlameGraphGoDirWalk-8 1 4210582571 ns/op +BenchmarkReadDirnamesStandardLibrary-12 50000 26250 ns/op 10360 B/op 16 allocs/op +BenchmarkReadDirnamesThisLibrary-12 50000 24372 ns/op 5064 B/op 20 allocs/op +BenchmarkFilepathWalk-12 1 1099524875 ns/op 228415912 B/op 416952 allocs/op +BenchmarkGodirwalk-12 2 526754589 ns/op 103110464 B/op 451442 allocs/op +BenchmarkGodirwalkUnsorted-12 3 509219296 ns/op 100751400 B/op 378800 allocs/op +BenchmarkFlameGraphFilepathWalk-12 1 7478618820 ns/op 2284138176 B/op 4169453 allocs/op +BenchmarkFlameGraphGodirwalk-12 1 4977264058 ns/op 1031105328 B/op 4514423 allocs/op PASS -ok github.com/karrick/godirwalk 16.822s +ok github.com/karrick/godirwalk 21.219s ``` ##### Linux ```Bash -go test -bench=. +$ go test -bench=. 
-benchmem goos: linux goarch: amd64 pkg: github.com/karrick/godirwalk -BenchmarkFilepathWalk-12 1 1609189170 ns/op -BenchmarkGoDirWalk-12 5 211336628 ns/op -BenchmarkFlameGraphFilepathWalk-12 1 3968119932 ns/op -BenchmarkFlameGraphGoDirWalk-12 1 2139598998 ns/op +BenchmarkReadDirnamesStandardLibrary-12 100000 15458 ns/op 10360 B/op 16 allocs/op +BenchmarkReadDirnamesThisLibrary-12 100000 14646 ns/op 5064 B/op 20 allocs/op +BenchmarkFilepathWalk-12 2 631034745 ns/op 228210216 B/op 416939 allocs/op +BenchmarkGodirwalk-12 3 358714883 ns/op 102988664 B/op 451437 allocs/op +BenchmarkGodirwalkUnsorted-12 3 355363915 ns/op 100629234 B/op 378796 allocs/op +BenchmarkFlameGraphFilepathWalk-12 1 6086913991 ns/op 2282104720 B/op 4169417 allocs/op +BenchmarkFlameGraphGodirwalk-12 1 3456398824 ns/op 1029886400 B/op 4514373 allocs/op PASS -ok github.com/karrick/godirwalk 9.007s +ok github.com/karrick/godirwalk 19.179s ``` ### It's more correct on Windows than `filepath.Walk` @@ -183,11 +189,21 @@ can be invoked in manner to do so, by setting the The default behavior of this library is to always sort the immediate descendants of a directory prior to visiting each node, just like `filepath.Walk` does. This is usually the desired behavior. However, -this does come at a performance penalty to sort the names when a -directory node has many entries. If a particular use case exists that -does not require sorting the directory's immediate descendants prior -to visiting its nodes, this library will skip the sorting step when -the `Unsorted` parameter is set to true. +this does come at slight performance and memory penalties required to +sort the names when a directory node has many entries. Additionally if +caller specifies `Unsorted` enumeration, reading directories is lazily +performed as the caller consumes entries. If a particular use case +exists that does not require sorting the directory's immediate +descendants prior to visiting its nodes, this library will skip the +sorting step when the `Unsorted` parameter is set to true. + +Here's an interesting read of the potential hazzards of traversing a +file system hierarchy in a non-deterministic order. If you know the +problem you are solving is not affected by the order files are +visited, then I encourage you to use `Unsorted`. Otherwise skip +setting this option. + +[Researchers find bug in Python script may have affected hundreds of studies](https://arstechnica.com/information-technology/2019/10/chemists-discover-cross-platform-python-scripts-not-so-cross-platform/) #### Configurable Post Children Callback diff --git a/src/margo.sh/vendor/github.com/karrick/godirwalk/azure-pipelines.yml b/src/margo.sh/vendor/github.com/karrick/godirwalk/azure-pipelines.yml index 64d1d062..81d5cf7c 100644 --- a/src/margo.sh/vendor/github.com/karrick/godirwalk/azure-pipelines.yml +++ b/src/margo.sh/vendor/github.com/karrick/godirwalk/azure-pipelines.yml @@ -22,7 +22,7 @@ jobs: - task: Go@0 inputs: command: test - arguments: -v ./... + arguments: -race -v ./... displayName: 'Execute Tests' - job: Mac @@ -37,7 +37,7 @@ jobs: - task: Go@0 inputs: command: test - arguments: -v ./... + arguments: -race -v ./... displayName: 'Execute Tests' - job: Windows @@ -52,5 +52,5 @@ jobs: - task: Go@0 inputs: command: test - arguments: -v ./... + arguments: -race -v ./... 
displayName: 'Execute Tests' diff --git a/src/margo.sh/vendor/github.com/karrick/godirwalk/debug_development.go b/src/margo.sh/vendor/github.com/karrick/godirwalk/debug_development.go new file mode 100644 index 00000000..6e1cb0bf --- /dev/null +++ b/src/margo.sh/vendor/github.com/karrick/godirwalk/debug_development.go @@ -0,0 +1,14 @@ +// +build godirwalk_debug + +package godirwalk + +import ( + "fmt" + "os" +) + +// debug formats and prints arguments to stderr for development builds +func debug(f string, a ...interface{}) { + // fmt.Fprintf(os.Stderr, f, a...) + os.Stderr.Write([]byte("godirwalk: " + fmt.Sprintf(f, a...))) +} diff --git a/src/margo.sh/vendor/github.com/karrick/godirwalk/debug_release.go b/src/margo.sh/vendor/github.com/karrick/godirwalk/debug_release.go new file mode 100644 index 00000000..98617873 --- /dev/null +++ b/src/margo.sh/vendor/github.com/karrick/godirwalk/debug_release.go @@ -0,0 +1,6 @@ +// +build !godirwalk_debug + +package godirwalk + +// debug is a no-op for release builds +func debug(_ string, _ ...interface{}) {} diff --git a/src/margo.sh/vendor/github.com/karrick/godirwalk/dirent.go b/src/margo.sh/vendor/github.com/karrick/godirwalk/dirent.go index 419ba2f2..3bee8b26 100644 --- a/src/margo.sh/vendor/github.com/karrick/godirwalk/dirent.go +++ b/src/margo.sh/vendor/github.com/karrick/godirwalk/dirent.go @@ -8,8 +8,8 @@ import ( // Dirent stores the name and file system mode type of discovered file system // entries. type Dirent struct { - name string - modeType os.FileMode + name string // name is the basename of the file system entry. + modeType os.FileMode // modeType is the type of file system entry. } // NewDirent returns a newly initialized Dirent structure, or an error. This @@ -19,16 +19,24 @@ type Dirent struct { // functions in this library that read and walk directories, but is provided, // however, for the occasion when a program needs to create a Dirent. func NewDirent(osPathname string) (*Dirent, error) { - fi, err := os.Lstat(osPathname) + modeType, err := modeType(osPathname) if err != nil { return nil, err } return &Dirent{ name: filepath.Base(osPathname), - modeType: fi.Mode() & os.ModeType, + modeType: modeType, }, nil } +// // dup returns a duplicate of the directory entry. +// func (de Dirent) dup() *Dirent { +// return &Dirent{ +// name: de.name, +// modeType: de.modeType, +// } +// } + // Name returns the basename of the file system entry. func (de Dirent) Name() string { return de.name } @@ -61,6 +69,12 @@ func (de Dirent) IsSymlink() bool { return de.modeType&os.ModeSymlink != 0 } // IsDevice returns true if and only if the Dirent represents a device file. func (de Dirent) IsDevice() bool { return de.modeType&os.ModeDevice != 0 } +// reset releases memory held by entry err and name, and resets mode type to 0. +func (de *Dirent) reset() { + de.name = "" + de.modeType = 0 +} + // Dirents represents a slice of Dirent pointers, which are sortable by // name. This type satisfies the `sort.Interface` interface. 
type Dirents []*Dirent diff --git a/src/margo.sh/vendor/github.com/karrick/godirwalk/go.mod b/src/margo.sh/vendor/github.com/karrick/godirwalk/go.mod index 68668c9f..b54a5431 100644 --- a/src/margo.sh/vendor/github.com/karrick/godirwalk/go.mod +++ b/src/margo.sh/vendor/github.com/karrick/godirwalk/go.mod @@ -1,3 +1 @@ module github.com/karrick/godirwalk - -go 1.12 diff --git a/src/margo.sh/vendor/github.com/karrick/godirwalk/modeType.go b/src/margo.sh/vendor/github.com/karrick/godirwalk/modeType.go new file mode 100644 index 00000000..6427a685 --- /dev/null +++ b/src/margo.sh/vendor/github.com/karrick/godirwalk/modeType.go @@ -0,0 +1,22 @@ +package godirwalk + +import ( + "os" +) + +// modeType returns the mode type of the file system entry identified by +// osPathname by calling os.LStat function, to intentionally not follow symbolic +// links. +// +// Even though os.LStat provides all file mode bits, we want to ensure same +// values returned to caller regardless of whether we obtained file mode bits +// from syscall or stat call. Therefore mask out the additional file mode bits +// that are provided by stat but not by the syscall, so users can rely on their +// values. +func modeType(osPathname string) (os.FileMode, error) { + fi, err := os.Lstat(osPathname) + if err == nil { + return fi.Mode() & os.ModeType, nil + } + return 0, err +} diff --git a/src/margo.sh/vendor/github.com/karrick/godirwalk/modeTypeWithType.go b/src/margo.sh/vendor/github.com/karrick/godirwalk/modeTypeWithType.go index 6fbecf08..7890e776 100644 --- a/src/margo.sh/vendor/github.com/karrick/godirwalk/modeTypeWithType.go +++ b/src/margo.sh/vendor/github.com/karrick/godirwalk/modeTypeWithType.go @@ -8,12 +8,12 @@ import ( "syscall" ) -// modeType converts a syscall defined constant, which is in purview of OS, to a -// constant defined by Go, assumed by this project to be stable. +// modeTypeFromDirent converts a syscall defined constant, which is in purview +// of OS, to a constant defined by Go, assumed by this project to be stable. // // When the syscall constant is not recognized, this function falls back to a // Stat on the file system. -func modeType(de *syscall.Dirent, osDirname, osChildname string) (os.FileMode, error) { +func modeTypeFromDirent(de *syscall.Dirent, osDirname, osBasename string) (os.FileMode, error) { switch de.Type { case syscall.DT_REG: return 0, nil @@ -30,18 +30,8 @@ func modeType(de *syscall.Dirent, osDirname, osChildname string) (os.FileMode, e case syscall.DT_SOCK: return os.ModeSocket, nil default: - // If syscall returned unknown type (e.g., DT_UNKNOWN, DT_WHT), - // then resolve actual mode by getting stat. - fi, err := os.Lstat(filepath.Join(osDirname, osChildname)) - if err != nil { - return 0, err - } - // Even though the stat provided all file mode bits, we want to - // ensure same values returned to caller regardless of whether - // we obtained file mode bits from syscall or stat call. - // Therefore mask out the additional file mode bits that are - // provided by stat but not by the syscall, so users can rely on - // their values. - return fi.Mode() & os.ModeType, nil + // If syscall returned unknown type (e.g., DT_UNKNOWN, DT_WHT), then + // resolve actual mode by reading file information. 
+ return modeType(filepath.Join(osDirname, osBasename)) } } diff --git a/src/margo.sh/vendor/github.com/karrick/godirwalk/modeTypeWithoutType.go b/src/margo.sh/vendor/github.com/karrick/godirwalk/modeTypeWithoutType.go index 1c801d1b..5299392e 100644 --- a/src/margo.sh/vendor/github.com/karrick/godirwalk/modeTypeWithoutType.go +++ b/src/margo.sh/vendor/github.com/karrick/godirwalk/modeTypeWithoutType.go @@ -8,21 +8,11 @@ import ( "syscall" ) -// modeType converts a syscall defined constant, which is in purview of OS, to a -// constant defined by Go, assumed by this project to be stable. +// modeTypeFromDirent converts a syscall defined constant, which is in purview +// of OS, to a constant defined by Go, assumed by this project to be stable. // -// Because some operating system syscall.Dirent structure does not include a -// Type field, fall back on Stat of the file system. -func modeType(_ *syscall.Dirent, osDirname, osChildname string) (os.FileMode, error) { - fi, err := os.Lstat(filepath.Join(osDirname, osChildname)) - if err != nil { - return 0, err - } - // Even though the stat provided all file mode bits, we want to - // ensure same values returned to caller regardless of whether - // we obtained file mode bits from syscall or stat call. - // Therefore mask out the additional file mode bits that are - // provided by stat but not by the syscall, so users can rely on - // their values. - return fi.Mode() & os.ModeType, nil +// Because some operating system syscall.Dirent structures do not include a Type +// field, fall back on Stat of the file system. +func modeTypeFromDirent(_ *syscall.Dirent, osDirname, osBasename string) (os.FileMode, error) { + return modeType(filepath.Join(osDirname, osBasename)) } diff --git a/src/margo.sh/vendor/github.com/karrick/godirwalk/readdir.go b/src/margo.sh/vendor/github.com/karrick/godirwalk/readdir.go index 0c191084..33cdbe34 100644 --- a/src/margo.sh/vendor/github.com/karrick/godirwalk/readdir.go +++ b/src/margo.sh/vendor/github.com/karrick/godirwalk/readdir.go @@ -5,8 +5,9 @@ package godirwalk // descendant of the specified directory. If the specified directory is a // symbolic link, it will be resolved. // -// If an optional scratch buffer is provided that is at least one page of -// memory, it will be used when reading directory entries from the file system. +// The second parameter was an optional scratch buffer, but is no longer used +// because ReadDirents invokes Scanner to enumerate the contents of the +// directory. // // children, err := godirwalk.ReadDirents(osDirname, nil) // if err != nil { @@ -16,24 +17,37 @@ package godirwalk // for _, child := range children { // fmt.Printf("%s %s\n", child.ModeType, child.Name) // } -func ReadDirents(osDirname string, scratchBuffer []byte) (Dirents, error) { - // Invokes build flag enabled version of this function. - return readdirents(osDirname, scratchBuffer) +func ReadDirents(osDirname string, _ []byte) (Dirents, error) { + var entries Dirents + scanner, err := NewScanner(osDirname) + if err != nil { + return nil, err + } + for scanner.Scan() { + if dirent, err := scanner.Dirent(); err == nil { + entries = append(entries, dirent) + } + } + if err = scanner.Err(); err != nil { + return nil, err + } + return entries, nil } // ReadDirnames returns a slice of strings, representing the immediate // descendants of the specified directory. If the specified directory is a // symbolic link, it will be resolved. 
// -// If an optional scratch buffer is provided that is at least one page of -// memory, it will be used when reading directory entries from the file system. +// The second parameter was an optional scratch buffer, but is no longer used +// because ReadDirents invokes Scanner to enumerate the contents of the +// directory. // // Note that this function, depending on operating system, may or may not invoke // the ReadDirents function, in order to prepare the list of immediate // descendants. Therefore, if your program needs both the names and the file // system mode types of descendants, it will always be faster to invoke // ReadDirents directly, rather than calling this function, then looping over -// the results and calling os.Stat for each child. +// the results and calling os.Stat or os.LStat for each entry. // // children, err := godirwalk.ReadDirnames(osDirname, nil) // if err != nil { @@ -43,7 +57,17 @@ func ReadDirents(osDirname string, scratchBuffer []byte) (Dirents, error) { // for _, child := range children { // fmt.Printf("%s\n", child) // } -func ReadDirnames(osDirname string, scratchBuffer []byte) ([]string, error) { - // Invokes build flag enabled version of this function. - return readdirnames(osDirname, scratchBuffer) +func ReadDirnames(osDirname string, _ []byte) ([]string, error) { + var entries []string + scanner, err := NewScanner(osDirname) + if err != nil { + return nil, err + } + for scanner.Scan() { + entries = append(entries, scanner.Name()) + } + if err = scanner.Err(); err != nil { + return nil, err + } + return entries, nil } diff --git a/src/margo.sh/vendor/github.com/karrick/godirwalk/readdir_unix.go b/src/margo.sh/vendor/github.com/karrick/godirwalk/readdir_unix.go deleted file mode 100644 index 3bedc651..00000000 --- a/src/margo.sh/vendor/github.com/karrick/godirwalk/readdir_unix.go +++ /dev/null @@ -1,115 +0,0 @@ -// +build !windows - -package godirwalk - -import ( - "os" - "syscall" - "unsafe" -) - -func readdirents(osDirname string, scratchBuffer []byte) (Dirents, error) { - dh, err := os.Open(osDirname) - if err != nil { - return nil, err - } - fd := int(dh.Fd()) - - if len(scratchBuffer) < MinimumScratchBufferSize { - scratchBuffer = make([]byte, DefaultScratchBufferSize) - } - - var entries Dirents - var de *syscall.Dirent - - for { - n, err := syscall.ReadDirent(fd, scratchBuffer) - if err != nil { - _ = dh.Close() // ignore potential error returned by Close - return nil, err - } - if n <= 0 { - break // end of directory reached - } - // Loop over the bytes returned by reading the directory entries. - buf := scratchBuffer[:n] - for len(buf) > 0 { - de = (*syscall.Dirent)(unsafe.Pointer(&buf[0])) // point entry to first syscall.Dirent in buffer - buf = buf[de.Reclen:] // advance buffer for next iteration through loop - - if inoFromDirent(de) == 0 { - continue // this item has been deleted, but its entry not yet removed from directory listing - } - - nameSlice := nameFromDirent(de) - namlen := len(nameSlice) - if (namlen == 0) || (namlen == 1 && nameSlice[0] == '.') || (namlen == 2 && nameSlice[0] == '.' 
&& nameSlice[1] == '.') { - continue // skip unimportant entries - } - osChildname := string(nameSlice) - - mode, err := modeType(de, osDirname, osChildname) - if err != nil { - _ = dh.Close() // ignore potential error returned by Close - return nil, err - } - - entries = append(entries, &Dirent{name: osChildname, modeType: mode}) - } - } - - if err = dh.Close(); err != nil { - return nil, err - } - return entries, nil -} - -func readdirnames(osDirname string, scratchBuffer []byte) ([]string, error) { - dh, err := os.Open(osDirname) - if err != nil { - return nil, err - } - fd := int(dh.Fd()) - - if len(scratchBuffer) < MinimumScratchBufferSize { - scratchBuffer = make([]byte, DefaultScratchBufferSize) - } - - var entries []string - var de *syscall.Dirent - - for { - n, err := syscall.ReadDirent(fd, scratchBuffer) - if err != nil { - _ = dh.Close() // ignore potential error returned by Close - return nil, err - } - if n <= 0 { - break // end of directory reached - } - // Loop over the bytes returned by reading the directory entries. - buf := scratchBuffer[:n] - for len(buf) > 0 { - de = (*syscall.Dirent)(unsafe.Pointer(&buf[0])) // point entry to first syscall.Dirent in buffer - buf = buf[de.Reclen:] // advance buffer for next iteration through loop - - if inoFromDirent(de) == 0 { - continue // this item has been deleted, but its entry not yet removed from directory listing - } - - nameSlice := nameFromDirent(de) - namlen := len(nameSlice) - if (namlen == 0) || (namlen == 1 && nameSlice[0] == '.') || (namlen == 2 && nameSlice[0] == '.' && nameSlice[1] == '.') { - continue // skip unimportant entries - } - osChildname := string(nameSlice) - - entries = append(entries, osChildname) - } - } - - if err = dh.Close(); err != nil { - return nil, err - } - return entries, nil -} diff --git a/src/margo.sh/vendor/github.com/karrick/godirwalk/readdir_windows.go b/src/margo.sh/vendor/github.com/karrick/godirwalk/readdir_windows.go deleted file mode 100644 index 0327bdfb..00000000 --- a/src/margo.sh/vendor/github.com/karrick/godirwalk/readdir_windows.go +++ /dev/null @@ -1,51 +0,0 @@ -package godirwalk - -// The functions in this file are mere wrappers of what is already provided by -// standard library, in order to provide the same API as this library provides. -// -// The scratch buffer parameter in these functions is the underscore because -// presently that parameter is ignored by the functions for this architecture. -// -// Please send PR or link to article if you know of a more performant way of -// enumerating directory contents and mode types on Windows. 
- -import "os" - -func readdirents(osDirname string, _ []byte) (Dirents, error) { - dh, err := os.Open(osDirname) - if err != nil { - return nil, err - } - - fileinfos, err := dh.Readdir(0) - if er := dh.Close(); err == nil { - err = er - } - if err != nil { - return nil, err - } - - entries := make(Dirents, len(fileinfos)) - for i, info := range fileinfos { - entries[i] = &Dirent{name: info.Name(), modeType: info.Mode() & os.ModeType} - } - - return entries, nil -} - -func readdirnames(osDirname string, _ []byte) ([]string, error) { - dh, err := os.Open(osDirname) - if err != nil { - return nil, err - } - - entries, err := dh.Readdirnames(0) - if er := dh.Close(); err == nil { - err = er - } - if err != nil { - return nil, err - } - - return entries, nil -} diff --git a/src/margo.sh/vendor/github.com/karrick/godirwalk/scandir_unix.go b/src/margo.sh/vendor/github.com/karrick/godirwalk/scandir_unix.go new file mode 100644 index 00000000..f9a4f7ea --- /dev/null +++ b/src/margo.sh/vendor/github.com/karrick/godirwalk/scandir_unix.go @@ -0,0 +1,140 @@ +// +build !windows + +package godirwalk + +import ( + "io" + "os" + "syscall" + "unsafe" +) + +// MinimumScratchBufferSize specifies the minimum size of the scratch buffer +// that Walk, ReadDirents, ReadDirnames, and Scandir will use when reading file +// entries from the operating system. It is initialized to the result from +// calling `os.Getpagesize()` during program startup. +var MinimumScratchBufferSize = os.Getpagesize() + +// Scanner is an iterator to enumerate the contents of a directory. +type Scanner struct { + scratchBuffer []byte // read directory bytes from file system into this buffer + workBuffer []byte // points into scratchBuffer, from which we chunk out directory entries + osDirname string + childName string + err error // err is the error associated with scanning directory + statErr error // statErr is any error return while attempting to stat an entry + dh *os.File // used to close directory after done reading + de *Dirent // most recently decoded directory entry + sde *syscall.Dirent + fd int // file descriptor used to read entries from directory +} + +// NewScanner returns a new directory Scanner. +func NewScanner(osDirname string) (*Scanner, error) { + dh, err := os.Open(osDirname) + if err != nil { + return nil, err + } + scanner := &Scanner{ + scratchBuffer: make([]byte, MinimumScratchBufferSize), + osDirname: osDirname, + dh: dh, + fd: int(dh.Fd()), + } + return scanner, nil +} + +// Dirent returns the current directory entry while scanning a directory. +func (s *Scanner) Dirent() (*Dirent, error) { + if s.de == nil { + s.de = &Dirent{name: s.childName} + s.de.modeType, s.statErr = modeTypeFromDirent(s.sde, s.osDirname, s.childName) + } + return s.de, s.statErr +} + +// done is called when directory scanner unable to continue, with either the +// triggering error, or nil when there are simply no more entries to read from +// the directory. +func (s *Scanner) done(err error) { + if s.dh == nil { + return + } + cerr := s.dh.Close() + s.dh = nil + + if err == nil { + s.err = cerr + } else { + s.err = err + } + + s.osDirname, s.childName = "", "" + s.scratchBuffer, s.workBuffer = nil, nil + s.statErr, s.de, s.sde = nil, nil, nil + s.fd = 0 +} + +// Err returns the error associated with scanning a directory. +func (s *Scanner) Err() error { + s.done(s.err) + if s.err == io.EOF { + return nil + } + return s.err +} + +// Name returns the name of the current directory entry while scanning a +// directory. 
+func (s *Scanner) Name() string { return s.childName } + +// Scan potentially reads and then decodes the next directory entry from the +// file system. +// +// When it returns false, this releases resources used by the Scanner then +// returns any error associated with closing the file system directory resource. +func (s *Scanner) Scan() bool { + if s.err != nil { + return false + } + + for { + // When the work buffer has nothing remaining to decode, we need to load + // more data from disk. + if len(s.workBuffer) == 0 { + n, err := syscall.ReadDirent(s.fd, s.scratchBuffer) + if err != nil { + s.done(err) + return false + } + if n <= 0 { // end of directory + s.done(io.EOF) + return false + } + s.workBuffer = s.scratchBuffer[:n] // trim work buffer to number of bytes read + } + + // Loop until we have a usable file system entry, or we run out of data + // in the work buffer. + for len(s.workBuffer) > 0 { + s.sde = (*syscall.Dirent)(unsafe.Pointer(&s.workBuffer[0])) // point entry to first syscall.Dirent in buffer + s.workBuffer = s.workBuffer[s.sde.Reclen:] // advance buffer for next iteration through loop + + if inoFromDirent(s.sde) == 0 { + continue // inode set to 0 indicates an entry that was marked as deleted + } + + nameSlice := nameFromDirent(s.sde) + namlen := len(nameSlice) + if namlen == 0 || (nameSlice[0] == '.' && (namlen == 1 || (namlen == 2 && nameSlice[1] == '.'))) { + continue + } + + s.de = nil + s.childName = string(nameSlice) + return true + } + // No more data in the work buffer, so loop around in the outside loop + // to fetch more data. + } +} diff --git a/src/margo.sh/vendor/github.com/karrick/godirwalk/scandir_windows.go b/src/margo.sh/vendor/github.com/karrick/godirwalk/scandir_windows.go new file mode 100644 index 00000000..c91e2797 --- /dev/null +++ b/src/margo.sh/vendor/github.com/karrick/godirwalk/scandir_windows.go @@ -0,0 +1,92 @@ +// +build windows + +package godirwalk + +import ( + "fmt" + "io" + "os" +) + +// Scanner is an iterator to enumerate the contents of a directory. +type Scanner struct { + osDirname string + dh *os.File // dh is handle to open directory + de *Dirent + err error // err is the error associated with scanning directory +} + +// NewScanner returns a new directory Scanner. +func NewScanner(osDirname string) (*Scanner, error) { + dh, err := os.Open(osDirname) + if err != nil { + return nil, err + } + scanner := &Scanner{ + osDirname: osDirname, + dh: dh, + } + return scanner, nil +} + +// Dirent returns the current directory entry while scanning a directory. +func (s *Scanner) Dirent() (*Dirent, error) { return s.de, nil } + +// done is called when directory scanner unable to continue, with either the +// triggering error, or nil when there are simply no more entries to read from +// the directory. +func (s *Scanner) done(err error) { + if s.dh == nil { + return + } + cerr := s.dh.Close() + s.dh = nil + + if err == nil { + s.err = cerr + } else { + s.err = err + } + + s.osDirname = "" + s.de = nil +} + +// Err returns the error associated with scanning a directory. +func (s *Scanner) Err() error { + s.done(s.err) + if s.err == io.EOF { + return nil + } + return s.err +} + +// Name returns the name of the current directory entry while scanning a +// directory. +func (s *Scanner) Name() string { return s.de.name } + +// Scan potentially reads and then decodes the next directory entry from the +// file system. 
+func (s *Scanner) Scan() bool { + if s.err != nil { + return false + } + + fileinfos, err := s.dh.Readdir(1) + if err != nil { + s.err = err + return false + } + + if l := len(fileinfos); l != 1 { + s.err = fmt.Errorf("expected a single entry rather than %d", l) + return false + } + + fi := fileinfos[0] + s.de = &Dirent{ + name: fi.Name(), + modeType: fi.Mode() & os.ModeType, + } + return true +} diff --git a/src/margo.sh/vendor/github.com/karrick/godirwalk/scanner.go b/src/margo.sh/vendor/github.com/karrick/godirwalk/scanner.go new file mode 100644 index 00000000..1abf3833 --- /dev/null +++ b/src/margo.sh/vendor/github.com/karrick/godirwalk/scanner.go @@ -0,0 +1,44 @@ +package godirwalk + +import "sort" + +type scanner interface { + Dirent() (*Dirent, error) + Err() error + Name() string + Scan() bool +} + +// sortedScanner enumerates through a directory's contents after reading the +// entire directory and sorting the entries by name. Used by walk to simplify +// its implementation. +type sortedScanner struct { + dd []*Dirent + de *Dirent +} + +func newSortedScanner(osPathname string) (*sortedScanner, error) { + deChildren, err := ReadDirents(osPathname, nil) + if err != nil { + return nil, err + } + sort.Sort(deChildren) + return &sortedScanner{dd: deChildren}, nil +} + +func (d *sortedScanner) Err() error { + d.dd, d.de = nil, nil + return nil +} + +func (d *sortedScanner) Dirent() (*Dirent, error) { return d.de, nil } + +func (d *sortedScanner) Name() string { return d.de.name } + +func (d *sortedScanner) Scan() bool { + if len(d.dd) > 0 { + d.de, d.dd = d.dd[0], d.dd[1:] + return true + } + return false +} diff --git a/src/margo.sh/vendor/github.com/karrick/godirwalk/walk.go b/src/margo.sh/vendor/github.com/karrick/godirwalk/walk.go index 10aa2b4e..152527e1 100644 --- a/src/margo.sh/vendor/github.com/karrick/godirwalk/walk.go +++ b/src/margo.sh/vendor/github.com/karrick/godirwalk/walk.go @@ -5,26 +5,13 @@ import ( "fmt" "os" "path/filepath" - "sort" ) -// DefaultScratchBufferSize specifies the size of the scratch buffer that will -// be allocated by Walk, ReadDirents, or ReadDirnames when a scratch buffer is -// not provided or the scratch buffer that is provided is smaller than -// MinimumScratchBufferSize bytes. This may seem like a large value; however, -// when a program intends to enumerate large directories, having a larger -// scratch buffer results in fewer operating system calls. -const DefaultScratchBufferSize = 64 * 1024 - -// MinimumScratchBufferSize specifies the minimum size of the scratch buffer -// that Walk, ReadDirents, and ReadDirnames will use when reading file entries -// from the operating system. It is initialized to the result from calling -// `os.Getpagesize()` during program startup. -var MinimumScratchBufferSize int - -func init() { - MinimumScratchBufferSize = os.Getpagesize() -} +// DefaultScratchBuffer is a deprecated config parameter, whose usage was +// obsoleted by the introduction of the Scanner struct, and migrating +// ReadDirents, ReadDirnames, and Walk to use Scanner for enumerating directory +// contents. +const DefaultScratchBufferSize = 0 // Options provide parameters for how the Walk function operates. type Options struct { @@ -80,13 +67,17 @@ type Options struct { // processed. PostChildrenCallback WalkFunc - // ScratchBuffer is an optional byte slice to use as a scratch buffer for - // Walk to use when reading directory entries, to reduce amount of garbage - // generation. Not all architectures take advantage of the scratch - // buffer. 
If omitted or the provided buffer has fewer bytes than - // MinimumScratchBufferSize, then a buffer with DefaultScratchBufferSize - // bytes will be created and used once per Walk invocation. + // ScratchBuffer is a deprecated config parameter, whose usage was obsoleted + // by the introduction of the Scanner struct, and migrating ReadDirents, + // ReadDirnames, and Walk to use Scanner for enumerating directory contents. ScratchBuffer []byte + + // AllowNonDirectory causes Walk to bypass the check that ensures it is + // being called on a directory node, or when FollowSymbolicLinks is true, a + // symbolic link that points to a directory. Leave this value false to have + // Walk return an error when called on a non-directory. Set this true to + // have Walk run even when called on a non-directory node. + AllowNonDirectory bool } // ErrorAction defines a set of actions the Walk function could take based on @@ -135,9 +126,7 @@ type WalkFunc func(osPathname string, directoryEntry *Dirent) error // Walk walks the file tree rooted at the specified directory, calling the // specified callback function for each file system node in the tree, including -// root, symbolic links, and other node types. The nodes are walked in lexical -// order, which makes the output deterministic but means that for very large -// directories this function can be inefficient. +// root, symbolic links, and other node types. // // This function is often much faster than filepath.Walk because it does not // invoke os.Stat for every node it encounters, but rather obtains the file @@ -175,8 +164,8 @@ type WalkFunc func(osPathname string, directoryEntry *Dirent) error // } // } func Walk(pathname string, options *Options) error { - if options.Callback == nil { - return errors.New("cannot walk without a specified Callback function") + if options == nil || options.Callback == nil { + return errors.New("cannot walk without non-nil options and Callback function") } pathname = filepath.Clean(pathname) @@ -186,21 +175,23 @@ func Walk(pathname string, options *Options) error { if options.FollowSymbolicLinks { fi, err = os.Stat(pathname) - if err != nil { - return err - } } else { fi, err = os.Lstat(pathname) - if err != nil { - return err - } + } + if err != nil { + return err } mode := fi.Mode() - if mode&os.ModeDir == 0 { + if !options.AllowNonDirectory && mode&os.ModeDir == 0 { return fmt.Errorf("cannot Walk non-directory: %s", pathname) } + dirent := &Dirent{ + name: filepath.Base(pathname), + modeType: mode & os.ModeType, + } + // If ErrorCallback is nil, set to a default value that halts the walk // process on all operating system errors. This is done to allow error // handling to be more succinct in the walk code. 
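To make the walk.go Options changes above concrete: ScratchBuffer is now ignored, Unsorted switches enumeration to the lazy Scanner, and the new AllowNonDirectory field lets Walk start from a non-directory node. A minimal, self-contained sketch, with flag choices that are illustrative rather than a recommendation:

```go
package main

import (
	"fmt"
	"os"

	"github.com/karrick/godirwalk"
)

func main() {
	err := godirwalk.Walk("/tmp", &godirwalk.Options{
		Callback: func(osPathname string, de *godirwalk.Dirent) error {
			// The Dirent provides the basename and mode type without an
			// extra os.Stat call per entry.
			fmt.Println(osPathname, de.Name())
			return nil
		},
		// Unsorted streams entries from the new Scanner as they are read,
		// instead of reading and sorting a whole directory first.
		Unsorted: true,
		// AllowNonDirectory (added above) lets Walk run even when the root
		// is not a directory.
		AllowNonDirectory: true,
	})
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
}
```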
@@ -208,20 +199,10 @@ func Walk(pathname string, options *Options) error { options.ErrorCallback = defaultErrorCallback } - if len(options.ScratchBuffer) < MinimumScratchBufferSize { - options.ScratchBuffer = make([]byte, DefaultScratchBufferSize) - } - - dirent := &Dirent{ - name: filepath.Base(pathname), - modeType: mode & os.ModeType, - } - - err = walk(pathname, dirent, options) - if err == filepath.SkipDir { - return nil // silence SkipDir for top level + if err = walk(pathname, dirent, options); err != filepath.SkipDir { + return err } - return err + return nil // silence SkipDir for top level } // defaultErrorCallback always returns Halt because if the upstream code did not @@ -263,7 +244,19 @@ func walk(osPathname string, dirent *Dirent, options *Options) error { // If get here, then specified pathname refers to a directory or a // symbolic link to a directory. - deChildren, err := ReadDirents(osPathname, options.ScratchBuffer) + + var ds scanner + + if options.Unsorted { + // When upstream does not request a sorted iteration, it's more memory + // efficient to read a single child at a time from the file system. + ds, err = NewScanner(osPathname) + } else { + // When upstream wants a sorted iteration, we must read the entire + // directory and sort through the child names, and then iterate on each + // child. + ds, err = newSortedScanner(osPathname) + } if err != nil { if action := options.ErrorCallback(osPathname, err); action == SkipNode { return nil @@ -271,13 +264,17 @@ func walk(osPathname string, dirent *Dirent, options *Options) error { return err } - if !options.Unsorted { - sort.Sort(deChildren) // sort children entries unless upstream says to leave unsorted - } - - for _, deChild := range deChildren { + for ds.Scan() { + deChild, err := ds.Dirent() osChildname := filepath.Join(osPathname, deChild.name) + if err != nil { + if action := options.ErrorCallback(osChildname, err); action == SkipNode { + return nil + } + return err + } err = walk(osChildname, deChild, options) + debug("osChildname: %q; error: %v\n", osChildname, err) if err == nil { continue } @@ -300,6 +297,9 @@ func walk(osPathname string, dirent *Dirent, options *Options) error { } // continue processing remaining siblings } + if err = ds.Err(); err != nil { + return err + } if options.PostChildrenCallback == nil { return nil diff --git a/src/margo.sh/vendor/github.com/russross/blackfriday/.gitignore b/src/margo.sh/vendor/github.com/russross/blackfriday/.gitignore new file mode 100644 index 00000000..75623dcc --- /dev/null +++ b/src/margo.sh/vendor/github.com/russross/blackfriday/.gitignore @@ -0,0 +1,8 @@ +*.out +*.swp +*.8 +*.6 +_obj +_test* +markdown +tags diff --git a/src/margo.sh/vendor/github.com/russross/blackfriday/.travis.yml b/src/margo.sh/vendor/github.com/russross/blackfriday/.travis.yml new file mode 100644 index 00000000..2f3351d7 --- /dev/null +++ b/src/margo.sh/vendor/github.com/russross/blackfriday/.travis.yml @@ -0,0 +1,17 @@ +sudo: false +language: go +go: + - "1.9.x" + - "1.10.x" + - tip +matrix: + fast_finish: true + allow_failures: + - go: tip +install: + - # Do nothing. This is needed to prevent default install action "go get -t -v ./..." from happening here (we want it to happen inside script step). +script: + - go get -t -v ./... + - diff -u <(echo -n) <(gofmt -d -s .) + - go tool vet . + - go test -v -race ./... 
diff --git a/src/margo.sh/vendor/github.com/russross/blackfriday/LICENSE.txt b/src/margo.sh/vendor/github.com/russross/blackfriday/LICENSE.txt new file mode 100644 index 00000000..2885af36 --- /dev/null +++ b/src/margo.sh/vendor/github.com/russross/blackfriday/LICENSE.txt @@ -0,0 +1,29 @@ +Blackfriday is distributed under the Simplified BSD License: + +> Copyright © 2011 Russ Ross +> All rights reserved. +> +> Redistribution and use in source and binary forms, with or without +> modification, are permitted provided that the following conditions +> are met: +> +> 1. Redistributions of source code must retain the above copyright +> notice, this list of conditions and the following disclaimer. +> +> 2. Redistributions in binary form must reproduce the above +> copyright notice, this list of conditions and the following +> disclaimer in the documentation and/or other materials provided with +> the distribution. +> +> THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +> "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +> LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS +> FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE +> COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, +> INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, +> BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +> LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +> CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT +> LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN +> ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +> POSSIBILITY OF SUCH DAMAGE. diff --git a/src/margo.sh/vendor/github.com/russross/blackfriday/README.md b/src/margo.sh/vendor/github.com/russross/blackfriday/README.md new file mode 100644 index 00000000..3c62e137 --- /dev/null +++ b/src/margo.sh/vendor/github.com/russross/blackfriday/README.md @@ -0,0 +1,369 @@ +Blackfriday +[![Build Status][BuildSVG]][BuildURL] +[![Godoc][GodocV2SVG]][GodocV2URL] +=========== + +Blackfriday is a [Markdown][1] processor implemented in [Go][2]. It +is paranoid about its input (so you can safely feed it user-supplied +data), it is fast, it supports common extensions (tables, smart +punctuation substitutions, etc.), and it is safe for all utf-8 +(unicode) input. + +HTML output is currently supported, along with Smartypants +extensions. + +It started as a translation from C of [Sundown][3]. + + +Installation +------------ + +Blackfriday is compatible with any modern Go release. With Go and git installed: + + go get -u gopkg.in/russross/blackfriday.v2 + +will download, compile, and install the package into your `$GOPATH` directory +hierarchy. + + +Versions +-------- + +Currently maintained and recommended version of Blackfriday is `v2`. It's being +developed on its own branch: https://github.com/russross/blackfriday/tree/v2 and the +documentation is available at +https://godoc.org/gopkg.in/russross/blackfriday.v2. + +It is `go get`-able via [gopkg.in][6] at `gopkg.in/russross/blackfriday.v2`, +but we highly recommend using package management tool like [dep][7] or +[Glide][8] and make use of semantic versioning. With package management you +should import `github.com/russross/blackfriday` and specify that you're using +version 2.0.0. 
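As a concrete companion to the install and versioning notes above, here is a minimal complete program using the gopkg.in v2 import path that the README recommends; the sample Markdown input is illustrative only, and note that the copy vendored in this tree is v1, imported as github.com/russross/blackfriday.

```go
package main

import (
	"fmt"

	blackfriday "gopkg.in/russross/blackfriday.v2"
)

func main() {
	// Render a small document with the default extension set.
	input := []byte("# Hello\n\nThis is *Blackfriday* v2.\n")
	fmt.Printf("%s", blackfriday.Run(input))
}
```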
+ +Version 2 offers a number of improvements over v1: + +* Cleaned up API +* A separate call to [`Parse`][4], which produces an abstract syntax tree for + the document +* Latest bug fixes +* Flexibility to easily add your own rendering extensions + +Potential drawbacks: + +* Our benchmarks show v2 to be slightly slower than v1. Currently in the + ballpark of around 15%. +* API breakage. If you can't afford modifying your code to adhere to the new API + and don't care too much about the new features, v2 is probably not for you. +* Several bug fixes are trailing behind and still need to be forward-ported to + v2. See issue [#348](https://github.com/russross/blackfriday/issues/348) for + tracking. + +If you are still interested in the legacy `v1`, you can import it from +`github.com/russross/blackfriday`. Documentation for the legacy v1 can be found +here: https://godoc.org/github.com/russross/blackfriday + +### Known issue with `dep` + +There is a known problem with using Blackfriday v1 _transitively_ and `dep`. +Currently `dep` prioritizes semver versions over anything else, and picks the +latest one, plus it does not apply a `[[constraint]]` specifier to transitively +pulled in packages. So if you're using something that uses Blackfriday v1, but +that something does not use `dep` yet, you will get Blackfriday v2 pulled in and +your first dependency will fail to build. + +There are couple of fixes for it, documented here: +https://github.com/golang/dep/blob/master/docs/FAQ.md#how-do-i-constrain-a-transitive-dependencys-version + +Meanwhile, `dep` team is working on a more general solution to the constraints +on transitive dependencies problem: https://github.com/golang/dep/issues/1124. + + +Usage +----- + +### v1 + +For basic usage, it is as simple as getting your input into a byte +slice and calling: + + output := blackfriday.MarkdownBasic(input) + +This renders it with no extensions enabled. To get a more useful +feature set, use this instead: + + output := blackfriday.MarkdownCommon(input) + +### v2 + +For the most sensible markdown processing, it is as simple as getting your input +into a byte slice and calling: + +```go +output := blackfriday.Run(input) +``` + +Your input will be parsed and the output rendered with a set of most popular +extensions enabled. If you want the most basic feature set, corresponding with +the bare Markdown specification, use: + +```go +output := blackfriday.Run(input, blackfriday.WithNoExtensions()) +``` + +### Sanitize untrusted content + +Blackfriday itself does nothing to protect against malicious content. If you are +dealing with user-supplied markdown, we recommend running Blackfriday's output +through HTML sanitizer such as [Bluemonday][5]. + +Here's an example of simple usage of Blackfriday together with Bluemonday: + +```go +import ( + "github.com/microcosm-cc/bluemonday" + "gopkg.in/russross/blackfriday.v2" +) + +// ... +unsafe := blackfriday.Run(input) +html := bluemonday.UGCPolicy().SanitizeBytes(unsafe) +``` + +### Custom options, v1 + +If you want to customize the set of options, first get a renderer +(currently only the HTML output engine), then use it to +call the more general `Markdown` function. For examples, see the +implementations of `MarkdownBasic` and `MarkdownCommon` in +`markdown.go`. + +### Custom options, v2 + +If you want to customize the set of options, use `blackfriday.WithExtensions`, +`blackfriday.WithRenderer` and `blackfriday.WithRefOverride`. 
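To make the customization advice above concrete, here is a short sketch combining the v2 options it names, WithExtensions and WithRenderer. The particular extension and HTML flag choices are illustrative rather than prescriptive, and the snippet assumes the gopkg.in/russross/blackfriday.v2 import path rather than the v1 copy vendored in this tree.

```go
package main

import (
	"fmt"

	blackfriday "gopkg.in/russross/blackfriday.v2"
)

func main() {
	input := []byte("# Title\n\nA footnote.[^1]\n\n[^1]: The footnote text.\n")

	// Choose an explicit extension set instead of relying on the defaults.
	extensions := blackfriday.CommonExtensions | blackfriday.Footnotes

	// Configure the built-in HTML renderer.
	renderer := blackfriday.NewHTMLRenderer(blackfriday.HTMLRendererParameters{
		Flags: blackfriday.CommonHTMLFlags | blackfriday.CompletePage,
	})

	output := blackfriday.Run(input,
		blackfriday.WithExtensions(extensions),
		blackfriday.WithRenderer(renderer),
	)
	fmt.Printf("%s", output)
}
```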
+ +### `blackfriday-tool` + +You can also check out `blackfriday-tool` for a more complete example +of how to use it. Download and install it using: + + go get github.com/russross/blackfriday-tool + +This is a simple command-line tool that allows you to process a +markdown file using a standalone program. You can also browse the +source directly on github if you are just looking for some example +code: + +* + +Note that if you have not already done so, installing +`blackfriday-tool` will be sufficient to download and install +blackfriday in addition to the tool itself. The tool binary will be +installed in `$GOPATH/bin`. This is a statically-linked binary that +can be copied to wherever you need it without worrying about +dependencies and library versions. + +### Sanitized anchor names + +Blackfriday includes an algorithm for creating sanitized anchor names +corresponding to a given input text. This algorithm is used to create +anchors for headings when `EXTENSION_AUTO_HEADER_IDS` is enabled. The +algorithm has a specification, so that other packages can create +compatible anchor names and links to those anchors. + +The specification is located at https://godoc.org/github.com/russross/blackfriday#hdr-Sanitized_Anchor_Names. + +[`SanitizedAnchorName`](https://godoc.org/github.com/russross/blackfriday#SanitizedAnchorName) exposes this functionality, and can be used to +create compatible links to the anchor names generated by blackfriday. +This algorithm is also implemented in a small standalone package at +[`github.com/shurcooL/sanitized_anchor_name`](https://godoc.org/github.com/shurcooL/sanitized_anchor_name). It can be useful for clients +that want a small package and don't need full functionality of blackfriday. + + +Features +-------- + +All features of Sundown are supported, including: + +* **Compatibility**. The Markdown v1.0.3 test suite passes with + the `--tidy` option. Without `--tidy`, the differences are + mostly in whitespace and entity escaping, where blackfriday is + more consistent and cleaner. + +* **Common extensions**, including table support, fenced code + blocks, autolinks, strikethroughs, non-strict emphasis, etc. + +* **Safety**. Blackfriday is paranoid when parsing, making it safe + to feed untrusted user input without fear of bad things + happening. The test suite stress tests this and there are no + known inputs that make it crash. If you find one, please let me + know and send me the input that does it. + + NOTE: "safety" in this context means *runtime safety only*. In order to + protect yourself against JavaScript injection in untrusted content, see + [this example](https://github.com/russross/blackfriday#sanitize-untrusted-content). + +* **Fast processing**. It is fast enough to render on-demand in + most web applications without having to cache the output. + +* **Thread safety**. You can run multiple parsers in different + goroutines without ill effect. There is no dependence on global + shared state. + +* **Minimal dependencies**. Blackfriday only depends on standard + library packages in Go. The source code is pretty + self-contained, so it is easy to add to any project, including + Google App Engine projects. + +* **Standards compliant**. Output successfully validates using the + W3C validation tool for HTML 4.01 and XHTML 1.0 Transitional. + + +Extensions +---------- + +In addition to the standard markdown syntax, this package +implements the following extensions: + +* **Intra-word emphasis supression**. 
The `_` character is + commonly used inside words when discussing code, so having + markdown interpret it as an emphasis command is usually the + wrong thing. Blackfriday lets you treat all emphasis markers as + normal characters when they occur inside a word. + +* **Tables**. Tables can be created by drawing them in the input + using a simple syntax: + + ``` + Name | Age + --------|------ + Bob | 27 + Alice | 23 + ``` + +* **Fenced code blocks**. In addition to the normal 4-space + indentation to mark code blocks, you can explicitly mark them + and supply a language (to make syntax highlighting simple). Just + mark it like this: + + ``` go + func getTrue() bool { + return true + } + ``` + + You can use 3 or more backticks to mark the beginning of the + block, and the same number to mark the end of the block. + + To preserve classes of fenced code blocks while using the bluemonday + HTML sanitizer, use the following policy: + + ``` go + p := bluemonday.UGCPolicy() + p.AllowAttrs("class").Matching(regexp.MustCompile("^language-[a-zA-Z0-9]+$")).OnElements("code") + html := p.SanitizeBytes(unsafe) + ``` + +* **Definition lists**. A simple definition list is made of a single-line + term followed by a colon and the definition for that term. + + Cat + : Fluffy animal everyone likes + + Internet + : Vector of transmission for pictures of cats + + Terms must be separated from the previous definition by a blank line. + +* **Footnotes**. A marker in the text that will become a superscript number; + a footnote definition that will be placed in a list of footnotes at the + end of the document. A footnote looks like this: + + This is a footnote.[^1] + + [^1]: the footnote text. + +* **Autolinking**. Blackfriday can find URLs that have not been + explicitly marked as links and turn them into links. + +* **Strikethrough**. Use two tildes (`~~`) to mark text that + should be crossed out. + +* **Hard line breaks**. With this extension enabled (it is off by + default in the `MarkdownBasic` and `MarkdownCommon` convenience + functions), newlines in the input translate into line breaks in + the output. + +* **Smart quotes**. Smartypants-style punctuation substitution is + supported, turning normal double- and single-quote marks into + curly quotes, etc. + +* **LaTeX-style dash parsing** is an additional option, where `--` + is translated into `–`, and `---` is translated into + `—`. This differs from most smartypants processors, which + turn a single hyphen into an ndash and a double hyphen into an + mdash. + +* **Smart fractions**, where anything that looks like a fraction + is translated into suitable HTML (instead of just a few special + cases like most smartypant processors). For example, `4/5` + becomes `45`, which renders as + 45. + + +Other renderers +--------------- + +Blackfriday is structured to allow alternative rendering engines. Here +are a few of note: + +* [github_flavored_markdown](https://godoc.org/github.com/shurcooL/github_flavored_markdown): + provides a GitHub Flavored Markdown renderer with fenced code block + highlighting, clickable heading anchor links. + + It's not customizable, and its goal is to produce HTML output + equivalent to the [GitHub Markdown API endpoint](https://developer.github.com/v3/markdown/#render-a-markdown-document-in-raw-mode), + except the rendering is performed locally. + +* [markdownfmt](https://github.com/shurcooL/markdownfmt): like gofmt, + but for markdown. + +* [LaTeX output](https://bitbucket.org/ambrevar/blackfriday-latex): + renders output as LaTeX. 
+ +* [bfchroma](https://github.com/Depado/bfchroma/): provides convenience + integration with the [Chroma](https://github.com/alecthomas/chroma) code + highlighting library. bfchroma is only compatible with v2 of Blackfriday and + provides a drop-in renderer ready to use with Blackfriday, as well as + options and means for further customization. + + +TODO +---- + +* More unit testing +* Improve Unicode support. It does not understand all Unicode + rules (about what constitutes a letter, a punctuation symbol, + etc.), so it may fail to detect word boundaries correctly in + some instances. It is safe on all UTF-8 input. + + +License +------- + +[Blackfriday is distributed under the Simplified BSD License](LICENSE.txt) + + + [1]: https://daringfireball.net/projects/markdown/ "Markdown" + [2]: https://golang.org/ "Go Language" + [3]: https://github.com/vmg/sundown "Sundown" + [4]: https://godoc.org/gopkg.in/russross/blackfriday.v2#Parse "Parse func" + [5]: https://github.com/microcosm-cc/bluemonday "Bluemonday" + [6]: https://labix.org/gopkg.in "gopkg.in" + [7]: https://github.com/golang/dep/ "dep" + [8]: https://github.com/Masterminds/glide "Glide" + + [BuildSVG]: https://travis-ci.org/russross/blackfriday.svg?branch=master + [BuildURL]: https://travis-ci.org/russross/blackfriday + [GodocV2SVG]: https://godoc.org/gopkg.in/russross/blackfriday.v2?status.svg + [GodocV2URL]: https://godoc.org/gopkg.in/russross/blackfriday.v2 diff --git a/src/margo.sh/vendor/github.com/russross/blackfriday/block.go b/src/margo.sh/vendor/github.com/russross/blackfriday/block.go new file mode 100644 index 00000000..45c21a6c --- /dev/null +++ b/src/margo.sh/vendor/github.com/russross/blackfriday/block.go @@ -0,0 +1,1474 @@ +// +// Blackfriday Markdown Processor +// Available at http://github.com/russross/blackfriday +// +// Copyright © 2011 Russ Ross . +// Distributed under the Simplified BSD License. +// See README.md for details. +// + +// +// Functions to parse block-level elements. +// + +package blackfriday + +import ( + "bytes" + "strings" + "unicode" +) + +// Parse block-level data. +// Note: this function and many that it calls assume that +// the input buffer ends with a newline. +func (p *parser) block(out *bytes.Buffer, data []byte) { + if len(data) == 0 || data[len(data)-1] != '\n' { + panic("block input is missing terminating newline") + } + + // this is called recursively: enforce a maximum depth + if p.nesting >= p.maxNesting { + return + } + p.nesting++ + + // parse out one block-level construct at a time + for len(data) > 0 { + // prefixed header: + // + // # Header 1 + // ## Header 2 + // ... + // ###### Header 6 + if p.isPrefixHeader(data) { + data = data[p.prefixHeader(out, data):] + continue + } + + // block of preformatted HTML: + // + //
+ // ... + //
+ if data[0] == '<' { + if i := p.html(out, data, true); i > 0 { + data = data[i:] + continue + } + } + + // title block + // + // % stuff + // % more stuff + // % even more stuff + if p.flags&EXTENSION_TITLEBLOCK != 0 { + if data[0] == '%' { + if i := p.titleBlock(out, data, true); i > 0 { + data = data[i:] + continue + } + } + } + + // blank lines. note: returns the # of bytes to skip + if i := p.isEmpty(data); i > 0 { + data = data[i:] + continue + } + + // indented code block: + // + // func max(a, b int) int { + // if a > b { + // return a + // } + // return b + // } + if p.codePrefix(data) > 0 { + data = data[p.code(out, data):] + continue + } + + // fenced code block: + // + // ``` go info string here + // func fact(n int) int { + // if n <= 1 { + // return n + // } + // return n * fact(n-1) + // } + // ``` + if p.flags&EXTENSION_FENCED_CODE != 0 { + if i := p.fencedCodeBlock(out, data, true); i > 0 { + data = data[i:] + continue + } + } + + // horizontal rule: + // + // ------ + // or + // ****** + // or + // ______ + if p.isHRule(data) { + p.r.HRule(out) + var i int + for i = 0; data[i] != '\n'; i++ { + } + data = data[i:] + continue + } + + // block quote: + // + // > A big quote I found somewhere + // > on the web + if p.quotePrefix(data) > 0 { + data = data[p.quote(out, data):] + continue + } + + // table: + // + // Name | Age | Phone + // ------|-----|--------- + // Bob | 31 | 555-1234 + // Alice | 27 | 555-4321 + if p.flags&EXTENSION_TABLES != 0 { + if i := p.table(out, data); i > 0 { + data = data[i:] + continue + } + } + + // an itemized/unordered list: + // + // * Item 1 + // * Item 2 + // + // also works with + or - + if p.uliPrefix(data) > 0 { + data = data[p.list(out, data, 0):] + continue + } + + // a numbered/ordered list: + // + // 1. Item 1 + // 2. 
Item 2 + if p.oliPrefix(data) > 0 { + data = data[p.list(out, data, LIST_TYPE_ORDERED):] + continue + } + + // definition lists: + // + // Term 1 + // : Definition a + // : Definition b + // + // Term 2 + // : Definition c + if p.flags&EXTENSION_DEFINITION_LISTS != 0 { + if p.dliPrefix(data) > 0 { + data = data[p.list(out, data, LIST_TYPE_DEFINITION):] + continue + } + } + + // anything else must look like a normal paragraph + // note: this finds underlined headers, too + data = data[p.paragraph(out, data):] + } + + p.nesting-- +} + +func (p *parser) isPrefixHeader(data []byte) bool { + if data[0] != '#' { + return false + } + + if p.flags&EXTENSION_SPACE_HEADERS != 0 { + level := 0 + for level < 6 && data[level] == '#' { + level++ + } + if data[level] != ' ' { + return false + } + } + return true +} + +func (p *parser) prefixHeader(out *bytes.Buffer, data []byte) int { + level := 0 + for level < 6 && data[level] == '#' { + level++ + } + i := skipChar(data, level, ' ') + end := skipUntilChar(data, i, '\n') + skip := end + id := "" + if p.flags&EXTENSION_HEADER_IDS != 0 { + j, k := 0, 0 + // find start/end of header id + for j = i; j < end-1 && (data[j] != '{' || data[j+1] != '#'); j++ { + } + for k = j + 1; k < end && data[k] != '}'; k++ { + } + // extract header id iff found + if j < end && k < end { + id = string(data[j+2 : k]) + end = j + skip = k + 1 + for end > 0 && data[end-1] == ' ' { + end-- + } + } + } + for end > 0 && data[end-1] == '#' { + if isBackslashEscaped(data, end-1) { + break + } + end-- + } + for end > 0 && data[end-1] == ' ' { + end-- + } + if end > i { + if id == "" && p.flags&EXTENSION_AUTO_HEADER_IDS != 0 { + id = SanitizedAnchorName(string(data[i:end])) + } + work := func() bool { + p.inline(out, data[i:end]) + return true + } + p.r.Header(out, work, level, id) + } + return skip +} + +func (p *parser) isUnderlinedHeader(data []byte) int { + // test of level 1 header + if data[0] == '=' { + i := skipChar(data, 1, '=') + i = skipChar(data, i, ' ') + if data[i] == '\n' { + return 1 + } else { + return 0 + } + } + + // test of level 2 header + if data[0] == '-' { + i := skipChar(data, 1, '-') + i = skipChar(data, i, ' ') + if data[i] == '\n' { + return 2 + } else { + return 0 + } + } + + return 0 +} + +func (p *parser) titleBlock(out *bytes.Buffer, data []byte, doRender bool) int { + if data[0] != '%' { + return 0 + } + splitData := bytes.Split(data, []byte("\n")) + var i int + for idx, b := range splitData { + if !bytes.HasPrefix(b, []byte("%")) { + i = idx // - 1 + break + } + } + + data = bytes.Join(splitData[0:i], []byte("\n")) + p.r.TitleBlock(out, data) + + return len(data) +} + +func (p *parser) html(out *bytes.Buffer, data []byte, doRender bool) int { + var i, j int + + // identify the opening tag + if data[0] != '<' { + return 0 + } + curtag, tagfound := p.htmlFindTag(data[1:]) + + // handle special cases + if !tagfound { + // check for an HTML comment + if size := p.htmlComment(out, data, doRender); size > 0 { + return size + } + + // check for an
tag + if size := p.htmlHr(out, data, doRender); size > 0 { + return size + } + + // check for HTML CDATA + if size := p.htmlCDATA(out, data, doRender); size > 0 { + return size + } + + // no special case recognized + return 0 + } + + // look for an unindented matching closing tag + // followed by a blank line + found := false + /* + closetag := []byte("\n") + j = len(curtag) + 1 + for !found { + // scan for a closing tag at the beginning of a line + if skip := bytes.Index(data[j:], closetag); skip >= 0 { + j += skip + len(closetag) + } else { + break + } + + // see if it is the only thing on the line + if skip := p.isEmpty(data[j:]); skip > 0 { + // see if it is followed by a blank line/eof + j += skip + if j >= len(data) { + found = true + i = j + } else { + if skip := p.isEmpty(data[j:]); skip > 0 { + j += skip + found = true + i = j + } + } + } + } + */ + + // if not found, try a second pass looking for indented match + // but not if tag is "ins" or "del" (following original Markdown.pl) + if !found && curtag != "ins" && curtag != "del" { + i = 1 + for i < len(data) { + i++ + for i < len(data) && !(data[i-1] == '<' && data[i] == '/') { + i++ + } + + if i+2+len(curtag) >= len(data) { + break + } + + j = p.htmlFindEnd(curtag, data[i-1:]) + + if j > 0 { + i += j - 1 + found = true + break + } + } + } + + if !found { + return 0 + } + + // the end of the block has been found + if doRender { + // trim newlines + end := i + for end > 0 && data[end-1] == '\n' { + end-- + } + p.r.BlockHtml(out, data[:end]) + } + + return i +} + +func (p *parser) renderHTMLBlock(out *bytes.Buffer, data []byte, start int, doRender bool) int { + // html block needs to end with a blank line + if i := p.isEmpty(data[start:]); i > 0 { + size := start + i + if doRender { + // trim trailing newlines + end := size + for end > 0 && data[end-1] == '\n' { + end-- + } + p.r.BlockHtml(out, data[:end]) + } + return size + } + return 0 +} + +// HTML comment, lax form +func (p *parser) htmlComment(out *bytes.Buffer, data []byte, doRender bool) int { + i := p.inlineHTMLComment(out, data) + return p.renderHTMLBlock(out, data, i, doRender) +} + +// HTML CDATA section +func (p *parser) htmlCDATA(out *bytes.Buffer, data []byte, doRender bool) int { + const cdataTag = "') { + i++ + } + i++ + // no end-of-comment marker + if i >= len(data) { + return 0 + } + return p.renderHTMLBlock(out, data, i, doRender) +} + +// HR, which is the only self-closing block tag considered +func (p *parser) htmlHr(out *bytes.Buffer, data []byte, doRender bool) int { + if data[0] != '<' || (data[1] != 'h' && data[1] != 'H') || (data[2] != 'r' && data[2] != 'R') { + return 0 + } + if data[3] != ' ' && data[3] != '/' && data[3] != '>' { + // not an
tag after all; at least not a valid one + return 0 + } + + i := 3 + for data[i] != '>' && data[i] != '\n' { + i++ + } + + if data[i] == '>' { + return p.renderHTMLBlock(out, data, i+1, doRender) + } + + return 0 +} + +func (p *parser) htmlFindTag(data []byte) (string, bool) { + i := 0 + for isalnum(data[i]) { + i++ + } + key := string(data[:i]) + if _, ok := blockTags[key]; ok { + return key, true + } + return "", false +} + +func (p *parser) htmlFindEnd(tag string, data []byte) int { + // assume data[0] == '<' && data[1] == '/' already tested + + // check if tag is a match + closetag := []byte("") + if !bytes.HasPrefix(data, closetag) { + return 0 + } + i := len(closetag) + + // check that the rest of the line is blank + skip := 0 + if skip = p.isEmpty(data[i:]); skip == 0 { + return 0 + } + i += skip + skip = 0 + + if i >= len(data) { + return i + } + + if p.flags&EXTENSION_LAX_HTML_BLOCKS != 0 { + return i + } + if skip = p.isEmpty(data[i:]); skip == 0 { + // following line must be blank + return 0 + } + + return i + skip +} + +func (*parser) isEmpty(data []byte) int { + // it is okay to call isEmpty on an empty buffer + if len(data) == 0 { + return 0 + } + + var i int + for i = 0; i < len(data) && data[i] != '\n'; i++ { + if data[i] != ' ' && data[i] != '\t' { + return 0 + } + } + return i + 1 +} + +func (*parser) isHRule(data []byte) bool { + i := 0 + + // skip up to three spaces + for i < 3 && data[i] == ' ' { + i++ + } + + // look at the hrule char + if data[i] != '*' && data[i] != '-' && data[i] != '_' { + return false + } + c := data[i] + + // the whole line must be the char or whitespace + n := 0 + for data[i] != '\n' { + switch { + case data[i] == c: + n++ + case data[i] != ' ': + return false + } + i++ + } + + return n >= 3 +} + +// isFenceLine checks if there's a fence line (e.g., ``` or ``` go) at the beginning of data, +// and returns the end index if so, or 0 otherwise. It also returns the marker found. +// If syntax is not nil, it gets set to the syntax specified in the fence line. +// A final newline is mandatory to recognize the fence line, unless newlineOptional is true. +func isFenceLine(data []byte, info *string, oldmarker string, newlineOptional bool) (end int, marker string) { + i, size := 0, 0 + + // skip up to three spaces + for i < len(data) && i < 3 && data[i] == ' ' { + i++ + } + + // check for the marker characters: ~ or ` + if i >= len(data) { + return 0, "" + } + if data[i] != '~' && data[i] != '`' { + return 0, "" + } + + c := data[i] + + // the whole line must be the same char or whitespace + for i < len(data) && data[i] == c { + size++ + i++ + } + + // the marker char must occur at least 3 times + if size < 3 { + return 0, "" + } + marker = string(data[i-size : i]) + + // if this is the end marker, it must match the beginning marker + if oldmarker != "" && marker != oldmarker { + return 0, "" + } + + // TODO(shurcooL): It's probably a good idea to simplify the 2 code paths here + // into one, always get the info string, and discard it if the caller doesn't care. 
+ if info != nil { + infoLength := 0 + i = skipChar(data, i, ' ') + + if i >= len(data) { + if newlineOptional && i == len(data) { + return i, marker + } + return 0, "" + } + + infoStart := i + + if data[i] == '{' { + i++ + infoStart++ + + for i < len(data) && data[i] != '}' && data[i] != '\n' { + infoLength++ + i++ + } + + if i >= len(data) || data[i] != '}' { + return 0, "" + } + + // strip all whitespace at the beginning and the end + // of the {} block + for infoLength > 0 && isspace(data[infoStart]) { + infoStart++ + infoLength-- + } + + for infoLength > 0 && isspace(data[infoStart+infoLength-1]) { + infoLength-- + } + + i++ + } else { + for i < len(data) && !isverticalspace(data[i]) { + infoLength++ + i++ + } + } + + *info = strings.TrimSpace(string(data[infoStart : infoStart+infoLength])) + } + + i = skipChar(data, i, ' ') + if i >= len(data) || data[i] != '\n' { + if newlineOptional && i == len(data) { + return i, marker + } + return 0, "" + } + + return i + 1, marker // Take newline into account. +} + +// fencedCodeBlock returns the end index if data contains a fenced code block at the beginning, +// or 0 otherwise. It writes to out if doRender is true, otherwise it has no side effects. +// If doRender is true, a final newline is mandatory to recognize the fenced code block. +func (p *parser) fencedCodeBlock(out *bytes.Buffer, data []byte, doRender bool) int { + var infoString string + beg, marker := isFenceLine(data, &infoString, "", false) + if beg == 0 || beg >= len(data) { + return 0 + } + + var work bytes.Buffer + + for { + // safe to assume beg < len(data) + + // check for the end of the code block + newlineOptional := !doRender + fenceEnd, _ := isFenceLine(data[beg:], nil, marker, newlineOptional) + if fenceEnd != 0 { + beg += fenceEnd + break + } + + // copy the current line + end := skipUntilChar(data, beg, '\n') + 1 + + // did we reach the end of the buffer without a closing marker? 
+ if end >= len(data) { + return 0 + } + + // verbatim copy to the working buffer + if doRender { + work.Write(data[beg:end]) + } + beg = end + } + + if doRender { + p.r.BlockCode(out, work.Bytes(), infoString) + } + + return beg +} + +func (p *parser) table(out *bytes.Buffer, data []byte) int { + var header bytes.Buffer + i, columns := p.tableHeader(&header, data) + if i == 0 { + return 0 + } + + var body bytes.Buffer + + for i < len(data) { + pipes, rowStart := 0, i + for ; data[i] != '\n'; i++ { + if data[i] == '|' { + pipes++ + } + } + + if pipes == 0 { + i = rowStart + break + } + + // include the newline in data sent to tableRow + i++ + p.tableRow(&body, data[rowStart:i], columns, false) + } + + p.r.Table(out, header.Bytes(), body.Bytes(), columns) + + return i +} + +// check if the specified position is preceded by an odd number of backslashes +func isBackslashEscaped(data []byte, i int) bool { + backslashes := 0 + for i-backslashes-1 >= 0 && data[i-backslashes-1] == '\\' { + backslashes++ + } + return backslashes&1 == 1 +} + +func (p *parser) tableHeader(out *bytes.Buffer, data []byte) (size int, columns []int) { + i := 0 + colCount := 1 + for i = 0; data[i] != '\n'; i++ { + if data[i] == '|' && !isBackslashEscaped(data, i) { + colCount++ + } + } + + // doesn't look like a table header + if colCount == 1 { + return + } + + // include the newline in the data sent to tableRow + header := data[:i+1] + + // column count ignores pipes at beginning or end of line + if data[0] == '|' { + colCount-- + } + if i > 2 && data[i-1] == '|' && !isBackslashEscaped(data, i-1) { + colCount-- + } + + columns = make([]int, colCount) + + // move on to the header underline + i++ + if i >= len(data) { + return + } + + if data[i] == '|' && !isBackslashEscaped(data, i) { + i++ + } + i = skipChar(data, i, ' ') + + // each column header is of form: / *:?-+:? 
*|/ with # dashes + # colons >= 3 + // and trailing | optional on last column + col := 0 + for data[i] != '\n' { + dashes := 0 + + if data[i] == ':' { + i++ + columns[col] |= TABLE_ALIGNMENT_LEFT + dashes++ + } + for data[i] == '-' { + i++ + dashes++ + } + if data[i] == ':' { + i++ + columns[col] |= TABLE_ALIGNMENT_RIGHT + dashes++ + } + for data[i] == ' ' { + i++ + } + + // end of column test is messy + switch { + case dashes < 3: + // not a valid column + return + + case data[i] == '|' && !isBackslashEscaped(data, i): + // marker found, now skip past trailing whitespace + col++ + i++ + for data[i] == ' ' { + i++ + } + + // trailing junk found after last column + if col >= colCount && data[i] != '\n' { + return + } + + case (data[i] != '|' || isBackslashEscaped(data, i)) && col+1 < colCount: + // something else found where marker was required + return + + case data[i] == '\n': + // marker is optional for the last column + col++ + + default: + // trailing junk found after last column + return + } + } + if col != colCount { + return + } + + p.tableRow(out, header, columns, true) + size = i + 1 + return +} + +func (p *parser) tableRow(out *bytes.Buffer, data []byte, columns []int, header bool) { + i, col := 0, 0 + var rowWork bytes.Buffer + + if data[i] == '|' && !isBackslashEscaped(data, i) { + i++ + } + + for col = 0; col < len(columns) && i < len(data); col++ { + for data[i] == ' ' { + i++ + } + + cellStart := i + + for (data[i] != '|' || isBackslashEscaped(data, i)) && data[i] != '\n' { + i++ + } + + cellEnd := i + + // skip the end-of-cell marker, possibly taking us past end of buffer + i++ + + for cellEnd > cellStart && data[cellEnd-1] == ' ' { + cellEnd-- + } + + var cellWork bytes.Buffer + p.inline(&cellWork, data[cellStart:cellEnd]) + + if header { + p.r.TableHeaderCell(&rowWork, cellWork.Bytes(), columns[col]) + } else { + p.r.TableCell(&rowWork, cellWork.Bytes(), columns[col]) + } + } + + // pad it out with empty columns to get the right number + for ; col < len(columns); col++ { + if header { + p.r.TableHeaderCell(&rowWork, nil, columns[col]) + } else { + p.r.TableCell(&rowWork, nil, columns[col]) + } + } + + // silently ignore rows with too many cells + + p.r.TableRow(out, rowWork.Bytes()) +} + +// returns blockquote prefix length +func (p *parser) quotePrefix(data []byte) int { + i := 0 + for i < 3 && data[i] == ' ' { + i++ + } + if data[i] == '>' { + if data[i+1] == ' ' { + return i + 2 + } + return i + 1 + } + return 0 +} + +// blockquote ends with at least one blank line +// followed by something without a blockquote prefix +func (p *parser) terminateBlockquote(data []byte, beg, end int) bool { + if p.isEmpty(data[beg:]) <= 0 { + return false + } + if end >= len(data) { + return true + } + return p.quotePrefix(data[end:]) == 0 && p.isEmpty(data[end:]) == 0 +} + +// parse a blockquote fragment +func (p *parser) quote(out *bytes.Buffer, data []byte) int { + var raw bytes.Buffer + beg, end := 0, 0 + for beg < len(data) { + end = beg + // Step over whole lines, collecting them. 
While doing that, check for + // fenced code and if one's found, incorporate it altogether, + // irregardless of any contents inside it + for data[end] != '\n' { + if p.flags&EXTENSION_FENCED_CODE != 0 { + if i := p.fencedCodeBlock(out, data[end:], false); i > 0 { + // -1 to compensate for the extra end++ after the loop: + end += i - 1 + break + } + } + end++ + } + end++ + + if pre := p.quotePrefix(data[beg:]); pre > 0 { + // skip the prefix + beg += pre + } else if p.terminateBlockquote(data, beg, end) { + break + } + + // this line is part of the blockquote + raw.Write(data[beg:end]) + beg = end + } + + var cooked bytes.Buffer + p.block(&cooked, raw.Bytes()) + p.r.BlockQuote(out, cooked.Bytes()) + return end +} + +// returns prefix length for block code +func (p *parser) codePrefix(data []byte) int { + if data[0] == ' ' && data[1] == ' ' && data[2] == ' ' && data[3] == ' ' { + return 4 + } + return 0 +} + +func (p *parser) code(out *bytes.Buffer, data []byte) int { + var work bytes.Buffer + + i := 0 + for i < len(data) { + beg := i + for data[i] != '\n' { + i++ + } + i++ + + blankline := p.isEmpty(data[beg:i]) > 0 + if pre := p.codePrefix(data[beg:i]); pre > 0 { + beg += pre + } else if !blankline { + // non-empty, non-prefixed line breaks the pre + i = beg + break + } + + // verbatim copy to the working buffeu + if blankline { + work.WriteByte('\n') + } else { + work.Write(data[beg:i]) + } + } + + // trim all the \n off the end of work + workbytes := work.Bytes() + eol := len(workbytes) + for eol > 0 && workbytes[eol-1] == '\n' { + eol-- + } + if eol != len(workbytes) { + work.Truncate(eol) + } + + work.WriteByte('\n') + + p.r.BlockCode(out, work.Bytes(), "") + + return i +} + +// returns unordered list item prefix +func (p *parser) uliPrefix(data []byte) int { + i := 0 + + // start with up to 3 spaces + for i < 3 && data[i] == ' ' { + i++ + } + + // need a *, +, or - followed by a space + if (data[i] != '*' && data[i] != '+' && data[i] != '-') || + data[i+1] != ' ' { + return 0 + } + return i + 2 +} + +// returns ordered list item prefix +func (p *parser) oliPrefix(data []byte) int { + i := 0 + + // start with up to 3 spaces + for i < 3 && data[i] == ' ' { + i++ + } + + // count the digits + start := i + for data[i] >= '0' && data[i] <= '9' { + i++ + } + + // we need >= 1 digits followed by a dot and a space + if start == i || data[i] != '.' || data[i+1] != ' ' { + return 0 + } + return i + 2 +} + +// returns definition list item prefix +func (p *parser) dliPrefix(data []byte) int { + i := 0 + + // need a : followed by a spaces + if data[i] != ':' || data[i+1] != ' ' { + return 0 + } + for data[i] == ' ' { + i++ + } + return i + 2 +} + +// parse ordered or unordered list block +func (p *parser) list(out *bytes.Buffer, data []byte, flags int) int { + i := 0 + flags |= LIST_ITEM_BEGINNING_OF_LIST + work := func() bool { + for i < len(data) { + skip := p.listItem(out, data[i:], &flags) + i += skip + + if skip == 0 || flags&LIST_ITEM_END_OF_LIST != 0 { + break + } + flags &= ^LIST_ITEM_BEGINNING_OF_LIST + } + return true + } + + p.r.List(out, work, flags) + return i +} + +// Parse a single list item. +// Assumes initial prefix is already removed if this is a sublist. 
+func (p *parser) listItem(out *bytes.Buffer, data []byte, flags *int) int { + // keep track of the indentation of the first line + itemIndent := 0 + for itemIndent < 3 && data[itemIndent] == ' ' { + itemIndent++ + } + + i := p.uliPrefix(data) + if i == 0 { + i = p.oliPrefix(data) + } + if i == 0 { + i = p.dliPrefix(data) + // reset definition term flag + if i > 0 { + *flags &= ^LIST_TYPE_TERM + } + } + if i == 0 { + // if in defnition list, set term flag and continue + if *flags&LIST_TYPE_DEFINITION != 0 { + *flags |= LIST_TYPE_TERM + } else { + return 0 + } + } + + // skip leading whitespace on first line + for data[i] == ' ' { + i++ + } + + // find the end of the line + line := i + for i > 0 && data[i-1] != '\n' { + i++ + } + + // get working buffer + var raw bytes.Buffer + + // put the first line into the working buffer + raw.Write(data[line:i]) + line = i + + // process the following lines + containsBlankLine := false + sublist := 0 + codeBlockMarker := "" + +gatherlines: + for line < len(data) { + i++ + + // find the end of this line + for data[i-1] != '\n' { + i++ + } + + // if it is an empty line, guess that it is part of this item + // and move on to the next line + if p.isEmpty(data[line:i]) > 0 { + containsBlankLine = true + raw.Write(data[line:i]) + line = i + continue + } + + // calculate the indentation + indent := 0 + for indent < 4 && line+indent < i && data[line+indent] == ' ' { + indent++ + } + + chunk := data[line+indent : i] + + if p.flags&EXTENSION_FENCED_CODE != 0 { + // determine if in or out of codeblock + // if in codeblock, ignore normal list processing + _, marker := isFenceLine(chunk, nil, codeBlockMarker, false) + if marker != "" { + if codeBlockMarker == "" { + // start of codeblock + codeBlockMarker = marker + } else { + // end of codeblock. + *flags |= LIST_ITEM_CONTAINS_BLOCK + codeBlockMarker = "" + } + } + // we are in a codeblock, write line, and continue + if codeBlockMarker != "" || marker != "" { + raw.Write(data[line+indent : i]) + line = i + continue gatherlines + } + } + + // evaluate how this line fits in + switch { + // is this a nested list item? + case (p.uliPrefix(chunk) > 0 && !p.isHRule(chunk)) || + p.oliPrefix(chunk) > 0 || + p.dliPrefix(chunk) > 0: + + if containsBlankLine { + // end the list if the type changed after a blank line + if indent <= itemIndent && + ((*flags&LIST_TYPE_ORDERED != 0 && p.uliPrefix(chunk) > 0) || + (*flags&LIST_TYPE_ORDERED == 0 && p.oliPrefix(chunk) > 0)) { + + *flags |= LIST_ITEM_END_OF_LIST + break gatherlines + } + *flags |= LIST_ITEM_CONTAINS_BLOCK + } + + // to be a nested list, it must be indented more + // if not, it is the next item in the same list + if indent <= itemIndent { + break gatherlines + } + + // is this the first item in the nested list? + if sublist == 0 { + sublist = raw.Len() + } + + // is this a nested prefix header? + case p.isPrefixHeader(chunk): + // if the header is not indented, it is not nested in the list + // and thus ends the list + if containsBlankLine && indent < 4 { + *flags |= LIST_ITEM_END_OF_LIST + break gatherlines + } + *flags |= LIST_ITEM_CONTAINS_BLOCK + + // anything following an empty line is only part + // of this item if it is indented 4 spaces + // (regardless of the indentation of the beginning of the item) + case containsBlankLine && indent < 4: + if *flags&LIST_TYPE_DEFINITION != 0 && i < len(data)-1 { + // is the next item still a part of this list? 
+ next := i + for data[next] != '\n' { + next++ + } + for next < len(data)-1 && data[next] == '\n' { + next++ + } + if i < len(data)-1 && data[i] != ':' && data[next] != ':' { + *flags |= LIST_ITEM_END_OF_LIST + } + } else { + *flags |= LIST_ITEM_END_OF_LIST + } + break gatherlines + + // a blank line means this should be parsed as a block + case containsBlankLine: + *flags |= LIST_ITEM_CONTAINS_BLOCK + } + + containsBlankLine = false + + // add the line into the working buffer without prefix + raw.Write(data[line+indent : i]) + + line = i + } + + // If reached end of data, the Renderer.ListItem call we're going to make below + // is definitely the last in the list. + if line >= len(data) { + *flags |= LIST_ITEM_END_OF_LIST + } + + rawBytes := raw.Bytes() + + // render the contents of the list item + var cooked bytes.Buffer + if *flags&LIST_ITEM_CONTAINS_BLOCK != 0 && *flags&LIST_TYPE_TERM == 0 { + // intermediate render of block item, except for definition term + if sublist > 0 { + p.block(&cooked, rawBytes[:sublist]) + p.block(&cooked, rawBytes[sublist:]) + } else { + p.block(&cooked, rawBytes) + } + } else { + // intermediate render of inline item + if sublist > 0 { + p.inline(&cooked, rawBytes[:sublist]) + p.block(&cooked, rawBytes[sublist:]) + } else { + p.inline(&cooked, rawBytes) + } + } + + // render the actual list item + cookedBytes := cooked.Bytes() + parsedEnd := len(cookedBytes) + + // strip trailing newlines + for parsedEnd > 0 && cookedBytes[parsedEnd-1] == '\n' { + parsedEnd-- + } + p.r.ListItem(out, cookedBytes[:parsedEnd], *flags) + + return line +} + +// render a single paragraph that has already been parsed out +func (p *parser) renderParagraph(out *bytes.Buffer, data []byte) { + if len(data) == 0 { + return + } + + // trim leading spaces + beg := 0 + for data[beg] == ' ' { + beg++ + } + + // trim trailing newline + end := len(data) - 1 + + // trim trailing spaces + for end > beg && data[end-1] == ' ' { + end-- + } + + work := func() bool { + p.inline(out, data[beg:end]) + return true + } + p.r.Paragraph(out, work) +} + +func (p *parser) paragraph(out *bytes.Buffer, data []byte) int { + // prev: index of 1st char of previous line + // line: index of 1st char of current line + // i: index of cursor/end of current line + var prev, line, i int + + // keep going until we find something to mark the end of the paragraph + for i < len(data) { + // mark the beginning of the current line + prev = line + current := data[i:] + line = i + + // did we find a blank line marking the end of the paragraph? + if n := p.isEmpty(current); n > 0 { + // did this blank line followed by a definition list item? 
+ if p.flags&EXTENSION_DEFINITION_LISTS != 0 { + if i < len(data)-1 && data[i+1] == ':' { + return p.list(out, data[prev:], LIST_TYPE_DEFINITION) + } + } + + p.renderParagraph(out, data[:i]) + return i + n + } + + // an underline under some text marks a header, so our paragraph ended on prev line + if i > 0 { + if level := p.isUnderlinedHeader(current); level > 0 { + // render the paragraph + p.renderParagraph(out, data[:prev]) + + // ignore leading and trailing whitespace + eol := i - 1 + for prev < eol && data[prev] == ' ' { + prev++ + } + for eol > prev && data[eol-1] == ' ' { + eol-- + } + + // render the header + // this ugly double closure avoids forcing variables onto the heap + work := func(o *bytes.Buffer, pp *parser, d []byte) func() bool { + return func() bool { + pp.inline(o, d) + return true + } + }(out, p, data[prev:eol]) + + id := "" + if p.flags&EXTENSION_AUTO_HEADER_IDS != 0 { + id = SanitizedAnchorName(string(data[prev:eol])) + } + + p.r.Header(out, work, level, id) + + // find the end of the underline + for data[i] != '\n' { + i++ + } + return i + } + } + + // if the next line starts a block of HTML, then the paragraph ends here + if p.flags&EXTENSION_LAX_HTML_BLOCKS != 0 { + if data[i] == '<' && p.html(out, current, false) > 0 { + // rewind to before the HTML block + p.renderParagraph(out, data[:i]) + return i + } + } + + // if there's a prefixed header or a horizontal rule after this, paragraph is over + if p.isPrefixHeader(current) || p.isHRule(current) { + p.renderParagraph(out, data[:i]) + return i + } + + // if there's a fenced code block, paragraph is over + if p.flags&EXTENSION_FENCED_CODE != 0 { + if p.fencedCodeBlock(out, current, false) > 0 { + p.renderParagraph(out, data[:i]) + return i + } + } + + // if there's a definition list item, prev line is a definition term + if p.flags&EXTENSION_DEFINITION_LISTS != 0 { + if p.dliPrefix(current) != 0 { + return p.list(out, data[prev:], LIST_TYPE_DEFINITION) + } + } + + // if there's a list after this, paragraph is over + if p.flags&EXTENSION_NO_EMPTY_LINE_BEFORE_BLOCK != 0 { + if p.uliPrefix(current) != 0 || + p.oliPrefix(current) != 0 || + p.quotePrefix(current) != 0 || + p.codePrefix(current) != 0 { + p.renderParagraph(out, data[:i]) + return i + } + } + + // otherwise, scan to the beginning of the next line + for data[i] != '\n' { + i++ + } + i++ + } + + p.renderParagraph(out, data[:i]) + return i +} + +// SanitizedAnchorName returns a sanitized anchor name for the given text. +// +// It implements the algorithm specified in the package comment. +func SanitizedAnchorName(text string) string { + var anchorName []rune + futureDash := false + for _, r := range text { + switch { + case unicode.IsLetter(r) || unicode.IsNumber(r): + if futureDash && len(anchorName) > 0 { + anchorName = append(anchorName, '-') + } + futureDash = false + anchorName = append(anchorName, unicode.ToLower(r)) + default: + futureDash = true + } + } + return string(anchorName) +} diff --git a/src/margo.sh/vendor/github.com/russross/blackfriday/doc.go b/src/margo.sh/vendor/github.com/russross/blackfriday/doc.go new file mode 100644 index 00000000..9656c42a --- /dev/null +++ b/src/margo.sh/vendor/github.com/russross/blackfriday/doc.go @@ -0,0 +1,32 @@ +// Package blackfriday is a Markdown processor. +// +// It translates plain text with simple formatting rules into HTML or LaTeX. +// +// Sanitized Anchor Names +// +// Blackfriday includes an algorithm for creating sanitized anchor names +// corresponding to a given input text. 
This algorithm is used to create +// anchors for headings when EXTENSION_AUTO_HEADER_IDS is enabled. The +// algorithm is specified below, so that other packages can create +// compatible anchor names and links to those anchors. +// +// The algorithm iterates over the input text, interpreted as UTF-8, +// one Unicode code point (rune) at a time. All runes that are letters (category L) +// or numbers (category N) are considered valid characters. They are mapped to +// lower case, and included in the output. All other runes are considered +// invalid characters. Invalid characters that preceed the first valid character, +// as well as invalid character that follow the last valid character +// are dropped completely. All other sequences of invalid characters +// between two valid characters are replaced with a single dash character '-'. +// +// SanitizedAnchorName exposes this functionality, and can be used to +// create compatible links to the anchor names generated by blackfriday. +// This algorithm is also implemented in a small standalone package at +// github.com/shurcooL/sanitized_anchor_name. It can be useful for clients +// that want a small package and don't need full functionality of blackfriday. +package blackfriday + +// NOTE: Keep Sanitized Anchor Name algorithm in sync with package +// github.com/shurcooL/sanitized_anchor_name. +// Otherwise, users of sanitized_anchor_name will get anchor names +// that are incompatible with those generated by blackfriday. diff --git a/src/margo.sh/vendor/github.com/russross/blackfriday/go.mod b/src/margo.sh/vendor/github.com/russross/blackfriday/go.mod new file mode 100644 index 00000000..b05561a0 --- /dev/null +++ b/src/margo.sh/vendor/github.com/russross/blackfriday/go.mod @@ -0,0 +1 @@ +module github.com/russross/blackfriday diff --git a/src/margo.sh/vendor/github.com/russross/blackfriday/html.go b/src/margo.sh/vendor/github.com/russross/blackfriday/html.go new file mode 100644 index 00000000..e0a6c69c --- /dev/null +++ b/src/margo.sh/vendor/github.com/russross/blackfriday/html.go @@ -0,0 +1,938 @@ +// +// Blackfriday Markdown Processor +// Available at http://github.com/russross/blackfriday +// +// Copyright © 2011 Russ Ross . +// Distributed under the Simplified BSD License. +// See README.md for details. +// + +// +// +// HTML rendering backend +// +// + +package blackfriday + +import ( + "bytes" + "fmt" + "regexp" + "strconv" + "strings" +) + +// Html renderer configuration options. +const ( + HTML_SKIP_HTML = 1 << iota // skip preformatted HTML blocks + HTML_SKIP_STYLE // skip embedded