diff --git a/Dockerfile b/Dockerfile
index 8821d6a86..46daeb63b 100644
--- a/Dockerfile
+++ b/Dockerfile
@@ -1,10 +1,11 @@
FROM golang:1.11-alpine as build
-RUN apk add --update --no-cache git gcc musl-dev
+RUN apk add --update --no-cache git gcc musl-dev make
ADD . /go/src/moul.io/depviz
WORKDIR /go/src/moul.io/depviz
RUN GO111MODULE=on go get -v .
+RUN make install
FROM alpine
RUN apk add --update --no-cache ca-certificates
COPY --from=build /go/bin/depviz /bin/
-ENTRYPOINT ["depviz"]
\ No newline at end of file
+ENTRYPOINT ["depviz"]
diff --git a/README.md b/README.md
index 6fea9ae60..7276c4871 100644
--- a/README.md
+++ b/README.md
@@ -48,7 +48,9 @@ _inspired by this discussion: [jbenet/random-ideas#37](https://github.com/jbenet
## Install (with Golang)
-`go get moul.io/depviz`
+```
+go get moul.io/depviz
+```
## Usage
diff --git a/chi_util.go b/cli/chi_util.go
similarity index 99%
rename from chi_util.go
rename to cli/chi_util.go
index 96e1f9d59..45d222610 100644
--- a/chi_util.go
+++ b/cli/chi_util.go
@@ -1,4 +1,4 @@
-package main
+package cli
import (
"net/http"
diff --git a/cli/cmd_airtable.go b/cli/cmd_airtable.go
new file mode 100644
index 000000000..51bab74cd
--- /dev/null
+++ b/cli/cmd_airtable.go
@@ -0,0 +1,249 @@
+package cli
+
+import (
+ "encoding/json"
+ "fmt"
+
+ "github.com/brianloveswords/airtable"
+ "github.com/pkg/errors"
+ "github.com/spf13/cobra"
+ "github.com/spf13/pflag"
+ "github.com/spf13/viper"
+ "go.uber.org/zap"
+ "moul.io/depviz/pkg/airtabledb"
+ "moul.io/depviz/pkg/issues"
+)
+
+type airtableOptions struct {
+ IssuesTableName string `mapstructure:"airtable-issues-table-name"`
+ RepositoriesTableName string `mapstructure:"airtable-repositories-table-name"`
+ LabelsTableName string `mapstructure:"airtable-labels-table-name"`
+ MilestonesTableName string `mapstructure:"airtable-milestones-table-name"`
+ ProvidersTableName string `mapstructure:"airtable-providers-table-name"`
+ AccountsTableName string `mapstructure:"airtable-accounts-table-name"`
+ BaseID string `mapstructure:"airtable-base-id"`
+ Token string `mapstructure:"airtable-token"`
+ DestroyInvalidRecords bool `mapstructure:"airtable-destroy-invalid-records"`
+ TableNames []string
+
+ Targets []issues.Target `mapstructure:"targets"`
+}
+
+func (opts airtableOptions) String() string {
+ out, _ := json.Marshal(opts)
+ return string(out)
+}
+
+type airtableCommand struct {
+ opts airtableOptions
+}
+
+func (cmd *airtableCommand) LoadDefaultOptions() error {
+ if err := viper.Unmarshal(&cmd.opts); err != nil {
+ return err
+ }
+ return nil
+}
+
+func (cmd *airtableCommand) ParseFlags(flags *pflag.FlagSet) {
+ cmd.opts.TableNames = make([]string, airtabledb.NumTables)
+
+ flags.StringVarP(&cmd.opts.IssuesTableName, "airtable-issues-table-name", "", "Issues and PRs", "Airtable issues table name")
+ cmd.opts.TableNames[airtabledb.IssueIndex] = cmd.opts.IssuesTableName
+
+ flags.StringVarP(&cmd.opts.RepositoriesTableName, "airtable-repositories-table-name", "", "Repositories", "Airtable repositories table name")
+ cmd.opts.TableNames[airtabledb.RepositoryIndex] = cmd.opts.RepositoriesTableName
+
+ flags.StringVarP(&cmd.opts.AccountsTableName, "airtable-accounts-table-name", "", "Accounts", "Airtable accounts table name")
+ cmd.opts.TableNames[airtabledb.AccountIndex] = cmd.opts.AccountsTableName
+
+ flags.StringVarP(&cmd.opts.LabelsTableName, "airtable-labels-table-name", "", "Labels", "Airtable labels table name")
+ cmd.opts.TableNames[airtabledb.LabelIndex] = cmd.opts.LabelsTableName
+
+ flags.StringVarP(&cmd.opts.MilestonesTableName, "airtable-milestones-table-name", "", "Milestones", "Airtable milestones table name")
+ cmd.opts.TableNames[airtabledb.MilestoneIndex] = cmd.opts.MilestonesTableName
+
+ flags.StringVarP(&cmd.opts.ProvidersTableName, "airtable-providers-table-name", "", "Providers", "Airtable providers table name")
+ cmd.opts.TableNames[airtabledb.ProviderIndex] = cmd.opts.ProvidersTableName
+
+ flags.StringVarP(&cmd.opts.BaseID, "airtable-base-id", "", "", "Airtable base ID")
+ flags.StringVarP(&cmd.opts.Token, "airtable-token", "", "", "Airtable token")
+ flags.BoolVarP(&cmd.opts.DestroyInvalidRecords, "airtable-destroy-invalid-records", "", false, "Destroy invalid records")
+
+ viper.BindPFlags(flags)
+}
+
+func (cmd *airtableCommand) NewCobraCommand(dc map[string]DepvizCommand) *cobra.Command {
+ cc := &cobra.Command{
+ Use: "airtable",
+ Short: "Upload issue info stored in database to airtable spreadsheets",
+ }
+ cc.AddCommand(cmd.airtableSyncCommand())
+ return cc
+}
+
+func (cmd *airtableCommand) airtableSyncCommand() *cobra.Command {
+ cc := &cobra.Command{
+ Use: "sync",
+ Short: "Upload issue info stored in database to airtable spreadsheets",
+ RunE: func(_ *cobra.Command, args []string) error {
+ opts := cmd.opts
+ var err error
+ if opts.Targets, err = issues.ParseTargets(args); err != nil {
+ return errors.Wrap(err, "invalid targets")
+ }
+ return airtableSync(&opts)
+ },
+ }
+ cmd.ParseFlags(cc.Flags())
+ return cc
+}
+
+// airtableSync pushes issue info to the airtable base specified in opts.
+// Repository info is loaded from the targets specified in opts.
+func airtableSync(opts *airtableOptions) error {
+ if opts.BaseID == "" || opts.Token == "" {
+ return fmt.Errorf("missing token or baseid, check '-h'")
+ }
+
+ //
+ // prepare
+ //
+
+ loadedIssues, err := issues.Load(db, nil)
+ if err != nil {
+ return errors.Wrap(err, "failed to load issues")
+ }
+ loadedIssues = loadedIssues.FilterByTargets(opts.Targets)
+ zap.L().Debug("fetch db entries", zap.Int("count", len(loadedIssues)))
+
+ issueFeatures := make([]map[string]issues.Feature, airtabledb.NumTables)
+ for i := range issueFeatures {
+ issueFeatures[i] = make(map[string]issues.Feature)
+ }
+
+ // Parse the loaded issues into the issueFeature map.
+ for _, issue := range loadedIssues {
+ // providers
+ issueFeatures[airtabledb.ProviderIndex][issue.Repository.Provider.ID] = issue.Repository.Provider
+
+ // labels
+ for _, label := range issue.Labels {
+ issueFeatures[airtabledb.LabelIndex][label.ID] = label
+ }
+
+ // accounts
+ if issue.Repository.Owner != nil {
+ issueFeatures[airtabledb.AccountIndex][issue.Repository.Owner.ID] = issue.Repository.Owner
+ }
+
+ issueFeatures[airtabledb.AccountIndex][issue.Author.ID] = issue.Author
+ for _, assignee := range issue.Assignees {
+ issueFeatures[airtabledb.AccountIndex][assignee.ID] = assignee
+ }
+ if issue.Milestone != nil && issue.Milestone.Creator != nil {
+ issueFeatures[airtabledb.AccountIndex][issue.Milestone.Creator.ID] = issue.Milestone.Creator
+ }
+
+ // repositories
+ issueFeatures[airtabledb.RepositoryIndex][issue.Repository.ID] = issue.Repository
+ // FIXME: find external repositories based on depends-on links
+
+ // milestones
+ if issue.Milestone != nil {
+ issueFeatures[airtabledb.MilestoneIndex][issue.Milestone.ID] = issue.Milestone
+ }
+
+ // issue
+ issueFeatures[airtabledb.IssueIndex][issue.ID] = issue
+ // FIXME: find external issues based on depends-on links
+ }
+
+ client := airtable.Client{
+ APIKey: opts.Token,
+ BaseID: opts.BaseID,
+ Limiter: airtable.RateLimiter(5),
+ }
+
+ // cache stores issueFeatures inserted into the airtable base.
+ cache := airtabledb.NewDB()
+
+ // Store already existing issueFeatures into the cache.
+ for tableKind, tableName := range opts.TableNames {
+ table := client.Table(tableName)
+ if err := cache.Tables[tableKind].Fetch(table); err != nil {
+ return err
+ }
+ }
+
+ // unmatched stores new issueFeatures (those that exist in the loaded issues but not in the Airtable base).
+ unmatched := airtabledb.NewDB()
+
+ // Insert new issueFeatures into unmatched and mark altered cache issueFeatures with airtabledb.StateChanged.
+ for tableKind, featureMap := range issueFeatures {
+ for _, dbEntry := range featureMap {
+ matched := false
+ dbRecord := dbEntry.ToRecord(cache)
+ for idx := 0; idx < cache.Tables[tableKind].Len(); idx++ {
+ t := cache.Tables[tableKind]
+ if t.GetFieldID(idx) == dbEntry.GetID() {
+ if t.RecordsEqual(idx, dbRecord) {
+ t.SetState(idx, airtabledb.StateUnchanged)
+ } else {
+ t.CopyFields(idx, dbRecord)
+ t.SetState(idx, airtabledb.StateChanged)
+ }
+ matched = true
+ break
+ }
+ }
+ if !matched {
+ unmatched.Tables[tableKind].Append(dbRecord)
+ }
+ }
+ }
+
+ // Add new issueFeatures from unmatched to cache.
+ // Then, push new and altered issueFeatures from cache to airtable base.
+ for tableKind, tableName := range opts.TableNames {
+ table := client.Table(tableName)
+ ut := unmatched.Tables[tableKind]
+ ct := cache.Tables[tableKind]
+ for i := 0; i < ut.Len(); i++ {
+ zap.L().Debug("create airtable entry", zap.String("type", tableName), zap.String("entry", ut.StringAt(i)))
+ if err := table.Create(ut.GetPtr(i)); err != nil {
+ return err
+ }
+ ut.SetState(i, airtabledb.StateNew)
+ ct.Append(ut.Get(i))
+ }
+ for i := 0; i < ct.Len(); i++ {
+ var err error
+ switch ct.GetState(i) {
+ case airtabledb.StateUnknown:
+ err = table.Delete(ct.GetPtr(i))
+ zap.L().Debug("delete airtable entry", zap.String("type", tableName), zap.String("entry", ct.StringAt(i)), zap.Error(err))
+ case airtabledb.StateChanged:
+ err = table.Update(ct.GetPtr(i))
+ zap.L().Debug("update airtable entry", zap.String("type", tableName), zap.String("entry", ct.StringAt(i)), zap.Error(err))
+ case airtabledb.StateUnchanged:
+ zap.L().Debug("unchanged airtable entry", zap.String("type", tableName), zap.String("entry", ct.StringAt(i)), zap.Error(err))
+ // do nothing
+ case airtabledb.StateNew:
+ zap.L().Debug("new airtable entry", zap.String("type", tableName), zap.String("entry", ct.StringAt(i)), zap.Error(err))
+ // do nothing
+ }
+ }
+ }
+
+ for tableKind, tableName := range opts.TableNames {
+ fmt.Println("-------", tableName)
+ ct := cache.Tables[tableKind]
+ for i := 0; i < ct.Len(); i++ {
+ fmt.Println(ct.GetID(i), airtabledb.StateString[ct.GetState(i)], ct.GetFieldID(i))
+ }
+ }
+ fmt.Println("-------")
+
+ return nil
+}
diff --git a/cli/cmd_db.go b/cli/cmd_db.go
new file mode 100644
index 000000000..ad3ab169e
--- /dev/null
+++ b/cli/cmd_db.go
@@ -0,0 +1,72 @@
+package cli
+
+import (
+ "encoding/json"
+ "fmt"
+
+ "github.com/spf13/cobra"
+ "github.com/spf13/pflag"
+ "github.com/spf13/viper"
+ "moul.io/depviz/pkg/issues"
+)
+
+type dbOptions struct{}
+
+func (opts dbOptions) String() string {
+ out, _ := json.Marshal(opts)
+ return string(out)
+}
+
+type dbCommand struct {
+ opts dbOptions
+}
+
+func (cmd *dbCommand) LoadDefaultOptions() error {
+ if err := viper.Unmarshal(&cmd.opts); err != nil {
+ return err
+ }
+ return nil
+}
+
+func (cmd *dbCommand) NewCobraCommand(dc map[string]DepvizCommand) *cobra.Command {
+ cc := &cobra.Command{
+ Use: "db",
+ }
+ cc.AddCommand(cmd.dbDumpCommand())
+ return cc
+}
+
+func (cmd *dbCommand) ParseFlags(flags *pflag.FlagSet) {
+ viper.BindPFlags(flags)
+}
+
+func (cmd *dbCommand) dbDumpCommand() *cobra.Command {
+ cc := &cobra.Command{
+ Use: "dump",
+ Short: "Print all issues stored in the database, formatted as JSON",
+ RunE: func(_ *cobra.Command, args []string) error {
+ opts := cmd.opts
+ return dbDump(&opts)
+ },
+ }
+ cmd.ParseFlags(cc.Flags())
+ return cc
+}
+
+func dbDump(opts *dbOptions) error {
+ issues := []*issues.Issue{}
+ if err := db.Find(&issues).Error; err != nil {
+ return err
+ }
+
+ for _, issue := range issues {
+ issue.PostLoad()
+ }
+
+ out, err := json.MarshalIndent(issues, "", " ")
+ if err != nil {
+ return err
+ }
+ fmt.Println(string(out))
+ return nil
+}
diff --git a/cmd_graph.go b/cli/cmd_graph.go
similarity index 73%
rename from cmd_graph.go
rename to cli/cmd_graph.go
index 0165cc6b2..b92d0c2c7 100644
--- a/cmd_graph.go
+++ b/cli/cmd_graph.go
@@ -1,4 +1,4 @@
-package main
+package cli
import (
"encoding/json"
@@ -6,7 +6,6 @@ import (
"html"
"io"
"math"
- "net/url"
"os"
"sort"
"strings"
@@ -17,62 +16,73 @@ import (
"github.com/spf13/pflag"
"github.com/spf13/viper"
"go.uber.org/zap"
+ "moul.io/depviz/pkg/issues"
)
type graphOptions struct {
- Output string `mapstructure:"output"`
- DebugGraph bool `mapstructure:"debug-graph"`
- NoCompress bool `mapstructure:"no-compress"`
- DarkTheme bool `mapstructure:"dark-theme"`
- ShowClosed bool `mapstructure:"show-closed"`
- ShowOrphans bool `mapstructure:"show-orphans"`
- ShowPRs bool `mapstructure:"show-prs"`
- Preview bool `mapstructure:"preview"`
- Format string `mapstructure:"format"`
- Targets Targets `mapstructure:"targets"`
+ Output string `mapstructure:"output"`
+ DebugGraph bool `mapstructure:"debug-graph"`
+ NoCompress bool `mapstructure:"no-compress"`
+ DarkTheme bool `mapstructure:"dark-theme"`
+ ShowClosed bool `mapstructure:"show-closed"`
+ ShowOrphans bool `mapstructure:"show-orphans"`
+ ShowPRs bool `mapstructure:"show-prs"`
+ Preview bool `mapstructure:"preview"`
+ Format string `mapstructure:"format"`
+ Targets issues.Targets `mapstructure:"targets"`
// FocusMode
// NoExternal
}
-var globalGraphOptions graphOptions
-
func (opts graphOptions) String() string {
out, _ := json.Marshal(opts)
return string(out)
}
-func graphSetupFlags(flags *pflag.FlagSet, opts *graphOptions) {
- flags.BoolVarP(&opts.ShowClosed, "show-closed", "", false, "show closed issues")
- flags.BoolVarP(&opts.DebugGraph, "debug-graph", "", false, "debug graph")
- flags.BoolVarP(&opts.ShowOrphans, "show-orphans", "", false, "show issues not linked to an epic")
- flags.BoolVarP(&opts.NoCompress, "no-compress", "", false, "do not compress graph (no overlap)")
- flags.BoolVarP(&opts.DarkTheme, "dark-theme", "", false, "dark theme")
- flags.BoolVarP(&opts.ShowPRs, "show-prs", "", false, "show PRs")
- flags.StringVarP(&opts.Output, "output", "o", "-", "output file ('-' for stdout, dot)")
- flags.StringVarP(&opts.Format, "format", "f", "", "output file format (if empty, will determine thanks to output extension)")
+type graphCommand struct {
+ opts graphOptions
+}
+
+func (cmd *graphCommand) LoadDefaultOptions() error {
+ if err := viper.Unmarshal(&cmd.opts); err != nil {
+ return err
+ }
+ return nil
+}
+
+func (cmd *graphCommand) ParseFlags(flags *pflag.FlagSet) {
+ flags.BoolVarP(&cmd.opts.ShowClosed, "show-closed", "", false, "show closed issues")
+ flags.BoolVarP(&cmd.opts.DebugGraph, "debug-graph", "", false, "debug graph")
+ flags.BoolVarP(&cmd.opts.ShowOrphans, "show-orphans", "", false, "show issues not linked to an epic")
+ flags.BoolVarP(&cmd.opts.NoCompress, "no-compress", "", false, "do not compress graph (no overlap)")
+ flags.BoolVarP(&cmd.opts.DarkTheme, "dark-theme", "", false, "dark theme")
+ flags.BoolVarP(&cmd.opts.ShowPRs, "show-prs", "", false, "show PRs")
+ flags.StringVarP(&cmd.opts.Output, "output", "o", "-", "output file ('-' for stdout, dot)")
+ flags.StringVarP(&cmd.opts.Format, "format", "f", "", "output file format (if empty, inferred from the output file extension)")
//flags.BoolVarP(&opts.Preview, "preview", "p", false, "preview result")
viper.BindPFlags(flags)
}
-func newGraphCommand() *cobra.Command {
- cmd := &cobra.Command{
- Use: "graph",
- RunE: func(cmd *cobra.Command, args []string) error {
- opts := globalGraphOptions
+func (cmd *graphCommand) NewCobraCommand(dc map[string]DepvizCommand) *cobra.Command {
+ cc := &cobra.Command{
+ Use: "graph",
+ Short: "Output graph of relationships between all issues stored in database",
+ RunE: func(_ *cobra.Command, args []string) error {
+ opts := cmd.opts
var err error
- if opts.Targets, err = ParseTargets(args); err != nil {
+ if opts.Targets, err = issues.ParseTargets(args); err != nil {
return errors.Wrap(err, "invalid targets")
}
return graph(&opts)
},
}
- graphSetupFlags(cmd.Flags(), &globalGraphOptions)
- return cmd
+ cmd.ParseFlags(cc.Flags())
+ return cc
}
func graph(opts *graphOptions) error {
- logger().Debug("graph", zap.Stringer("opts", *opts))
- issues, err := loadIssues(nil)
+ zap.L().Debug("graph", zap.Stringer("opts", *opts))
+ issues, err := issues.Load(db, nil)
if err != nil {
return errors.Wrap(err, "failed to load issues")
}
@@ -107,7 +117,7 @@ func graph(opts *graphOptions) error {
return nil
}
-func isIssueHidden(issue *Issue, opts *graphOptions) bool {
+func isIssueHidden(issue *issues.Issue, opts *graphOptions) bool {
if issue.IsHidden {
return true
}
@@ -123,7 +133,7 @@ func isIssueHidden(issue *Issue, opts *graphOptions) bool {
return false
}
-func graphviz(issues Issues, opts *graphOptions) (string, error) {
+func graphviz(issues issues.Issues, opts *graphOptions) (string, error) {
for _, issue := range issues {
if isIssueHidden(issue, opts) {
continue
@@ -221,13 +231,13 @@ func graphviz(issues Issues, opts *graphOptions) (string, error) {
}
existingNodes[issue.URL] = true
- panicIfErr(issue.AddNodeToGraph(g, parent))
+ panicIfErr(AddNodeToGraph(g, issue, parent))
stats["nodes"]++
}
// issue relationships
for _, issue := range issues {
- panicIfErr(issue.AddEdgesToGraph(g, opts, existingNodes))
+ panicIfErr(AddEdgesToGraph(g, issue, opts, existingNodes))
stats["edges"]++
}
@@ -326,13 +336,13 @@ func graphviz(issues Issues, opts *graphOptions) (string, error) {
stats["edges"]++
}
- logger().Debug("graph stats", zap.Any("stats", stats))
+ zap.L().Debug("graph stats", zap.Any("stats", stats))
return g.String(), nil
}
-func (i Issue) AddNodeToGraph(g *gographviz.Graph, parent string) error {
+func AddNodeToGraph(g *gographviz.Graph, i *issues.Issue, parent string) error {
attrs := map[string]string{}
- attrs["label"] = i.GraphNodeTitle()
+ attrs["label"] = GraphNodeTitle(i)
//attrs["xlabel"] = ""
attrs["shape"] = "record"
attrs["style"] = `"rounded,filled"`
@@ -367,8 +377,8 @@ func (i Issue) AddNodeToGraph(g *gographviz.Graph, parent string) error {
)
}
-func (i Issue) AddEdgesToGraph(g *gographviz.Graph, opts *graphOptions, existingNodes map[string]bool) error {
- if isIssueHidden(&i, opts) {
+func AddEdgesToGraph(g *gographviz.Graph, i *issues.Issue, opts *graphOptions, existingNodes map[string]bool) error {
+ if isIssueHidden(i, opts) {
return nil
}
for _, dependency := range i.Parents {
@@ -407,30 +417,12 @@ func (i Issue) AddEdgesToGraph(g *gographviz.Graph, opts *graphOptions, existing
return nil
}
-func (i Issue) GraphNodeName() string {
+func GraphNodeName(i *issues.Issue) string {
return fmt.Sprintf(`%s#%s`, i.Path()[1:], i.Number())
}
-func (i Issue) Number() string {
- u, err := url.Parse(i.URL)
- if err != nil {
- return ""
- }
- parts := strings.Split(u.Path, "/")
- return parts[len(parts)-1]
-}
-
-func (i Issue) Path() string {
- u, err := url.Parse(i.URL)
- if err != nil {
- return ""
- }
- parts := strings.Split(u.Path, "/")
- return strings.Join(parts[:len(parts)-2], "/")
-}
-
-func (i Issue) GraphNodeTitle() string {
- title := fmt.Sprintf("%s: %s", i.GraphNodeName(), i.Title)
+func GraphNodeTitle(i *issues.Issue) string {
+ title := fmt.Sprintf("%s: %s", GraphNodeName(i), i.Title)
title = strings.Replace(title, "|", "-", -1)
title = strings.Replace(html.EscapeString(wrap(title, 20)), "\n", "<br/>", -1)
labels := []string{}
@@ -459,3 +451,33 @@ func (i Issue) GraphNodeTitle() string {
}
return fmt.Sprintf(`<
>`, title, labelsText, assigneeText, errorsText)
}
+
+func panicIfErr(err error) {
+ if err != nil {
+ panic(err)
+ }
+}
+
+func escape(input string) string {
+ return fmt.Sprintf("%q", input)
+}
+
+func wrap(text string, lineWidth int) string {
+ words := strings.Fields(strings.TrimSpace(text))
+ if len(words) == 0 {
+ return text
+ }
+ wrapped := words[0]
+ spaceLeft := lineWidth - len(wrapped)
+ for _, word := range words[1:] {
+ if len(word)+1 > spaceLeft {
+ wrapped += "\n" + word
+ spaceLeft = lineWidth - len(word)
+ } else {
+ wrapped += " " + word
+ spaceLeft -= 1 + len(word)
+ }
+ }
+
+ return wrapped
+}
diff --git a/cli/cmd_pull.go b/cli/cmd_pull.go
new file mode 100644
index 000000000..fdcd5dd34
--- /dev/null
+++ b/cli/cmd_pull.go
@@ -0,0 +1,75 @@
+package cli
+
+import (
+ "encoding/json"
+ "os"
+
+ "github.com/pkg/errors"
+ "github.com/spf13/cobra"
+ "github.com/spf13/pflag"
+ "github.com/spf13/viper"
+ "go.uber.org/zap"
+ "moul.io/depviz/pkg/issues"
+)
+
+type pullOptions struct {
+ // pull
+ GithubToken string `mapstructure:"github-token"`
+ GitlabToken string `mapstructure:"gitlab-token"`
+ // includeExternalDeps bool
+
+ Targets issues.Targets `mapstructure:"targets"`
+}
+
+func (opts pullOptions) String() string {
+ out, _ := json.Marshal(opts)
+ return string(out)
+}
+
+type pullCommand struct {
+ opts pullOptions
+}
+
+func (cmd *pullCommand) LoadDefaultOptions() error {
+ if err := viper.Unmarshal(&cmd.opts); err != nil {
+ return err
+ }
+ return nil
+}
+
+func (cmd *pullCommand) ParseFlags(flags *pflag.FlagSet) {
+ flags.StringVarP(&cmd.opts.GithubToken, "github-token", "", "", "GitHub Token with 'issues' access")
+ flags.StringVarP(&cmd.opts.GitlabToken, "gitlab-token", "", "", "GitLab Token with 'issues' access")
+ viper.BindPFlags(flags)
+}
+
+func (cmd *pullCommand) NewCobraCommand(dc map[string]DepvizCommand) *cobra.Command {
+ cc := &cobra.Command{
+ Use: "pull",
+ Short: "Pull issues and update database without outputting graph",
+ RunE: func(_ *cobra.Command, args []string) error {
+ opts := cmd.opts
+ var err error
+ if opts.Targets, err = issues.ParseTargets(args); err != nil {
+ return errors.Wrap(err, "invalid targets")
+ }
+ return pullAndCompute(&opts)
+ },
+ }
+ cmd.ParseFlags(cc.Flags())
+ return cc
+}
+
+func pullAndCompute(opts *pullOptions) error {
+ zap.L().Debug("pull", zap.Stringer("opts", *opts))
+ if os.Getenv("DEPVIZ_NOPULL") != "1" {
+ if err := issues.PullAndCompute(opts.GithubToken, opts.GitlabToken, db, opts.Targets); err != nil {
+ return errors.Wrap(err, "failed to pull")
+ }
+ } else {
+ if err := issues.Compute(db); err != nil {
+ return errors.Wrap(err, "failed to compute")
+ }
+ }
+ return nil
+}
diff --git a/cli/cmd_run.go b/cli/cmd_run.go
new file mode 100644
index 000000000..2dceff28c
--- /dev/null
+++ b/cli/cmd_run.go
@@ -0,0 +1,80 @@
+package cli
+
+import (
+ "encoding/json"
+
+ "github.com/pkg/errors"
+ "github.com/spf13/cobra"
+ "github.com/spf13/pflag"
+ "github.com/spf13/viper"
+ "moul.io/depviz/pkg/issues"
+)
+
+type runOptions struct {
+ GraphOptions graphOptions `mapstructure:"graph"`
+ PullOptions pullOptions `mapstructure:"pull"`
+ AdditionalPulls []string `mapstructure:"additional-pulls"`
+ NoPull bool `mapstructure:"no-pull"`
+}
+
+func (opts runOptions) String() string {
+ out, _ := json.Marshal(opts)
+ return string(out)
+}
+
+type runCommand struct {
+ opts runOptions
+}
+
+func (cmd *runCommand) LoadDefaultOptions() error {
+ if err := viper.Unmarshal(&cmd.opts); err != nil {
+ return err
+ }
+ return nil
+}
+
+func (cmd *runCommand) ParseFlags(flags *pflag.FlagSet) {
+ flags.BoolVarP(&cmd.opts.NoPull, "no-pull", "", false, "do not pull new issues before running")
+ flags.StringSliceVarP(&cmd.opts.AdditionalPulls, "additional-pulls", "", []string{}, "additional pulls that won't necessarily be displayed on the graph")
+ viper.BindPFlags(flags)
+}
+
+func (cmd *runCommand) NewCobraCommand(dc map[string]DepvizCommand) *cobra.Command {
+ cc := &cobra.Command{
+ Use: "run",
+ Short: "Pull issues, update database, and output a graph of relationships between issues",
+ RunE: func(_ *cobra.Command, args []string) error {
+ opts := cmd.opts
+ opts.GraphOptions = dc["graph"].(*graphCommand).opts
+ opts.PullOptions = dc["pull"].(*pullCommand).opts
+
+ targets, err := issues.ParseTargets(args)
+ if err != nil {
+ return errors.Wrap(err, "invalid targets")
+ }
+ additionalPulls, err := issues.ParseTargets(opts.AdditionalPulls)
+ if err != nil {
+ return errors.Wrap(err, "invalid targets")
+ }
+ opts.PullOptions.Targets = append(targets, additionalPulls...)
+ opts.GraphOptions.Targets = targets
+ return run(&opts)
+ },
+ }
+ cmd.ParseFlags(cc.Flags())
+ dc["graph"].ParseFlags(cc.Flags())
+ dc["pull"].ParseFlags(cc.Flags())
+ return cc
+}
+
+func run(opts *runOptions) error {
+ if !opts.NoPull {
+ if err := pullAndCompute(&opts.PullOptions); err != nil {
+ return errors.Wrap(err, "failed to pull")
+ }
+ }
+ if err := graph(&opts.GraphOptions); err != nil {
+ return errors.Wrap(err, "failed to graph")
+ }
+ return nil
+}
diff --git a/cmd_web.go b/cli/cmd_web.go
similarity index 73%
rename from cmd_web.go
rename to cli/cmd_web.go
index 9d92c30ba..9278df0fd 100644
--- a/cmd_web.go
+++ b/cli/cmd_web.go
@@ -1,4 +1,4 @@
-package main
+package cli
import (
"bytes"
@@ -19,6 +19,7 @@ import (
"github.com/spf13/cobra"
"github.com/spf13/pflag"
"github.com/spf13/viper"
+ "moul.io/depviz/pkg/issues"
)
type webOptions struct {
@@ -27,37 +28,44 @@ type webOptions struct {
ShowRoutes bool
}
-var globalWebOptions webOptions
-
func (opts webOptions) String() string {
out, _ := json.Marshal(opts)
return string(out)
}
-func webSetupFlags(flags *pflag.FlagSet, opts *webOptions) {
- flags.StringVarP(&opts.Bind, "bind", "b", ":2020", "web server bind address")
- flags.BoolVarP(&opts.ShowRoutes, "show-routes", "", false, "display available routes and quit")
+type webCommand struct {
+ opts webOptions
+}
+
+func (cmd *webCommand) LoadDefaultOptions() error {
+ if err := viper.Unmarshal(&cmd.opts); err != nil {
+ return err
+ }
+ return nil
+}
+
+func (cmd *webCommand) ParseFlags(flags *pflag.FlagSet) {
+ flags.StringVarP(&cmd.opts.Bind, "bind", "b", ":2020", "web server bind address")
+ flags.BoolVarP(&cmd.opts.ShowRoutes, "show-routes", "", false, "display available routes and quit")
viper.BindPFlags(flags)
}
-func newWebCommand() *cobra.Command {
- cmd := &cobra.Command{
- Use: "web",
- RunE: func(cmd *cobra.Command, args []string) error {
- opts := globalWebOptions
+func (cmd *webCommand) NewCobraCommand(dc map[string]DepvizCommand) *cobra.Command {
+ cc := &cobra.Command{
+ Use: "web",
+ Short: "Run depviz as a web server",
+ RunE: func(_ *cobra.Command, args []string) error {
+ opts := cmd.opts
return web(&opts)
},
}
- webSetupFlags(cmd.Flags(), &globalWebOptions)
- return cmd
-}
-
-func (i *Issue) Render(w http.ResponseWriter, r *http.Request) error {
- return nil
+ cmd.ParseFlags(cc.Flags())
+ return cc
}
+// webListIssues loads the issues stored in the database and writes them to the HTTP response.
func webListIssues(w http.ResponseWriter, r *http.Request) {
- issues, err := loadIssues(nil)
+ issues, err := issues.Load(db, nil)
if err != nil {
render.Render(w, r, ErrRender(err))
return
@@ -78,7 +86,7 @@ func webListIssues(w http.ResponseWriter, r *http.Request) {
}
func webGraphviz(r *http.Request) (string, error) {
- targets, err := ParseTargets(strings.Split(r.URL.Query().Get("targets"), ","))
+ targets, err := issues.ParseTargets(strings.Split(r.URL.Query().Get("targets"), ","))
if err != nil {
return "", err
}
@@ -86,7 +94,7 @@ func webGraphviz(r *http.Request) (string, error) {
Targets: targets,
ShowClosed: r.URL.Query().Get("show-closed") == "1",
}
- issues, err := loadIssues(nil)
+ issues, err := issues.Load(db, nil)
if err != nil {
return "", err
}
diff --git a/cli/root.go b/cli/root.go
new file mode 100644
index 000000000..18b5ee3d7
--- /dev/null
+++ b/cli/root.go
@@ -0,0 +1,128 @@
+package cli // import "moul.io/depviz/cli"
+
+import (
+ "io/ioutil"
+ "log"
+ "os"
+ "strings"
+
+ "github.com/jinzhu/gorm"
+ "github.com/pkg/errors"
+ "github.com/spf13/cobra"
+ "github.com/spf13/pflag"
+ "github.com/spf13/viper"
+ "go.uber.org/zap"
+ "go.uber.org/zap/zapcore"
+ "moul.io/depviz/pkg/issues"
+ "moul.io/zapgorm"
+)
+
+var (
+ verbose bool
+ cfgFile string
+ dbPath string
+ db *gorm.DB
+)
+
+// DepvizCommand represents a subcommand which can be selected when running depviz.
+type DepvizCommand interface {
+ // NewCobraCommand translates the DepvizCommand to a *cobra.Command.
+ NewCobraCommand(map[string]DepvizCommand) *cobra.Command
+
+ // LoadDefaultOptions loads default run options from the config file.
+ LoadDefaultOptions() error
+
+ // ParseFlags parses the flags given on the command line, overwriting any default options.
+ ParseFlags(*pflag.FlagSet)
+}
+
+func NewRootCommand() *cobra.Command {
+ rootCmd := &cobra.Command{
+ Use: "depviz",
+ }
+ rootCmd.PersistentFlags().BoolP("help", "h", false, "print usage")
+ rootCmd.PersistentFlags().BoolVarP(&verbose, "verbose", "v", false, "verbose mode")
+ rootCmd.PersistentFlags().StringVarP(&cfgFile, "config", "c", "", "config file (default is ./.depviz.yml)")
+ rootCmd.PersistentFlags().StringVarP(&dbPath, "db-path", "", "$HOME/.depviz.db", "database path")
+
+ // Add commands here.
+ cmds := map[string]DepvizCommand{
+ "pull": &pullCommand{},
+ "db": &dbCommand{},
+ "airtable": &airtableCommand{},
+ "graph": &graphCommand{},
+ "run": &runCommand{},
+ "web": &webCommand{},
+ }
+
+ rootCmd.PersistentPreRunE = func(cmd *cobra.Command, args []string) error {
+ // configure zap
+ config := zap.NewDevelopmentConfig()
+ if verbose {
+ config.Level.SetLevel(zapcore.DebugLevel)
+ } else {
+ config.Level.SetLevel(zapcore.InfoLevel)
+ }
+ config.DisableStacktrace = true
+ config.EncoderConfig.EncodeLevel = zapcore.CapitalColorLevelEncoder
+ l, err := config.Build()
+ if err != nil {
+ return err
+ }
+ zap.ReplaceGlobals(l)
+
+ // configure viper
+ if cfgFile != "" {
+ viper.SetConfigFile(cfgFile)
+ } else {
+ viper.AddConfigPath(".")
+ viper.SetConfigName(".depviz")
+ }
+ if err := viper.MergeInConfig(); err != nil {
+ if _, ok := err.(viper.ConfigFileNotFoundError); !ok {
+ return errors.Wrap(err, "cannot read config")
+ }
+ }
+
+ for _, cmd := range cmds {
+ if err := cmd.LoadDefaultOptions(); err != nil {
+ return err
+ }
+ }
+
+ // configure sql
+ dbPath = os.ExpandEnv(dbPath)
+ db, err = gorm.Open("sqlite3", dbPath)
+ if err != nil {
+ return err
+ }
+ log.SetOutput(ioutil.Discard)
+ db.Callback().Create().Remove("gorm:update_time_stamp")
+ db.Callback().Update().Remove("gorm:update_time_stamp")
+ log.SetOutput(os.Stderr)
+ db.SetLogger(zapgorm.New(zap.L().Named("vendor.gorm")))
+ db = db.Set("gorm:auto_preload", true)
+ db = db.Set("gorm:association_autoupdate", true)
+ db.BlockGlobalUpdate(true)
+ db.SingularTable(true)
+ db.LogMode(verbose)
+ if err := db.AutoMigrate(
+ issues.Issue{},
+ issues.Label{},
+ issues.Account{},
+ issues.Milestone{},
+ issues.Repository{},
+ issues.Provider{},
+ ).Error; err != nil {
+ return err
+ }
+
+ return nil
+ }
+ for _, cmd := range cmds {
+ rootCmd.AddCommand(cmd.NewCobraCommand(cmds))
+ }
+ viper.AutomaticEnv()
+ viper.SetEnvKeyReplacer(strings.NewReplacer("-", "_"))
+ return rootCmd
+}
diff --git a/cmd_airtable.go b/cmd_airtable.go
deleted file mode 100644
index 28cc0da7b..000000000
--- a/cmd_airtable.go
+++ /dev/null
@@ -1,518 +0,0 @@
-package main
-
-import (
- "encoding/json"
- "fmt"
-
- "github.com/brianloveswords/airtable"
- "github.com/pkg/errors"
- "github.com/spf13/cobra"
- "github.com/spf13/pflag"
- "github.com/spf13/viper"
- "go.uber.org/zap"
-)
-
-type airtableOptions struct {
- IssuesTableName string `mapstructure:"airtable-issues-table-name"`
- RepositoriesTableName string `mapstructure:"airtable-repositories-table-name"`
- LabelsTableName string `mapstructure:"airtable-labels-table-name"`
- MilestonesTableName string `mapstructure:"airtable-milestones-table-name"`
- ProvidersTableName string `mapstructure:"airtable-providers-table-name"`
- AccountsTableName string `mapstructure:"airtable-accounts-table-name"`
- BaseID string `mapstructure:"airtable-base-id"`
- Token string `mapstructure:"airtable-token"`
- DestroyInvalidRecords bool `mapstructure:"airtable-destroy-invalid-records"`
-
- Targets []Target `mapstructure:"targets"`
-}
-
-var globalAirtableOptions airtableOptions
-
-func (opts airtableOptions) String() string {
- out, _ := json.Marshal(opts)
- return string(out)
-}
-
-func airtableSetupFlags(flags *pflag.FlagSet, opts *airtableOptions) {
- flags.StringVarP(&opts.IssuesTableName, "airtable-issues-table-name", "", "Issues and PRs", "Airtable issues table name")
- flags.StringVarP(&opts.RepositoriesTableName, "airtable-repositories-table-name", "", "Repositories", "Airtable repositories table name")
- flags.StringVarP(&opts.AccountsTableName, "airtable-accounts-table-name", "", "Accounts", "Airtable accounts table name")
- flags.StringVarP(&opts.LabelsTableName, "airtable-labels-table-name", "", "Labels", "Airtable labels table name")
- flags.StringVarP(&opts.MilestonesTableName, "airtable-milestones-table-name", "", "Milestones", "Airtable milestones table nfame")
- flags.StringVarP(&opts.ProvidersTableName, "airtable-providers-table-name", "", "Providers", "Airtable providers table name")
- flags.StringVarP(&opts.BaseID, "airtable-base-id", "", "", "Airtable base ID")
- flags.StringVarP(&opts.Token, "airtable-token", "", "", "Airtable token")
- flags.BoolVarP(&opts.DestroyInvalidRecords, "airtable-destroy-invalid-records", "", false, "Destroy invalid records")
- viper.BindPFlags(flags)
-}
-
-func newAirtableCommand() *cobra.Command {
- cmd := &cobra.Command{
- Use: "airtable",
- }
- cmd.AddCommand(newAirtableSyncCommand())
- return cmd
-}
-
-func newAirtableSyncCommand() *cobra.Command {
- cmd := &cobra.Command{
- Use: "sync",
- RunE: func(cmd *cobra.Command, args []string) error {
- opts := globalAirtableOptions
- var err error
- if opts.Targets, err = ParseTargets(args); err != nil {
- return errors.Wrap(err, "invalid targets")
- }
- return airtableSync(&opts)
- },
- }
- airtableSetupFlags(cmd.Flags(), &globalAirtableOptions)
- return cmd
-}
-
-type AirtableDB struct {
- Providers ProviderRecords
- Labels LabelRecords
- Accounts AccountRecords
- Repositories RepositoryRecords
- Milestones MilestoneRecords
- Issues IssueRecords
-}
-
-func airtableSync(opts *airtableOptions) error {
- if opts.BaseID == "" || opts.Token == "" {
- return fmt.Errorf("missing token or baseid, check '-h'")
- }
-
- //
- // prepare
- //
-
- // load issues
- issues, err := loadIssues(nil)
- if err != nil {
- return errors.Wrap(err, "failed to load issues")
- }
- filtered := issues.FilterByTargets(opts.Targets)
- logger().Debug("fetch db entries", zap.Int("count", len(filtered)))
-
- // unique entries
- var (
- providerMap = make(map[string]*Provider)
- labelMap = make(map[string]*Label)
- accountMap = make(map[string]*Account)
- repositoryMap = make(map[string]*Repository)
- milestoneMap = make(map[string]*Milestone)
- issueMap = make(map[string]*Issue)
- )
- for _, issue := range filtered {
- // providers
- providerMap[issue.Repository.Provider.ID] = issue.Repository.Provider
-
- // labels
- for _, label := range issue.Labels {
- labelMap[label.ID] = label
- }
-
- // accounts
- if issue.Repository.Owner != nil {
- accountMap[issue.Repository.Owner.ID] = issue.Repository.Owner
- }
- accountMap[issue.Author.ID] = issue.Author
- for _, assignee := range issue.Assignees {
- accountMap[assignee.ID] = assignee
- }
- if issue.Milestone != nil && issue.Milestone.Creator != nil {
- accountMap[issue.Milestone.Creator.ID] = issue.Milestone.Creator
- }
-
- // repositories
- repositoryMap[issue.Repository.ID] = issue.Repository
- // FIXME: find external repositories based on depends-on links
-
- // milestones
- if issue.Milestone != nil {
- milestoneMap[issue.Milestone.ID] = issue.Milestone
- }
-
- // issue
- issueMap[issue.ID] = issue
- // FIXME: find external issues based on depends-on links
- }
-
- // init client
- at := airtable.Client{
- APIKey: opts.Token,
- BaseID: opts.BaseID,
- Limiter: airtable.RateLimiter(5),
- }
-
- // fetch remote data
- cache := AirtableDB{}
- table := at.Table(opts.ProvidersTableName)
- if err := table.List(&cache.Providers, &airtable.Options{}); err != nil {
- return err
- }
- table = at.Table(opts.LabelsTableName)
- if err := table.List(&cache.Labels, &airtable.Options{}); err != nil {
- return err
- }
- table = at.Table(opts.AccountsTableName)
- if err := table.List(&cache.Accounts, &airtable.Options{}); err != nil {
- return err
- }
- table = at.Table(opts.RepositoriesTableName)
- if err := table.List(&cache.Repositories, &airtable.Options{}); err != nil {
- return err
- }
- table = at.Table(opts.MilestonesTableName)
- if err := table.List(&cache.Milestones, &airtable.Options{}); err != nil {
- return err
- }
- table = at.Table(opts.IssuesTableName)
- if err := table.List(&cache.Issues, &airtable.Options{}); err != nil {
- return err
- }
-
- unmatched := AirtableDB{
- Providers: ProviderRecords{},
- Labels: LabelRecords{},
- Accounts: AccountRecords{},
- Repositories: RepositoryRecords{},
- Milestones: MilestoneRecords{},
- Issues: IssueRecords{},
- }
-
- //
- // compute fields
- //
-
- // providers
- for _, dbEntry := range providerMap {
- matched := false
- dbRecord := dbEntry.ToRecord(cache)
- for idx, atEntry := range cache.Providers {
- if atEntry.Fields.ID == dbEntry.ID {
- if atEntry.Equals(dbRecord) {
- cache.Providers[idx].State = airtableStateUnchanged
- } else {
- cache.Providers[idx].Fields = dbRecord.Fields
- cache.Providers[idx].State = airtableStateChanged
- }
- matched = true
- break
- }
- }
- if !matched {
- unmatched.Providers = append(unmatched.Providers, *dbRecord)
- }
- }
-
- // labels
- for _, dbEntry := range labelMap {
- matched := false
- dbRecord := dbEntry.ToRecord(cache)
- for idx, atEntry := range cache.Labels {
- if atEntry.Fields.ID == dbEntry.ID {
- if atEntry.Equals(dbRecord) {
- cache.Labels[idx].State = airtableStateUnchanged
- } else {
- cache.Labels[idx].Fields = dbRecord.Fields
- cache.Labels[idx].State = airtableStateChanged
- }
- matched = true
- break
- }
- }
- if !matched {
- unmatched.Labels = append(unmatched.Labels, *dbRecord)
- }
- }
-
- // accounts
- for _, dbEntry := range accountMap {
- matched := false
- dbRecord := dbEntry.ToRecord(cache)
- for idx, atEntry := range cache.Accounts {
- if atEntry.Fields.ID == dbEntry.ID {
- if atEntry.Equals(dbRecord) {
- cache.Accounts[idx].State = airtableStateUnchanged
- } else {
- cache.Accounts[idx].Fields = dbRecord.Fields
- cache.Accounts[idx].State = airtableStateChanged
- }
- matched = true
- break
- }
- }
- if !matched {
- unmatched.Accounts = append(unmatched.Accounts, *dbRecord)
- }
- }
-
- // repositories
- for _, dbEntry := range repositoryMap {
- matched := false
- dbRecord := dbEntry.ToRecord(cache)
- for idx, atEntry := range cache.Repositories {
- if atEntry.Fields.ID == dbEntry.ID {
- if atEntry.Equals(dbRecord) {
- cache.Repositories[idx].State = airtableStateUnchanged
- } else {
- cache.Repositories[idx].Fields = dbRecord.Fields
- cache.Repositories[idx].State = airtableStateChanged
- }
- matched = true
- break
- }
- }
- if !matched {
- unmatched.Repositories = append(unmatched.Repositories, *dbRecord)
- }
- }
-
- // milestones
- for _, dbEntry := range milestoneMap {
- matched := false
- dbRecord := dbEntry.ToRecord(cache)
- for idx, atEntry := range cache.Milestones {
- if atEntry.Fields.ID == dbEntry.ID {
- if atEntry.Equals(dbRecord) {
- cache.Milestones[idx].State = airtableStateUnchanged
- } else {
- cache.Milestones[idx].Fields = dbRecord.Fields
- cache.Milestones[idx].State = airtableStateChanged
- }
- matched = true
- break
- }
- }
- if !matched {
- unmatched.Milestones = append(unmatched.Milestones, *dbRecord)
- }
- }
-
- // issues
- for _, dbEntry := range issueMap {
- matched := false
- dbRecord := dbEntry.ToRecord(cache)
- for idx, atEntry := range cache.Issues {
- if atEntry.Fields.ID == dbEntry.ID {
- if atEntry.Equals(dbRecord) {
- cache.Issues[idx].State = airtableStateUnchanged
- } else {
- cache.Issues[idx].Fields = dbRecord.Fields
- cache.Issues[idx].State = airtableStateChanged
- }
- matched = true
- break
- }
- }
- if !matched {
- unmatched.Issues = append(unmatched.Issues, *dbRecord)
- }
- }
-
- //
- // update airtable
- //
-
- // providers
- table = at.Table(opts.ProvidersTableName)
- for _, entry := range unmatched.Providers {
- logger().Debug("create airtable entry", zap.String("type", "provider"), zap.Stringer("entry", entry))
- if err := table.Create(&entry); err != nil {
- return err
- }
- entry.State = airtableStateNew
- cache.Providers = append(cache.Providers, entry)
- }
- for _, entry := range cache.Providers {
- var err error
- switch entry.State {
- case airtableStateUnknown:
- err = table.Delete(&entry)
- logger().Debug("delete airtable entry", zap.String("type", "provider"), zap.Stringer("entry", entry), zap.Error(err))
- case airtableStateChanged:
- err = table.Update(&entry)
- logger().Debug("update airtable entry", zap.String("type", "provider"), zap.Stringer("entry", entry), zap.Error(err))
- case airtableStateUnchanged:
- logger().Debug("unchanged airtable entry", zap.String("type", "provider"), zap.Stringer("entry", entry), zap.Error(err))
- // do nothing
- case airtableStateNew:
- logger().Debug("new airtable entry", zap.String("type", "provider"), zap.Stringer("entry", entry), zap.Error(err))
- // do nothing
- }
- }
-
- // labels
- table = at.Table(opts.LabelsTableName)
- for _, entry := range unmatched.Labels {
- logger().Debug("create airtable entry", zap.String("type", "label"), zap.Stringer("entry", entry))
- if err := table.Create(&entry); err != nil {
- return err
- }
- entry.State = airtableStateNew
- cache.Labels = append(cache.Labels, entry)
- }
- for _, entry := range cache.Labels {
- var err error
- switch entry.State {
- case airtableStateUnknown:
- err = table.Delete(&entry)
- logger().Debug("delete airtable entry", zap.String("type", "label"), zap.Stringer("entry", entry), zap.Error(err))
- case airtableStateChanged:
- err = table.Update(&entry)
- logger().Debug("update airtable entry", zap.String("type", "label"), zap.Stringer("entry", entry), zap.Error(err))
- case airtableStateUnchanged:
- logger().Debug("unchanged airtable entry", zap.String("type", "label"), zap.Stringer("entry", entry), zap.Error(err))
- // do nothing
- case airtableStateNew:
- logger().Debug("new airtable entry", zap.String("type", "label"), zap.Stringer("entry", entry), zap.Error(err))
- // do nothing
- }
- }
-
- // accounts
- table = at.Table(opts.AccountsTableName)
- for _, entry := range unmatched.Accounts {
- logger().Debug("create airtable entry", zap.String("type", "account"), zap.Stringer("entry", entry))
- if err := table.Create(&entry); err != nil {
- return err
- }
- entry.State = airtableStateNew
- cache.Accounts = append(cache.Accounts, entry)
- }
- for _, entry := range cache.Accounts {
- var err error
- switch entry.State {
- case airtableStateUnknown:
- err = table.Delete(&entry)
- logger().Debug("delete airtable entry", zap.String("type", "account"), zap.Stringer("entry", entry), zap.Error(err))
- case airtableStateChanged:
- err = table.Update(&entry)
- logger().Debug("update airtable entry", zap.String("type", "account"), zap.Stringer("entry", entry), zap.Error(err))
- case airtableStateUnchanged:
- logger().Debug("unchanged airtable entry", zap.String("type", "account"), zap.Stringer("entry", entry), zap.Error(err))
- // do nothing
- case airtableStateNew:
- logger().Debug("new airtable entry", zap.String("type", "account"), zap.Stringer("entry", entry), zap.Error(err))
- // do nothing
- }
- }
-
- // repositories
- table = at.Table(opts.RepositoriesTableName)
- for _, entry := range unmatched.Repositories {
- logger().Debug("create airtable entry", zap.String("type", "repository"), zap.Stringer("entry", entry))
- if err := table.Create(&entry); err != nil {
- return err
- }
- entry.State = airtableStateNew
- cache.Repositories = append(cache.Repositories, entry)
- }
- for _, entry := range cache.Repositories {
- var err error
- switch entry.State {
- case airtableStateUnknown:
- err = table.Delete(&entry)
- logger().Debug("delete airtable entry", zap.String("type", "repository"), zap.Stringer("entry", entry), zap.Error(err))
- case airtableStateChanged:
- err = table.Update(&entry)
- logger().Debug("update airtable entry", zap.String("type", "repository"), zap.Stringer("entry", entry), zap.Error(err))
- case airtableStateUnchanged:
- logger().Debug("unchanged airtable entry", zap.String("type", "repository"), zap.Stringer("entry", entry), zap.Error(err))
- // do nothing
- case airtableStateNew:
- logger().Debug("new airtable entry", zap.String("type", "repository"), zap.Stringer("entry", entry), zap.Error(err))
- // do nothing
- }
- }
-
- // milestones
- table = at.Table(opts.MilestonesTableName)
- for _, entry := range unmatched.Milestones {
- logger().Debug("create airtable entry", zap.String("type", "milestone"), zap.Stringer("entry", entry))
- if err := table.Create(&entry); err != nil {
- return err
- }
- entry.State = airtableStateNew
- cache.Milestones = append(cache.Milestones, entry)
- }
- for _, entry := range cache.Milestones {
- var err error
- switch entry.State {
- case airtableStateUnknown:
- err = table.Delete(&entry)
- logger().Debug("delete airtable entry", zap.String("type", "milestone"), zap.Stringer("entry", entry), zap.Error(err))
- case airtableStateChanged:
- err = table.Update(&entry)
- logger().Debug("update airtable entry", zap.String("type", "milestone"), zap.Stringer("entry", entry), zap.Error(err))
- case airtableStateUnchanged:
- logger().Debug("unchanged airtable entry", zap.String("type", "milestone"), zap.Stringer("entry", entry), zap.Error(err))
- // do nothing
- case airtableStateNew:
- logger().Debug("new airtable entry", zap.String("type", "milestone"), zap.Stringer("entry", entry), zap.Error(err))
- // do nothing
- }
- }
-
- // issues
- table = at.Table(opts.IssuesTableName)
- for _, entry := range unmatched.Issues {
- logger().Debug("create airtable entry", zap.String("type", "issue"), zap.Stringer("entry", entry))
- if err := table.Create(&entry); err != nil {
- return err
- }
- entry.State = airtableStateNew
- cache.Issues = append(cache.Issues, entry)
- }
- for _, entry := range cache.Issues {
- var err error
- switch entry.State {
- case airtableStateUnknown:
- err = table.Delete(&entry)
- logger().Debug("delete airtable entry", zap.String("type", "issue"), zap.Stringer("entry", entry), zap.Error(err))
- case airtableStateChanged:
- err = table.Update(&entry)
- logger().Debug("update airtable entry", zap.String("type", "issue"), zap.Stringer("entry", entry), zap.Error(err))
- case airtableStateUnchanged:
- logger().Debug("unchanged airtable entry", zap.String("type", "issue"), zap.Stringer("entry", entry), zap.Error(err))
- // do nothing
- case airtableStateNew:
- logger().Debug("new airtable entry", zap.String("type", "issue"), zap.Stringer("entry", entry), zap.Error(err))
- // do nothing
- }
- }
-
- //
- // debug
- //
- fmt.Println("------- providers")
- for _, entry := range cache.Providers {
- fmt.Println(entry.ID, airtableStateString[entry.State], entry.Fields.ID)
- }
- fmt.Println("------- labels")
- for _, entry := range cache.Labels {
- fmt.Println(entry.ID, airtableStateString[entry.State], entry.Fields.ID)
- }
- fmt.Println("------- accounts")
- for _, entry := range cache.Accounts {
- fmt.Println(entry.ID, airtableStateString[entry.State], entry.Fields.ID)
- }
- fmt.Println("------- repositories")
- for _, entry := range cache.Repositories {
- fmt.Println(entry.ID, airtableStateString[entry.State], entry.Fields.ID)
- }
- fmt.Println("------- milestones")
- for _, entry := range cache.Milestones {
- fmt.Println(entry.ID, airtableStateString[entry.State], entry.Fields.ID)
- }
- fmt.Println("------- issues")
- for _, entry := range cache.Issues {
- fmt.Println(entry.ID, airtableStateString[entry.State], entry.Fields.ID)
- }
- fmt.Println("-------")
-
- return nil
-}
diff --git a/cmd_db.go b/cmd_db.go
deleted file mode 100644
index 43943281e..000000000
--- a/cmd_db.go
+++ /dev/null
@@ -1,92 +0,0 @@
-package main
-
-import (
- "encoding/json"
- "fmt"
-
- "github.com/spf13/cobra"
- "github.com/spf13/pflag"
- "github.com/spf13/viper"
-)
-
-type dbOptions struct{}
-
-var globalDBOptions dbOptions
-
-func (opts dbOptions) String() string {
- out, _ := json.Marshal(opts)
- return string(out)
-}
-
-func dbSetupFlags(flags *pflag.FlagSet, opts *dbOptions) {
- viper.BindPFlags(flags)
-}
-
-func newDBCommand() *cobra.Command {
- cmd := &cobra.Command{
- Use: "db",
- }
- cmd.AddCommand(newDBDumpCommand())
- return cmd
-}
-
-func newDBDumpCommand() *cobra.Command {
- cmd := &cobra.Command{
- Use: "dump",
- RunE: func(cmd *cobra.Command, args []string) error {
- opts := globalDBOptions
- return dbDump(&opts)
- },
- }
- dbSetupFlags(cmd.Flags(), &globalDBOptions)
- return cmd
-}
-
-func dbDump(opts *dbOptions) error {
- issues := []*Issue{}
- if err := db.Find(&issues).Error; err != nil {
- return err
- }
-
- for _, issue := range issues {
- issue.PostLoad()
- }
-
- out, err := json.MarshalIndent(issues, "", " ")
- if err != nil {
- return err
- }
- fmt.Println(string(out))
- return nil
-}
-
-func loadIssues(targets []string) (Issues, error) {
- query := db.Model(Issue{}).Order("created_at")
- if len(targets) > 0 {
- return nil, fmt.Errorf("not implemented")
- // query = query.Where("repo_url IN (?)", canonicalTargets(targets))
- // OR WHERE parents IN ....
- // etc
- }
-
- perPage := 100
- var issues []*Issue
- for page := 0; ; page++ {
- var newIssues []*Issue
- if err := query.Limit(perPage).Offset(perPage * page).Find(&newIssues).Error; err != nil {
- return nil, err
- }
- issues = append(issues, newIssues...)
- if len(newIssues) < perPage {
- break
- }
- }
-
- for _, issue := range issues {
- issue.PostLoad()
- }
-
- return Issues(issues), nil
-}
-
-// FIXME: try to use gorm hooks to auto preload/postload items
diff --git a/cmd_pull.go b/cmd_pull.go
deleted file mode 100644
index f3c1ad1da..000000000
--- a/cmd_pull.go
+++ /dev/null
@@ -1,102 +0,0 @@
-package main
-
-import (
- "encoding/json"
- "os"
- "sync"
-
- "github.com/pkg/errors"
- "github.com/spf13/cobra"
- "github.com/spf13/pflag"
- "github.com/spf13/viper"
- "go.uber.org/zap"
-)
-
-type pullOptions struct {
- // pull
- GithubToken string `mapstructure:"github-token"`
- GitlabToken string `mapstructure:"gitlab-token"`
- // includeExternalDeps bool
-
- Targets Targets `mapstructure:"targets"`
-}
-
-var globalPullOptions pullOptions
-
-func (opts pullOptions) String() string {
- out, _ := json.Marshal(opts)
- return string(out)
-}
-
-func pullSetupFlags(flags *pflag.FlagSet, opts *pullOptions) {
- flags.StringVarP(&opts.GithubToken, "github-token", "", "", "GitHub Token with 'issues' access")
- flags.StringVarP(&opts.GitlabToken, "gitlab-token", "", "", "GitLab Token with 'issues' access")
- viper.BindPFlags(flags)
-}
-
-func newPullCommand() *cobra.Command {
- cmd := &cobra.Command{
- Use: "pull",
- RunE: func(cmd *cobra.Command, args []string) error {
- opts := globalPullOptions
- var err error
- if opts.Targets, err = ParseTargets(args); err != nil {
- return errors.Wrap(err, "invalid targets")
- }
- return pullAndCompute(&opts)
- },
- }
- pullSetupFlags(cmd.Flags(), &globalPullOptions)
- return cmd
-}
-
-func pullAndCompute(opts *pullOptions) error {
- if os.Getenv("DEPVIZ_NOPULL") != "1" {
- if err := pull(opts); err != nil {
- return errors.Wrap(err, "failed to pull")
- }
- }
- if err := compute(opts); err != nil {
- return errors.Wrap(err, "failed to compute")
- }
- return nil
-}
-
-func pull(opts *pullOptions) error {
- // FIXME: handle the special '@me' target
- logger().Debug("pull", zap.Stringer("opts", *opts))
-
- var (
- wg sync.WaitGroup
- allIssues []*Issue
- out = make(chan []*Issue, 100)
- )
-
- targets := opts.Targets.UniqueProjects()
-
- // parallel fetches
- wg.Add(len(targets))
- for _, target := range targets {
- switch target.Driver() {
- case GithubDriver:
- go githubPull(target, &wg, opts, out)
- case GitlabDriver:
- go gitlabPull(target, &wg, opts, out)
- default:
- panic("should not happen")
- }
- }
- wg.Wait()
- close(out)
- for issues := range out {
- allIssues = append(allIssues, issues...)
- }
-
- // save
- for _, issue := range allIssues {
- if err := db.Save(issue).Error; err != nil {
- return err
- }
- }
- return nil
-}
diff --git a/cmd_run.go b/cmd_run.go
deleted file mode 100644
index 715bbf40d..000000000
--- a/cmd_run.go
+++ /dev/null
@@ -1,69 +0,0 @@
-package main
-
-import (
- "encoding/json"
-
- "github.com/pkg/errors"
- "github.com/spf13/cobra"
- "github.com/spf13/pflag"
- "github.com/spf13/viper"
-)
-
-type runOptions struct {
- GraphOptions graphOptions `mapstructure:"graph"`
- PullOptions pullOptions `mapstructure:"pull"`
- AdditionalPulls []string `mapstructure:"additional-pulls"`
- NoPull bool `mapstructure:"no-pull"`
-}
-
-var globalRunOptions runOptions
-
-func (opts runOptions) String() string {
- out, _ := json.Marshal(opts)
- return string(out)
-}
-
-func runSetupFlags(flags *pflag.FlagSet, opts *runOptions) {
- flags.BoolVarP(&opts.NoPull, "no-pull", "", false, "do not pull new issues before running")
- flags.StringSliceVarP(&opts.AdditionalPulls, "additional-pulls", "", []string{}, "additional pull that won't necessarily be displayed on the graph")
- viper.BindPFlags(flags)
-}
-
-func newRunCommand() *cobra.Command {
- cmd := &cobra.Command{
- Use: "run",
- RunE: func(cmd *cobra.Command, args []string) error {
- opts := globalRunOptions
- opts.GraphOptions = globalGraphOptions
- opts.PullOptions = globalPullOptions
-
- targets, err := ParseTargets(args)
- if err != nil {
- return errors.Wrap(err, "invalid targets")
- }
- additionalPulls, err := ParseTargets(opts.AdditionalPulls)
- if err != nil {
- return errors.Wrap(err, "invalid targets")
- }
- opts.PullOptions.Targets = append(targets, additionalPulls...)
- opts.GraphOptions.Targets = targets
- return run(&opts)
- },
- }
- runSetupFlags(cmd.Flags(), &globalRunOptions)
- graphSetupFlags(cmd.Flags(), &globalGraphOptions)
- pullSetupFlags(cmd.Flags(), &globalPullOptions)
- return cmd
-}
-
-func run(opts *runOptions) error {
- if !opts.NoPull {
- if err := pullAndCompute(&opts.PullOptions); err != nil {
- return errors.Wrap(err, "failed to pull")
- }
- }
- if err := graph(&opts.GraphOptions); err != nil {
- return errors.Wrap(err, "failed to graph")
- }
- return nil
-}
diff --git a/compute.go b/compute.go
deleted file mode 100644
index 57f3b6a96..000000000
--- a/compute.go
+++ /dev/null
@@ -1,160 +0,0 @@
-package main
-
-import (
- "fmt"
- "regexp"
- "sort"
- "strconv"
-
- "go.uber.org/zap"
-)
-
-var (
- childrenRegex, _ = regexp.Compile(`(?i)(require|requires|blocked by|block by|depend on|depends on|parent of) ([a-z0-9:/_.-]+issues/[0-9]+|[a-z0-9:/_.-]+#[0-9]+|[a-z0-9/_-]*#[0-9]+)`)
- parentsRegex, _ = regexp.Compile(`(?i)(blocks|block|address|addresses|part of|child of|fix|fixes) ([a-z0-9:/_.-]+issues/[0-9]+|[a-z0-9:/_.-]+#[0-9]+|[a-z0-9/_-]*#[0-9]+)`)
- isDuplicateRegex, _ = regexp.Compile(`(?i)(duplicates|duplicate|dup of|dup|duplicate of) ([a-z0-9:/_.-]+issues/[0-9]+|[a-z0-9:/_.-]+#[0-9]+|[a-z0-9/_-]*#[0-9]+)`)
- //weightMultiplierRegex, _ = regexp.Compile(`(?i)(depviz.weight_multiplier[:= ]+)([0-9]+)`)
- weightRegex, _ = regexp.Compile(`(?i)(depviz.base_weight|depviz.weight)[:= ]+([0-9]+)`)
- hideRegex, _ = regexp.Compile(`(?i)(depviz.hide)`) // FIXME: use label
-)
-
-func compute(opts *pullOptions) error {
- logger().Debug("compute", zap.Stringer("opts", *opts))
- issues, err := loadIssues(nil)
- if err != nil {
- return err
- }
-
- for _, issue := range issues {
- // reset default values
- issue.Errors = []string{}
- issue.Parents = []*Issue{}
- issue.Children = []*Issue{}
- issue.Duplicates = []*Issue{}
- issue.Weight = 0
- issue.IsHidden = false
- issue.IsEpic = false
- issue.HasEpic = false
- issue.IsOrphan = true
- }
-
- for _, issue := range issues {
- if issue.Body == "" {
- continue
- }
-
- // is epic
- for _, label := range issue.Labels {
- // FIXME: get epic labels dynamically based on a configuration filein the repo
- if label.Name == "epic" || label.Name == "t/epic" {
- issue.IsEpic = true
- }
- }
-
- // hidden
- if match := hideRegex.FindStringSubmatch(issue.Body); match != nil {
- issue.IsHidden = true
- continue
- }
-
- // duplicates
- if match := isDuplicateRegex.FindStringSubmatch(issue.Body); match != nil {
- canonical := issue.GetRelativeURL(match[len(match)-1])
- rel := issues.Get(canonical)
- if rel == nil {
- issue.Errors = append(issue.Errors, fmt.Errorf("duplicate %q not found", canonical).Error())
- continue
- }
- issue.Duplicates = append(issue.Duplicates, rel)
- issue.IsHidden = true
- continue
- }
-
- // weight
- if match := weightRegex.FindStringSubmatch(issue.Body); match != nil {
- issue.Weight, _ = strconv.Atoi(match[len(match)-1])
- }
-
- // children
- for _, match := range childrenRegex.FindAllStringSubmatch(issue.Body, -1) {
- canonical := issue.GetRelativeURL(match[len(match)-1])
- child := issues.Get(canonical)
- if child == nil {
- issue.Errors = append(issue.Errors, fmt.Errorf("children %q not found", canonical).Error())
- continue
- }
- issue.Children = append(issue.Children, child)
- issue.IsOrphan = false
- child.Parents = append(child.Parents, issue)
- child.IsOrphan = false
- }
-
- // parents
- for _, match := range parentsRegex.FindAllStringSubmatch(issue.Body, -1) {
- canonical := issue.GetRelativeURL(match[len(match)-1])
- parent := issues.Get(canonical)
- if parent == nil {
- issue.Errors = append(issue.Errors, fmt.Errorf("parent %q not found", canonical).Error())
- continue
- }
- issue.Parents = append(issue.Parents, parent)
- issue.IsOrphan = false
- parent.Children = append(parent.Children, issue)
- parent.IsOrphan = false
- }
- }
-
- for _, issue := range issues {
- if issue.IsEpic {
- issue.HasEpic = true
- continue
- }
- // has epic
- issue.HasEpic, err = issue.computeHasEpic(0)
- if err != nil {
- issue.Errors = append(issue.Errors, err.Error())
- }
- }
-
- for _, issue := range issues {
- issue.PostLoad()
-
- issue.ParentIDs = uniqueStrings(issue.ParentIDs)
- sort.Strings(issue.ParentIDs)
- issue.ChildIDs = uniqueStrings(issue.ChildIDs)
- sort.Strings(issue.ChildIDs)
- issue.DuplicateIDs = uniqueStrings(issue.DuplicateIDs)
- sort.Strings(issue.DuplicateIDs)
- }
-
- for _, issue := range issues {
- // TODO: add a "if changed" to preserve some CPU and time
- if err := db.Set("gorm:association_autoupdate", false).Save(issue).Error; err != nil {
- return err
- }
- }
-
- return nil
-}
-
-func (i Issue) computeHasEpic(depth int) (bool, error) {
- if depth > 100 {
- return false, fmt.Errorf("very high blocking depth (>100), do not continue. (issue=%s)", i.URL)
- }
- if i.IsHidden {
- return false, nil
- }
- for _, parent := range i.Parents {
- if parent.IsEpic {
- return true, nil
- }
- parentHasEpic, err := parent.computeHasEpic(depth + 1)
- if err != nil {
- return false, nil
- }
- if parentHasEpic {
- return true, nil
- }
- }
- return false, nil
-}
diff --git a/go.mod b/go.mod
index 2ee65aab4..093f4ccd4 100644
--- a/go.mod
+++ b/go.mod
@@ -13,6 +13,8 @@ require (
github.com/go-sql-driver/mysql v1.4.1 // indirect
github.com/google/go-cmp v0.2.0 // indirect
github.com/google/go-github v17.0.0+incompatible
+ github.com/google/pprof v0.0.0-20181127221834-b4f47329b966 // indirect
+ github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6 // indirect
github.com/inconshreveable/mousetrap v1.0.0 // indirect
github.com/jinzhu/gorm v1.9.1
github.com/jinzhu/inflection v0.0.0-20180308033659-04140366298a // indirect
@@ -29,6 +31,7 @@ require (
github.com/xanzy/go-gitlab v0.11.7
go.uber.org/ratelimit v0.0.0-20180316092928-c15da0234277 // indirect
go.uber.org/zap v1.9.1
+ golang.org/x/arch v0.0.0-20180920145803-b19384d3c130 // indirect
golang.org/x/crypto v0.0.0-20181126163421-e657309f52e7 // indirect
golang.org/x/net v0.0.0-20181114220301-adae6a3d119a // indirect
golang.org/x/oauth2 v0.0.0-20181120190819-8f65e3013eba
diff --git a/go.sum b/go.sum
index fe383ef45..ed651a553 100644
--- a/go.sum
+++ b/go.sum
@@ -38,8 +38,12 @@ github.com/google/go-github v17.0.0+incompatible h1:N0LgJ1j65A7kfXrZnUDaYCs/Sf4r
github.com/google/go-github v17.0.0+incompatible/go.mod h1:zLgOLi98H3fifZn+44m+umXrS52loVEgC2AApnigrVQ=
github.com/google/go-querystring v1.0.0 h1:Xkwi/a1rcvNg1PPYe5vI8GbeBY/jrVuDX5ASuANWTrk=
github.com/google/go-querystring v1.0.0/go.mod h1:odCYkC5MyYFN7vkCjXpyrEuKhc/BUO6wN/zVPAxq5ck=
+github.com/google/pprof v0.0.0-20181127221834-b4f47329b966 h1:zpjeU3rN5R22t0iguDarIAL75+2acLnDqGLOiPttMjk=
+github.com/google/pprof v0.0.0-20181127221834-b4f47329b966/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
github.com/hashicorp/hcl v1.0.0 h1:0Anlzjpi4vEasTeNFn2mLJgTSwt0+6sfsiTG8qcWGx4=
github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ=
+github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6 h1:UDMh68UUwekSh5iP2OMhRRZJiiBccgV7axzUG8vi56c=
+github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc=
github.com/inconshreveable/mousetrap v1.0.0 h1:Z8tu5sraLXCXIcARxBp/8cbvlwVa7Z1NHg9XEKhtSvM=
github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8=
github.com/jinzhu/gorm v1.9.1 h1:lDSDtsCt5AGGSKTs8AHlSDbbgif4G4+CKJ8ETBDVHTA=
@@ -100,10 +104,13 @@ go.uber.org/ratelimit v0.0.0-20180316092928-c15da0234277 h1:d9qaMM+ODpCq+9We41//
go.uber.org/ratelimit v0.0.0-20180316092928-c15da0234277/go.mod h1:2X8KaoNd1J0lZV+PxJk/5+DGbO/tpwLR1m++a7FnB/Y=
go.uber.org/zap v1.9.1 h1:XCJQEf3W6eZaVwhRBof6ImoYGJSITeKWsyeh3HFu/5o=
go.uber.org/zap v1.9.1/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q=
+golang.org/x/arch v0.0.0-20180920145803-b19384d3c130 h1:Vsc61gop4hfHdzQNolo6Fi/sw7TnJ2yl3ZR4i7bYirs=
+golang.org/x/arch v0.0.0-20180920145803-b19384d3c130/go.mod h1:cYlCBUl1MsqxdiKgmc4uh7TxZfWSFLOGSRR090WDxt8=
golang.org/x/crypto v0.0.0-20181001203147-e3636079e1a4 h1:Vk3wNqEZwyGyei9yq5ekj7frek2u7HUfffJ1/opblzc=
golang.org/x/crypto v0.0.0-20181001203147-e3636079e1a4/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
golang.org/x/crypto v0.0.0-20181015023909-0c41d7ab0a0e h1:IzypfodbhbnViNUO/MEh0FzCUooG97cIGfdggUrUSyU=
golang.org/x/crypto v0.0.0-20181015023909-0c41d7ab0a0e/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
+golang.org/x/crypto v0.0.0-20181126163421-e657309f52e7 h1:70UTJTdHsz+jRjphEW+is2SdxjhZL1AdKsewqjYzcQU=
golang.org/x/crypto v0.0.0-20181126163421-e657309f52e7/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20181003013248-f5e5bdd77824 h1:MkjFNbaZJyH98M67Q3umtwZ+EdVdrNJLqSwZp5vcv60=
diff --git a/issue.go b/issue.go
deleted file mode 100644
index 92e5b490e..000000000
--- a/issue.go
+++ /dev/null
@@ -1,86 +0,0 @@
-package main
-
-import (
- "fmt"
- "log"
- "strings"
-)
-
-func (i Issue) GetRelativeURL(target string) string {
- if strings.Contains(target, "://") {
- return normalizeURL(target)
- }
-
- if target[0] == '#' {
- return fmt.Sprintf("%s/issues/%s", i.Repository.URL, target[1:])
- }
-
- target = strings.Replace(target, "#", "/issues/", -1)
-
- parts := strings.Split(target, "/")
- if strings.Contains(parts[0], ".") && isDNSName(parts[0]) {
- return fmt.Sprintf("https://%s", target)
- }
-
- return fmt.Sprintf("%s/%s", strings.TrimRight(i.Repository.Provider.URL, "/"), target)
-}
-
-func (i *Issue) PostLoad() {
- i.ParentIDs = []string{}
- i.ChildIDs = []string{}
- i.DuplicateIDs = []string{}
- for _, rel := range i.Parents {
- i.ParentIDs = append(i.ParentIDs, rel.ID)
- }
- for _, rel := range i.Children {
- i.ChildIDs = append(i.ChildIDs, rel.ID)
- }
- for _, rel := range i.Duplicates {
- i.DuplicateIDs = append(i.DuplicateIDs, rel.ID)
- }
-}
-
-func (i Issue) IsClosed() bool {
- return i.State == "closed"
-}
-
-func (i Issue) IsReady() bool {
- return !i.IsOrphan && len(i.Parents) == 0 // FIXME: switch parents with children?
-}
-
-func (i Issue) MatchesWithATarget(targets Targets) bool {
- return i.matchesWithATarget(targets, 0)
-}
-
-func (i Issue) matchesWithATarget(targets Targets, depth int) bool {
- if depth > 100 {
- log.Printf("circular dependency or too deep graph (>100), skipping this node. (issue=%s)", i)
- return false
- }
-
- for _, target := range targets {
- if target.Issue() != "" { // issue-mode
- if target.Canonical() == i.URL {
- return true
- }
- } else { // project-mode
- if i.RepositoryID == target.ProjectURL() {
- return true
- }
- }
- }
-
- for _, parent := range i.Parents {
- if parent.matchesWithATarget(targets, depth+1) {
- return true
- }
- }
-
- for _, child := range i.Children {
- if child.matchesWithATarget(targets, depth+1) {
- return true
- }
- }
-
- return false
-}
diff --git a/issues.go b/issues.go
deleted file mode 100644
index a3b03d7d3..000000000
--- a/issues.go
+++ /dev/null
@@ -1,24 +0,0 @@
-package main
-
-type Issues []*Issue
-
-func (issues Issues) Get(id string) *Issue {
- for _, issue := range issues {
- if issue.ID == id {
- return issue
- }
- }
- return nil
-}
-
-func (issues Issues) FilterByTargets(targets []Target) Issues {
- filtered := Issues{}
-
- for _, issue := range issues {
- if issue.MatchesWithATarget(targets) {
- filtered = append(filtered, issue)
- }
- }
-
- return filtered
-}
diff --git a/logger.go b/logger.go
deleted file mode 100644
index acec18d09..000000000
--- a/logger.go
+++ /dev/null
@@ -1,7 +0,0 @@
-package main
-
-import "go.uber.org/zap"
-
-func logger() *zap.Logger {
- return zap.L()
-}
diff --git a/main.go b/main.go
index 53d5c958d..c8a5d36cb 100644
--- a/main.go
+++ b/main.go
@@ -2,133 +2,18 @@ package main // import "moul.io/depviz"
import (
"fmt"
- "io/ioutil"
- "log"
"os"
- "strings"
- "github.com/jinzhu/gorm"
_ "github.com/mattn/go-sqlite3"
- "github.com/pkg/errors"
- "github.com/spf13/cobra"
- "github.com/spf13/viper"
"go.uber.org/zap"
- "go.uber.org/zap/zapcore"
- "moul.io/zapgorm"
+ "moul.io/depviz/cli"
)
func main() {
- defer logger().Sync()
- rootCmd := newRootCommand()
+ defer zap.L().Sync()
+ rootCmd := cli.NewRootCommand()
if err := rootCmd.Execute(); err != nil {
_, _ = fmt.Fprintf(os.Stderr, "%v\n", err)
os.Exit(1)
}
}
-
-var (
- verbose bool
- cfgFile string
- dbPath string
- db *gorm.DB
-)
-
-func newRootCommand() *cobra.Command {
- cmd := &cobra.Command{
- Use: "depviz",
- }
- cmd.PersistentFlags().BoolP("help", "h", false, "print usage")
- cmd.PersistentFlags().BoolVarP(&verbose, "verbose", "v", false, "verbose mode")
- cmd.PersistentFlags().StringVarP(&cfgFile, "config", "c", "", "config file (default is ./.depviz.yml)")
- cmd.PersistentFlags().StringVarP(&dbPath, "db-path", "", "$HOME/.depviz.db", "database path")
-
- cmd.PersistentPreRunE = func(cmd *cobra.Command, args []string) error {
- // configure zap
- config := zap.NewDevelopmentConfig()
- if verbose {
- config.Level.SetLevel(zapcore.DebugLevel)
- } else {
- config.Level.SetLevel(zapcore.InfoLevel)
- }
- config.DisableStacktrace = true
- config.EncoderConfig.EncodeLevel = zapcore.CapitalColorLevelEncoder
- l, err := config.Build()
- if err != nil {
- return err
- }
- zap.ReplaceGlobals(l)
-
- // configure viper
- if cfgFile != "" {
- viper.SetConfigFile(cfgFile)
- } else {
- viper.AddConfigPath(".")
- viper.SetConfigName(".depviz")
- }
- if err := viper.MergeInConfig(); err != nil {
- if _, ok := err.(viper.ConfigFileNotFoundError); !ok {
- return errors.Wrap(err, "cannot read config")
- }
- }
-
- // fill global options
- if err := viper.Unmarshal(&globalGraphOptions); err != nil {
- return err
- }
- if err := viper.Unmarshal(&globalRunOptions); err != nil {
- return err
- }
- if err := viper.Unmarshal(&globalPullOptions); err != nil {
- return err
- }
- if err := viper.Unmarshal(&globalWebOptions); err != nil {
- return err
- }
- if err := viper.Unmarshal(&globalAirtableOptions); err != nil {
- return err
- }
- if err := viper.Unmarshal(&globalDBOptions); err != nil {
- return err
- }
-
- // configure sql
- dbPath = os.ExpandEnv(dbPath)
- db, err = gorm.Open("sqlite3", dbPath)
- if err != nil {
- return err
- }
- log.SetOutput(ioutil.Discard)
- db.Callback().Create().Remove("gorm:update_time_stamp")
- db.Callback().Update().Remove("gorm:update_time_stamp")
- log.SetOutput(os.Stderr)
- db.SetLogger(zapgorm.New(zap.L().Named("vendor.gorm")))
- db = db.Set("gorm:auto_preload", true)
- db = db.Set("gorm:association_autoupdate", true)
- db.BlockGlobalUpdate(true)
- db.SingularTable(true)
- db.LogMode(verbose)
- if err := db.AutoMigrate(
- Issue{},
- Label{},
- Account{},
- Milestone{},
- Repository{},
- Provider{},
- ).Error; err != nil {
- return err
- }
-
- return nil
- }
- cmd.AddCommand(
- newPullCommand(),
- newDBCommand(),
- newAirtableCommand(),
- newGraphCommand(),
- newRunCommand(),
- newWebCommand(),
- )
- viper.AutomaticEnv()
- viper.SetEnvKeyReplacer(strings.NewReplacer("-", "_"))
- return cmd
-}
diff --git a/models_airtable.go b/models_airtable.go
deleted file mode 100644
index b87d39cd7..000000000
--- a/models_airtable.go
+++ /dev/null
@@ -1,610 +0,0 @@
-package main
-
-import (
- "encoding/json"
- "strings"
- "time"
-
- "github.com/brianloveswords/airtable"
-)
-
-type AirtableBase struct {
- ID string `json:"id"`
- CreatedAt time.Time `json:"created-at"`
- UpdatedAt time.Time `json:"updated-at"`
- Errors string `json:"errors"`
-}
-
-type airtableState int
-
-type AirtableRecords []interface{}
-
-type AirtableEntry interface {
- ToRecord(cache AirtableDB) interface{}
-}
-
-const (
- airtableStateUnknown airtableState = iota
- airtableStateUnchanged
- airtableStateChanged
- airtableStateNew
-)
-
-var (
- airtableStateString = map[airtableState]string{
- airtableStateUnknown: "unknown",
- airtableStateUnchanged: "unchanged",
- airtableStateChanged: "changed",
- airtableStateNew: "new",
- }
-)
-
-//
-// provider
-//
-
-type ProviderRecord struct {
- State airtableState `json:"-"` // internal
-
- airtable.Record // provides ID, CreatedTime
- Fields struct {
- // base
- AirtableBase
-
- // specific
- URL string `json:"url"`
- Driver string `json:"driver"`
-
- // relationship
- // n/a
- } `json:"fields,omitempty"`
-}
-
-func (r ProviderRecord) String() string {
- out, _ := json.Marshal(r)
- return string(out)
-}
-
-func (p Provider) ToRecord(cache AirtableDB) *ProviderRecord {
- record := ProviderRecord{}
-
- // base
- record.Fields.ID = p.ID
- record.Fields.CreatedAt = p.CreatedAt
- record.Fields.UpdatedAt = p.UpdatedAt
- record.Fields.Errors = strings.Join(p.Errors, ", ")
-
- // specific
- record.Fields.URL = p.URL
- record.Fields.Driver = p.Driver
-
- // relationships
- // n/a
-
- return &record
-}
-
-func (r *ProviderRecord) Equals(n *ProviderRecord) bool {
- return true &&
- // base
- r.Fields.ID == n.Fields.ID &&
- isSameAirtableDate(r.Fields.CreatedAt, n.Fields.CreatedAt) &&
- isSameAirtableDate(r.Fields.UpdatedAt, n.Fields.UpdatedAt) &&
- r.Fields.Errors == n.Fields.Errors &&
-
- // specific
- r.Fields.URL == n.Fields.URL &&
- r.Fields.Driver == n.Fields.Driver &&
-
- // relationships
- // n/a
-
- true
-}
-
-type ProviderRecords []ProviderRecord
-
-func (records ProviderRecords) ByID(id string) string {
- for _, record := range records {
- if record.Fields.ID == id {
- return record.ID
- }
- }
- return ""
-}
-
-//
-// label
-//
-
-type LabelRecord struct {
- State airtableState `json:"-"` // internal
-
- airtable.Record // provides ID, CreatedTime
- Fields struct {
- // base
- AirtableBase
-
- // specific
- URL string `json:"url"`
- Name string `json:"name"`
- Color string `json:"color"`
- Description string `json:"description"`
-
- // relationship
- // n/a
- } `json:"fields,omitempty"`
-}
-
-func (r LabelRecord) String() string {
- out, _ := json.Marshal(r)
- return string(out)
-}
-
-func (p Label) ToRecord(cache AirtableDB) *LabelRecord {
- record := LabelRecord{}
-
- // base
- record.Fields.ID = p.ID
- record.Fields.CreatedAt = p.CreatedAt
- record.Fields.UpdatedAt = p.UpdatedAt
- record.Fields.Errors = strings.Join(p.Errors, ", ")
-
- // specific
- record.Fields.URL = p.URL
- record.Fields.Name = p.Name
- record.Fields.Color = p.Color
- record.Fields.Description = p.Description
-
- // relationships
- // n/a
-
- return &record
-}
-
-func (r *LabelRecord) Equals(n *LabelRecord) bool {
- return true &&
- // base
- r.Fields.ID == n.Fields.ID &&
- isSameAirtableDate(r.Fields.CreatedAt, n.Fields.CreatedAt) &&
- isSameAirtableDate(r.Fields.UpdatedAt, n.Fields.UpdatedAt) &&
- r.Fields.Errors == n.Fields.Errors &&
-
- // specific
- r.Fields.URL == n.Fields.URL &&
- r.Fields.Name == n.Fields.Name &&
- r.Fields.Color == n.Fields.Color &&
- r.Fields.Description == n.Fields.Description &&
-
- // relationships
- // n/a
-
- true
-}
-
-type LabelRecords []LabelRecord
-
-func (records LabelRecords) ByID(id string) string {
- for _, record := range records {
- if record.Fields.ID == id {
- return record.ID
- }
- }
- return ""
-}
-
-//
-// account
-//
-
-type AccountRecord struct {
- State airtableState `json:"-"` // internal
-
- airtable.Record // provides ID, CreatedTime
- Fields struct {
- // base
- AirtableBase
-
- // specific
- URL string `json:"url"`
- Login string `json:"login"`
- FullName string `json:"fullname"`
- Type string `json:"type"`
- Bio string `json:"bio"`
- Location string `json:"location"`
- Company string `json:"company"`
- Blog string `json:"blog"`
- Email string `json:"email"`
- AvatarURL string `json:"avatar-url"`
-
- // relationships
- Provider []string `json:"provider"`
- } `json:"fields,omitempty"`
-}
-
-func (r AccountRecord) String() string {
- out, _ := json.Marshal(r)
- return string(out)
-}
-
-func (p Account) ToRecord(cache AirtableDB) *AccountRecord {
- record := AccountRecord{}
- // base
- record.Fields.ID = p.ID
- record.Fields.CreatedAt = p.CreatedAt
- record.Fields.UpdatedAt = p.UpdatedAt
- record.Fields.Errors = strings.Join(p.Errors, ", ")
-
- // specific
- record.Fields.URL = p.URL
- record.Fields.Login = p.Login
- record.Fields.FullName = p.FullName
- record.Fields.Type = p.Type
- record.Fields.Bio = p.Bio
- record.Fields.Location = p.Location
- record.Fields.Company = p.Company
- record.Fields.Blog = p.Blog
- record.Fields.Email = p.Email
- record.Fields.AvatarURL = p.AvatarURL
-
- // relationships
- record.Fields.Provider = []string{cache.Providers.ByID(p.Provider.ID)}
-
- return &record
-}
-
-func (r *AccountRecord) Equals(n *AccountRecord) bool {
- return true &&
-
- // base
- r.Fields.ID == n.Fields.ID &&
- isSameAirtableDate(r.Fields.CreatedAt, n.Fields.CreatedAt) &&
- isSameAirtableDate(r.Fields.UpdatedAt, n.Fields.UpdatedAt) &&
- r.Fields.Errors == n.Fields.Errors &&
-
- // specific
- r.Fields.URL == n.Fields.URL &&
- r.Fields.Login == n.Fields.Login &&
- r.Fields.FullName == n.Fields.FullName &&
- r.Fields.Type == n.Fields.Type &&
- r.Fields.Bio == n.Fields.Bio &&
- r.Fields.Location == n.Fields.Location &&
- r.Fields.Company == n.Fields.Company &&
- r.Fields.Blog == n.Fields.Blog &&
- r.Fields.Email == n.Fields.Email &&
- r.Fields.AvatarURL == n.Fields.AvatarURL &&
-
- // relationships
- isSameStringSlice(r.Fields.Provider, n.Fields.Provider) &&
-
- true
-}
-
-type AccountRecords []AccountRecord
-
-func (records AccountRecords) ByID(id string) string {
- for _, record := range records {
- if record.Fields.ID == id {
- return record.ID
- }
- }
- return ""
-}
-
-//
-// repository
-//
-
-type RepositoryRecord struct {
- State airtableState `json:"-"` // internal
-
- airtable.Record // provides ID, CreatedTime
- Fields struct {
- // base
- AirtableBase
-
- // specific
- URL string `json:"url"`
- Title string `json:"title"`
- Description string `json:"description"`
- Homepage string `json:"homepage"`
- PushedAt time.Time `json:"pushed-at"`
- IsFork bool `json:"is-fork"`
-
- // relationships
- Provider []string `json:"provider"`
- Owner []string `json:"owner"`
- } `json:"fields,omitempty"`
-}
-
-func (r RepositoryRecord) String() string {
- out, _ := json.Marshal(r)
- return string(out)
-}
-
-func (p Repository) ToRecord(cache AirtableDB) *RepositoryRecord {
- record := RepositoryRecord{}
-
- // base
- record.Fields.ID = p.ID
- record.Fields.CreatedAt = p.CreatedAt
- record.Fields.UpdatedAt = p.UpdatedAt
- record.Fields.Errors = strings.Join(p.Errors, ", ")
-
- // specific
- record.Fields.URL = p.URL
- record.Fields.Title = p.Title
- record.Fields.Description = p.Description
- record.Fields.Homepage = p.Homepage
- record.Fields.PushedAt = p.PushedAt
- record.Fields.IsFork = p.IsFork
-
- // relationships
- record.Fields.Provider = []string{cache.Providers.ByID(p.Provider.ID)}
- if p.Owner != nil {
- record.Fields.Owner = []string{cache.Accounts.ByID(p.Owner.ID)}
- }
-
- return &record
-}
-
-func (r *RepositoryRecord) Equals(n *RepositoryRecord) bool {
- return true &&
-
- // base
- r.Fields.ID == n.Fields.ID &&
- isSameAirtableDate(r.Fields.CreatedAt, n.Fields.CreatedAt) &&
- isSameAirtableDate(r.Fields.UpdatedAt, n.Fields.UpdatedAt) &&
- r.Fields.Errors == n.Fields.Errors &&
-
- // specific
- r.Fields.URL == n.Fields.URL &&
- r.Fields.Title == n.Fields.Title &&
- r.Fields.Description == n.Fields.Description &&
- r.Fields.Homepage == n.Fields.Homepage &&
- isSameAirtableDate(r.Fields.PushedAt, n.Fields.PushedAt) &&
- r.Fields.IsFork == n.Fields.IsFork &&
-
- // relationships
- isSameStringSlice(r.Fields.Provider, n.Fields.Provider) &&
- isSameStringSlice(r.Fields.Owner, n.Fields.Owner) &&
-
- true
-}
-
-type RepositoryRecords []RepositoryRecord
-
-func (records RepositoryRecords) ByID(id string) string {
- for _, record := range records {
- if record.Fields.ID == id {
- return record.ID
- }
- }
- return ""
-}
-
-//
-// milestone
-//
-
-type MilestoneRecord struct {
- State airtableState `json:"-"` // internal
-
- airtable.Record // provides ID, CreatedTime
- Fields struct {
- // base
- AirtableBase
-
- // specific
- URL string `json:"url"`
- Title string `json:"title"`
- Description string `json:"description"`
- ClosedAt time.Time `json:"closed-at"`
- DueOn time.Time `json:"due-on"`
-
- // relationships
- Creator []string `json:"creator"`
- Repository []string `json:"repository"`
- } `json:"fields,omitempty"`
-}
-
-func (r MilestoneRecord) String() string {
- out, _ := json.Marshal(r)
- return string(out)
-}
-
-func (p Milestone) ToRecord(cache AirtableDB) *MilestoneRecord {
- record := MilestoneRecord{}
- // base
- record.Fields.ID = p.ID
- record.Fields.CreatedAt = p.CreatedAt
- record.Fields.UpdatedAt = p.UpdatedAt
- record.Fields.Errors = strings.Join(p.Errors, ", ")
-
- // specific
- record.Fields.URL = p.URL
- record.Fields.Title = p.Title
- record.Fields.Description = p.Description
- record.Fields.ClosedAt = p.ClosedAt
- record.Fields.DueOn = p.DueOn
-
- // relationships
- if p.Creator != nil {
- record.Fields.Creator = []string{cache.Accounts.ByID(p.Creator.ID)}
- }
- if p.Repository != nil {
- record.Fields.Repository = []string{cache.Repositories.ByID(p.Repository.ID)}
- }
-
- return &record
-}
-
-func (r *MilestoneRecord) Equals(n *MilestoneRecord) bool {
- return true &&
-
- // base
- r.Fields.ID == n.Fields.ID &&
- isSameAirtableDate(r.Fields.CreatedAt, n.Fields.CreatedAt) &&
- isSameAirtableDate(r.Fields.UpdatedAt, n.Fields.UpdatedAt) &&
- r.Fields.Errors == n.Fields.Errors &&
-
- // specific
- r.Fields.URL == n.Fields.URL &&
- r.Fields.Title == n.Fields.Title &&
- r.Fields.Description == n.Fields.Description &&
- isSameAirtableDate(r.Fields.ClosedAt, n.Fields.ClosedAt) &&
- isSameAirtableDate(r.Fields.DueOn, n.Fields.DueOn) &&
-
- // relationships
- isSameStringSlice(r.Fields.Creator, n.Fields.Creator) &&
- isSameStringSlice(r.Fields.Repository, n.Fields.Repository) &&
-
- true
-}
-
-type MilestoneRecords []MilestoneRecord
-
-func (records MilestoneRecords) ByID(id string) string {
- for _, record := range records {
- if record.Fields.ID == id {
- return record.ID
- }
- }
- return ""
-}
-
-//
-// issue
-//
-
-type IssueRecord struct {
- State airtableState `json:"-"` // internal
-
- airtable.Record // provides ID, CreatedTime
- Fields struct {
- // base
- AirtableBase
-
- // specific
- URL string `json:"url"`
- CompletedAt time.Time `json:"completed-at"`
- Title string `json:"title"`
- State string `json:"state"`
- Body string `json:"body"`
- IsPR bool `json:"is-pr"`
- IsLocked bool `json:"is-locked"`
- Comments int `json:"comments"`
- Upvotes int `json:"upvotes"`
- Downvotes int `json:"downvotes"`
- IsOrphan bool `json:"is-orphan"`
- IsHidden bool `json:"is-hidden"`
- Weight int `json:"weight"`
- IsEpic bool `json:"is-epic"`
- HasEpic bool `json:"has-epic"`
-
- // relationships
- Repository []string `json:"repository"`
- Milestone []string `json:"milestone"`
- Author []string `json:"author"`
- Labels []string `json:"labels"`
- Assignees []string `json:"assignees"`
- //Parents []string `json:"-"`
- //Children []string `json:"-"`
- //Duplicates []string `json:"-"`
- } `json:"fields,omitempty"`
-}
-
-func (r IssueRecord) String() string {
- out, _ := json.Marshal(r)
- return string(out)
-}
-
-func (p Issue) ToRecord(cache AirtableDB) *IssueRecord {
- record := IssueRecord{}
- // base
- record.Fields.ID = p.ID
- record.Fields.CreatedAt = p.CreatedAt
- record.Fields.UpdatedAt = p.UpdatedAt
- record.Fields.Errors = strings.Join(p.Errors, ", ")
-
- // specific
- record.Fields.URL = p.URL
- record.Fields.CompletedAt = p.CompletedAt
- record.Fields.Title = p.Title
- record.Fields.State = p.State
- record.Fields.Body = p.Body
- record.Fields.IsPR = p.IsPR
- record.Fields.IsLocked = p.IsLocked
- record.Fields.Comments = p.Comments
- record.Fields.Upvotes = p.Upvotes
- record.Fields.Downvotes = p.Downvotes
- record.Fields.IsOrphan = p.IsOrphan
- record.Fields.IsHidden = p.IsHidden
- record.Fields.Weight = p.Weight
- record.Fields.IsEpic = p.IsEpic
- record.Fields.HasEpic = p.HasEpic
-
- // relationships
- record.Fields.Repository = []string{cache.Repositories.ByID(p.Repository.ID)}
- if p.Milestone != nil {
- record.Fields.Milestone = []string{cache.Milestones.ByID(p.Milestone.ID)}
- }
- record.Fields.Author = []string{cache.Accounts.ByID(p.Author.ID)}
- record.Fields.Labels = []string{}
- for _, label := range p.Labels {
- record.Fields.Labels = append(record.Fields.Labels, cache.Labels.ByID(label.ID))
- }
- record.Fields.Assignees = []string{}
- for _, assignee := range p.Assignees {
- record.Fields.Assignees = append(record.Fields.Assignees, cache.Accounts.ByID(assignee.ID))
- }
-
- return &record
-}
-
-func (r *IssueRecord) Equals(n *IssueRecord) bool {
- return true &&
-
- // base
- r.Fields.ID == n.Fields.ID &&
- isSameAirtableDate(r.Fields.CreatedAt, n.Fields.CreatedAt) &&
- isSameAirtableDate(r.Fields.UpdatedAt, n.Fields.UpdatedAt) &&
- r.Fields.Errors == n.Fields.Errors &&
-
- // specific
- r.Fields.URL == n.Fields.URL &&
- isSameAirtableDate(r.Fields.CompletedAt, n.Fields.CompletedAt) &&
- r.Fields.Title == n.Fields.Title &&
- r.Fields.State == n.Fields.State &&
- r.Fields.Body == n.Fields.Body &&
- r.Fields.IsPR == n.Fields.IsPR &&
- r.Fields.IsLocked == n.Fields.IsLocked &&
- r.Fields.Comments == n.Fields.Comments &&
- r.Fields.Upvotes == n.Fields.Upvotes &&
- r.Fields.Downvotes == n.Fields.Downvotes &&
- r.Fields.IsOrphan == n.Fields.IsOrphan &&
- r.Fields.IsHidden == n.Fields.IsHidden &&
- r.Fields.Weight == n.Fields.Weight &&
- r.Fields.IsEpic == n.Fields.IsEpic &&
- r.Fields.HasEpic == n.Fields.HasEpic &&
-
- // relationships
- isSameStringSlice(r.Fields.Repository, n.Fields.Repository) &&
- isSameStringSlice(r.Fields.Milestone, n.Fields.Milestone) &&
- isSameStringSlice(r.Fields.Author, n.Fields.Author) &&
- isSameStringSlice(r.Fields.Labels, n.Fields.Labels) &&
- isSameStringSlice(r.Fields.Assignees, n.Fields.Assignees) &&
-
- true
-}
-
-type IssueRecords []IssueRecord
-
-func (records IssueRecords) ByID(id string) string {
- for _, record := range records {
- if record.Fields.ID == id {
- return record.ID
- }
- }
- return ""
-}
diff --git a/pkg/airtabledb/airtabledb.go b/pkg/airtabledb/airtabledb.go
new file mode 100644
index 000000000..015d8ad09
--- /dev/null
+++ b/pkg/airtabledb/airtabledb.go
@@ -0,0 +1,405 @@
+package airtabledb
+
+import (
+ "encoding/json"
+ "reflect"
+ "sort"
+ "time"
+
+ "github.com/brianloveswords/airtable"
+)
+
+type Record interface {
+ String() string
+}
+
+func (t Table) RecordsEqual(idx int, b Record) bool {
+ sf, ok := reflect.TypeOf(t.Get(idx)).FieldByName("Fields")
+ if !ok {
+ panic("No struct field Fields in Record")
+ }
+ aTF := sf.Type
+ aVF := reflect.ValueOf(t.Get(idx)).FieldByName("Fields")
+ bVF := reflect.ValueOf(b).FieldByName("Fields")
+
+ if aVF.NumField() != bVF.NumField() {
+ return false
+ }
+ for i := 0; i < aVF.NumField(); i++ {
+ aiSF := aTF.Field(i)
+ aiF := aVF.Field(i)
+ biF := bVF.FieldByName(aiSF.Name)
+ if aiF.Type() != biF.Type() {
+ return false
+ }
+ if aiF.Type().String() == "time.Time" {
+ if !isSameAirtableDate(aiF.Interface().(time.Time), biF.Interface().(time.Time)) {
+ return false
+ }
+ } else if aiF.Type().String() == "[]string" {
+ aS, bS := aiF.Interface().([]string), biF.Interface().([]string)
+ if aS == nil {
+ aS = []string{}
+ }
+ if bS == nil {
+ bS = []string{}
+ }
+ sort.Strings(aS)
+ sort.Strings(bS)
+ if !reflect.DeepEqual(aS, bS) {
+ return false
+ }
+ continue
+ } else {
+ if !reflect.DeepEqual(aiF.Interface(), biF.Interface()) {
+ return false
+ }
+ }
+ }
+ return true
+}
+
+func isSameAirtableDate(a, b time.Time) bool {
+ return a.Truncate(time.Millisecond).UTC() == b.Truncate(time.Millisecond).UTC()
+}
+
+type Table struct {
+ elems interface{}
+}
+
+func (t Table) SetState(idx int, state State) {
+ s := reflect.ValueOf(t.elems).Elem().Index(idx).FieldByName("State")
+ s.SetInt(int64(state))
+}
+
+func (t Table) GetState(idx int) State {
+ return State(reflect.ValueOf(t.elems).Elem().Index(idx).FieldByName("State").Int())
+}
+
+// CopyFields copies the 'Fields' struct from srcRecord into the Record at idx in the Table t.
+// Will panic if the necessary fields do not exist.
+func (t Table) CopyFields(idx int, srcRecord interface{}) {
+ dstF := reflect.ValueOf(t.elems).Elem().Index(idx).FieldByName("Fields")
+ srcF := reflect.ValueOf(srcRecord).FieldByName("Fields")
+ dstF.Set(srcF)
+}
+
+// GetFieldID returns the ID field of the Fields struct of the record at idx in the Table t.
+// Will panic if the necessary fields do not exist.
+func (t Table) GetFieldID(idx int) string {
+ return reflect.ValueOf(t.elems).Elem().Index(idx).FieldByName("Fields").FieldByName("ID").String()
+}
+
+// GetID returns the ID field of the record at idx in the Table t.
+func (t Table) GetID(idx int) string {
+ return reflect.ValueOf(t.elems).Elem().Index(idx).FieldByName("ID").String()
+}
+
+// Len returns the number of records in the table.
+func (t Table) Len() int {
+ return reflect.ValueOf(t.elems).Elem().Len()
+}
+
+// Append appends the given record to the table. Will panic if the given record is not of the right type.
+func (t Table) Append(record interface{}) {
+ a := reflect.Append(reflect.ValueOf(t.elems).Elem(), reflect.ValueOf(record))
+ reflect.ValueOf(t.elems).Elem().Set(a)
+}
+
+// Fetch retrieves the records of the airtable table at over the network and inserts them into the table.
+func (t Table) Fetch(at airtable.Table) error {
+ return at.List(t.elems, &airtable.Options{})
+}
+
+// FindByID searches the table for a record with Fields.ID equal to id.
+// Returns the record's ID if a match is found. Otherwise, returns the empty string.
+func (t Table) FindByID(id string) string {
+ slice := reflect.ValueOf(t.elems).Elem()
+ for i := 0; i < slice.Len(); i++ {
+ record := slice.Index(i)
+ fieldID := record.FieldByName("Fields").FieldByName("ID").String()
+ if fieldID == id {
+ return record.FieldByName("ID").String()
+ }
+ }
+ return ""
+}
+
+// GetPtr returns an interface containing a pointer to the record in the table at index idx.
+func (t Table) GetPtr(idx int) interface{} {
+ return reflect.ValueOf(t.elems).Elem().Index(idx).Addr().Interface()
+}
+
+// Get returns an interface to the record in the table at idx.
+func (t Table) Get(idx int) interface{} {
+ return reflect.ValueOf(t.elems).Elem().Index(idx).Interface()
+}
+
+// StringAt returns a JSON string of the record in the table at idx.
+func (t Table) StringAt(idx int) string {
+ out := reflect.ValueOf(t.elems).Elem().Index(idx).MethodByName("String").Call(nil)
+ return out[0].String()
+}
+
+type DB struct {
+ Tables []Table
+}
+
+func NewDB() DB {
+ db := DB{
+ Tables: make([]Table, NumTables),
+ }
+ db.Tables[IssueIndex].elems = &[]IssueRecord{}
+ db.Tables[RepositoryIndex].elems = &[]RepositoryRecord{}
+ db.Tables[AccountIndex].elems = &[]AccountRecord{}
+ db.Tables[LabelIndex].elems = &[]LabelRecord{}
+ db.Tables[MilestoneIndex].elems = &[]MilestoneRecord{}
+ db.Tables[ProviderIndex].elems = &[]ProviderRecord{}
+ if len(db.Tables) != NumTables {
+ panic("missing an airtabledb Table")
+ }
+ return db
+}
+
+type Base struct {
+ ID string `json:"id"`
+ CreatedAt time.Time `json:"created-at"`
+ UpdatedAt time.Time `json:"updated-at"`
+ Errors string `json:"errors"`
+}
+
+type State int
+
+// Unfortunately, the order matters here.
+// We must first compute Records which are referenced by other Records...
+const (
+ ProviderIndex = iota
+ LabelIndex
+ AccountIndex
+ RepositoryIndex
+ MilestoneIndex
+ IssueIndex
+ NumTables
+)
+
+var (
+ TableNameToIndex = map[string]int{
+ "provider": ProviderIndex,
+ "label": LabelIndex,
+ "account": AccountIndex,
+ "repository": RepositoryIndex,
+ "milestone": MilestoneIndex,
+ "issue": IssueIndex,
+ }
+)
+
+const (
+ StateUnknown State = iota
+ StateUnchanged
+ StateChanged
+ StateNew
+)
+
+var (
+ StateString = map[State]string{
+ StateUnknown: "unknown",
+ StateUnchanged: "unchanged",
+ StateChanged: "changed",
+ StateNew: "new",
+ }
+)
+
+//
+// provider
+//
+
+type ProviderRecord struct {
+ State State `json:"-"` // internal
+
+ airtable.Record // provides ID, CreatedTime
+ Fields struct {
+ // base
+ Base
+
+ // specific
+ URL string `json:"url"`
+ Driver string `json:"driver"`
+
+ // relationship
+ // n/a
+ } `json:"fields,omitempty"`
+}
+
+func (r ProviderRecord) String() string {
+ out, _ := json.Marshal(r)
+ return string(out)
+}
+
+//
+// label
+//
+
+type LabelRecord struct {
+ State State `json:"-"` // internal
+
+ airtable.Record // provides ID, CreatedTime
+ Fields struct {
+ // base
+ Base
+
+ // specific
+ URL string `json:"url"`
+ Name string `json:"name"`
+ Color string `json:"color"`
+ Description string `json:"description"`
+
+ // relationship
+ // n/a
+ } `json:"fields,omitempty"`
+}
+
+func (r LabelRecord) String() string {
+ out, _ := json.Marshal(r)
+ return string(out)
+}
+
+//
+// account
+//
+
+type AccountRecord struct {
+ State State `json:"-"` // internal
+
+ airtable.Record // provides ID, CreatedTime
+ Fields struct {
+ // base
+ Base
+
+ // specific
+ URL string `json:"url"`
+ Login string `json:"login"`
+ FullName string `json:"fullname"`
+ Type string `json:"type"`
+ Bio string `json:"bio"`
+ Location string `json:"location"`
+ Company string `json:"company"`
+ Blog string `json:"blog"`
+ Email string `json:"email"`
+ AvatarURL string `json:"avatar-url"`
+
+ // relationships
+ Provider []string `json:"provider"`
+ } `json:"fields,omitempty"`
+}
+
+func (r AccountRecord) String() string {
+ out, _ := json.Marshal(r)
+ return string(out)
+}
+
+//
+// repository
+//
+
+type RepositoryRecord struct {
+ State State `json:"-"` // internal
+
+ airtable.Record // provides ID, CreatedTime
+ Fields struct {
+ // base
+ Base
+
+ // specific
+ URL string `json:"url"`
+ Title string `json:"title"`
+ Description string `json:"description"`
+ Homepage string `json:"homepage"`
+ PushedAt time.Time `json:"pushed-at"`
+ IsFork bool `json:"is-fork"`
+
+ // relationships
+ Provider []string `json:"provider"`
+ Owner []string `json:"owner"`
+ } `json:"fields,omitempty"`
+}
+
+func (r RepositoryRecord) String() string {
+ out, _ := json.Marshal(r)
+ return string(out)
+}
+
+//
+// milestone
+//
+
+type MilestoneRecord struct {
+ State State `json:"-"` // internal
+
+ airtable.Record // provides ID, CreatedTime
+ Fields struct {
+ // base
+ Base
+
+ // specific
+ URL string `json:"url"`
+ Title string `json:"title"`
+ Description string `json:"description"`
+ ClosedAt time.Time `json:"closed-at"`
+ DueOn time.Time `json:"due-on"`
+
+ // relationships
+ Creator []string `json:"creator"`
+ Repository []string `json:"repository"`
+ } `json:"fields,omitempty"`
+}
+
+func (r MilestoneRecord) String() string {
+ out, _ := json.Marshal(r)
+ return string(out)
+}
+
+//
+// issue
+//
+
+type IssueRecord struct {
+ State State `json:"-"` // internal
+
+ airtable.Record // provides ID, CreatedTime
+ Fields struct {
+ // base
+ Base
+
+ // specific
+ URL string `json:"url"`
+ CompletedAt time.Time `json:"completed-at"`
+ Title string `json:"title"`
+ State string `json:"state"`
+ Body string `json:"body"`
+ IsPR bool `json:"is-pr"`
+ IsLocked bool `json:"is-locked"`
+ Comments int `json:"comments"`
+ Upvotes int `json:"upvotes"`
+ Downvotes int `json:"downvotes"`
+ IsOrphan bool `json:"is-orphan"`
+ IsHidden bool `json:"is-hidden"`
+ Weight int `json:"weight"`
+ IsEpic bool `json:"is-epic"`
+ HasEpic bool `json:"has-epic"`
+
+ // relationships
+ Repository []string `json:"repository"`
+ Milestone []string `json:"milestone"`
+ Author []string `json:"author"`
+ Labels []string `json:"labels"`
+ Assignees []string `json:"assignees"`
+ //Parents []string `json:"-"`
+ //Children []string `json:"-"`
+ //Duplicates []string `json:"-"`
+ } `json:"fields,omitempty"`
+}
+
+func (r IssueRecord) String() string {
+ out, _ := json.Marshal(r)
+ return string(out)
+}
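
A minimal sketch (not part of this diff) of how the reflection-based Table helpers above might be driven; the record values are purely illustrative:

package main

import (
	"fmt"

	"moul.io/depviz/pkg/airtabledb"
)

func main() {
	db := airtabledb.NewDB()
	providers := db.Tables[airtabledb.ProviderIndex]

	// Append a record, then inspect it through the type-agnostic helpers.
	rec := airtabledb.ProviderRecord{}
	rec.Fields.ID = "https://github.com" // illustrative value
	rec.Fields.Driver = "github"         // illustrative value
	providers.Append(rec)

	fmt.Println(providers.Len())         // 1
	fmt.Println(providers.GetFieldID(0)) // https://github.com
	providers.SetState(0, airtabledb.StateNew)
	fmt.Println(airtabledb.StateString[providers.GetState(0)]) // new
}
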
diff --git a/github.go b/pkg/issues/github.go
similarity index 90%
rename from github.go
rename to pkg/issues/github.go
index 5252b1d8c..2732b2af4 100644
--- a/github.go
+++ b/pkg/issues/github.go
@@ -1,4 +1,4 @@
-package main
+package issues
import (
"context"
@@ -7,25 +7,26 @@ import (
"sync"
"github.com/google/go-github/github"
+ "github.com/jinzhu/gorm"
"go.uber.org/zap"
"golang.org/x/oauth2"
)
-func githubPull(target Target, wg *sync.WaitGroup, opts *pullOptions, out chan []*Issue) {
+func githubPull(target Target, wg *sync.WaitGroup, token string, db *gorm.DB, out chan<- []*Issue) {
defer wg.Done()
ctx := context.Background()
- ts := oauth2.StaticTokenSource(&oauth2.Token{AccessToken: opts.GithubToken})
+ ts := oauth2.StaticTokenSource(&oauth2.Token{AccessToken: token})
tc := oauth2.NewClient(ctx, ts)
client := github.NewClient(tc)
- total := 0
+ totalIssues := 0
callOpts := &github.IssueListByRepoOptions{State: "all"}
var lastEntry Issue
if err := db.Where("repository_id = ?", target.ProjectURL()).Order("updated_at desc").First(&lastEntry).Error; err == nil {
callOpts.Since = lastEntry.UpdatedAt
} else {
- logger().Warn("failed to get last entry", zap.Error(err))
+ zap.L().Warn("failed to get last entry", zap.Error(err))
}
for {
@@ -34,12 +35,12 @@ func githubPull(target Target, wg *sync.WaitGroup, opts *pullOptions, out chan [
log.Fatal(err)
return
}
- total += len(issues)
- logger().Debug("paginate",
+ totalIssues += len(issues)
+ zap.L().Debug("paginate",
zap.String("provider", "github"),
zap.String("repo", target.ProjectURL()),
zap.Int("new-issues", len(issues)),
- zap.Int("total-issues", total),
+ zap.Int("total-issues", totalIssues),
)
normalizedIssues := []*Issue{}
for _, issue := range issues {
@@ -52,7 +53,7 @@ func githubPull(target Target, wg *sync.WaitGroup, opts *pullOptions, out chan [
callOpts.Page = resp.NextPage
}
if rateLimits, _, err := client.RateLimits(ctx); err == nil {
- logger().Debug("github API rate limiting", zap.Stringer("limit", rateLimits.GetCore()))
+ zap.L().Debug("github API rate limiting", zap.Stringer("limit", rateLimits.GetCore()))
}
}
diff --git a/gitlab.go b/pkg/issues/gitlab.go
similarity index 93%
rename from gitlab.go
rename to pkg/issues/gitlab.go
index 9b7a9f37a..04603895d 100644
--- a/gitlab.go
+++ b/pkg/issues/gitlab.go
@@ -1,4 +1,4 @@
-package main
+package issues
import (
"fmt"
@@ -7,13 +7,14 @@ import (
"sync"
"time"
+ "github.com/jinzhu/gorm"
gitlab "github.com/xanzy/go-gitlab"
"go.uber.org/zap"
)
-func gitlabPull(target Target, wg *sync.WaitGroup, opts *pullOptions, out chan []*Issue) {
+func gitlabPull(target Target, wg *sync.WaitGroup, token string, db *gorm.DB, out chan []*Issue) {
defer wg.Done()
- client := gitlab.NewClient(nil, opts.GitlabToken)
+ client := gitlab.NewClient(nil, token)
client.SetBaseURL(fmt.Sprintf("%s/api/v4", target.ProviderURL()))
total := 0
gitlabOpts := &gitlab.ListProjectIssuesOptions{
@@ -33,11 +34,11 @@ func gitlabPull(target Target, wg *sync.WaitGroup, opts *pullOptions, out chan [
for {
issues, resp, err := client.Issues.ListProjectIssues(target.Path(), gitlabOpts)
if err != nil {
- logger().Error("failed to pull issues", zap.Error(err))
+ zap.L().Error("failed to pull issues", zap.Error(err))
return
}
total += len(issues)
- logger().Debug("paginate",
+ zap.L().Debug("paginate",
zap.String("provider", "gitlab"),
zap.String("repo", target.ProjectURL()),
zap.Int("new-issues", len(issues)),
@@ -169,7 +170,7 @@ func fromGitlabFakeUser(provider *Provider, input gitlabFakeUser) *Account {
func fromGitlabRepositoryURL(input string) *Repository {
u, err := url.Parse(input)
if err != nil {
- logger().Warn("invalid repository URL", zap.String("URL", input))
+ zap.L().Warn("invalid repository URL", zap.String("URL", input))
return nil
}
providerURL := fmt.Sprintf("%s://%s", u.Scheme, u.Host)
diff --git a/pkg/issues/issue.go b/pkg/issues/issue.go
new file mode 100644
index 000000000..09e1d9f53
--- /dev/null
+++ b/pkg/issues/issue.go
@@ -0,0 +1,388 @@
+package issues
+
+import (
+ "fmt"
+ "log"
+ "net/http"
+ "net/url"
+ "regexp"
+ "sort"
+ "strconv"
+ "strings"
+ "sync"
+
+ "github.com/jinzhu/gorm"
+)
+
+var (
+ rxDNSName = regexp.MustCompile(`^([a-zA-Z0-9_]{1}[a-zA-Z0-9_-]{0,62}){1}(\.[a-zA-Z0-9_]{1}[a-zA-Z0-9_-]{0,62})*[\._]?$`)
+ childrenRegex, _ = regexp.Compile(`(?i)(require|requires|blocked by|block by|depend on|depends on|parent of) ([a-z0-9:/_.-]+issues/[0-9]+|[a-z0-9:/_.-]+#[0-9]+|[a-z0-9/_-]*#[0-9]+)`)
+ parentsRegex, _ = regexp.Compile(`(?i)(blocks|block|address|addresses|part of|child of|fix|fixes) ([a-z0-9:/_.-]+issues/[0-9]+|[a-z0-9:/_.-]+#[0-9]+|[a-z0-9/_-]*#[0-9]+)`)
+ isDuplicateRegex, _ = regexp.Compile(`(?i)(duplicates|duplicate|dup of|dup|duplicate of) ([a-z0-9:/_.-]+issues/[0-9]+|[a-z0-9:/_.-]+#[0-9]+|[a-z0-9/_-]*#[0-9]+)`)
+ //weightMultiplierRegex, _ = regexp.Compile(`(?i)(depviz.weight_multiplier[:= ]+)([0-9]+)`)
+ weightRegex, _ = regexp.Compile(`(?i)(depviz.base_weight|depviz.weight)[:= ]+([0-9]+)`)
+ hideRegex, _ = regexp.Compile(`(?i)(depviz.hide)`) // FIXME: use label
+)
+
+// PullAndCompute pulls issues from the given targets, computes their fields, and stores the issues in the database.
+func PullAndCompute(githubToken, gitlabToken string, db *gorm.DB, t Targets) error {
+ // FIXME: handle the special '@me' target
+
+ var (
+ wg sync.WaitGroup
+ allIssues []*Issue
+ out = make(chan []*Issue, 100)
+ )
+
+ targets := t.UniqueProjects()
+
+ // parallel fetches
+ wg.Add(len(targets))
+ for _, target := range targets {
+ switch target.Driver() {
+ case GithubDriver:
+ go githubPull(target, &wg, githubToken, db, out)
+ case GitlabDriver:
+ go gitlabPull(target, &wg, gitlabToken, db, out)
+ default:
+ panic("should not happen")
+ }
+ }
+ wg.Wait()
+ close(out)
+ for issues := range out {
+ allIssues = append(allIssues, issues...)
+ }
+
+ // save
+ for _, issue := range allIssues {
+ if err := db.Save(issue).Error; err != nil {
+ return err
+ }
+ }
+
+ return Compute(db)
+}
+
+// Compute loads issues from the given database, computes their fields, and stores the issues back into the database.
+func Compute(db *gorm.DB) error {
+ issues, err := Load(db, nil)
+ if err != nil {
+ return err
+ }
+
+ for _, issue := range issues {
+ // reset default values
+ issue.Errors = []string{}
+ issue.Parents = []*Issue{}
+ issue.Children = []*Issue{}
+ issue.Duplicates = []*Issue{}
+ issue.Weight = 0
+ issue.IsHidden = false
+ issue.IsEpic = false
+ issue.HasEpic = false
+ issue.IsOrphan = true
+ }
+
+ for _, issue := range issues {
+ if issue.Body == "" {
+ continue
+ }
+
+ // is epic
+ for _, label := range issue.Labels {
+ // FIXME: get epic labels dynamically based on a configuration file in the repo
+ if label.Name == "epic" || label.Name == "t/epic" {
+ issue.IsEpic = true
+ }
+ }
+
+ // hidden
+ if match := hideRegex.FindStringSubmatch(issue.Body); match != nil {
+ issue.IsHidden = true
+ continue
+ }
+
+ // duplicates
+ if match := isDuplicateRegex.FindStringSubmatch(issue.Body); match != nil {
+ canonical := issue.GetRelativeURL(match[len(match)-1])
+ rel := issues.Get(canonical)
+ if rel == nil {
+ issue.Errors = append(issue.Errors, fmt.Errorf("duplicate %q not found", canonical).Error())
+ continue
+ }
+ issue.Duplicates = append(issue.Duplicates, rel)
+ issue.IsHidden = true
+ continue
+ }
+
+ // weight
+ if match := weightRegex.FindStringSubmatch(issue.Body); match != nil {
+ issue.Weight, _ = strconv.Atoi(match[len(match)-1])
+ }
+
+ // children
+ for _, match := range childrenRegex.FindAllStringSubmatch(issue.Body, -1) {
+ canonical := issue.GetRelativeURL(match[len(match)-1])
+ child := issues.Get(canonical)
+ if child == nil {
+ issue.Errors = append(issue.Errors, fmt.Errorf("children %q not found", canonical).Error())
+ continue
+ }
+ issue.Children = append(issue.Children, child)
+ issue.IsOrphan = false
+ child.Parents = append(child.Parents, issue)
+ child.IsOrphan = false
+ }
+
+ // parents
+ for _, match := range parentsRegex.FindAllStringSubmatch(issue.Body, -1) {
+ canonical := issue.GetRelativeURL(match[len(match)-1])
+ parent := issues.Get(canonical)
+ if parent == nil {
+ issue.Errors = append(issue.Errors, fmt.Errorf("parent %q not found", canonical).Error())
+ continue
+ }
+ issue.Parents = append(issue.Parents, parent)
+ issue.IsOrphan = false
+ parent.Children = append(parent.Children, issue)
+ parent.IsOrphan = false
+ }
+ }
+
+ for _, issue := range issues {
+ if issue.IsEpic {
+ issue.HasEpic = true
+ continue
+ }
+ // has epic
+ issue.HasEpic, err = computeHasEpic(issue, 0)
+ if err != nil {
+ issue.Errors = append(issue.Errors, err.Error())
+ }
+ }
+
+ for _, issue := range issues {
+ issue.PostLoad()
+
+ issue.ParentIDs = uniqueStrings(issue.ParentIDs)
+ sort.Strings(issue.ParentIDs)
+ issue.ChildIDs = uniqueStrings(issue.ChildIDs)
+ sort.Strings(issue.ChildIDs)
+ issue.DuplicateIDs = uniqueStrings(issue.DuplicateIDs)
+ sort.Strings(issue.DuplicateIDs)
+ }
+
+ for _, issue := range issues {
+ // TODO: add a "if changed" to preserve some CPU and time
+ if err := db.Set("gorm:association_autoupdate", false).Save(issue).Error; err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+// Load returns the issues stored in the database.
+func Load(db *gorm.DB, targets []Target) (Issues, error) {
+ query := db.Model(Issue{}).Order("created_at")
+ if len(targets) > 0 {
+ return nil, fmt.Errorf("not implemented")
+ // query = query.Where("repo_url IN (?)", canonicalTargets(targets))
+ // OR WHERE parents IN ....
+ // etc
+ }
+
+ perPage := 100
+ var issues []*Issue
+ for page := 0; ; page++ {
+ var newIssues []*Issue
+ if err := query.Limit(perPage).Offset(perPage * page).Find(&newIssues).Error; err != nil {
+ return nil, err
+ }
+ issues = append(issues, newIssues...)
+ if len(newIssues) < perPage {
+ break
+ }
+ }
+
+ for _, issue := range issues {
+ issue.PostLoad()
+ }
+
+ return Issues(issues), nil
+}
+
+// FIXME: try to use gorm hooks to auto preload/postload items
+
+func (i *Issue) Number() string {
+ u, err := url.Parse(i.URL)
+ if err != nil {
+ return ""
+ }
+ parts := strings.Split(u.Path, "/")
+ return parts[len(parts)-1]
+}
+
+func (i *Issue) Path() string {
+ u, err := url.Parse(i.URL)
+ if err != nil {
+ return ""
+ }
+ parts := strings.Split(u.Path, "/")
+ return strings.Join(parts[:len(parts)-2], "/")
+}
+
+func (i *Issue) Render(w http.ResponseWriter, r *http.Request) error {
+ return nil
+}
+
+func (i Issue) GetRelativeURL(target string) string {
+ if strings.Contains(target, "://") {
+ return normalizeURL(target)
+ }
+
+ if target[0] == '#' {
+ return fmt.Sprintf("%s/issues/%s", i.Repository.URL, target[1:])
+ }
+
+ target = strings.Replace(target, "#", "/issues/", -1)
+
+ parts := strings.Split(target, "/")
+ if strings.Contains(parts[0], ".") && isDNSName(parts[0]) {
+ return fmt.Sprintf("https://%s", target)
+ }
+
+ return fmt.Sprintf("%s/%s", strings.TrimRight(i.Repository.Provider.URL, "/"), target)
+}
+
+func (i *Issue) PostLoad() {
+ i.ParentIDs = []string{}
+ i.ChildIDs = []string{}
+ i.DuplicateIDs = []string{}
+ for _, rel := range i.Parents {
+ i.ParentIDs = append(i.ParentIDs, rel.ID)
+ }
+ for _, rel := range i.Children {
+ i.ChildIDs = append(i.ChildIDs, rel.ID)
+ }
+ for _, rel := range i.Duplicates {
+ i.DuplicateIDs = append(i.DuplicateIDs, rel.ID)
+ }
+}
+
+func (i Issue) IsClosed() bool {
+ return i.State == "closed"
+}
+
+func (i Issue) IsReady() bool {
+ return !i.IsOrphan && len(i.Parents) == 0 // FIXME: switch parents with children?
+}
+
+func (i Issue) MatchesWithATarget(targets Targets) bool {
+ return i.matchesWithATarget(targets, 0)
+}
+
+type Issues []*Issue
+
+func (issues Issues) Get(id string) *Issue {
+ for _, issue := range issues {
+ if issue.ID == id {
+ return issue
+ }
+ }
+ return nil
+}
+
+func (issues Issues) FilterByTargets(targets []Target) Issues {
+ filtered := Issues{}
+
+ for _, issue := range issues {
+ if issue.MatchesWithATarget(targets) {
+ filtered = append(filtered, issue)
+ }
+ }
+
+ return filtered
+}
+
+func normalizeURL(input string) string {
+ parts := strings.Split(input, "://")
+ output := fmt.Sprintf("%s://%s", parts[0], strings.Replace(parts[1], "//", "/", -1))
+ output = strings.TrimRight(output, "#")
+ output = strings.TrimRight(output, "/")
+ return output
+}
+
+func isDNSName(input string) bool {
+ return rxDNSName.MatchString(input)
+}
+
+func computeHasEpic(i *Issue, depth int) (bool, error) {
+ if depth > 100 {
+ return false, fmt.Errorf("very high blocking depth (>100), do not continue. (issue=%s)", i.URL)
+ }
+ if i.IsHidden {
+ return false, nil
+ }
+ for _, parent := range i.Parents {
+ if parent.IsEpic {
+ return true, nil
+ }
+ parentHasEpic, err := computeHasEpic(parent, depth+1)
+ if err != nil {
+ return false, nil
+ }
+ if parentHasEpic {
+ return true, nil
+ }
+ }
+ return false, nil
+}
+
+func uniqueStrings(input []string) []string {
+ u := make([]string, 0, len(input))
+ m := make(map[string]bool)
+
+ for _, val := range input {
+ if _, ok := m[val]; !ok {
+ m[val] = true
+ u = append(u, val)
+ }
+ }
+
+ return u
+}
+
+func (i Issue) matchesWithATarget(targets Targets, depth int) bool {
+ if depth > 100 {
+ log.Printf("circular dependency or too deep graph (>100), skipping this node. (issue=%s)", i)
+ return false
+ }
+
+ for _, target := range targets {
+ if target.Issue() != "" { // issue-mode
+ if target.Canonical() == i.URL {
+ return true
+ }
+ } else { // project-mode
+ if i.RepositoryID == target.ProjectURL() {
+ return true
+ }
+ }
+ }
+
+ for _, parent := range i.Parents {
+ if parent.matchesWithATarget(targets, depth+1) {
+ return true
+ }
+ }
+
+ for _, child := range i.Children {
+ if child.matchesWithATarget(targets, depth+1) {
+ return true
+ }
+ }
+
+ return false
+}
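
A minimal sketch (not part of this diff) of calling the exported Compute and Load entry points against an existing depviz database; the database path is illustrative:

package main

import (
	"fmt"
	"log"

	"github.com/jinzhu/gorm"
	_ "github.com/mattn/go-sqlite3"
	"moul.io/depviz/pkg/issues"
)

func main() {
	// Assumes a database already populated and migrated by a depviz pull.
	db, err := gorm.Open("sqlite3", "/tmp/depviz.db") // illustrative path
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	// Re-parse issue bodies (depends on / blocks / duplicate markers) and
	// persist the recomputed relationship fields.
	if err := issues.Compute(db); err != nil {
		log.Fatal(err)
	}

	// Load pages through all stored issues (target filtering is not implemented yet).
	all, err := issues.Load(db, nil)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(len(all), "issues")
}
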
diff --git a/issue_test.go b/pkg/issues/issue_test.go
similarity index 99%
rename from issue_test.go
rename to pkg/issues/issue_test.go
index a2fb2ea63..bfffdb79b 100644
--- a/issue_test.go
+++ b/pkg/issues/issue_test.go
@@ -1,4 +1,4 @@
-package main
+package issues
import "fmt"
diff --git a/models.go b/pkg/issues/models.go
similarity index 56%
rename from models.go
rename to pkg/issues/models.go
index 4d9837b23..fb163341c 100644
--- a/models.go
+++ b/pkg/issues/models.go
@@ -1,12 +1,71 @@
-package main
+package issues
import (
"encoding/json"
+ "reflect"
+ "strings"
"time"
"github.com/lib/pq"
+ "moul.io/depviz/pkg/airtabledb"
)
+type Feature interface {
+ String() string
+ GetID() string
+ ToRecord(airtabledb.DB) airtabledb.Record
+}
+
+// toRecord attempts to automatically convert between an issues.Feature and an airtable Record.
+// It's not particularly robust, but it works for structs following the format of Features and Records.
+func toRecord(cache airtabledb.DB, src Feature, dst interface{}) {
+ dV := reflect.ValueOf(dst).Elem().FieldByName("Fields")
+ sV := reflect.ValueOf(src)
+ copyFields(cache, sV, dV)
+}
+
+func copyFields(cache airtabledb.DB, src reflect.Value, dst reflect.Value) {
+ dT := dst.Type()
+ for i := 0; i < dst.NumField(); i++ {
+ dFV := dst.Field(i)
+ dSF := dT.Field(i)
+ fieldName := dSF.Name
+ // Recursively copy the embedded struct Base.
+ if fieldName == "Base" {
+ copyFields(cache, src, dFV)
+ continue
+ }
+ sFV := src.FieldByName(fieldName)
+ if fieldName == "Errors" {
+ dFV.Set(reflect.ValueOf(strings.Join(sFV.Interface().(pq.StringArray), ", ")))
+ continue
+ }
+ if dFV.Type().String() == "[]string" {
+ if sFV.Pointer() != 0 {
+ tableIndex := 0
+ srcFieldTypeName := strings.Split(strings.Trim(sFV.Type().String(), "*[]"), ".")[1]
+ tableIndex, ok := airtabledb.TableNameToIndex[strings.ToLower(srcFieldTypeName)]
+ if !ok {
+ panic("toRecord: could not find index for table name " + strings.ToLower(srcFieldTypeName))
+ }
+ if sFV.Kind() == reflect.Slice {
+ for i := 0; i < sFV.Len(); i++ {
+ idV := sFV.Index(i).Elem().FieldByName("ID")
+ id := idV.String()
+ dFV.Set(reflect.Append(dFV, reflect.ValueOf(cache.Tables[tableIndex].FindByID(id))))
+ }
+ } else {
+ idV := sFV.Elem().FieldByName("ID")
+ id := idV.String()
+ dFV.Set(reflect.ValueOf([]string{cache.Tables[tableIndex].FindByID(id)}))
+ }
+ }
+ } else {
+ dFV.Set(sFV)
+ }
+ }
+}
+
//
// Base
//
@@ -18,6 +77,10 @@ type Base struct {
Errors pq.StringArray `json:"errors,omitempty" gorm:"type:varchar[]"`
}
+func (b Base) GetID() string {
+ return b.ID
+}
+
//
// Repository
//
@@ -40,6 +103,17 @@ type Repository struct {
OwnerID string `json:"owner-id"`
}
+func (p Repository) ToRecord(cache airtabledb.DB) airtabledb.Record {
+ record := airtabledb.RepositoryRecord{}
+ toRecord(cache, p, &record)
+ return record
+}
+
+func (r Repository) String() string {
+ out, _ := json.Marshal(r)
+ return string(out)
+}
+
//
// Provider
//
@@ -60,6 +134,17 @@ type Provider struct {
Driver string `json:"driver"` // github, gitlab, unknown
}
+func (p Provider) ToRecord(cache airtabledb.DB) airtabledb.Record {
+ record := airtabledb.ProviderRecord{}
+ toRecord(cache, p, &record)
+ return record
+}
+
+func (p Provider) String() string {
+ out, _ := json.Marshal(p)
+ return string(out)
+}
+
//
// Milestone
//
@@ -83,6 +168,17 @@ type Milestone struct {
RepositoryID string `json:"repository-id"`
}
+func (p Milestone) ToRecord(cache airtabledb.DB) airtabledb.Record {
+ record := airtabledb.MilestoneRecord{}
+ toRecord(cache, p, &record)
+ return record
+}
+
+func (m Milestone) String() string {
+ out, _ := json.Marshal(m)
+ return string(out)
+}
+
//
// Issue
//
@@ -131,6 +227,12 @@ func (i Issue) String() string {
return string(out)
}
+func (p Issue) ToRecord(cache airtabledb.DB) airtabledb.Record {
+ record := airtabledb.IssueRecord{}
+ toRecord(cache, p, &record)
+ return record
+}
+
//
// Label
//
@@ -145,6 +247,17 @@ type Label struct {
Description string `json:"description"`
}
+func (p Label) ToRecord(cache airtabledb.DB) airtabledb.Record {
+ record := airtabledb.LabelRecord{}
+ toRecord(cache, p, &record)
+ return record
+}
+
+func (l Label) String() string {
+ out, _ := json.Marshal(l)
+ return string(out)
+}
+
//
// Account
//
@@ -169,4 +282,15 @@ type Account struct {
ProviderID string `json:"provider-id"`
}
+func (p Account) ToRecord(cache airtabledb.DB) airtabledb.Record {
+ record := airtabledb.AccountRecord{}
+ toRecord(cache, p, &record)
+ return record
+}
+
+func (a Account) String() string {
+ out, _ := json.Marshal(a)
+ return string(out)
+}
+
// FIXME: create a User struct to handle multiple accounts and aliases
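
A minimal sketch (not part of this diff) of converting a model into its airtable record via the new ToRecord helpers; it assumes the issues models embed Base (so ID is a promoted field), and all values are illustrative:

package main

import (
	"fmt"

	"moul.io/depviz/pkg/airtabledb"
	"moul.io/depviz/pkg/issues"
)

func main() {
	// The cache is used by toRecord to resolve relationships into airtable
	// record IDs; an empty one is enough for a record with no relationships.
	cache := airtabledb.NewDB()

	p := issues.Provider{Driver: "github"} // illustrative value
	p.ID = "https://github.com"            // illustrative value

	rec := p.ToRecord(cache) // returns an airtabledb.Record
	fmt.Println(rec.String())
}
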
diff --git a/target.go b/pkg/issues/target.go
similarity index 99%
rename from target.go
rename to pkg/issues/target.go
index 242757dd7..02ce2089c 100644
--- a/target.go
+++ b/pkg/issues/target.go
@@ -1,4 +1,4 @@
-package main
+package issues
import (
"fmt"
diff --git a/target_test.go b/pkg/issues/target_test.go
similarity index 99%
rename from target_test.go
rename to pkg/issues/target_test.go
index 3219f1091..281346425 100644
--- a/target_test.go
+++ b/pkg/issues/target_test.go
@@ -1,4 +1,4 @@
-package main
+package issues
import "fmt"
diff --git a/util.go b/util.go
deleted file mode 100644
index f9dd6c595..000000000
--- a/util.go
+++ /dev/null
@@ -1,84 +0,0 @@
-package main
-
-import (
- "fmt"
- "reflect"
- "regexp"
- "sort"
- "strings"
- "time"
-)
-
-func wrap(text string, lineWidth int) string {
- words := strings.Fields(strings.TrimSpace(text))
- if len(words) == 0 {
- return text
- }
- wrapped := words[0]
- spaceLeft := lineWidth - len(wrapped)
- for _, word := range words[1:] {
- if len(word)+1 > spaceLeft {
- wrapped += "\n" + word
- spaceLeft = lineWidth - len(word)
- } else {
- wrapped += " " + word
- spaceLeft -= 1 + len(word)
- }
- }
-
- return wrapped
-}
-
-func escape(input string) string {
- return fmt.Sprintf("%q", input)
-}
-
-func panicIfErr(err error) {
- if err != nil {
- panic(err)
- }
-}
-
-func uniqueStrings(input []string) []string {
- u := make([]string, 0, len(input))
- m := make(map[string]bool)
-
- for _, val := range input {
- if _, ok := m[val]; !ok {
- m[val] = true
- u = append(u, val)
- }
- }
-
- return u
-}
-
-func normalizeURL(input string) string {
- parts := strings.Split(input, "://")
- output := fmt.Sprintf("%s://%s", parts[0], strings.Replace(parts[1], "//", "/", -1))
- output = strings.TrimRight(output, "#")
- output = strings.TrimRight(output, "/")
- return output
-}
-
-var rxDNSName = regexp.MustCompile(`^([a-zA-Z0-9_]{1}[a-zA-Z0-9_-]{0,62}){1}(\.[a-zA-Z0-9_]{1}[a-zA-Z0-9_-]{0,62})*[\._]?$`)
-
-func isDNSName(input string) bool {
- return rxDNSName.MatchString(input)
-}
-
-func isSameStringSlice(a, b []string) bool {
- if a == nil {
- a = []string{}
- }
- if b == nil {
- b = []string{}
- }
- sort.Strings(a)
- sort.Strings(b)
- return reflect.DeepEqual(a, b)
-}
-
-func isSameAirtableDate(a, b time.Time) bool {
- return a.Truncate(time.Millisecond).UTC() == b.Truncate(time.Millisecond).UTC()
-}