From eb0298b9d56eb9b0f4d9e27964c951e02d254f08 Mon Sep 17 00:00:00 2001 From: Mike Lang Date: Thu, 30 Mar 2017 13:22:37 -0700 Subject: [PATCH 01/10] report: Refactor various methods to avoid explicitly listing topologies By reducing the number of times we refer to every topology by name line by line, we make it easier to add new topologies, reduce the risk of bugs where a topology is not listed, and reduce the risk of the repeated lines getting out of sync with each other. We introduce two new methods to assist this: WalkPairedTopologies, a modified WalkTopologies that gives the called function the same topology from two reports. This is used, for example, to implement Copy and Merge. TopologyMap, which returns a map of all topologies by name. This is then used to implement all other methods. This leaves only 4 instances of listing topologies: In the consts at the top of the file, to give it a name In the struct itself In the constructor, where we need to set per-topology settings In TopologyMap --- report/report.go | 114 +++++++++++++++++++++-------------------------- 1 file changed, 52 insertions(+), 62 deletions(-) diff --git a/report/report.go b/report/report.go index e671891c5f..9c6ed506a4 100644 --- a/report/report.go +++ b/report/report.go @@ -177,49 +177,49 @@ func MakeReport() Report { } } +// TopologyMap gets a map from topology names to pointers to the respective topologies +func (r *Report) TopologyMap() map[string]*Topology { + return map[string]*Topology{ + Endpoint: &r.Endpoint, + Process: &r.Process, + Container: &r.Container, + ContainerImage: &r.ContainerImage, + Pod: &r.Pod, + Service: &r.Service, + Deployment: &r.Deployment, + ReplicaSet: &r.ReplicaSet, + Host: &r.Host, + Overlay: &r.Overlay, + ECSTask: &r.ECSTask, + ECSService: &r.ECSService, + } +} + // Copy returns a value copy of the report. func (r Report) Copy() Report { - return Report{ - Endpoint: r.Endpoint.Copy(), - Process: r.Process.Copy(), - Container: r.Container.Copy(), - ContainerImage: r.ContainerImage.Copy(), - Host: r.Host.Copy(), - Pod: r.Pod.Copy(), - Service: r.Service.Copy(), - Deployment: r.Deployment.Copy(), - ReplicaSet: r.ReplicaSet.Copy(), - Overlay: r.Overlay.Copy(), - ECSTask: r.ECSTask.Copy(), - ECSService: r.ECSService.Copy(), - Sampling: r.Sampling, - Window: r.Window, - Plugins: r.Plugins.Copy(), - ID: fmt.Sprintf("%d", rand.Int63()), + newReport := Report{ + Sampling: r.Sampling, + Window: r.Window, + Plugins: r.Plugins.Copy(), + ID: fmt.Sprintf("%d", rand.Int63()), } + newReport.WalkPairedTopologies(&r, func(newTopology, oldTopology *Topology) { + *newTopology = oldTopology.Copy() + }) + return newReport } // Merge merges another Report into the receiver and returns the result. The // original is not modified. 
func (r Report) Merge(other Report) Report { - return Report{ - Endpoint: r.Endpoint.Merge(other.Endpoint), - Process: r.Process.Merge(other.Process), - Container: r.Container.Merge(other.Container), - ContainerImage: r.ContainerImage.Merge(other.ContainerImage), - Host: r.Host.Merge(other.Host), - Pod: r.Pod.Merge(other.Pod), - Service: r.Service.Merge(other.Service), - Deployment: r.Deployment.Merge(other.Deployment), - ReplicaSet: r.ReplicaSet.Merge(other.ReplicaSet), - Overlay: r.Overlay.Merge(other.Overlay), - ECSTask: r.ECSTask.Merge(other.ECSTask), - ECSService: r.ECSService.Merge(other.ECSService), - Sampling: r.Sampling.Merge(other.Sampling), - Window: r.Window + other.Window, - Plugins: r.Plugins.Merge(other.Plugins), - ID: fmt.Sprintf("%d", rand.Int63()), - } + newReport := r.Copy() + newReport.Sampling = newReport.Sampling.Merge(other.Sampling) + newReport.Window = newReport.Window + other.Window + newReport.Plugins = newReport.Plugins.Merge(other.Plugins) + newReport.WalkPairedTopologies(&other, func(ourTopology, theirTopology *Topology) { + *ourTopology = ourTopology.Merge(*theirTopology) + }) + return newReport } // Topologies returns a slice of Topologies in this report @@ -234,37 +234,27 @@ func (r Report) Topologies() []Topology { // WalkTopologies iterates through the Topologies of the report, // potentially modifying them func (r *Report) WalkTopologies(f func(*Topology)) { - f(&r.Endpoint) - f(&r.Process) - f(&r.Container) - f(&r.ContainerImage) - f(&r.Pod) - f(&r.Service) - f(&r.Deployment) - f(&r.ReplicaSet) - f(&r.Host) - f(&r.Overlay) - f(&r.ECSTask) - f(&r.ECSService) + for _, t := range r.TopologyMap() { + f(t) + } +} + +// WalkPairedTopologies iterates through the Topologies of this and another report, +// potentially modifying one or both. +func (r *Report) WalkPairedTopologies(o *Report, f func(*Topology, *Topology)) { + rMap := r.TopologyMap() + oMap := o.TopologyMap() + for name := range rMap { + f(rMap[name], oMap[name]) + } } // Topology gets a topology by name func (r Report) Topology(name string) (Topology, bool) { - t, ok := map[string]Topology{ - Endpoint: r.Endpoint, - Process: r.Process, - Container: r.Container, - ContainerImage: r.ContainerImage, - Pod: r.Pod, - Service: r.Service, - Deployment: r.Deployment, - ReplicaSet: r.ReplicaSet, - Host: r.Host, - Overlay: r.Overlay, - ECSTask: r.ECSTask, - ECSService: r.ECSService, - }[name] - return t, ok + if t, ok := r.TopologyMap()[name]; ok { + return *t, true + } + return Topology{}, false } // Validate checks the report for various inconsistencies. From 75314cb910a7edf7e1ab90f029eb21bc1e581391 Mon Sep 17 00:00:00 2001 From: Mike Lang Date: Mon, 3 Apr 2017 10:42:33 -0700 Subject: [PATCH 02/10] Reduce manually listing all topologies in a few places Prefer WalkTopologies to apply a uniform action to every topology, reducing need to make multiple changes and risk of errors if you forget one. 
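As a sketch of the new pattern (assuming a report.Report value named rpt; the tests below use the same idiom with their own variable names), clearing Controls on every topology now needs only:

    rpt.WalkTopologies(func(t *report.Topology) {
        t.Controls = nil
    })

rather than one assignment per topology.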
--- probe/appclient/app_client_internal_test.go | 28 +++------------------ probe/probe_internal_test.go | 15 +++-------- probe/topology_tagger.go | 13 +--------- 3 files changed, 8 insertions(+), 48 deletions(-) diff --git a/probe/appclient/app_client_internal_test.go b/probe/appclient/app_client_internal_test.go index 73edd40747..66bcae51d7 100644 --- a/probe/appclient/app_client_internal_test.go +++ b/probe/appclient/app_client_internal_test.go @@ -73,30 +73,10 @@ func TestAppClientPublish(t *testing.T) { // marshalling->unmarshaling is not idempotent due to `json:"omitempty"` // tags, transforming empty slices into nils. So, we make DeepEqual // happy by setting empty `json:"omitempty"` entries to nil - rpt.Endpoint = report.MakeTopology() - rpt.Process = report.MakeTopology() - rpt.Container = report.MakeTopology() - rpt.ContainerImage = report.MakeTopology() - rpt.Pod = report.MakeTopology() - rpt.Service = report.MakeTopology() - rpt.Deployment = report.MakeTopology() - rpt.ReplicaSet = report.MakeTopology() - rpt.Host = report.MakeTopology() - rpt.Overlay = report.MakeTopology() - rpt.ECSTask = report.MakeTopology() - rpt.ECSService = report.MakeTopology() - rpt.Endpoint.Controls = nil - rpt.Process.Controls = nil - rpt.Container.Controls = nil - rpt.ContainerImage.Controls = nil - rpt.Pod.Controls = nil - rpt.Service.Controls = nil - rpt.Deployment.Controls = nil - rpt.ReplicaSet.Controls = nil - rpt.Host.Controls = nil - rpt.Overlay.Controls = nil - rpt.ECSTask.Controls = nil - rpt.ECSService.Controls = nil + rpt.WalkTopologies(func(to *report.Topology) { + *to = report.MakeTopology() + to.Controls = nil + }) s := dummyServer(t, token, id, version, rpt, done) defer s.Close() diff --git a/probe/probe_internal_test.go b/probe/probe_internal_test.go index 6b6b4f005e..42b7cbd58a 100644 --- a/probe/probe_internal_test.go +++ b/probe/probe_internal_test.go @@ -81,18 +81,9 @@ func TestProbe(t *testing.T) { // tags, transforming empty slices into nils. 
So, we make DeepEqual // happy by setting empty `json:"omitempty"` entries to nil node.Metrics = nil - want.Endpoint.Controls = nil - want.Process.Controls = nil - want.Container.Controls = nil - want.ContainerImage.Controls = nil - want.Pod.Controls = nil - want.Service.Controls = nil - want.Deployment.Controls = nil - want.ReplicaSet.Controls = nil - want.Host.Controls = nil - want.Overlay.Controls = nil - want.ECSTask.Controls = nil - want.ECSService.Controls = nil + want.WalkTopologies(func(t *report.Topology) { + t.Controls = nil + }) want.Endpoint.AddNode(node) pub := mockPublisher{make(chan report.Report, 10)} diff --git a/probe/topology_tagger.go b/probe/topology_tagger.go index cf41896d1b..ce37e19a36 100644 --- a/probe/topology_tagger.go +++ b/probe/topology_tagger.go @@ -16,18 +16,7 @@ func (topologyTagger) Name() string { return "Topology" } // Tag implements Tagger func (topologyTagger) Tag(r report.Report) (report.Report, error) { - for name, t := range map[string]*report.Topology{ - report.Endpoint: &(r.Endpoint), - report.Process: &(r.Process), - report.Container: &(r.Container), - report.ContainerImage: &(r.ContainerImage), - report.Pod: &(r.Pod), - report.Service: &(r.Service), - report.ECSTask: &(r.ECSTask), - report.ECSService: &(r.ECSService), - report.Host: &(r.Host), - report.Overlay: &(r.Overlay), - } { + for name, t := range r.TopologyMap() { for _, node := range t.Nodes { t.AddNode(node.WithTopology(name)) } From 14ab5cccebdab1a91513073a15471f9d95518a45 Mon Sep 17 00:00:00 2001 From: Mike Lang Date: Mon, 10 Apr 2017 11:48:49 -0700 Subject: [PATCH 03/10] render: Maintain a list of 'primary' api topologies for each report topology This gives us a single source of truth in a variety of situations where we want to know what view to direct a user to in order to 'open' a particular node. I wanted to put this in app/api_topologies where the views are defined, but that creates a circular import. --- render/detailed/summary.go | 13 +++++++++++++ 1 file changed, 13 insertions(+) diff --git a/render/detailed/summary.go b/render/detailed/summary.go index b289bba532..85f1d1bca4 100644 --- a/render/detailed/summary.go +++ b/render/detailed/summary.go @@ -81,6 +81,19 @@ var templates = map[string]struct{ Label, LabelMinor string }{ render.OutgoingInternetID: {render.OutboundMajor, render.OutboundMinor}, } +// For each report.Topology, map to a 'primary' API topology. This can then be used in a variety of places. +var primaryAPITopology = map[string]string{ + report.Container: "containers", + report.ContainerImage: "containers-by-image", + report.Pod: "pods", + report.ReplicaSet: "replica-sets", + report.Deployment: "deployments", + report.Service: "services", + report.ECSTask: "ecs-tasks", + report.ECSService: "ecs-services", + report.Host: "hosts", +} + // MakeNodeSummary summarizes a node, if possible. func MakeNodeSummary(r report.Report, n report.Node) (NodeSummary, bool) { if renderer, ok := renderers[n.Topology]; ok { From c16becc1481a3d73965c68bcb6c6f213465798bc Mon Sep 17 00:00:00 2001 From: Mike Lang Date: Mon, 3 Apr 2017 12:15:10 -0700 Subject: [PATCH 04/10] render/detailed: When summarising children, add fallback for unlisted topologies Currently, if a topology does not have any specific info in nodeSummariesByID, any children of the node that belong to that topology will be silently omitted. This change adds a default behaviour for such topologies, with no special columns but at least it is displayed at all. 
Unlisted topologies are displayed after all listed ones, in arbitrary order. Note that completely bogus or other special cases (eg. topology = Pseudo) still will not be displayed as report.Topology() will fail. --- render/detailed/node.go | 23 +++++++++++++++++++++++ 1 file changed, 23 insertions(+) diff --git a/render/detailed/node.go b/render/detailed/node.go index fc5147ff7e..cd637c688d 100644 --- a/render/detailed/node.go +++ b/render/detailed/node.go @@ -246,14 +246,37 @@ func children(r report.Report, n report.Node) []NodeSummaryGroup { }) nodeSummaryGroups := []NodeSummaryGroup{} + // Apply specific group specs in the order they're listed for _, spec := range nodeSummaryGroupSpecs { if len(summaries[spec.topologyID]) > 0 { sort.Sort(nodeSummariesByID(summaries[spec.TopologyID])) group := spec.NodeSummaryGroup group.Nodes = summaries[spec.topologyID] nodeSummaryGroups = append(nodeSummaryGroups, group) + delete(summaries, spec.topologyID) } } + // As a fallback, in case a topology has no group spec defined, add any remaining at the end + for topologyID, nodeSummaries := range summaries { + if len(nodeSummaries) == 0 { + continue + } + topology, ok := r.Topology(topologyID) + if !ok { + continue + } + apiTopology, ok := primaryAPITopology[topologyID] + if !ok { + continue + } + sort.Sort(nodeSummariesByID(nodeSummaries)) + group := NodeSummaryGroup{ + ID: apiTopology, + Label: topology.LabelPlural, + Columns: []Column{}, + } + nodeSummaryGroups = append(nodeSummaryGroups, group) + } return nodeSummaryGroups } From 2a74883cceea2af5ad277d9b25c3e2ead684545e Mon Sep 17 00:00:00 2001 From: Mike Lang Date: Mon, 3 Apr 2017 12:47:30 -0700 Subject: [PATCH 05/10] If no node summary generator exists for topology, do a sane default The default sets the node label to the node ID. This is likely to not look very good, but the intent is that it creates an obvious problem, ie. that the node ID is being used as the label, rather than a silent omission or more subtle problem. Possible future work: * For single-component IDs, extract the component automatically and use that instead. * Instead of functions, in simple cases just have a LUT by topology with common behaviours like 'stack = true or false', 'label = this key in node.Latest' The latter opens up to eventually moving this info inside the report itself ala topology templates, or at least centralizing it in the source. --- render/detailed/summary.go | 10 +++++++++- 1 file changed, 9 insertions(+), 1 deletion(-) diff --git a/render/detailed/summary.go b/render/detailed/summary.go index 85f1d1bca4..6cde8d49a8 100644 --- a/render/detailed/summary.go +++ b/render/detailed/summary.go @@ -73,6 +73,7 @@ var renderers = map[string]func(NodeSummary, report.Node) (NodeSummary, bool){ report.ECSService: ecsServiceNodeSummary, report.Host: hostNodeSummary, report.Overlay: weaveNodeSummary, + report.Endpoint: nil, // Do not render } var templates = map[string]struct{ Label, LabelMinor string }{ @@ -97,7 +98,14 @@ var primaryAPITopology = map[string]string{ // MakeNodeSummary summarizes a node, if possible. 
func MakeNodeSummary(r report.Report, n report.Node) (NodeSummary, bool) { if renderer, ok := renderers[n.Topology]; ok { - return renderer(baseNodeSummary(r, n), n) + // Skip (and don't fall through to fallback) if renderer maps to nil + if renderer != nil { + return renderer(baseNodeSummary(r, n), n) + } + } else if _, ok := r.Topology(n.Topology); ok { + summary := baseNodeSummary(r, n) + summary.Label = n.ID // This is unlikely to look very good, but is a reasonable fallback + return summary, true } if strings.HasPrefix(n.Topology, "group:") { return groupNodeSummary(baseNodeSummary(r, n), r, n) From 9c88ad85e92d8bbb88cb5a52aa84ef01611b4266 Mon Sep 17 00:00:00 2001 From: Mike Lang Date: Mon, 3 Apr 2017 15:37:35 -0700 Subject: [PATCH 06/10] render/detailed/parents: Refactor for less repeated information We replace the existing data structure with a simpler one that only specifies how to get the parent label, which is the only part of the Parent struct that can't be generated from the node info alone. Future work: Standardize this concept of a label and put it in the topology instead. Though that already exists...so just use it? --- render/detailed/parents.go | 127 ++++++++++++++----------------------- 1 file changed, 46 insertions(+), 81 deletions(-) diff --git a/render/detailed/parents.go b/render/detailed/parents.go index e909d0e010..b749e13265 100644 --- a/render/detailed/parents.go +++ b/render/detailed/parents.go @@ -16,117 +16,82 @@ type Parent struct { TopologyID string `json:"topologyId"` } -func node(t report.Topology) func(string) (report.Node, bool) { - return func(id string) (report.Node, bool) { - n, ok := t.Nodes[id] - return n, ok - } -} +var ( + kubernetesParentLabel = latestLookup(kubernetes.Name) -func fake(id string) (report.Node, bool) { - return report.MakeNode(id), true -} + getLabelForTopology = map[string]func(report.Node) string{ + report.Container: getRenderableContainerName, + report.Pod: kubernetesParentLabel, + report.ReplicaSet: kubernetesParentLabel, + report.Deployment: kubernetesParentLabel, + report.Service: kubernetesParentLabel, + report.ECSTask: latestLookup(awsecs.TaskFamily), + report.ECSService: ecsServiceParentLabel, + report.ContainerImage: containerImageParentLabel, + report.Host: latestLookup(host.HostName), + } +) // Parents renders the parents of this report.Node, which have been aggregated // from the probe reports. 
func Parents(r report.Report, n report.Node) (result []Parent) { - topologies := map[string]struct { - node func(id string) (report.Node, bool) - render func(report.Node) Parent - }{ - report.Container: {node(r.Container), containerParent}, - report.Pod: {node(r.Pod), podParent}, - report.ReplicaSet: {node(r.ReplicaSet), replicaSetParent}, - report.Deployment: {node(r.Deployment), deploymentParent}, - report.Service: {node(r.Service), serviceParent}, - report.ECSTask: {node(r.ECSTask), ecsTaskParent}, - report.ECSService: {node(r.ECSService), ecsServiceParent}, - report.ContainerImage: {fake, containerImageParent}, - report.Host: {node(r.Host), hostParent}, - } topologyIDs := []string{} - for topologyID := range topologies { + for topologyID := range getLabelForTopology { topologyIDs = append(topologyIDs, topologyID) } sort.Strings(topologyIDs) for _, topologyID := range topologyIDs { - t := topologies[topologyID] + getLabel := getLabelForTopology[topologyID] + topology, ok := r.Topology(topologyID) + if !ok { + continue + } parents, _ := n.Parents.Lookup(topologyID) for _, id := range parents { if topologyID == n.Topology && id == n.ID { continue } - parent, ok := t.node(id) + var parentNode report.Node + // Special case: container image parents should be empty nodes for some reason + if topologyID == report.ContainerImage { + parentNode = report.MakeNode(id) + } else { + if parent, ok := topology.Nodes[id]; ok { + parentNode = parent + } else { + continue + } + } + + apiTopologyID, ok := primaryAPITopology[topologyID] if !ok { continue } - result = append(result, t.render(parent)) + result = append(result, Parent{ + ID: id, + Label: getLabel(parentNode), + TopologyID: apiTopologyID, + }) } } return result } -func containerParent(n report.Node) Parent { - label := getRenderableContainerName(n) - return Parent{ - ID: n.ID, - Label: label, - TopologyID: "containers", +func latestLookup(key string) func(report.Node) string { + return func(n report.Node) string { + value, _ := n.Latest.Lookup(key) + return value } } -var ( - podParent = kubernetesParent("pods") - replicaSetParent = kubernetesParent("replica-sets") - deploymentParent = kubernetesParent("deployments") - serviceParent = kubernetesParent("services") -) - -func kubernetesParent(topology string) func(report.Node) Parent { - return func(n report.Node) Parent { - name, _ := n.Latest.Lookup(kubernetes.Name) - return Parent{ - ID: n.ID, - Label: name, - TopologyID: topology, - } - } -} - -func ecsTaskParent(n report.Node) Parent { - family, _ := n.Latest.Lookup(awsecs.TaskFamily) - return Parent{ - ID: n.ID, - Label: family, - TopologyID: "ecs-tasks", - } -} - -func ecsServiceParent(n report.Node) Parent { +func ecsServiceParentLabel(n report.Node) string { _, name, _ := report.ParseECSServiceNodeID(n.ID) - return Parent{ - ID: n.ID, - Label: name, - TopologyID: "ecs-services", - } + return name } -func containerImageParent(n report.Node) Parent { +func containerImageParentLabel(n report.Node) string { name, _ := report.ParseContainerImageNodeID(n.ID) - return Parent{ - ID: n.ID, - Label: name, - TopologyID: "containers-by-image", - } -} - -func hostParent(n report.Node) Parent { - hostName, _ := n.Latest.Lookup(host.HostName) - return Parent{ - ID: n.ID, - Label: hostName, - TopologyID: "hosts", - } + return name } From 3656965ae7432f55684d1c712d88f4c6e5380f1b Mon Sep 17 00:00:00 2001 From: Mike Lang Date: Fri, 7 Apr 2017 10:10:52 -0700 Subject: [PATCH 07/10] Refactor Map2Parent and family into one function This greatly improves code 
reuse while keeping the behaviour flexible --- render/ecs.go | 54 +++------------------- render/pod.go | 125 ++++++++++++++++++++------------------------------ 2 files changed, 57 insertions(+), 122 deletions(-) diff --git a/render/ecs.go b/render/ecs.go index bf46d2c841..50eef7226b 100644 --- a/render/ecs.go +++ b/render/ecs.go @@ -1,9 +1,6 @@ package render import ( - "strings" - - "github.com/weaveworks/scope/probe/docker" "github.com/weaveworks/scope/report" ) @@ -13,8 +10,11 @@ var ECSTaskRenderer = ConditionalRenderer(renderECSTopologies, PropagateSingleMetrics(report.Container), MakeReduce( MakeMap( - MapContainer2ECSTask, - ContainerWithImageNameRenderer, + Map2Parent(report.ECSTask, UnmanagedID, nil), + MakeFilter( + IsRunning, + ContainerWithImageNameRenderer, + ), ), SelectECSTask, ), @@ -27,7 +27,7 @@ var ECSServiceRenderer = ConditionalRenderer(renderECSTopologies, PropagateSingleMetrics(report.ECSTask), MakeReduce( MakeMap( - Map2Parent(report.ECSService), + Map2Parent(report.ECSService, "", nil), ECSTaskRenderer, ), SelectECSService, @@ -35,48 +35,6 @@ var ECSServiceRenderer = ConditionalRenderer(renderECSTopologies, ), ) -// MapContainer2ECSTask maps container Nodes to ECS Task -// Nodes. -// -// If this function is given a node without an ECS Task parent -// (including other pseudo nodes), it will produce an "Unmanaged" -// pseudo node. -// -// TODO: worth merging with MapContainer2Pod? -func MapContainer2ECSTask(n report.Node, _ report.Networks) report.Nodes { - // Uncontained becomes unmanaged in the tasks view - if strings.HasPrefix(n.ID, MakePseudoNodeID(UncontainedID)) { - id := MakePseudoNodeID(UnmanagedID, report.ExtractHostID(n)) - node := NewDerivedPseudoNode(id, n) - return report.Nodes{id: node} - } - - // Propagate all pseudo nodes - if n.Topology == Pseudo { - return report.Nodes{n.ID: n} - } - - // Ignore non-running containers - if state, ok := n.Latest.Lookup(docker.ContainerState); ok && state != docker.StateRunning { - return report.Nodes{} - } - - taskIDSet, ok := n.Parents.Lookup(report.ECSTask) - if !ok || len(taskIDSet) == 0 { - id := MakePseudoNodeID(UnmanagedID, report.ExtractHostID(n)) - node := NewDerivedPseudoNode(id, n) - return report.Nodes{id: node} - } - nodeID := taskIDSet[0] - node := NewDerivedNode(nodeID, n).WithTopology(report.ECSTask) - // Propagate parent service - if serviceIDSet, ok := n.Parents.Lookup(report.ECSService); ok { - node = node.WithParents(report.MakeSets().Add(report.ECSService, serviceIDSet)) - } - node.Counters = node.Counters.Add(n.Topology, 1) - return report.Nodes{nodeID: node} -} - func renderECSTopologies(rpt report.Report) bool { return len(rpt.ECSTask.Nodes)+len(rpt.ECSService.Nodes) >= 1 } diff --git a/render/pod.go b/render/pod.go index a774fcf464..81b59847c7 100644 --- a/render/pod.go +++ b/render/pod.go @@ -18,6 +18,11 @@ func renderKubernetesTopologies(rpt report.Report) bool { return len(rpt.Pod.Nodes)+len(rpt.Service.Nodes)+len(rpt.Deployment.Nodes)+len(rpt.ReplicaSet.Nodes) >= 1 } +func isPauseContainer(n report.Node) bool { + image, ok := n.Latest.Lookup(docker.ImageName) + return ok && kubernetes.IsPauseImageName(image) +} + // PodRenderer is a Renderer which produces a renderable kubernetes // graph by merging the container graph and the pods topology. 
var PodRenderer = ConditionalRenderer(renderKubernetesTopologies, @@ -30,8 +35,14 @@ var PodRenderer = ConditionalRenderer(renderKubernetesTopologies, PropagateSingleMetrics(report.Container), MakeReduce( MakeMap( - MapContainer2Pod, - ContainerWithImageNameRenderer, + Map2Parent(report.Pod, UnmanagedID, nil), + MakeFilter( + ComposeFilterFuncs( + IsRunning, + Complement(isPauseContainer), + ), + ContainerWithImageNameRenderer, + ), ), ShortLivedConnectionJoin(SelectPod, MapPod2IP), SelectPod, @@ -47,7 +58,7 @@ var PodServiceRenderer = ConditionalRenderer(renderKubernetesTopologies, PropagateSingleMetrics(report.Pod), MakeReduce( MakeMap( - Map2Service, + Map2Parent(report.Service, "", nil), PodRenderer, ), SelectService, @@ -62,7 +73,7 @@ var DeploymentRenderer = ConditionalRenderer(renderKubernetesTopologies, PropagateSingleMetrics(report.ReplicaSet), MakeReduce( MakeMap( - Map2Deployment, + Map2Parent(report.Deployment, "", mapPodCounts), ReplicaSetRenderer, ), SelectDeployment, @@ -77,7 +88,7 @@ var ReplicaSetRenderer = ConditionalRenderer(renderKubernetesTopologies, PropagateSingleMetrics(report.Pod), MakeReduce( MakeMap( - Map2ReplicaSet, + Map2Parent(report.ReplicaSet, "", nil), PodRenderer, ), SelectReplicaSet, @@ -85,54 +96,12 @@ var ReplicaSetRenderer = ConditionalRenderer(renderKubernetesTopologies, ), ) -// MapContainer2Pod maps container Nodes to pod -// Nodes. -// -// If this function is given a node without a kubernetes_pod_id -// (including other pseudo nodes), it will produce an "Unmanaged" -// pseudo node. -// -// Otherwise, this function will produce a node with the correct ID -// format for a container, but without any Major or Minor labels. -// It does not have enough info to do that, and the resulting graph -// must be merged with a container graph to get that info. -func MapContainer2Pod(n report.Node, _ report.Networks) report.Nodes { - // Uncontained becomes unmanaged in the pods view - if strings.HasPrefix(n.ID, MakePseudoNodeID(UncontainedID)) { - id := MakePseudoNodeID(UnmanagedID, report.ExtractHostID(n)) - node := NewDerivedPseudoNode(id, n) - return report.Nodes{id: node} - } - - // Propagate all pseudo nodes - if n.Topology == Pseudo { - return report.Nodes{n.ID: n} - } - - // Ignore non-running containers - if state, ok := n.Latest.Lookup(docker.ContainerState); ok && state != docker.StateRunning { - return report.Nodes{} - } - - // Ignore pause containers - if image, ok := n.Latest.Lookup(docker.ImageName); ok && kubernetes.IsPauseImageName(image) { - return report.Nodes{} +func mapPodCounts(parent, original report.Node) report.Node { + // When mapping ReplicaSets to Deployments, we want to propagate the Pods counter + if count, ok := original.Counters.Lookup(report.Pod); ok { + parent.Counters = parent.Counters.Add(report.Pod, count) } - - // Otherwise, if some some reason the container doesn't have a pod uid (maybe - // slightly out of sync reports, or its not in a pod), make it part of unmanaged. - uid, ok := n.Latest.Lookup(docker.LabelPrefix + "io.kubernetes.pod.uid") - if !ok { - id := MakePseudoNodeID(UnmanagedID, report.ExtractHostID(n)) - node := NewDerivedPseudoNode(id, n) - return report.Nodes{id: node} - } - - id := report.MakePodNodeID(uid) - node := NewDerivedNode(id, n). - WithTopology(report.Pod) - node.Counters = node.Counters.Add(n.Topology, 1) - return report.Nodes{id: node} + return parent } // MapPod2IP maps pod nodes to their IP address. 
This allows pods to @@ -152,42 +121,50 @@ func MapPod2IP(m report.Node) []string { return []string{report.MakeScopedEndpointNodeID("", ip, "")} } -// The various ways of grouping pods -var ( - Map2Service = Map2Parent(report.Service) - Map2Deployment = Map2Parent(report.Deployment) - Map2ReplicaSet = Map2Parent(report.ReplicaSet) -) - -// Map2Parent maps Nodes to some parent grouping. -func Map2Parent(topology string) func(n report.Node, _ report.Networks) report.Nodes { +// Map2Parent returns a MapFunc which maps Nodes to some parent grouping. +func Map2Parent( + // The topology ID of the parents + topology string, + // Either the ID prefix of the pseudo node to use for nodes without + // any parents in the group, eg. UnmanagedID, or "" to drop nodes without any parents. + noParentsPseudoID string, + // Optional (can be nil) function to modify any parent nodes, + // eg. to copy over details from the original node. + modifyMappedNode func(parent, original report.Node) report.Node, +) MapFunc { return func(n report.Node, _ report.Networks) report.Nodes { + // Uncontained becomes Unmanaged/whatever if noParentsPseudoID is set + if noParentsPseudoID != "" && strings.HasPrefix(n.ID, MakePseudoNodeID(UncontainedID)) { + id := MakePseudoNodeID(noParentsPseudoID, report.ExtractHostID(n)) + node := NewDerivedPseudoNode(id, n) + return report.Nodes{id: node} + } + // Propagate all pseudo nodes if n.Topology == Pseudo { return report.Nodes{n.ID: n} } - // Otherwise, if some some reason the node doesn't have any of these ids - // (maybe slightly out of sync reports, or its not in this group), just - // drop it + // If some some reason the node doesn't have any of these ids + // (maybe slightly out of sync reports, or its not in this group), + // either drop it or put it in Uncontained/Unmanaged/whatever if one was given groupIDs, ok := n.Parents.Lookup(topology) - if !ok { - return report.Nodes{} + if !ok || len(groupIDs) == 0 { + if noParentsPseudoID == "" { + return report.Nodes{} + } + id := MakePseudoNodeID(UnmanagedID, report.ExtractHostID(n)) + node := NewDerivedPseudoNode(id, n) + return report.Nodes{id: node} } result := report.Nodes{} for _, id := range groupIDs { node := NewDerivedNode(id, n).WithTopology(topology) node.Counters = node.Counters.Add(n.Topology, 1) - - // When mapping replica(tionController)s(ets) to deployments - // we must propagate the pod counter. 
- if n.Topology != report.Pod { - if count, ok := n.Counters.Lookup(report.Pod); ok { - node.Counters = node.Counters.Add(report.Pod, count) - } + if modifyMappedNode != nil { + node = modifyMappedNode(node, n) } - result[id] = node } return result From 7ba3555fe6143809f9d1714e781002f85250da3a Mon Sep 17 00:00:00 2001 From: Mike Lang Date: Fri, 7 Apr 2017 10:49:47 -0700 Subject: [PATCH 08/10] report: Reintroduce explicit topology listing for Walk functions To avoid needing to allocate a new map every time, since we're already hitting GC-related perf issues --- report/report.go | 22 ++++++++++++++-------- 1 file changed, 14 insertions(+), 8 deletions(-) diff --git a/report/report.go b/report/report.go index 9c6ed506a4..a95f0a9b1e 100644 --- a/report/report.go +++ b/report/report.go @@ -234,19 +234,25 @@ func (r Report) Topologies() []Topology { // WalkTopologies iterates through the Topologies of the report, // potentially modifying them func (r *Report) WalkTopologies(f func(*Topology)) { - for _, t := range r.TopologyMap() { - f(t) - } + var dummy Report + r.WalkPairedTopologies(&dummy, func(t, _ *Topology) { f(t) }) } // WalkPairedTopologies iterates through the Topologies of this and another report, // potentially modifying one or both. func (r *Report) WalkPairedTopologies(o *Report, f func(*Topology, *Topology)) { - rMap := r.TopologyMap() - oMap := o.TopologyMap() - for name := range rMap { - f(rMap[name], oMap[name]) - } + f(&r.Endpoint, &o.Endpoint) + f(&r.Process, &o.Process) + f(&r.Container, &o.Container) + f(&r.ContainerImage, &o.ContainerImage) + f(&r.Pod, &o.Pod) + f(&r.Service, &o.Service) + f(&r.Deployment, &o.Deployment) + f(&r.ReplicaSet, &o.ReplicaSet) + f(&r.Host, &o.Host) + f(&r.Overlay, &o.Overlay) + f(&r.ECSTask, &o.ECSTask) + f(&r.ECSService, &o.ECSService) } // Topology gets a topology by name From 9f0f120bc5d0effa4878819629b5d73c9c7f2a9b Mon Sep 17 00:00:00 2001 From: Mike Lang Date: Mon, 10 Apr 2017 15:06:38 -0700 Subject: [PATCH 09/10] Remove explicit listing of api topologies in render/detailed/node specs Instead, we can infer them from the render topology and the primaryAPITopology map --- render/detailed/node.go | 48 ++++++++++++++++++-------------------- render/detailed/summary.go | 1 + 2 files changed, 24 insertions(+), 25 deletions(-) diff --git a/render/detailed/node.go b/render/detailed/node.go index cd637c688d..fb24f09e89 100644 --- a/render/detailed/node.go +++ b/render/detailed/node.go @@ -134,8 +134,7 @@ var ( { topologyID: report.Host, NodeSummaryGroup: NodeSummaryGroup{ - TopologyID: "hosts", - Label: "Hosts", + Label: "Hosts", Columns: []Column{ {ID: host.CPUUsage, Label: "CPU", Datatype: "number"}, {ID: host.MemoryUsage, Label: "Memory", Datatype: "number"}, @@ -145,8 +144,7 @@ var ( { topologyID: report.Service, NodeSummaryGroup: NodeSummaryGroup{ - TopologyID: "services", - Label: "Services", + Label: "Services", Columns: []Column{ {ID: report.Pod, Label: "# Pods", Datatype: "number"}, {ID: kubernetes.IP, Label: "IP", Datatype: "ip"}, @@ -156,8 +154,7 @@ var ( { topologyID: report.ReplicaSet, NodeSummaryGroup: NodeSummaryGroup{ - TopologyID: "replica-sets", - Label: "Replica Sets", + Label: "Replica Sets", Columns: []Column{ {ID: report.Pod, Label: "# Pods", Datatype: "number"}, {ID: kubernetes.ObservedGeneration, Label: "Observed Gen.", Datatype: "number"}, @@ -167,8 +164,7 @@ var ( { topologyID: report.Pod, NodeSummaryGroup: NodeSummaryGroup{ - TopologyID: "pods", - Label: "Pods", + Label: "Pods", Columns: []Column{ {ID: kubernetes.State, 
Label: "State"}, @@ -180,8 +176,7 @@ var ( { topologyID: report.ECSService, NodeSummaryGroup: NodeSummaryGroup{ - TopologyID: "ecs-services", - Label: "Services", + Label: "Services", Columns: []Column{ {ID: awsecs.ServiceRunningCount, Label: "Running", Datatype: "number"}, {ID: awsecs.ServiceDesiredCount, Label: "Desired", Datatype: "number"}, @@ -191,8 +186,7 @@ var ( { topologyID: report.ECSTask, NodeSummaryGroup: NodeSummaryGroup{ - TopologyID: "ecs-tasks", - Label: "Tasks", + Label: "Tasks", Columns: []Column{ {ID: awsecs.CreatedAt, Label: "Created At", Datatype: "datetime"}, }, @@ -201,8 +195,7 @@ var ( { topologyID: report.Container, NodeSummaryGroup: NodeSummaryGroup{ - TopologyID: "containers", - Label: "Containers", Columns: []Column{ + Label: "Containers", Columns: []Column{ {ID: docker.CPUTotalUsage, Label: "CPU", Datatype: "number"}, {ID: docker.MemoryUsage, Label: "Memory", Datatype: "number"}, }, @@ -211,8 +204,7 @@ var ( { topologyID: report.Process, NodeSummaryGroup: NodeSummaryGroup{ - TopologyID: "processes", - Label: "Processes", Columns: []Column{ + Label: "Processes", Columns: []Column{ {ID: process.PID, Label: "PID", Datatype: "number"}, {ID: process.CPUUsage, Label: "CPU", Datatype: "number"}, {ID: process.MemoryUsage, Label: "Memory", Datatype: "number"}, @@ -248,13 +240,19 @@ func children(r report.Report, n report.Node) []NodeSummaryGroup { nodeSummaryGroups := []NodeSummaryGroup{} // Apply specific group specs in the order they're listed for _, spec := range nodeSummaryGroupSpecs { - if len(summaries[spec.topologyID]) > 0 { - sort.Sort(nodeSummariesByID(summaries[spec.TopologyID])) - group := spec.NodeSummaryGroup - group.Nodes = summaries[spec.topologyID] - nodeSummaryGroups = append(nodeSummaryGroups, group) - delete(summaries, spec.topologyID) + if len(summaries[spec.topologyID]) == 0 { + continue } + apiTopology, ok := primaryAPITopology[spec.topologyID] + if !ok { + continue + } + sort.Sort(nodeSummariesByID(summaries[spec.topologyID])) + group := spec.NodeSummaryGroup + group.Nodes = summaries[spec.topologyID] + group.TopologyID = apiTopology + nodeSummaryGroups = append(nodeSummaryGroups, group) + delete(summaries, spec.topologyID) } // As a fallback, in case a topology has no group spec defined, add any remaining at the end for topologyID, nodeSummaries := range summaries { @@ -271,9 +269,9 @@ func children(r report.Report, n report.Node) []NodeSummaryGroup { } sort.Sort(nodeSummariesByID(nodeSummaries)) group := NodeSummaryGroup{ - ID: apiTopology, - Label: topology.LabelPlural, - Columns: []Column{}, + TopologyID: apiTopology, + Label: topology.LabelPlural, + Columns: []Column{}, } nodeSummaryGroups = append(nodeSummaryGroups, group) } diff --git a/render/detailed/summary.go b/render/detailed/summary.go index 6cde8d49a8..cd7f457b19 100644 --- a/render/detailed/summary.go +++ b/render/detailed/summary.go @@ -84,6 +84,7 @@ var templates = map[string]struct{ Label, LabelMinor string }{ // For each report.Topology, map to a 'primary' API topology. This can then be used in a variety of places. 
var primaryAPITopology = map[string]string{ + report.Process: "processes", report.Container: "containers", report.ContainerImage: "containers-by-image", report.Pod: "pods", From 18ba2c4e38db3c76451fc666ac1a1047904b0a4c Mon Sep 17 00:00:00 2001 From: Mike Lang Date: Tue, 13 Dec 2016 15:14:53 -0800 Subject: [PATCH 10/10] ecs: Also make service a parent of task --- probe/awsecs/reporter.go | 2 ++ 1 file changed, 2 insertions(+) diff --git a/probe/awsecs/reporter.go b/probe/awsecs/reporter.go index 5e4849ce0a..9980b321af 100644 --- a/probe/awsecs/reporter.go +++ b/probe/awsecs/reporter.go @@ -184,6 +184,8 @@ func (r Reporter) Tag(rpt report.Report) (report.Report, error) { if serviceName, ok := ecsInfo.TaskServiceMap[taskArn]; ok { serviceID := report.MakeECSServiceNodeID(cluster, serviceName) parentsSets = parentsSets.Add(report.ECSService, report.MakeStringSet(serviceID)) + // in addition, make service parent of task + rpt.ECSTask.Nodes[taskID] = rpt.ECSTask.Nodes[taskID].WithParents(report.MakeSets().Add(report.ECSService, report.MakeStringSet(serviceID))) } for _, containerID := range info.ContainerIDs { if containerNode, ok := rpt.Container.Nodes[containerID]; ok {