From 8a6f4488ba68fa0152e5856eca51c14efa73c141 Mon Sep 17 00:00:00 2001
From: Andy Goldstein
Date: Fri, 12 Aug 2016 11:37:07 -0400
Subject: [PATCH 1/3] UPSTREAM: 30277: Avoid computing DeepEqual in controllers all the time

---
 .../kubernetes/pkg/client/cache/reflector.go      |  6 +-----
 .../pkg/controller/daemon/controller.go           | 13 +++++++------
 .../deployment/deployment_controller.go           | 17 ++++++++++-------
 .../controller/endpoint/endpoints_controller.go   |  8 +++++---
 .../kubernetes/pkg/controller/job/controller.go   |  9 +++++----
 .../kubernetes/pkg/controller/petset/pet_set.go   |  8 +++++---
 .../pkg/controller/replicaset/replica_set.go      |  9 +++++----
 .../controller/replicaset/replica_set_test.go     |  6 ++++++
 .../replication/replication_controller.go         |  9 +++++----
 .../replication/replication_controller_test.go    |  6 ++++++
 10 files changed, 55 insertions(+), 36 deletions(-)

diff --git a/vendor/k8s.io/kubernetes/pkg/client/cache/reflector.go b/vendor/k8s.io/kubernetes/pkg/client/cache/reflector.go
index 3a5025a28a7e..026a52cba60c 100644
--- a/vendor/k8s.io/kubernetes/pkg/client/cache/reflector.go
+++ b/vendor/k8s.io/kubernetes/pkg/client/cache/reflector.go
@@ -69,8 +69,6 @@ type Reflector struct {
 	resyncPeriod time.Duration
 	// now() returns current time - exposed for testing purposes
 	now func() time.Time
-	// nextResync is approximate time of next resync (0 if not scheduled)
-	nextResync time.Time
 	// lastSyncResourceVersion is the resource version token last
 	// observed when doing a sync with the underlying store
 	// it is thread safe, but not synchronized with the underlying store
@@ -234,14 +232,12 @@ var (
 // required, and a cleanup function.
 func (r *Reflector) resyncChan() (<-chan time.Time, func() bool) {
 	if r.resyncPeriod == 0 {
-		r.nextResync = time.Time{}
 		return neverExitWatch, func() bool { return false }
 	}
 	// The cleanup function is required: imagine the scenario where watches
 	// always fail so we end up listing frequently. Then, if we don't
 	// manually stop the timer, we could end up with many timers active
 	// concurrently.
-	r.nextResync = r.now().Add(r.resyncPeriod)
 	t := time.NewTimer(r.resyncPeriod)
 	return t.C, t.Stop
 }
@@ -285,7 +281,7 @@ func (r *Reflector) ListAndWatch(stopCh <-chan struct{}) error {
 			case <-stopCh:
 				return
 			}
-			glog.V(4).Infof("%s: next resync planned for %#v, forcing now", r.name, r.nextResync)
+			glog.V(4).Infof("%s: forcing resync", r.name)
 			if err := r.store.Resync(); err != nil {
 				resyncerrc <- err
 				return
diff --git a/vendor/k8s.io/kubernetes/pkg/controller/daemon/controller.go b/vendor/k8s.io/kubernetes/pkg/controller/daemon/controller.go
index ca7935b6e847..3cbd4f4d7527 100644
--- a/vendor/k8s.io/kubernetes/pkg/controller/daemon/controller.go
+++ b/vendor/k8s.io/kubernetes/pkg/controller/daemon/controller.go
@@ -369,16 +369,17 @@ func (dsc *DaemonSetsController) addPod(obj interface{}) {
 // up. If the labels of the pod have changed we need to awaken both the old
 // and new set. old and cur must be *api.Pod types.
 func (dsc *DaemonSetsController) updatePod(old, cur interface{}) {
-	if api.Semantic.DeepEqual(old, cur) {
-		// A periodic relist will send update events for all known pods.
+	curPod := cur.(*api.Pod)
+	oldPod := old.(*api.Pod)
+	if curPod.ResourceVersion == oldPod.ResourceVersion {
+		// Periodic resync will send update events for all known pods.
+		// Two different versions of the same pod will always have different RVs.
 		return
 	}
-	curPod := cur.(*api.Pod)
 	glog.V(4).Infof("Pod %s updated.", curPod.Name)
 	if curDS := dsc.getPodDaemonSet(curPod); curDS != nil {
 		dsc.enqueueDaemonSet(curDS)
 	}
-	oldPod := old.(*api.Pod)
 	// If the labels have not changed, then the daemon set responsible for
 	// the pod is the same as it was before. In that case we have enqueued the daemon
 	// set above, and do not have to enqueue the set again.
@@ -442,8 +443,8 @@ func (dsc *DaemonSetsController) addNode(obj interface{}) {
 func (dsc *DaemonSetsController) updateNode(old, cur interface{}) {
 	oldNode := old.(*api.Node)
 	curNode := cur.(*api.Node)
-	if api.Semantic.DeepEqual(oldNode.Name, curNode.Name) && api.Semantic.DeepEqual(oldNode.Namespace, curNode.Namespace) && api.Semantic.DeepEqual(oldNode.Labels, curNode.Labels) {
-		// A periodic relist will send update events for all known pods.
+	if reflect.DeepEqual(oldNode.Labels, curNode.Labels) {
+		// If node labels didn't change, we can ignore this update.
 		return
 	}
 	dsList, err := dsc.dsStore.List()
diff --git a/vendor/k8s.io/kubernetes/pkg/controller/deployment/deployment_controller.go b/vendor/k8s.io/kubernetes/pkg/controller/deployment/deployment_controller.go
index ada8a327d0f6..83f691e2e24f 100644
--- a/vendor/k8s.io/kubernetes/pkg/controller/deployment/deployment_controller.go
+++ b/vendor/k8s.io/kubernetes/pkg/controller/deployment/deployment_controller.go
@@ -244,19 +244,20 @@ func (dc *DeploymentController) getDeploymentForReplicaSet(rs *extensions.Replic
 // awaken both the old and new deployments. old and cur must be *extensions.ReplicaSet
 // types.
 func (dc *DeploymentController) updateReplicaSet(old, cur interface{}) {
-	if api.Semantic.DeepEqual(old, cur) {
-		// A periodic relist will send update events for all known controllers.
+	curRS := cur.(*extensions.ReplicaSet)
+	oldRS := old.(*extensions.ReplicaSet)
+	if curRS.ResourceVersion == oldRS.ResourceVersion {
+		// Periodic resync will send update events for all known replica sets.
+		// Two different versions of the same replica set will always have different RVs.
 		return
 	}
 	// TODO: Write a unittest for this case
-	curRS := cur.(*extensions.ReplicaSet)
 	glog.V(4).Infof("ReplicaSet %s updated.", curRS.Name)
 	if d := dc.getDeploymentForReplicaSet(curRS); d != nil {
 		dc.enqueueDeployment(d)
 	}
 	// A number of things could affect the old deployment: labels changing,
 	// pod template changing, etc.
-	oldRS := old.(*extensions.ReplicaSet)
 	if !api.Semantic.DeepEqual(oldRS, curRS) {
 		if oldD := dc.getDeploymentForReplicaSet(oldRS); oldD != nil {
 			dc.enqueueDeployment(oldD)
@@ -326,11 +327,13 @@ func (dc *DeploymentController) addPod(obj interface{}) {
 // is updated and wake them up. If anything of the Pods have changed, we need to awaken both
 // the old and new deployments. old and cur must be *api.Pod types.
 func (dc *DeploymentController) updatePod(old, cur interface{}) {
-	if api.Semantic.DeepEqual(old, cur) {
-		return
-	}
 	curPod := cur.(*api.Pod)
 	oldPod := old.(*api.Pod)
+	if curPod.ResourceVersion == oldPod.ResourceVersion {
+		// Periodic resync will send update events for all known pods.
+		// Two different versions of the same pod will always have different RVs.
+		return
+	}
 	glog.V(4).Infof("Pod %s updated %#v -> %#v.", curPod.Name, oldPod, curPod)
 	if d := dc.getDeploymentForPod(curPod); d != nil {
 		dc.enqueueDeployment(d)
diff --git a/vendor/k8s.io/kubernetes/pkg/controller/endpoint/endpoints_controller.go b/vendor/k8s.io/kubernetes/pkg/controller/endpoint/endpoints_controller.go
index a8a1b0831e47..5923099e5d8d 100644
--- a/vendor/k8s.io/kubernetes/pkg/controller/endpoint/endpoints_controller.go
+++ b/vendor/k8s.io/kubernetes/pkg/controller/endpoint/endpoints_controller.go
@@ -210,17 +210,19 @@ func (e *EndpointController) addPod(obj interface{}) {
 // and what services it will be a member of, and enqueue the union of these.
 // old and cur must be *api.Pod types.
 func (e *EndpointController) updatePod(old, cur interface{}) {
-	if api.Semantic.DeepEqual(old, cur) {
+	newPod := cur.(*api.Pod)
+	oldPod := old.(*api.Pod)
+	if newPod.ResourceVersion == oldPod.ResourceVersion {
+		// Periodic resync will send update events for all known pods.
+		// Two different versions of the same pod will always have different RVs.
 		return
 	}
-	newPod := old.(*api.Pod)
 	services, err := e.getPodServiceMemberships(newPod)
 	if err != nil {
 		glog.Errorf("Unable to get pod %v/%v's service memberships: %v", newPod.Namespace, newPod.Name, err)
 		return
 	}
-	oldPod := cur.(*api.Pod)
 	// Only need to get the old services if the labels changed.
 	if !reflect.DeepEqual(newPod.Labels, oldPod.Labels) || !hostNameAndDomainAreEqual(newPod, oldPod) {
diff --git a/vendor/k8s.io/kubernetes/pkg/controller/job/controller.go b/vendor/k8s.io/kubernetes/pkg/controller/job/controller.go
index 964a4ec14d6e..fc400778b263 100644
--- a/vendor/k8s.io/kubernetes/pkg/controller/job/controller.go
+++ b/vendor/k8s.io/kubernetes/pkg/controller/job/controller.go
@@ -197,11 +197,13 @@ func (jm *JobController) addPod(obj interface{}) {
 // If the labels of the pod have changed we need to awaken both the old
 // and new job. old and cur must be *api.Pod types.
 func (jm *JobController) updatePod(old, cur interface{}) {
-	if api.Semantic.DeepEqual(old, cur) {
-		// A periodic relist will send update events for all known pods.
+	curPod := cur.(*api.Pod)
+	oldPod := old.(*api.Pod)
+	if curPod.ResourceVersion == oldPod.ResourceVersion {
+		// Periodic resync will send update events for all known pods.
+		// Two different versions of the same pod will always have different RVs.
 		return
 	}
-	curPod := cur.(*api.Pod)
 	if curPod.DeletionTimestamp != nil {
 		// when a pod is deleted gracefully it's deletion timestamp is first modified to reflect a grace period,
 		// and after such time has passed, the kubelet actually deletes it from the store. We receive an update
@@ -213,7 +215,6 @@ func (jm *JobController) updatePod(old, cur interface{}) {
 	if job := jm.getPodJob(curPod); job != nil {
 		jm.enqueueController(job)
 	}
-	oldPod := old.(*api.Pod)
 	// Only need to get the old job if the labels changed.
 	if !reflect.DeepEqual(curPod.Labels, oldPod.Labels) {
 		// If the old and new job are the same, the first one that syncs
diff --git a/vendor/k8s.io/kubernetes/pkg/controller/petset/pet_set.go b/vendor/k8s.io/kubernetes/pkg/controller/petset/pet_set.go
index a34eb6d5c2e6..73641e2a2878 100644
--- a/vendor/k8s.io/kubernetes/pkg/controller/petset/pet_set.go
+++ b/vendor/k8s.io/kubernetes/pkg/controller/petset/pet_set.go
@@ -166,11 +166,13 @@ func (psc *PetSetController) addPod(obj interface{}) {
 // updatePod adds the petset for the current and old pods to the sync queue.
 // If the labels of the pod didn't change, this method enqueues a single petset.
 func (psc *PetSetController) updatePod(old, cur interface{}) {
-	if api.Semantic.DeepEqual(old, cur) {
-		return
-	}
 	curPod := cur.(*api.Pod)
 	oldPod := old.(*api.Pod)
+	if curPod.ResourceVersion == oldPod.ResourceVersion {
+		// Periodic resync will send update events for all known pods.
+		// Two different versions of the same pod will always have different RVs.
+		return
+	}
 	ps := psc.getPetSetForPod(curPod)
 	if ps == nil {
 		return
diff --git a/vendor/k8s.io/kubernetes/pkg/controller/replicaset/replica_set.go b/vendor/k8s.io/kubernetes/pkg/controller/replicaset/replica_set.go
index 6d6ddffb51b1..b686914c90dd 100644
--- a/vendor/k8s.io/kubernetes/pkg/controller/replicaset/replica_set.go
+++ b/vendor/k8s.io/kubernetes/pkg/controller/replicaset/replica_set.go
@@ -320,12 +320,13 @@ func (rsc *ReplicaSetController) addPod(obj interface{}) {
 // up. If the labels of the pod have changed we need to awaken both the old
 // and new replica set. old and cur must be *api.Pod types.
 func (rsc *ReplicaSetController) updatePod(old, cur interface{}) {
-	if api.Semantic.DeepEqual(old, cur) {
-		// A periodic relist will send update events for all known pods.
-		return
-	}
 	curPod := cur.(*api.Pod)
 	oldPod := old.(*api.Pod)
+	if curPod.ResourceVersion == oldPod.ResourceVersion {
+		// Periodic resync will send update events for all known pods.
+		// Two different versions of the same pod will always have different RVs.
+		return
+	}
 	glog.V(4).Infof("Pod %s updated, objectMeta %+v -> %+v.", curPod.Name, oldPod.ObjectMeta, curPod.ObjectMeta)
 	labelChanged := !reflect.DeepEqual(curPod.Labels, oldPod.Labels)
 	if curPod.DeletionTimestamp != nil {
diff --git a/vendor/k8s.io/kubernetes/pkg/controller/replicaset/replica_set_test.go b/vendor/k8s.io/kubernetes/pkg/controller/replicaset/replica_set_test.go
index fbdfc497fbe9..baa52005686c 100644
--- a/vendor/k8s.io/kubernetes/pkg/controller/replicaset/replica_set_test.go
+++ b/vendor/k8s.io/kubernetes/pkg/controller/replicaset/replica_set_test.go
@@ -551,8 +551,10 @@ func TestUpdatePods(t *testing.T) {
 	// then update its labels to match testRSSpec2. We expect to receive a sync
 	// request for both replica sets.
 	pod1 := newPodList(manager.podStore.Indexer, 1, api.PodRunning, labelMap1, testRSSpec1, "pod").Items[0]
+	pod1.ResourceVersion = "1"
 	pod2 := pod1
 	pod2.Labels = labelMap2
+	pod2.ResourceVersion = "2"
 	manager.updatePod(&pod1, &pod2)
 	expected := sets.NewString(testRSSpec1.Name, testRSSpec2.Name)
 	for _, name := range expected.List() {
@@ -571,6 +573,7 @@ func TestUpdatePods(t *testing.T) {
 	// its labels to match no replica set. We expect to receive a sync request
 	// for testRSSpec1.
 	pod2.Labels = make(map[string]string)
+	pod2.ResourceVersion = "2"
 	manager.updatePod(&pod1, &pod2)
 	expected = sets.NewString(testRSSpec1.Name)
 	for _, name := range expected.List() {
@@ -977,6 +980,7 @@ func TestDeletionTimestamp(t *testing.T) {
 	}
 	pod := newPodList(nil, 1, api.PodPending, labelMap, rs, "pod").Items[0]
 	pod.DeletionTimestamp = &unversioned.Time{Time: time.Now()}
+	pod.ResourceVersion = "1"
 	manager.expectations.ExpectDeletions(rsKey, []string{controller.PodKey(&pod)})

 	// A pod added with a deletion timestamp should decrement deletions, not creations.
@@ -996,6 +1000,7 @@ func TestDeletionTimestamp(t *testing.T) {

 	// An update from no deletion timestamp to having one should be treated
 	// as a deletion.
 	oldPod := newPodList(nil, 1, api.PodPending, labelMap, rs, "pod").Items[0]
+	oldPod.ResourceVersion = "2"
 	manager.expectations.ExpectDeletions(rsKey, []string{controller.PodKey(&pod)})
 	manager.updatePod(&oldPod, &pod)
@@ -1021,6 +1026,7 @@ func TestDeletionTimestamp(t *testing.T) {
 	}
 	manager.expectations.ExpectDeletions(rsKey, []string{controller.PodKey(secondPod)})
 	oldPod.DeletionTimestamp = &unversioned.Time{Time: time.Now()}
+	oldPod.ResourceVersion = "2"
 	manager.updatePod(&oldPod, &pod)

 	podExp, exists, err = manager.expectations.GetExpectations(rsKey)
diff --git a/vendor/k8s.io/kubernetes/pkg/controller/replication/replication_controller.go b/vendor/k8s.io/kubernetes/pkg/controller/replication/replication_controller.go
index a4c09be9486a..95dabcbde9e9 100644
--- a/vendor/k8s.io/kubernetes/pkg/controller/replication/replication_controller.go
+++ b/vendor/k8s.io/kubernetes/pkg/controller/replication/replication_controller.go
@@ -348,12 +348,13 @@ func (rm *ReplicationManager) addPod(obj interface{}) {
 // up. If the labels of the pod have changed we need to awaken both the old
 // and new controller. old and cur must be *api.Pod types.
 func (rm *ReplicationManager) updatePod(old, cur interface{}) {
-	if api.Semantic.DeepEqual(old, cur) {
-		// A periodic relist will send update events for all known pods.
-		return
-	}
 	curPod := cur.(*api.Pod)
 	oldPod := old.(*api.Pod)
+	if curPod.ResourceVersion == oldPod.ResourceVersion {
+		// Periodic resync will send update events for all known pods.
+		// Two different versions of the same pod will always have different RVs.
+		return
+	}
 	glog.V(4).Infof("Pod %s updated, objectMeta %+v -> %+v.", curPod.Name, oldPod.ObjectMeta, curPod.ObjectMeta)
 	labelChanged := !reflect.DeepEqual(curPod.Labels, oldPod.Labels)
 	if curPod.DeletionTimestamp != nil {
diff --git a/vendor/k8s.io/kubernetes/pkg/controller/replication/replication_controller_test.go b/vendor/k8s.io/kubernetes/pkg/controller/replication/replication_controller_test.go
index 593929c14372..a6a05cc0cf9b 100644
--- a/vendor/k8s.io/kubernetes/pkg/controller/replication/replication_controller_test.go
+++ b/vendor/k8s.io/kubernetes/pkg/controller/replication/replication_controller_test.go
@@ -535,8 +535,10 @@ func TestUpdatePods(t *testing.T) {
 	// testControllerSpec1, then update its labels to match testControllerSpec2.
 	// We expect to receive a sync request for both controllers.
 	pod1 := newPodList(manager.podStore.Indexer, 1, api.PodRunning, testControllerSpec1, "pod").Items[0]
+	pod1.ResourceVersion = "1"
 	pod2 := pod1
 	pod2.Labels = testControllerSpec2.Spec.Selector
+	pod2.ResourceVersion = "2"
 	manager.updatePod(&pod1, &pod2)
 	expected := sets.NewString(testControllerSpec1.Name, testControllerSpec2.Name)
 	for _, name := range expected.List() {
@@ -555,6 +557,7 @@ func TestUpdatePods(t *testing.T) {
 	// We update its labels to match no replication controller. We expect to
 	// receive a sync request for testControllerSpec1.
 	pod2.Labels = make(map[string]string)
+	pod2.ResourceVersion = "2"
 	manager.updatePod(&pod1, &pod2)
 	expected = sets.NewString(testControllerSpec1.Name)
 	for _, name := range expected.List() {
@@ -945,6 +948,7 @@ func TestDeletionTimestamp(t *testing.T) {
 	}
 	pod := newPodList(nil, 1, api.PodPending, controllerSpec, "pod").Items[0]
 	pod.DeletionTimestamp = &unversioned.Time{Time: time.Now()}
+	pod.ResourceVersion = "1"
 	manager.expectations.ExpectDeletions(rcKey, []string{controller.PodKey(&pod)})

 	// A pod added with a deletion timestamp should decrement deletions, not creations.
@@ -964,6 +968,7 @@ func TestDeletionTimestamp(t *testing.T) {
 	// An update from no deletion timestamp to having one should be treated
 	// as a deletion.
 	oldPod := newPodList(nil, 1, api.PodPending, controllerSpec, "pod").Items[0]
+	oldPod.ResourceVersion = "2"
 	manager.expectations.ExpectDeletions(rcKey, []string{controller.PodKey(&pod)})
 	manager.updatePod(&oldPod, &pod)
@@ -989,6 +994,7 @@ func TestDeletionTimestamp(t *testing.T) {
 	}
 	manager.expectations.ExpectDeletions(rcKey, []string{controller.PodKey(secondPod)})
 	oldPod.DeletionTimestamp = &unversioned.Time{Time: time.Now()}
+	oldPod.ResourceVersion = "2"
 	manager.updatePod(&oldPod, &pod)

 	podExp, exists, err = manager.expectations.GetExpectations(rcKey)

From b9b976b907e2b333bff6548a0b5382e0692dd2f1 Mon Sep 17 00:00:00 2001
From: Andy Goldstein
Date: Thu, 18 Aug 2016 13:40:09 -0400
Subject: [PATCH 2/3] bump(google/cadvisor): 956e595d948ce8690296d297ba265d5e8649a088

---
 Godeps/Godeps.json                                 | 156 +++++++++----------
 .../cadvisor/container/docker/handler.go           |   5 +-
 .../container/libcontainer/helpers.go              |  15 +-
 .../devicemapper/thin_pool_watcher.go              |   6 +-
 vendor/github.com/google/cadvisor/fs/fs.go         |  48 ++++--
 .../google/cadvisor/info/v2/conversion.go          |   3 +-
 .../google/cadvisor/metrics/prometheus.go          |   9 +-
 .../google/cadvisor/version/VERSION                |   2 +-
 8 files changed, 129 insertions(+), 115 deletions(-)

diff --git a/Godeps/Godeps.json b/Godeps/Godeps.json
index 7aba4a1e4af5..f3d3b1bdc100 100644
--- a/Godeps/Godeps.json
+++ b/Godeps/Godeps.json
@@ -1321,198 +1321,198 @@
 		},
 		{
 			"ImportPath": "github.com/google/cadvisor/api",
-			"Comment": "v0.23.6",
-			"Rev": "4dbefc9b671b81257973a33211fb12370c1a526e"
+			"Comment": "v0.23.9-4-g956e595",
+			"Rev": "956e595d948ce8690296d297ba265d5e8649a088"
 		},
 		{
 			"ImportPath": "github.com/google/cadvisor/cache/memory",
-			"Comment": "v0.23.6",
-			"Rev": "4dbefc9b671b81257973a33211fb12370c1a526e"
+			"Comment": "v0.23.9-4-g956e595",
+			"Rev": "956e595d948ce8690296d297ba265d5e8649a088"
 		},
 		{
 			"ImportPath": "github.com/google/cadvisor/collector",
-			"Comment": "v0.23.6",
-			"Rev": "4dbefc9b671b81257973a33211fb12370c1a526e"
+			"Comment": "v0.23.9-4-g956e595",
+			"Rev": "956e595d948ce8690296d297ba265d5e8649a088"
 		},
 		{
 			"ImportPath": "github.com/google/cadvisor/container",
-			"Comment": "v0.23.6",
-			"Rev": "4dbefc9b671b81257973a33211fb12370c1a526e"
+			"Comment": "v0.23.9-4-g956e595",
+			"Rev": "956e595d948ce8690296d297ba265d5e8649a088"
 		},
 		{
 			"ImportPath": "github.com/google/cadvisor/container/common",
-			"Comment": "v0.23.6",
-			"Rev": "4dbefc9b671b81257973a33211fb12370c1a526e"
+			"Comment": "v0.23.9-4-g956e595",
+			"Rev": "956e595d948ce8690296d297ba265d5e8649a088"
 		},
 		{
 			"ImportPath": "github.com/google/cadvisor/container/docker",
-			"Comment": "v0.23.6",
-			"Rev": "4dbefc9b671b81257973a33211fb12370c1a526e"
+			"Comment": "v0.23.9-4-g956e595",
+			"Rev": "956e595d948ce8690296d297ba265d5e8649a088"
 		},
 		{
 			"ImportPath": "github.com/google/cadvisor/container/libcontainer",
-			"Comment": "v0.23.6",
-			"Rev": "4dbefc9b671b81257973a33211fb12370c1a526e"
+			"Comment": "v0.23.9-4-g956e595",
+			"Rev": "956e595d948ce8690296d297ba265d5e8649a088"
 		},
 		{
 			"ImportPath": "github.com/google/cadvisor/container/raw",
-			"Comment": "v0.23.6",
-			"Rev": "4dbefc9b671b81257973a33211fb12370c1a526e"
+			"Comment": "v0.23.9-4-g956e595",
+			"Rev": "956e595d948ce8690296d297ba265d5e8649a088"
 		},
 		{
 			"ImportPath": "github.com/google/cadvisor/container/rkt",
-			"Comment": "v0.23.6",
-			"Rev": "4dbefc9b671b81257973a33211fb12370c1a526e"
+			"Comment": "v0.23.9-4-g956e595",
+			"Rev": "956e595d948ce8690296d297ba265d5e8649a088"
 		},
 		{
 			"ImportPath": "github.com/google/cadvisor/container/systemd",
-			"Comment": "v0.23.6",
-			"Rev": "4dbefc9b671b81257973a33211fb12370c1a526e"
+			"Comment": "v0.23.9-4-g956e595",
+			"Rev": "956e595d948ce8690296d297ba265d5e8649a088"
 		},
 		{
 			"ImportPath": "github.com/google/cadvisor/devicemapper",
-			"Comment": "v0.23.6",
-			"Rev": "4dbefc9b671b81257973a33211fb12370c1a526e"
+			"Comment": "v0.23.9-4-g956e595",
+			"Rev": "956e595d948ce8690296d297ba265d5e8649a088"
 		},
 		{
 			"ImportPath": "github.com/google/cadvisor/events",
-			"Comment": "v0.23.6",
-			"Rev": "4dbefc9b671b81257973a33211fb12370c1a526e"
+			"Comment": "v0.23.9-4-g956e595",
+			"Rev": "956e595d948ce8690296d297ba265d5e8649a088"
 		},
 		{
 			"ImportPath": "github.com/google/cadvisor/fs",
-			"Comment": "v0.23.6",
-			"Rev": "4dbefc9b671b81257973a33211fb12370c1a526e"
+			"Comment": "v0.23.9-4-g956e595",
+			"Rev": "956e595d948ce8690296d297ba265d5e8649a088"
 		},
 		{
 			"ImportPath": "github.com/google/cadvisor/healthz",
-			"Comment": "v0.23.6",
-			"Rev": "4dbefc9b671b81257973a33211fb12370c1a526e"
+			"Comment": "v0.23.9-4-g956e595",
+			"Rev": "956e595d948ce8690296d297ba265d5e8649a088"
 		},
 		{
 			"ImportPath": "github.com/google/cadvisor/http",
-			"Comment": "v0.23.6",
-			"Rev": "4dbefc9b671b81257973a33211fb12370c1a526e"
+			"Comment": "v0.23.9-4-g956e595",
+			"Rev": "956e595d948ce8690296d297ba265d5e8649a088"
 		},
 		{
 			"ImportPath": "github.com/google/cadvisor/http/mux",
-			"Comment": "v0.23.6",
-			"Rev": "4dbefc9b671b81257973a33211fb12370c1a526e"
+			"Comment": "v0.23.9-4-g956e595",
+			"Rev": "956e595d948ce8690296d297ba265d5e8649a088"
 		},
 		{
 			"ImportPath": "github.com/google/cadvisor/info/v1",
-			"Comment": "v0.23.6",
-			"Rev": "4dbefc9b671b81257973a33211fb12370c1a526e"
+			"Comment": "v0.23.9-4-g956e595",
+			"Rev": "956e595d948ce8690296d297ba265d5e8649a088"
 		},
 		{
 			"ImportPath": "github.com/google/cadvisor/info/v2",
-			"Comment": "v0.23.6",
-			"Rev": "4dbefc9b671b81257973a33211fb12370c1a526e"
+			"Comment": "v0.23.9-4-g956e595",
+			"Rev": "956e595d948ce8690296d297ba265d5e8649a088"
 		},
 		{
 			"ImportPath": "github.com/google/cadvisor/machine",
-			"Comment": "v0.23.6",
-			"Rev": "4dbefc9b671b81257973a33211fb12370c1a526e"
+			"Comment": "v0.23.9-4-g956e595",
+			"Rev": "956e595d948ce8690296d297ba265d5e8649a088"
 		},
 		{
 			"ImportPath": "github.com/google/cadvisor/manager",
-			"Comment": "v0.23.6",
-			"Rev": "4dbefc9b671b81257973a33211fb12370c1a526e"
+			"Comment": "v0.23.9-4-g956e595",
+			"Rev": "956e595d948ce8690296d297ba265d5e8649a088"
 		},
 		{
 			"ImportPath": "github.com/google/cadvisor/manager/watcher",
-			"Comment": "v0.23.6",
-			"Rev": "4dbefc9b671b81257973a33211fb12370c1a526e"
+			"Comment": "v0.23.9-4-g956e595",
+			"Rev": "956e595d948ce8690296d297ba265d5e8649a088"
 		},
 		{
 			"ImportPath": "github.com/google/cadvisor/manager/watcher/raw",
-			"Comment": "v0.23.6",
-			"Rev": "4dbefc9b671b81257973a33211fb12370c1a526e"
+			"Comment": "v0.23.9-4-g956e595",
+			"Rev": "956e595d948ce8690296d297ba265d5e8649a088"
 		},
 		{
 			"ImportPath": "github.com/google/cadvisor/manager/watcher/rkt",
-			"Comment": "v0.23.6",
-			"Rev": "4dbefc9b671b81257973a33211fb12370c1a526e"
+			"Comment": "v0.23.9-4-g956e595",
+			"Rev": "956e595d948ce8690296d297ba265d5e8649a088"
 		},
 		{
 			"ImportPath": "github.com/google/cadvisor/metrics",
-			"Comment": "v0.23.6",
-			"Rev": "4dbefc9b671b81257973a33211fb12370c1a526e"
+			"Comment": "v0.23.9-4-g956e595",
+			"Rev": "956e595d948ce8690296d297ba265d5e8649a088"
 		},
 		{
 			"ImportPath": "github.com/google/cadvisor/pages",
-			"Comment": "v0.23.6",
-			"Rev": "4dbefc9b671b81257973a33211fb12370c1a526e"
+			"Comment": "v0.23.9-4-g956e595",
+			"Rev": "956e595d948ce8690296d297ba265d5e8649a088"
 		},
 		{
 			"ImportPath": "github.com/google/cadvisor/pages/static",
-			"Comment": "v0.23.6",
-			"Rev": "4dbefc9b671b81257973a33211fb12370c1a526e"
+			"Comment": "v0.23.9-4-g956e595",
+			"Rev": "956e595d948ce8690296d297ba265d5e8649a088"
 		},
 		{
 			"ImportPath": "github.com/google/cadvisor/storage",
-			"Comment": "v0.23.6",
-			"Rev": "4dbefc9b671b81257973a33211fb12370c1a526e"
+			"Comment": "v0.23.9-4-g956e595",
+			"Rev": "956e595d948ce8690296d297ba265d5e8649a088"
 		},
 		{
 			"ImportPath": "github.com/google/cadvisor/summary",
-			"Comment": "v0.23.6",
-			"Rev": "4dbefc9b671b81257973a33211fb12370c1a526e"
+			"Comment": "v0.23.9-4-g956e595",
+			"Rev": "956e595d948ce8690296d297ba265d5e8649a088"
 		},
 		{
 			"ImportPath": "github.com/google/cadvisor/utils",
-			"Comment": "v0.23.6",
-			"Rev": "4dbefc9b671b81257973a33211fb12370c1a526e"
+			"Comment": "v0.23.9-4-g956e595",
+			"Rev": "956e595d948ce8690296d297ba265d5e8649a088"
 		},
 		{
 			"ImportPath": "github.com/google/cadvisor/utils/cloudinfo",
-			"Comment": "v0.23.6",
-			"Rev": "4dbefc9b671b81257973a33211fb12370c1a526e"
+			"Comment": "v0.23.9-4-g956e595",
+			"Rev": "956e595d948ce8690296d297ba265d5e8649a088"
 		},
 		{
 			"ImportPath": "github.com/google/cadvisor/utils/cpuload",
-			"Comment": "v0.23.6",
-			"Rev": "4dbefc9b671b81257973a33211fb12370c1a526e"
+			"Comment": "v0.23.9-4-g956e595",
+			"Rev": "956e595d948ce8690296d297ba265d5e8649a088"
 		},
 		{
 			"ImportPath": "github.com/google/cadvisor/utils/cpuload/netlink",
-			"Comment": "v0.23.6",
-			"Rev": "4dbefc9b671b81257973a33211fb12370c1a526e"
+			"Comment": "v0.23.9-4-g956e595",
+			"Rev": "956e595d948ce8690296d297ba265d5e8649a088"
 		},
 		{
 			"ImportPath": "github.com/google/cadvisor/utils/docker",
-			"Comment": "v0.23.6",
-			"Rev": "4dbefc9b671b81257973a33211fb12370c1a526e"
+			"Comment": "v0.23.9-4-g956e595",
+			"Rev": "956e595d948ce8690296d297ba265d5e8649a088"
 		},
 		{
 			"ImportPath": "github.com/google/cadvisor/utils/oomparser",
-			"Comment": "v0.23.6",
-			"Rev": "4dbefc9b671b81257973a33211fb12370c1a526e"
+			"Comment": "v0.23.9-4-g956e595",
+			"Rev": "956e595d948ce8690296d297ba265d5e8649a088"
 		},
 		{
 			"ImportPath": "github.com/google/cadvisor/utils/sysfs",
-			"Comment": "v0.23.6",
-			"Rev": "4dbefc9b671b81257973a33211fb12370c1a526e"
+			"Comment": "v0.23.9-4-g956e595",
+			"Rev": "956e595d948ce8690296d297ba265d5e8649a088"
 		},
 		{
 			"ImportPath": "github.com/google/cadvisor/utils/sysinfo",
-			"Comment": "v0.23.6",
-			"Rev": "4dbefc9b671b81257973a33211fb12370c1a526e"
+			"Comment": "v0.23.9-4-g956e595",
+			"Rev": "956e595d948ce8690296d297ba265d5e8649a088"
 		},
 		{
 			"ImportPath": "github.com/google/cadvisor/utils/tail",
-			"Comment": "v0.23.6",
-			"Rev": "4dbefc9b671b81257973a33211fb12370c1a526e"
+			"Comment": "v0.23.9-4-g956e595",
+			"Rev": "956e595d948ce8690296d297ba265d5e8649a088"
 		},
 		{
 			"ImportPath": "github.com/google/cadvisor/validate",
-			"Comment": "v0.23.6",
-			"Rev": "4dbefc9b671b81257973a33211fb12370c1a526e"
+			"Comment": "v0.23.9-4-g956e595",
+			"Rev": "956e595d948ce8690296d297ba265d5e8649a088"
 		},
 		{
 			"ImportPath": "github.com/google/cadvisor/version",
-			"Comment": "v0.23.6",
-			"Rev": "4dbefc9b671b81257973a33211fb12370c1a526e"
+			"Comment": "v0.23.9-4-g956e595",
+			"Rev": "956e595d948ce8690296d297ba265d5e8649a088"
 		},
 		{
 			"ImportPath": "github.com/google/gofuzz",
diff --git a/vendor/github.com/google/cadvisor/container/docker/handler.go b/vendor/github.com/google/cadvisor/container/docker/handler.go
index c1fb15864167..fd3e2fdc4ba9 100644
--- a/vendor/github.com/google/cadvisor/container/docker/handler.go
+++ b/vendor/github.com/google/cadvisor/container/docker/handler.go
@@ -275,10 +275,7 @@ func (h *dockerFsHandler) Usage() (uint64, uint64) {
 	if h.thinPoolWatcher != nil {
 		thinPoolUsage, err := h.thinPoolWatcher.GetUsage(h.deviceID)
 		if err != nil {
-			// TODO: ideally we should keep track of how many times we failed to get the usage for this
-			// device vs how many refreshes of the cache there have been, and display an error e.g. if we've
-			// had at least 1 refresh and we still can't find the device.
-			glog.V(5).Infof("unable to get fs usage from thin pool for device %s: %v", h.deviceID, err)
+			glog.Errorf("unable to get fs usage from thin pool for device %v: %v", h.deviceID, err)
 		} else {
 			baseUsage = thinPoolUsage
 			usage += thinPoolUsage
diff --git a/vendor/github.com/google/cadvisor/container/libcontainer/helpers.go b/vendor/github.com/google/cadvisor/container/libcontainer/helpers.go
index e030d1ad740e..65c72ab50670 100644
--- a/vendor/github.com/google/cadvisor/container/libcontainer/helpers.go
+++ b/vendor/github.com/google/cadvisor/container/libcontainer/helpers.go
@@ -387,23 +387,16 @@ func toContainerStats2(s *cgroups.Stats, ret *info.ContainerStats) {
 		ret.Memory.ContainerData.Pgmajfault = v
 		ret.Memory.HierarchicalData.Pgmajfault = v
 	}
-	if v, ok := s.MemoryStats.Stats["total_inactive_anon"]; ok {
-		workingSet := ret.Memory.Usage
+
+	workingSet := ret.Memory.Usage
+	if v, ok := s.MemoryStats.Stats["total_inactive_file"]; ok {
 		if workingSet < v {
 			workingSet = 0
 		} else {
 			workingSet -= v
 		}
-
-		if v, ok := s.MemoryStats.Stats["total_inactive_file"]; ok {
-			if workingSet < v {
-				workingSet = 0
-			} else {
-				workingSet -= v
-			}
-		}
-		ret.Memory.WorkingSet = workingSet
 	}
+	ret.Memory.WorkingSet = workingSet
 }

 func toContainerStats3(libcontainerStats *libcontainer.Stats, ret *info.ContainerStats) {
diff --git a/vendor/github.com/google/cadvisor/devicemapper/thin_pool_watcher.go b/vendor/github.com/google/cadvisor/devicemapper/thin_pool_watcher.go
index 6f5666a02fe3..bf2300a33bd3 100644
--- a/vendor/github.com/google/cadvisor/devicemapper/thin_pool_watcher.go
+++ b/vendor/github.com/google/cadvisor/devicemapper/thin_pool_watcher.go
@@ -74,7 +74,7 @@ func (w *ThinPoolWatcher) Start() {
 			// print latency for refresh
 			duration := time.Since(start)
-			glog.V(5).Infof("thin_ls(%d) took %s", start.Unix(), duration)
+			glog.V(3).Infof("thin_ls(%d) took %s", start.Unix(), duration)
 		}
 	}
 }
@@ -115,7 +115,7 @@ func (w *ThinPoolWatcher) Refresh() error {
 	}

 	if currentlyReserved {
-		glog.V(5).Infof("metadata for %v is currently reserved; releasing", w.poolName)
+		glog.V(4).Infof("metadata for %v is currently reserved; releasing", w.poolName)
 		_, err = w.dmsetup.Message(w.poolName, 0, releaseMetadataMessage)
 		if err != nil {
 			err = fmt.Errorf("error releasing metadata snapshot for %v: %v", w.poolName, err)
@@ -123,7 +123,7 @@ func (w *ThinPoolWatcher) Refresh() error {
 		}
 	}

-	glog.V(5).Infof("reserving metadata snapshot for thin-pool %v", w.poolName)
+	glog.Infof("reserving metadata snapshot for thin-pool %v", w.poolName)
 	// NOTE: "0" in the call below is for the 'sector' argument to 'dmsetup
 	// message'. It's not needed for thin pools.
 	if output, err := w.dmsetup.Message(w.poolName, 0, reserveMetadataMessage); err != nil {
diff --git a/vendor/github.com/google/cadvisor/fs/fs.go b/vendor/github.com/google/cadvisor/fs/fs.go
index dafc7bf70b15..4bc39e52dad7 100644
--- a/vendor/github.com/google/cadvisor/fs/fs.go
+++ b/vendor/github.com/google/cadvisor/fs/fs.go
@@ -99,46 +99,64 @@ func NewFsInfo(context Context) (FsInfo, error) {
 	if err != nil {
 		return nil, err
 	}
+
+	// Avoid devicemapper container mounts - these are tracked by the ThinPoolWatcher
+	excluded := []string{fmt.Sprintf("%s/devicemapper/mnt", context.Docker.Root)}
 	fsInfo := &RealFsInfo{
-		partitions: make(map[string]partition, 0),
+		partitions: processMounts(mounts, excluded),
 		labels:     make(map[string]string, 0),
 		dmsetup:    devicemapper.NewDmsetupClient(),
 	}
+
+	fsInfo.addRktImagesLabel(context, mounts)
+	// need to call this before the log line below printing out the partitions, as this function may
+	// add a "partition" for devicemapper to fsInfo.partitions
+	fsInfo.addDockerImagesLabel(context, mounts)
+
+	glog.Infof("Filesystem partitions: %+v", fsInfo.partitions)
+	fsInfo.addSystemRootLabel(mounts)
+	return fsInfo, nil
+}
+
+func processMounts(mounts []*mount.Info, excludedMountpointPrefixes []string) map[string]partition {
+	partitions := make(map[string]partition, 0)
+
 	supportedFsType := map[string]bool{
 		// all ext systems are checked through prefix.
 		"btrfs": true,
 		"xfs":   true,
 		"zfs":   true,
 	}
+
 	for _, mount := range mounts {
-		var Fstype string
 		if !strings.HasPrefix(mount.Fstype, "ext") && !supportedFsType[mount.Fstype] {
 			continue
 		}
 		// Avoid bind mounts.
-		if _, ok := fsInfo.partitions[mount.Source]; ok {
+		if _, ok := partitions[mount.Source]; ok {
 			continue
 		}
-		if mount.Fstype == "zfs" {
-			Fstype = mount.Fstype
+
+		hasPrefix := false
+		for _, prefix := range excludedMountpointPrefixes {
+			if strings.HasPrefix(mount.Mountpoint, prefix) {
+				hasPrefix = true
+				break
+			}
+		}
+		if hasPrefix {
+			continue
 		}
-		fsInfo.partitions[mount.Source] = partition{
-			fsType:     Fstype,
+
+		partitions[mount.Source] = partition{
+			fsType:     mount.Fstype,
 			mountpoint: mount.Mountpoint,
 			major:      uint(mount.Major),
 			minor:      uint(mount.Minor),
 		}
 	}
-	fsInfo.addRktImagesLabel(context, mounts)
-	// need to call this before the log line below printing out the partitions, as this function may
-	// add a "partition" for devicemapper to fsInfo.partitions
-	fsInfo.addDockerImagesLabel(context, mounts)
-
-	glog.Infof("Filesystem partitions: %+v", fsInfo.partitions)
-	fsInfo.addSystemRootLabel(mounts)
-	return fsInfo, nil
+
+	return partitions
 }

 // getDockerDeviceMapperInfo returns information about the devicemapper device and "partition" if
diff --git a/vendor/github.com/google/cadvisor/info/v2/conversion.go b/vendor/github.com/google/cadvisor/info/v2/conversion.go
index b6664ecc2313..2e6181203d1f 100644
--- a/vendor/github.com/google/cadvisor/info/v2/conversion.go
+++ b/vendor/github.com/google/cadvisor/info/v2/conversion.go
@@ -24,7 +24,8 @@ import (

 func machineFsStatsFromV1(fsStats []v1.FsStats) []MachineFsStats {
 	var result []MachineFsStats
-	for _, stat := range fsStats {
+	for i := range fsStats {
+		stat := fsStats[i]
 		readDuration := time.Millisecond * time.Duration(stat.ReadTime)
 		writeDuration := time.Millisecond * time.Duration(stat.WriteTime)
 		ioDuration := time.Millisecond * time.Duration(stat.IoTime)
diff --git a/vendor/github.com/google/cadvisor/metrics/prometheus.go b/vendor/github.com/google/cadvisor/metrics/prometheus.go
index 74d369640d96..ec10dfac9ca5 100644
--- a/vendor/github.com/google/cadvisor/metrics/prometheus.go
+++ b/vendor/github.com/google/cadvisor/metrics/prometheus.go
@@ -499,6 +499,11 @@ func (c *PrometheusCollector) Collect(ch chan<- prometheus.Metric) {
 	c.errors.Collect(ch)
 }

+const (
+	containerLabelPrefix = "container_label_"
+	containerEnvPrefix   = "container_env_"
+)
+
 func (c *PrometheusCollector) collectContainersInfo(ch chan<- prometheus.Metric) {
 	containers, err := c.infoProvider.SubcontainersInfo("/", &info.ContainerInfoRequest{NumStats: 1})
 	if err != nil {
@@ -529,11 +534,11 @@ func (c *PrometheusCollector) collectContainersInfo(ch chan<- prometheus.Metric)
 		}
 		for k, v := range container.Spec.Labels {
-			baseLabels = append(baseLabels, sanitizeLabelName(k))
+			baseLabels = append(baseLabels, sanitizeLabelName(containerLabelPrefix+k))
 			baseLabelValues = append(baseLabelValues, v)
 		}
 		for k, v := range container.Spec.Envs {
-			baseLabels = append(baseLabels, sanitizeLabelName(k))
+			baseLabels = append(baseLabels, sanitizeLabelName(containerEnvPrefix+k))
 			baseLabelValues = append(baseLabelValues, v)
 		}
diff --git a/vendor/github.com/google/cadvisor/version/VERSION b/vendor/github.com/google/cadvisor/version/VERSION
index 40a6dfede5db..174f076accad 100644
--- a/vendor/github.com/google/cadvisor/version/VERSION
+++ b/vendor/github.com/google/cadvisor/version/VERSION
@@ -1 +1 @@
-0.23.4
+0.23.9

From 0b3fd1240d90015deee6a94c76f9d5c983d2aa05 Mon Sep 17 00:00:00 2001
From: Andy Goldstein
Date: Fri, 5 Aug 2016 11:07:21 -0400
Subject: [PATCH 3/3] UPSTREAM: google/cadvisor: 1359: Make ThinPoolWatcher loglevel consistent

---
 .../github.com/google/cadvisor/container/docker/handler.go | 5 ++++-
 .../google/cadvisor/devicemapper/thin_pool_watcher.go       | 6 +++---
 2 files changed, 7 insertions(+), 4 deletions(-)

diff --git a/vendor/github.com/google/cadvisor/container/docker/handler.go b/vendor/github.com/google/cadvisor/container/docker/handler.go
index fd3e2fdc4ba9..c1fb15864167 100644
--- a/vendor/github.com/google/cadvisor/container/docker/handler.go
+++ b/vendor/github.com/google/cadvisor/container/docker/handler.go
@@ -275,7 +275,10 @@ func (h *dockerFsHandler) Usage() (uint64, uint64) {
 	if h.thinPoolWatcher != nil {
 		thinPoolUsage, err := h.thinPoolWatcher.GetUsage(h.deviceID)
 		if err != nil {
-			glog.Errorf("unable to get fs usage from thin pool for device %v: %v", h.deviceID, err)
+			// TODO: ideally we should keep track of how many times we failed to get the usage for this
+			// device vs how many refreshes of the cache there have been, and display an error e.g. if we've
+			// had at least 1 refresh and we still can't find the device.
+			glog.V(5).Infof("unable to get fs usage from thin pool for device %s: %v", h.deviceID, err)
 		} else {
 			baseUsage = thinPoolUsage
 			usage += thinPoolUsage
diff --git a/vendor/github.com/google/cadvisor/devicemapper/thin_pool_watcher.go b/vendor/github.com/google/cadvisor/devicemapper/thin_pool_watcher.go
index bf2300a33bd3..6f5666a02fe3 100644
--- a/vendor/github.com/google/cadvisor/devicemapper/thin_pool_watcher.go
+++ b/vendor/github.com/google/cadvisor/devicemapper/thin_pool_watcher.go
@@ -74,7 +74,7 @@ func (w *ThinPoolWatcher) Start() {
 			// print latency for refresh
 			duration := time.Since(start)
-			glog.V(3).Infof("thin_ls(%d) took %s", start.Unix(), duration)
+			glog.V(5).Infof("thin_ls(%d) took %s", start.Unix(), duration)
 		}
 	}
 }
@@ -115,7 +115,7 @@ func (w *ThinPoolWatcher) Refresh() error {
 	}

 	if currentlyReserved {
-		glog.V(4).Infof("metadata for %v is currently reserved; releasing", w.poolName)
+		glog.V(5).Infof("metadata for %v is currently reserved; releasing", w.poolName)
 		_, err = w.dmsetup.Message(w.poolName, 0, releaseMetadataMessage)
 		if err != nil {
 			err = fmt.Errorf("error releasing metadata snapshot for %v: %v", w.poolName, err)
@@ -123,7 +123,7 @@ func (w *ThinPoolWatcher) Refresh() error {
 		}
 	}

-	glog.Infof("reserving metadata snapshot for thin-pool %v", w.poolName)
+	glog.V(5).Infof("reserving metadata snapshot for thin-pool %v", w.poolName)
 	// NOTE: "0" in the call below is for the 'sector' argument to 'dmsetup
 	// message'. It's not needed for thin pools.
 	if output, err := w.dmsetup.Message(w.poolName, 0, reserveMetadataMessage); err != nil {