Merge pull request #278 from csbell/fw-name
Extend ConfigMap to store fwrule names
nicksardo authored Mar 6, 2017
2 parents a34b11f + 68097e9 commit a6e3822
Showing 9 changed files with 220 additions and 95 deletions.
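In outline, this change threads a firewall rule name through utils.Namer alongside the cluster UID and persists it in the uid ConfigMap under its own key, so the firewall name can diverge from the UID and survive controller restarts. A minimal sketch of the resulting call pattern, assuming the import path and the small wrapper function below (neither is part of this commit):

// Sketch only: mirrors the two-argument NewNamer and the FrName/FrSuffix usage in this diff.
package example

import "k8s.io/ingress/controllers/gce/utils" // assumed import path

// firewallRuleName derives the firewall rule name; firewallName defaults to the
// cluster UID for pre-existing clusters, matching getFirewallName in main.go.
func firewallRuleName(clusterUID, firewallName string) string {
	namer := utils.NewNamer(clusterUID, firewallName)
	return namer.FrName(namer.FrSuffix())
}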
4 changes: 4 additions & 0 deletions controllers/gce/controller/controller.go
@@ -46,6 +46,10 @@ var (
// L7 controller created without specifying the --cluster-uid flag.
DefaultClusterUID = ""

// DefaultFirewallName is the name to use for firewall rules created
// by an L7 controller when the --firewall-rule flag is not used.
DefaultFirewallName = ""

// Frequency to poll on local stores to sync.
storeSyncPollPeriod = 5 * time.Second
)
12 changes: 7 additions & 5 deletions controllers/gce/controller/controller_test.go
@@ -199,7 +199,8 @@ func addIngress(lbc *LoadBalancerController, ing *extensions.Ingress, pm *nodePo
}

func TestLbCreateDelete(t *testing.T) {
cm := NewFakeClusterManager(DefaultClusterUID)
testFirewallName := "quux"
cm := NewFakeClusterManager(DefaultClusterUID, testFirewallName)
lbc := newLoadBalancerController(t, cm, "")
inputMap1 := map[string]utils.FakeIngressRuleValueMap{
"foo.example.com": {
@@ -240,6 +241,7 @@ func TestLbCreateDelete(t *testing.T) {
unexpected := []int{pm.portMap["foo2svc"], pm.portMap["bar2svc"]}
expected := []int{pm.portMap["foo1svc"], pm.portMap["bar1svc"]}
firewallPorts := sets.NewString()
pm.namer.SetFirewallName(testFirewallName)
firewallName := pm.namer.FrName(pm.namer.FrSuffix())

if firewallRule, err := cm.firewallPool.(*firewalls.FirewallRules).GetFirewall(firewallName); err != nil {
@@ -290,7 +292,7 @@ func TestLbCreateDelete(t *testing.T) {
}

func TestLbFaultyUpdate(t *testing.T) {
cm := NewFakeClusterManager(DefaultClusterUID)
cm := NewFakeClusterManager(DefaultClusterUID, DefaultFirewallName)
lbc := newLoadBalancerController(t, cm, "")
inputMap := map[string]utils.FakeIngressRuleValueMap{
"foo.example.com": {
@@ -327,7 +329,7 @@ func TestLbFaultyUpdate(t *testing.T) {
}

func TestLbDefaulting(t *testing.T) {
cm := NewFakeClusterManager(DefaultClusterUID)
cm := NewFakeClusterManager(DefaultClusterUID, DefaultFirewallName)
lbc := newLoadBalancerController(t, cm, "")
// Make sure the controller plugs in the default values accepted by GCE.
ing := newIngress(map[string]utils.FakeIngressRuleValueMap{"": {"": "foo1svc"}})
@@ -345,7 +347,7 @@
}

func TestLbNoService(t *testing.T) {
cm := NewFakeClusterManager(DefaultClusterUID)
cm := NewFakeClusterManager(DefaultClusterUID, DefaultFirewallName)
lbc := newLoadBalancerController(t, cm, "")
inputMap := map[string]utils.FakeIngressRuleValueMap{
"foo.example.com": {
@@ -389,7 +391,7 @@ func TestLbNoService(t *testing.T) {
}

func TestLbChangeStaticIP(t *testing.T) {
cm := NewFakeClusterManager(DefaultClusterUID)
cm := NewFakeClusterManager(DefaultClusterUID, DefaultFirewallName)
lbc := newLoadBalancerController(t, cm, "")
inputMap := map[string]utils.FakeIngressRuleValueMap{
"foo.example.com": {
4 changes: 2 additions & 2 deletions controllers/gce/controller/fakes.go
@@ -44,12 +44,12 @@ type fakeClusterManager struct {
}

// NewFakeClusterManager creates a new fake ClusterManager.
func NewFakeClusterManager(clusterName string) *fakeClusterManager {
func NewFakeClusterManager(clusterName, firewallName string) *fakeClusterManager {
fakeLbs := loadbalancers.NewFakeLoadBalancers(clusterName)
fakeBackends := backends.NewFakeBackendServices(func(op int, be *compute.BackendService) error { return nil })
fakeIGs := instances.NewFakeInstanceGroups(sets.NewString())
fakeHCs := healthchecks.NewFakeHealthChecks()
namer := utils.NewNamer(clusterName)
namer := utils.NewNamer(clusterName, firewallName)

nodePool := instances.NewNodePool(fakeIGs)
nodePool.Init(&instances.FakeZoneLister{Zones: []string{"zone-a"}})
10 changes: 5 additions & 5 deletions controllers/gce/controller/util_test.go
@@ -32,7 +32,7 @@ import (
var firstPodCreationTime = time.Date(2006, 01, 02, 15, 04, 05, 0, time.UTC)

func TestZoneListing(t *testing.T) {
cm := NewFakeClusterManager(DefaultClusterUID)
cm := NewFakeClusterManager(DefaultClusterUID, DefaultFirewallName)
lbc := newLoadBalancerController(t, cm, "")
zoneToNode := map[string][]string{
"zone-1": {"n1"},
@@ -57,7 +57,7 @@
}

func TestInstancesAddedToZones(t *testing.T) {
cm := NewFakeClusterManager(DefaultClusterUID)
cm := NewFakeClusterManager(DefaultClusterUID, DefaultFirewallName)
lbc := newLoadBalancerController(t, cm, "")
zoneToNode := map[string][]string{
"zone-1": {"n1", "n2"},
@@ -92,7 +92,7 @@ func TestInstancesAddedToZones(t *testing.T) {
}

func TestProbeGetter(t *testing.T) {
cm := NewFakeClusterManager(DefaultClusterUID)
cm := NewFakeClusterManager(DefaultClusterUID, DefaultFirewallName)
lbc := newLoadBalancerController(t, cm, "")
nodePortToHealthCheck := map[int64]string{
3001: "/healthz",
@@ -110,7 +110,7 @@
}

func TestProbeGetterNamedPort(t *testing.T) {
cm := NewFakeClusterManager(DefaultClusterUID)
cm := NewFakeClusterManager(DefaultClusterUID, DefaultFirewallName)
lbc := newLoadBalancerController(t, cm, "")
nodePortToHealthCheck := map[int64]string{
3001: "/healthz",
@@ -133,7 +133,7 @@
}

func TestProbeGetterCrossNamespace(t *testing.T) {
cm := NewFakeClusterManager(DefaultClusterUID)
cm := NewFakeClusterManager(DefaultClusterUID, DefaultFirewallName)
lbc := newLoadBalancerController(t, cm, "")

firstPod := &api.Pod{
5 changes: 3 additions & 2 deletions controllers/gce/loadbalancers/loadbalancers_test.go
@@ -236,7 +236,8 @@ func TestUpdateUrlMapNoChanges(t *testing.T) {

func TestNameParsing(t *testing.T) {
clusterName := "123"
namer := utils.NewNamer(clusterName)
firewallName := clusterName
namer := utils.NewNamer(clusterName, firewallName)
fullName := namer.Truncate(fmt.Sprintf("%v-%v", forwardingRulePrefix, namer.LBName("testlb")))
annotationsMap := map[string]string{
fmt.Sprintf("%v/forwarding-rule", utils.K8sAnnotationPrefix): fullName,
@@ -308,7 +309,7 @@ func TestClusterNameChange(t *testing.T) {
}

func TestInvalidClusterNameChange(t *testing.T) {
namer := utils.NewNamer("test--123")
namer := utils.NewNamer("test--123", "test--123")
if got := namer.GetClusterName(); got != "123" {
t.Fatalf("Expected name 123, got %v", got)
}
118 changes: 86 additions & 32 deletions controllers/gce/main.go
@@ -215,7 +215,7 @@ func main() {

if *inCluster || *useRealCloud {
// Create cluster manager
namer, err := newNamer(kubeClient, *clusterName)
namer, err := newNamer(kubeClient, *clusterName, controller.DefaultFirewallName)
if err != nil {
glog.Fatalf("%v", err)
}
@@ -225,7 +225,7 @@
}
} else {
// Create fake cluster manager
clusterManager = controller.NewFakeClusterManager(*clusterName).ClusterManager
clusterManager = controller.NewFakeClusterManager(*clusterName, controller.DefaultFirewallName).ClusterManager
}

// Start loadbalancer controller
@@ -247,32 +247,100 @@
}
}

func newNamer(kubeClient client.Interface, clusterName string) (*utils.Namer, error) {
func newNamer(kubeClient client.Interface, clusterName string, fwName string) (*utils.Namer, error) {
name, err := getClusterUID(kubeClient, clusterName)
if err != nil {
return nil, err
}
fw_name, err := getFirewallName(kubeClient, fwName, name)
if err != nil {
return nil, err
}

namer := utils.NewNamer(name)
vault := storage.NewConfigMapVault(kubeClient, api.NamespaceSystem, uidConfigMapName)
namer := utils.NewNamer(name, fw_name)
uidVault := storage.NewConfigMapVault(kubeClient, api.NamespaceSystem, uidConfigMapName)

// Start a goroutine to poll the cluster UID config map
// We don't watch because we know exactly which configmap we want and this
// controller already watches 5 other resources, so it isn't worth the cost
// of another connection and complexity.
go wait.Forever(func() {
uid, found, err := vault.Get()
existing := namer.GetClusterName()
if found && uid != existing {
glog.Infof("Cluster uid changed from %v -> %v", existing, uid)
namer.SetClusterName(uid)
} else if err != nil {
glog.Errorf("Failed to reconcile cluster uid %v, currently set to %v", err, existing)
for _, key := range [...]string{storage.UidDataKey, storage.ProviderDataKey} {
val, found, err := uidVault.Get(key)
if err != nil {
glog.Errorf("Can't read uidConfigMap %v", uidConfigMapName)
} else if !found {
errmsg := fmt.Sprintf("Can't read %v from uidConfigMap %v", key, uidConfigMapName)
if key == storage.UidDataKey {
glog.Errorf(errmsg)
} else {
glog.V(4).Infof(errmsg)
}
} else {
switch key {
case storage.UidDataKey:
if uid := namer.GetClusterName(); uid != val {
glog.Infof("Cluster uid changed from %v -> %v", uid, val)
namer.SetClusterName(val)
}
case storage.ProviderDataKey:
if fw_name := namer.GetFirewallName(); fw_name != val {
glog.Infof("Cluster firewall name changed from %v -> %v", fw_name, val)
namer.SetFirewallName(val)
}
}
}
}
}, 5*time.Second)
return namer, nil
}

// useDefaultOrLookupVault returns 'default_name' if it is set; otherwise it looks the name up in a ConfigMap.
// The returned value follows this priority:
// If the provided 'default_name' is not empty, that name is used.
// This is effectively a client override via a command line flag.
// else, check cfgVault with 'cm_key' as a key and if found, use the associated value
// else, return an empty name, and pass along an error iff the ConfigMap lookup itself failed.
func useDefaultOrLookupVault(cfgVault *storage.ConfigMapVault, cm_key, default_name string) (string, error) {
if default_name != "" {
glog.Infof("Using user provided %v %v", cm_key, default_name)
// Don't save the name in the vault, so users can roll back by
// setting the accompanying flag to ""
return default_name, nil
}
val, found, err := cfgVault.Get(cm_key)
if err != nil {
// This can fail because of:
// 1. No such config map - found=false, err=nil
// 2. No such key in config map - found=false, err=nil
// 3. Apiserver flake - found=false, err!=nil
// It is not safe to proceed in 3.
return "", fmt.Errorf("Failed to retrieve %v: %v, returning empty name", cm_key, err)
} else if !found {
// Not found but safe to proceed.
return "", nil
}
glog.Infof("Using %v = %q saved in ConfigMap", cm_key, val)
return val, nil
}

// getFirewallName returns the firewall rule name to use for this cluster. For
// backwards compatibility, the firewall name defaults to the cluster UID.
// It uses useDefaultOrLookupVault to obtain a stored or flag-overridden value for the firewall name,
// else it falls back to the cluster UID (this retains backwards compatibility).
func getFirewallName(kubeClient client.Interface, name, cluster_uid string) (string, error) {
cfgVault := storage.NewConfigMapVault(kubeClient, api.NamespaceSystem, uidConfigMapName)
if fw_name, err := useDefaultOrLookupVault(cfgVault, storage.ProviderDataKey, name); err != nil {
return "", err
} else if fw_name != "" {
return fw_name, cfgVault.Put(storage.ProviderDataKey, fw_name)
} else {
glog.Infof("Using cluster UID %v as firewall name", cluster_uid)
return cluster_uid, cfgVault.Put(storage.ProviderDataKey, cluster_uid)
}
}

// getClusterUID returns the cluster UID. Rules for UID generation:
// If the user specifies a --cluster-uid param, it overrides everything
// else, check UID config map for a previously recorded uid
@@ -281,26 +349,12 @@ func newNamer(kubeClient client.Interface, clusterName string) (*utils.Namer, er
// else, allocate a new uid
func getClusterUID(kubeClient client.Interface, name string) (string, error) {
cfgVault := storage.NewConfigMapVault(kubeClient, api.NamespaceSystem, uidConfigMapName)
if name != "" {
glog.Infof("Using user provided cluster uid %v", name)
// Don't save the uid in the vault, so users can rollback through
// --cluster-uid=""
if name, err := useDefaultOrLookupVault(cfgVault, storage.UidDataKey, name); err != nil {
return "", err
} else if name != "" {
return name, nil
}

existingUID, found, err := cfgVault.Get()
if found {
glog.Infof("Using saved cluster uid %q", existingUID)
return existingUID, nil
} else if err != nil {
// This can fail because of:
// 1. No such config map - found=false, err=nil
// 2. No such key in config map - found=false, err=nil
// 3. Apiserver flake - found=false, err!=nil
// It is not safe to proceed in 3.
return "", fmt.Errorf("Failed to retrieve current uid: %v, using %q as name", err, name)
}

// Check if the cluster has an Ingress with ip
ings, err := kubeClient.Extensions().Ingresses(api.NamespaceAll).List(api.ListOptions{LabelSelector: labels.Everything()})
if err != nil {
@@ -311,10 +365,10 @@ func getClusterUID(kubeClient client.Interface, name string) (string, error) {
if len(ing.Status.LoadBalancer.Ingress) != 0 {
c := namer.ParseName(loadbalancers.GCEResourceName(ing.Annotations, "forwarding-rule"))
if c.ClusterName != "" {
return c.ClusterName, cfgVault.Put(c.ClusterName)
return c.ClusterName, cfgVault.Put(storage.UidDataKey, c.ClusterName)
}
glog.Infof("Found a working Ingress, assuming uid is empty string")
return "", cfgVault.Put("")
return "", cfgVault.Put(storage.UidDataKey, "")
}
}

@@ -329,7 +383,7 @@ func getClusterUID(kubeClient client.Interface, name string) (string, error) {
return "", err
}
uid := fmt.Sprintf("%x", b)
return uid, cfgVault.Put(uid)
return uid, cfgVault.Put(storage.UidDataKey, uid)
}

// getNodePort waits for the Service, and returns its first node port.
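For reference, a minimal sketch of reading back the two keys the controller now stores in the uid ConfigMap; the enclosing function is illustrative, and the identifiers (client, storage, api, glog, uidConfigMapName) are the ones main.go already imports or defines:

// Sketch only: the keyed Get calls and the two data keys follow the code in this commit.
func dumpVaultKeys(kubeClient client.Interface) {
	vault := storage.NewConfigMapVault(kubeClient, api.NamespaceSystem, uidConfigMapName)
	for _, key := range []string{storage.UidDataKey, storage.ProviderDataKey} {
		val, found, err := vault.Get(key)
		if err != nil || !found {
			// Missing or unreadable entries are tolerated, as in the poll loop above.
			continue
		}
		glog.Infof("%v = %q", key, val)
	}
}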