From 0765b15fc5c0d9e03a14682f25948724fafa6836 Mon Sep 17 00:00:00 2001 From: Luiz Aoqui Date: Wed, 8 Mar 2023 19:15:53 -0500 Subject: [PATCH] allocrunner: fix health check monitoring for Consul services Services must be canonicalized and have their values interpolated before comparing with the values returned by Consul. --- client/allochealth/tracker.go | 18 +++- client/allochealth/tracker_test.go | 107 ++++++++++++++++++++--- client/allocrunner/alloc_runner_hooks.go | 4 +- client/allocrunner/health_hook.go | 18 +++- client/allocrunner/health_hook_test.go | 29 ++++-- nomad/structs/structs.go | 8 ++ 6 files changed, 155 insertions(+), 29 deletions(-) diff --git a/client/allochealth/tracker.go b/client/allochealth/tracker.go index 7785699bb412..2cba39affcbe 100644 --- a/client/allochealth/tracker.go +++ b/client/allochealth/tracker.go @@ -12,6 +12,7 @@ import ( "github.com/hashicorp/nomad/client/serviceregistration" "github.com/hashicorp/nomad/client/serviceregistration/checks/checkstore" cstructs "github.com/hashicorp/nomad/client/structs" + "github.com/hashicorp/nomad/client/taskenv" "github.com/hashicorp/nomad/helper" "github.com/hashicorp/nomad/nomad/structs" "golang.org/x/exp/maps" @@ -96,6 +97,9 @@ type Tracker struct { // name -> state taskHealth map[string]*taskHealthState + // taskEnv is used to interpolate dynamic values in services. 
+ taskEnv *taskenv.TaskEnv + // logger is for logging things logger hclog.Logger } @@ -111,6 +115,7 @@ func NewTracker( logger hclog.Logger, alloc *structs.Allocation, allocUpdates *cstructs.AllocListener, + taskEnv *taskenv.TaskEnv, consulClient serviceregistration.Handler, checkStore checkstore.Shim, minHealthyTime time.Duration, @@ -122,6 +127,7 @@ func NewTracker( allocStopped: make(chan struct{}), alloc: alloc, tg: alloc.Job.LookupTaskGroup(alloc.TaskGroup), + taskEnv: taskEnv, minHealthyTime: minHealthyTime, useChecks: useChecks, allocUpdates: allocUpdates, @@ -504,7 +510,7 @@ OUTER: passed := true // scan for missing or unhealthy consul checks - if !evaluateConsulChecks(t.tg, allocReg) { + if !evaluateConsulChecks(t.alloc, t.tg, t.taskEnv, allocReg) { t.setCheckHealth(false) passed = false } @@ -523,14 +529,20 @@ OUTER: } } -func evaluateConsulChecks(tg *structs.TaskGroup, registrations *serviceregistration.AllocRegistration) bool { +func evaluateConsulChecks( + alloc *structs.Allocation, + tg *structs.TaskGroup, + taskEnv *taskenv.TaskEnv, + registrations *serviceregistration.AllocRegistration, +) bool { // First, identify any case where a check definition is missing or outdated // on the Consul side. Note that because check names are not unique, we must // also keep track of the counts on each side and make sure those also match. 
- services := tg.ConsulServices() + services := taskenv.InterpolateServices(taskEnv, tg.ConsulServices()) expChecks := make(map[string]int) regChecks := make(map[string]int) for _, service := range services { + service.Canonicalize(alloc.JobID, alloc.TaskGroup, service.TaskName, alloc.Namespace) for _, check := range service.Checks { expChecks[check.Name]++ } diff --git a/client/allochealth/tracker_test.go b/client/allochealth/tracker_test.go index 38bfb9f8ea05..483ed727aa5f 100644 --- a/client/allochealth/tracker_test.go +++ b/client/allochealth/tracker_test.go @@ -14,6 +14,7 @@ import ( regmock "github.com/hashicorp/nomad/client/serviceregistration/mock" "github.com/hashicorp/nomad/client/state" cstructs "github.com/hashicorp/nomad/client/structs" + "github.com/hashicorp/nomad/client/taskenv" "github.com/hashicorp/nomad/helper" "github.com/hashicorp/nomad/helper/testlog" "github.com/hashicorp/nomad/nomad/mock" @@ -83,7 +84,9 @@ func TestTracker_ConsulChecks_Healthy(t *testing.T) { checks := checkstore.NewStore(logger, state.NewMemDB(logger)) checkInterval := 10 * time.Millisecond - tracker := NewTracker(ctx, logger, alloc, b.Listen(), consul, checks, time.Millisecond, true) + taskEnv := taskenv.NewBuilder(mock.Node(), alloc, nil, alloc.Job.Region).Build() + + tracker := NewTracker(ctx, logger, alloc, b.Listen(), taskEnv, consul, checks, time.Millisecond, true) tracker.checkLookupInterval = checkInterval tracker.Start() @@ -134,7 +137,9 @@ func TestTracker_NomadChecks_Healthy(t *testing.T) { consul := regmock.NewServiceRegistrationHandler(logger) checkInterval := 10 * time.Millisecond - tracker := NewTracker(ctx, logger, alloc, b.Listen(), consul, checks, time.Millisecond, true) + taskEnv := taskenv.NewBuilder(mock.Node(), alloc, nil, alloc.Job.Region).Build() + + tracker := NewTracker(ctx, logger, alloc, b.Listen(), taskEnv, consul, checks, time.Millisecond, true) tracker.checkLookupInterval = checkInterval tracker.Start() @@ -201,7 +206,9 @@ func 
TestTracker_NomadChecks_Unhealthy(t *testing.T) { consul := regmock.NewServiceRegistrationHandler(logger) checkInterval := 10 * time.Millisecond - tracker := NewTracker(ctx, logger, alloc, b.Listen(), consul, checks, time.Millisecond, true) + taskEnv := taskenv.NewBuilder(mock.Node(), alloc, nil, alloc.Job.Region).Build() + + tracker := NewTracker(ctx, logger, alloc, b.Listen(), taskEnv, consul, checks, time.Millisecond, true) tracker.checkLookupInterval = checkInterval tracker.Start() @@ -260,7 +267,9 @@ func TestTracker_Checks_PendingPostStop_Healthy(t *testing.T) { checks := checkstore.NewStore(logger, state.NewMemDB(logger)) checkInterval := 10 * time.Millisecond - tracker := NewTracker(ctx, logger, alloc, b.Listen(), consul, checks, time.Millisecond, true) + taskEnv := taskenv.NewBuilder(mock.Node(), alloc, nil, alloc.Job.Region).Build() + + tracker := NewTracker(ctx, logger, alloc, b.Listen(), taskEnv, consul, checks, time.Millisecond, true) tracker.checkLookupInterval = checkInterval tracker.Start() @@ -301,7 +310,9 @@ func TestTracker_Succeeded_PostStart_Healthy(t *testing.T) { checks := checkstore.NewStore(logger, state.NewMemDB(logger)) checkInterval := 10 * time.Millisecond - tracker := NewTracker(ctx, logger, alloc, b.Listen(), consul, checks, alloc.Job.TaskGroups[0].Migrate.MinHealthyTime, true) + taskEnv := taskenv.NewBuilder(mock.Node(), alloc, nil, alloc.Job.Region).Build() + + tracker := NewTracker(ctx, logger, alloc, b.Listen(), taskEnv, consul, checks, alloc.Job.TaskGroups[0].Migrate.MinHealthyTime, true) tracker.checkLookupInterval = checkInterval tracker.Start() @@ -380,7 +391,9 @@ func TestTracker_ConsulChecks_Unhealthy(t *testing.T) { checks := checkstore.NewStore(logger, state.NewMemDB(logger)) checkInterval := 10 * time.Millisecond - tracker := NewTracker(ctx, logger, alloc, b.Listen(), consul, checks, time.Millisecond, true) + taskEnv := taskenv.NewBuilder(mock.Node(), alloc, nil, alloc.Job.Region).Build() + + tracker := NewTracker(ctx, 
logger, alloc, b.Listen(), taskEnv, consul, checks, time.Millisecond, true) tracker.checkLookupInterval = checkInterval tracker.Start() @@ -459,7 +472,9 @@ func TestTracker_ConsulChecks_HealthyToUnhealthy(t *testing.T) { checks := checkstore.NewStore(logger, state.NewMemDB(logger)) checkInterval := 10 * time.Millisecond minHealthyTime := 2 * time.Second - tracker := NewTracker(ctx, logger, alloc, b.Listen(), consul, checks, minHealthyTime, true) + taskEnv := taskenv.NewBuilder(mock.Node(), alloc, nil, alloc.Job.Region).Build() + + tracker := NewTracker(ctx, logger, alloc, b.Listen(), taskEnv, consul, checks, minHealthyTime, true) tracker.checkLookupInterval = checkInterval assertChecksHealth := func(exp bool) { @@ -548,7 +563,9 @@ func TestTracker_ConsulChecks_SlowCheckRegistration(t *testing.T) { consul := regmock.NewServiceRegistrationHandler(logger) checks := checkstore.NewStore(logger, state.NewMemDB(logger)) checkInterval := 10 * time.Millisecond - tracker := NewTracker(ctx, logger, alloc, b.Listen(), consul, checks, time.Millisecond, true) + taskEnv := taskenv.NewBuilder(mock.Node(), alloc, nil, alloc.Job.Region).Build() + + tracker := NewTracker(ctx, logger, alloc, b.Listen(), taskEnv, consul, checks, time.Millisecond, true) tracker.checkLookupInterval = checkInterval assertChecksHealth := func(exp bool) { @@ -599,7 +616,8 @@ func TestTracker_Healthy_IfBothTasksAndConsulChecksAreHealthy(t *testing.T) { ctx, cancelFn := context.WithCancel(context.Background()) defer cancelFn() - tracker := NewTracker(ctx, logger, alloc, nil, nil, nil, time.Millisecond, true) + taskEnv := taskenv.NewBuilder(mock.Node(), alloc, nil, alloc.Job.Region).Build() + tracker := NewTracker(ctx, logger, alloc, nil, taskEnv, nil, nil, time.Millisecond, true) assertNoHealth := func() { require.NoError(t, tracker.ctx.Err()) @@ -708,7 +726,9 @@ func TestTracker_Checks_Healthy_Before_TaskHealth(t *testing.T) { checks := checkstore.NewStore(logger, state.NewMemDB(logger)) checkInterval := 10 
* time.Millisecond - tracker := NewTracker(ctx, logger, alloc, b.Listen(), consul, checks, time.Millisecond, true) + taskEnv := taskenv.NewBuilder(mock.Node(), alloc, nil, alloc.Job.Region).Build() + + tracker := NewTracker(ctx, logger, alloc, b.Listen(), taskEnv, consul, checks, time.Millisecond, true) tracker.checkLookupInterval = checkInterval tracker.Start() @@ -853,7 +873,9 @@ func TestTracker_ConsulChecks_OnUpdate(t *testing.T) { checks := checkstore.NewStore(logger, state.NewMemDB(logger)) checkInterval := 10 * time.Millisecond - tracker := NewTracker(ctx, logger, alloc, b.Listen(), consul, checks, time.Millisecond, true) + taskEnv := taskenv.NewBuilder(mock.Node(), alloc, nil, alloc.Job.Region).Build() + + tracker := NewTracker(ctx, logger, alloc, b.Listen(), taskEnv, consul, checks, time.Millisecond, true) tracker.checkLookupInterval = checkInterval tracker.Start() @@ -971,7 +993,9 @@ func TestTracker_NomadChecks_OnUpdate(t *testing.T) { consul := regmock.NewServiceRegistrationHandler(logger) minHealthyTime := 1 * time.Millisecond - tracker := NewTracker(ctx, logger, alloc, b.Listen(), consul, checks, minHealthyTime, true) + taskEnv := taskenv.NewBuilder(mock.Node(), alloc, nil, alloc.Job.Region).Build() + + tracker := NewTracker(ctx, logger, alloc, b.Listen(), taskEnv, consul, checks, minHealthyTime, true) tracker.checkLookupInterval = 10 * time.Millisecond tracker.Start() @@ -1273,11 +1297,68 @@ func TestTracker_evaluateConsulChecks(t *testing.T) { }, }, }, + { + name: "checks with variable interpolation", + exp: true, + tg: &structs.TaskGroup{ + Services: []*structs.Service{{ + Name: "${TASKGROUP}-group-s1-${NOMAD_REGION}", + Checks: []*structs.ServiceCheck{ + {Name: ""}, + }, + }}, + Tasks: []*structs.Task{ + { + Services: []*structs.Service{ + { + Name: "${TASK}-task-s2-${NOMAD_NAMESPACE}", + TaskName: "web", + Checks: []*structs.ServiceCheck{ + {Name: ""}, + }, + }, + }, + }, + }, + }, + registrations: &serviceregistration.AllocRegistration{ + Tasks: 
map[string]*serviceregistration.ServiceRegistrations{ + "group": { + Services: map[string]*serviceregistration.ServiceRegistration{ + "abc123": { + ServiceID: "abc123", + Checks: []*consulapi.AgentCheck{ + { + Name: `service: "web-group-s1-global" check`, + Status: consulapi.HealthPassing, + }, + }, + }, + }, + }, + "task": { + Services: map[string]*serviceregistration.ServiceRegistration{ + "def234": { + ServiceID: "def234", + Checks: []*consulapi.AgentCheck{ + { + Name: `service: "web-task-s2-default" check`, + Status: consulapi.HealthPassing, + }, + }, + }, + }, + }, + }, + }, + }, } for _, tc := range cases { t.Run(tc.name, func(t *testing.T) { - result := evaluateConsulChecks(tc.tg, tc.registrations) + alloc := mock.Alloc() + taskEnv := taskenv.NewBuilder(mock.Node(), alloc, nil, alloc.Job.Region).Build() + result := evaluateConsulChecks(alloc, tc.tg, taskEnv, tc.registrations) must.Eq(t, tc.exp, result) }) } diff --git a/client/allocrunner/alloc_runner_hooks.go b/client/allocrunner/alloc_runner_hooks.go index 8b39851fb75b..4d90250dfd29 100644 --- a/client/allocrunner/alloc_runner_hooks.go +++ b/client/allocrunner/alloc_runner_hooks.go @@ -138,7 +138,7 @@ func (ar *allocRunner) initRunnerHooks(config *clientconfig.Config) error { config.Node, ar.Alloc(), nil, config.Region).SetAllocDir(ar.allocDir.AllocDir) // Create a taskenv.TaskEnv which is used for read only purposes by the - // newNetworkHook. + // hooks that need to interpolate dynamic values. builtTaskEnv := envBuilder.Build() // Create the alloc directory hook. 
This is run first to ensure the @@ -149,7 +149,7 @@ func (ar *allocRunner) initRunnerHooks(config *clientconfig.Config) error { newCgroupHook(ar.Alloc(), ar.cpusetManager), newUpstreamAllocsHook(hookLogger, ar.prevAllocWatcher), newDiskMigrationHook(hookLogger, ar.prevAllocMigrator, ar.allocDir), - newAllocHealthWatcherHook(hookLogger, alloc, hs, ar.Listener(), ar.consulClient, ar.checkStore), + newAllocHealthWatcherHook(hookLogger, alloc, builtTaskEnv, hs, ar.Listener(), ar.consulClient, ar.checkStore), newNetworkHook(hookLogger, ns, alloc, nm, nc, ar, builtTaskEnv), newGroupServiceHook(groupServiceHookConfig{ alloc: alloc, diff --git a/client/allocrunner/health_hook.go b/client/allocrunner/health_hook.go index bf799a97bf20..9921a732fa66 100644 --- a/client/allocrunner/health_hook.go +++ b/client/allocrunner/health_hook.go @@ -12,6 +12,7 @@ import ( "github.com/hashicorp/nomad/client/serviceregistration" "github.com/hashicorp/nomad/client/serviceregistration/checks/checkstore" cstructs "github.com/hashicorp/nomad/client/structs" + "github.com/hashicorp/nomad/client/taskenv" "github.com/hashicorp/nomad/nomad/structs" ) @@ -64,6 +65,9 @@ type allocHealthWatcherHook struct { // alloc set by new func or Update. Must hold hookLock to access. alloc *structs.Allocation + // taskEnv is used to interpolate dynamic values in services. + taskEnv *taskenv.TaskEnv + // isDeploy is true if monitoring a deployment. Set in init(). Must // hold hookLock to access. 
isDeploy bool @@ -71,8 +75,15 @@ type allocHealthWatcherHook struct { logger hclog.Logger } -func newAllocHealthWatcherHook(logger hclog.Logger, alloc *structs.Allocation, hs healthSetter, - listener *cstructs.AllocListener, consul serviceregistration.Handler, checkStore checkstore.Shim) interfaces.RunnerHook { +func newAllocHealthWatcherHook( + logger hclog.Logger, + alloc *structs.Allocation, + taskEnv *taskenv.TaskEnv, + hs healthSetter, + listener *cstructs.AllocListener, + consul serviceregistration.Handler, + checkStore checkstore.Shim, +) interfaces.RunnerHook { // Neither deployments nor migrations care about the health of // non-service jobs so never watch their health @@ -86,6 +97,7 @@ func newAllocHealthWatcherHook(logger hclog.Logger, alloc *structs.Allocation, h h := &allocHealthWatcherHook{ alloc: alloc, + taskEnv: taskEnv, cancelFn: func() {}, // initialize to prevent nil func panics watchDone: closedDone, consul: consul, @@ -138,7 +150,7 @@ func (h *allocHealthWatcherHook) init() error { h.logger.Trace("watching", "deadline", deadline, "checks", useChecks, "min_healthy_time", minHealthyTime) // Create a new tracker, start it, and watch for health results. 
tracker := allochealth.NewTracker( - ctx, h.logger, h.alloc, h.listener, h.consul, h.checkStore, minHealthyTime, useChecks, + ctx, h.logger, h.alloc, h.listener, h.taskEnv, h.consul, h.checkStore, minHealthyTime, useChecks, ) tracker.Start() diff --git a/client/allocrunner/health_hook_test.go b/client/allocrunner/health_hook_test.go index 3d885dffd06a..7d9e2c6a3378 100644 --- a/client/allocrunner/health_hook_test.go +++ b/client/allocrunner/health_hook_test.go @@ -11,6 +11,7 @@ import ( "github.com/hashicorp/nomad/client/serviceregistration" regMock "github.com/hashicorp/nomad/client/serviceregistration/mock" cstructs "github.com/hashicorp/nomad/client/structs" + "github.com/hashicorp/nomad/client/taskenv" "github.com/hashicorp/nomad/helper/testlog" "github.com/hashicorp/nomad/helper/uuid" "github.com/hashicorp/nomad/nomad/mock" @@ -97,7 +98,9 @@ func TestHealthHook_PrerunPostrun(t *testing.T) { hs := &mockHealthSetter{} checks := new(mock.CheckShim) - h := newAllocHealthWatcherHook(logger, mock.Alloc(), hs, b.Listen(), consul, checks) + alloc := mock.Alloc() + taskEnv := taskenv.NewBuilder(mock.Node(), alloc, nil, alloc.Job.Region).Build() + h := newAllocHealthWatcherHook(logger, alloc.Copy(), taskEnv, hs, b.Listen(), consul, checks) // Assert we implemented the right interfaces prerunh, ok := h.(interfaces.RunnerPrerunHook) @@ -136,7 +139,8 @@ func TestHealthHook_PrerunUpdatePostrun(t *testing.T) { hs := &mockHealthSetter{} checks := new(mock.CheckShim) - h := newAllocHealthWatcherHook(logger, alloc.Copy(), hs, b.Listen(), consul, checks).(*allocHealthWatcherHook) + taskEnv := taskenv.NewBuilder(mock.Node(), alloc, nil, alloc.Job.Region).Build() + h := newAllocHealthWatcherHook(logger, alloc.Copy(), taskEnv, hs, b.Listen(), consul, checks).(*allocHealthWatcherHook) // Prerun require.NoError(h.Prerun()) @@ -176,7 +180,8 @@ func TestHealthHook_UpdatePrerunPostrun(t *testing.T) { hs := &mockHealthSetter{} checks := new(mock.CheckShim) - h := 
newAllocHealthWatcherHook(logger, alloc.Copy(), hs, b.Listen(), consul, checks).(*allocHealthWatcherHook) + taskEnv := taskenv.NewBuilder(mock.Node(), alloc, nil, alloc.Job.Region).Build() + h := newAllocHealthWatcherHook(logger, alloc.Copy(), taskEnv, hs, b.Listen(), consul, checks).(*allocHealthWatcherHook) // Set a DeploymentID to cause ClearHealth to be called alloc.DeploymentID = uuid.Generate() @@ -217,8 +222,10 @@ func TestHealthHook_Postrun(t *testing.T) { consul := regMock.NewServiceRegistrationHandler(logger) hs := &mockHealthSetter{} + alloc := mock.Alloc() checks := new(mock.CheckShim) - h := newAllocHealthWatcherHook(logger, mock.Alloc(), hs, b.Listen(), consul, checks).(*allocHealthWatcherHook) + taskEnv := taskenv.NewBuilder(mock.Node(), alloc, nil, alloc.Job.Region).Build() + h := newAllocHealthWatcherHook(logger, alloc.Copy(), taskEnv, hs, b.Listen(), consul, checks).(*allocHealthWatcherHook) // Postrun require.NoError(h.Postrun()) @@ -285,7 +292,8 @@ func TestHealthHook_SetHealth_healthy(t *testing.T) { hs := newMockHealthSetter() checks := new(mock.CheckShim) - h := newAllocHealthWatcherHook(logger, alloc.Copy(), hs, b.Listen(), consul, checks).(*allocHealthWatcherHook) + taskEnv := taskenv.NewBuilder(mock.Node(), alloc, nil, alloc.Job.Region).Build() + h := newAllocHealthWatcherHook(logger, alloc.Copy(), taskEnv, hs, b.Listen(), consul, checks).(*allocHealthWatcherHook) // Prerun require.NoError(h.Prerun()) @@ -374,7 +382,8 @@ func TestHealthHook_SetHealth_unhealthy(t *testing.T) { hs := newMockHealthSetter() checks := new(mock.CheckShim) - h := newAllocHealthWatcherHook(logger, alloc.Copy(), hs, b.Listen(), consul, checks).(*allocHealthWatcherHook) + taskEnv := taskenv.NewBuilder(mock.Node(), alloc, nil, alloc.Job.Region).Build() + h := newAllocHealthWatcherHook(logger, alloc.Copy(), taskEnv, hs, b.Listen(), consul, checks).(*allocHealthWatcherHook) // Prerun require.NoError(h.Prerun()) @@ -395,7 +404,9 @@ func 
TestHealthHook_SetHealth_unhealthy(t *testing.T) { func TestHealthHook_SystemNoop(t *testing.T) { ci.Parallel(t) - h := newAllocHealthWatcherHook(testlog.HCLogger(t), mock.SystemAlloc(), nil, nil, nil, nil) + alloc := mock.SystemAlloc() + taskEnv := taskenv.NewBuilder(mock.Node(), alloc, nil, alloc.Job.Region).Build() + h := newAllocHealthWatcherHook(testlog.HCLogger(t), alloc.Copy(), taskEnv, nil, nil, nil, nil) // Assert that it's the noop impl _, ok := h.(noopAllocHealthWatcherHook) @@ -416,7 +427,9 @@ func TestHealthHook_SystemNoop(t *testing.T) { func TestHealthHook_BatchNoop(t *testing.T) { ci.Parallel(t) - h := newAllocHealthWatcherHook(testlog.HCLogger(t), mock.BatchAlloc(), nil, nil, nil, nil) + alloc := mock.BatchAlloc() + taskEnv := taskenv.NewBuilder(mock.Node(), alloc, nil, alloc.Job.Region).Build() + h := newAllocHealthWatcherHook(testlog.HCLogger(t), alloc.Copy(), taskEnv, nil, nil, nil, nil) // Assert that it's the noop impl _, ok := h.(noopAllocHealthWatcherHook) diff --git a/nomad/structs/structs.go b/nomad/structs/structs.go index efa4d0fc6228..c65da0548432 100644 --- a/nomad/structs/structs.go +++ b/nomad/structs/structs.go @@ -6460,12 +6460,20 @@ func (tg *TaskGroup) Canonicalize(job *Job) { // NomadServices returns a list of all group and task - level services in tg that // are making use of the nomad service provider. +// +// The returned services must be canonicalized and go through a taskenv.TaskEnv +// to have their values interpolated. func (tg *TaskGroup) NomadServices() []*Service { return tg.filterServices(func(s *Service) bool { return s.Provider == ServiceProviderNomad }) } +// ConsulServices returns a list of all group and task - level services in tg +// that are making use of the consul service provider. +// +// The returned services must be canonicalized and go through a taskenv.TaskEnv +// to have their values interpolated. 
func (tg *TaskGroup) ConsulServices() []*Service { return tg.filterServices(func(s *Service) bool { return s.Provider == ServiceProviderConsul || s.Provider == ""