Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account-related emails.

Already on GitHub? Sign in to your account

Improve staleness logic by improving error handling and retrying a subset of errors #32

Merged
merged 34 commits into from
Jul 10, 2019
Merged
Show file tree
Hide file tree
Changes from 19 commits
Commits
Show all changes
34 commits
Select commit Hold shift + click to select a range
a896944
Merge conflicts
lrao100 Jun 18, 2019
0b5c536
Merge conflicts
lrao100 Jun 18, 2019
1b72891
Fix lint, add comments
lrao100 Jun 18, 2019
309c5de
Remove references to staleness duration
lrao100 Jun 18, 2019
92c1049
Reset error to empty
lrao100 Jun 18, 2019
8d16b51
Revert unintended changes
lrao100 Jun 18, 2019
9a30fa5
Revert unintended changes
lrao100 Jun 18, 2019
179380b
Fixes
lrao100 Jun 18, 2019
551cd23
Fix typo
lrao100 Jun 18, 2019
370d64b
Fix imports-ed
lrao100 Jun 18, 2019
11c0341
Fix error condition during first deploy
lrao100 Jun 19, 2019
fde1dde
Update error handling
lrao100 Jun 19, 2019
25d90c8
Fix integration test and manually update config flags for clean build
lrao100 Jun 20, 2019
e5b1f56
Fix integration test
lrao100 Jun 20, 2019
a161d31
Fix unit tests with updates
lrao100 Jun 20, 2019
c542e37
Add more unit tests
lrao100 Jun 20, 2019
d676d8c
Actually generate
lrao100 Jun 20, 2019
1fe6929
Remove staleness config from integ
lrao100 Jun 20, 2019
c207e36
Fix space
lrao100 Jun 20, 2019
10c48df
First pass at review comments
lrao100 Jun 24, 2019
0cd10d4
Second pass at review comments
lrao100 Jun 25, 2019
1808750
Improve error codes based on local testing
lrao100 Jun 25, 2019
3f85790
Improving backoff with jitter and increasing default retries
lrao100 Jun 27, 2019
60919ed
Refactor to make the retry checks simpler
lrao100 Jun 27, 2019
3d245d9
Use handle() to retry instead of sleeping in the goroutine
lrao100 Jun 28, 2019
d102c02
Fix deep copy gen
lrao100 Jun 28, 2019
d8de801
Fix lint
lrao100 Jun 28, 2019
85081ed
Separate methods for retry and failfast methods, add retries for subm…
lrao100 Jul 3, 2019
d421422
Fix error code on submit job
lrao100 Jul 3, 2019
9aaa9f2
Simplify error types to 1
lrao100 Jul 9, 2019
057d411
Fix integration test
lrao100 Jul 10, 2019
29d6ad3
Update local config
lrao100 Jul 10, 2019
741ab17
Resolve conflicts
lrao100 Jul 10, 2019
9b59d32
Update integ direct mode config
lrao100 Jul 10, 2019
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
1 change: 0 additions & 1 deletion config/test/flinkk8soperator_config.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -4,7 +4,6 @@
operator:
ingressUrlFormat: "{{$jobCluster}}.lyft.xyz"
containerNameFormat: "%s-unknown"
statemachineStalenessDuration: 10m
logger:
show-source: true
level: 4
11 changes: 5 additions & 6 deletions integ/main_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -76,12 +76,11 @@ func (s *IntegSuite) SetUpSuite(c *C) {

if runDirect {
config := controllerConfig.Config{
LimitNamespace: namespace,
UseProxy: true,
ResyncPeriod: flyteConfig.Duration{Duration: 3 * time.Second},
StatemachineStalenessDuration: flyteConfig.Duration{Duration: 30 * time.Second},
MetricsPrefix: "flinkk8soperator",
ProxyPort: flyteConfig.Port{Port: 8001},
LimitNamespace: namespace,
UseProxy: true,
ResyncPeriod: flyteConfig.Duration{Duration: 3 * time.Second},
MetricsPrefix: "flinkk8soperator",
ProxyPort: flyteConfig.Port{Port: 8001},
}

log.Info("Running operator directly")
Expand Down
2 changes: 1 addition & 1 deletion integ/utils/utils.go
Original file line number Diff line number Diff line change
Expand Up @@ -139,7 +139,7 @@ func (f *TestUtil) CreateCRD() error {

func (f *TestUtil) CreateOperator() error {
configValue := make(map[string]string)
configValue["development"] = "operator:\n containerNameFormat: \"%s-unknown\"\n statemachineStalenessDuration: 40s\n resyncPeriod: 5s"
configValue["development"] = "operator:\n containerNameFormat: \"%s-unknown\"\n resyncPeriod: 5s"

configMap := v1.ConfigMap{
ObjectMeta: metav1.ObjectMeta{
Expand Down
1 change: 0 additions & 1 deletion local_config.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -5,7 +5,6 @@
operator:
useKubectlProxy: true
containerNameFormat: "%s-unknown"
statemachineStalenessDuration: 1m
metricsPrefix: "flinkk8soperator"
resyncPeriod: 10s
proxyPort: 8001
Expand Down
2 changes: 2 additions & 0 deletions pkg/apis/app/v1alpha1/types.go
Original file line number Diff line number Diff line change
Expand Up @@ -159,6 +159,8 @@ type FlinkApplicationStatus struct {
JobStatus FlinkJobStatus `json:"jobStatus"`
FailedDeployHash string `json:"failedUpdateHash,omitEmpty"`
DeployHash string `json:"deployHash"`
RetryCount int32 `json:"retryCount,omitEmpty"`
LastSeenError string `json:"lastSeenError,omitEmpty"`
}

func (in *FlinkApplicationStatus) GetPhase() FlinkApplicationPhase {
Expand Down
19 changes: 9 additions & 10 deletions pkg/controller/config/config.go
Original file line number Diff line number Diff line change
Expand Up @@ -11,16 +11,15 @@ const configSectionKey = "operator"
var ConfigSection = config.MustRegisterSection(configSectionKey, &Config{})

type Config struct {
ResyncPeriod config.Duration `json:"resyncPeriod" pflag:"\"30s\",Determines the resync period for all watchers."`
LimitNamespace string `json:"limitNamespace" pflag:"\"\",Namespaces to watch for by flink operator"`
MetricsPrefix string `json:"metricsPrefix" pflag:"\"flinkk8soperator\",Prefix for metrics propagated to prometheus"`
ProfilerPort config.Port `json:"prof-port" pflag:"\"10254\",Profiler port"`
FlinkIngressURLFormat string `json:"ingressUrlFormat"`
UseProxy bool `json:"useKubectlProxy"`
ProxyPort config.Port `json:"ProxyPort" pflag:"\"8001\",The port at which flink cluster runs locally"`
ContainerNameFormat string `json:"containerNameFormat"`
Workers int `json:"workers" pflag:"4,Number of routines to process custom resource"`
StatemachineStalenessDuration config.Duration `json:"statemachineStalenessDuration" pflag:"\"5m\",Duration for statemachine staleness."`
ResyncPeriod config.Duration `json:"resyncPeriod" pflag:"\"30s\",Determines the resync period for all watchers."`
LimitNamespace string `json:"limitNamespace" pflag:"\"\",Namespaces to watch for by flink operator"`
MetricsPrefix string `json:"metricsPrefix" pflag:"\"flinkk8soperator\",Prefix for metrics propagated to prometheus"`
ProfilerPort config.Port `json:"prof-port" pflag:"\"10254\",Profiler port"`
FlinkIngressURLFormat string `json:"ingressUrlFormat"`
UseProxy bool `json:"useKubectlProxy"`
ProxyPort config.Port `json:"ProxyPort" pflag:"\"8001\",The port at which flink cluster runs locally"`
ContainerNameFormat string `json:"containerNameFormat"`
Workers int `json:"workers" pflag:"4,Number of routines to process custom resource"`
}

func GetConfig() *Config {
Expand Down
31 changes: 29 additions & 2 deletions pkg/controller/config/config_flags.go

Some generated files are not rendered by default. Learn more about how customized files appear on GitHub.

22 changes: 0 additions & 22 deletions pkg/controller/config/config_flags_test.go

Some generated files are not rendered by default. Learn more about how customized files appear on GitHub.

57 changes: 28 additions & 29 deletions pkg/controller/flink/client/api.go
Original file line number Diff line number Diff line change
Expand Up @@ -101,18 +101,18 @@ func (c *FlinkJobManagerClient) GetJobConfig(ctx context.Context, url, jobID str
response, err := c.executeRequest(ctx, httpGet, url, nil)
if err != nil {
c.metrics.getJobConfigFailureCounter.Inc(ctx)
return nil, errors.Wrap(err, "GetJobConfig API request failed")
return nil, GetError(err, "GetJobConfig", globalFailure)
}

if response != nil && !response.IsSuccess() {
c.metrics.getJobConfigFailureCounter.Inc(ctx)
logger.Errorf(ctx, fmt.Sprintf("Get Jobconfig failed with response %v", response))
return nil, errors.New(fmt.Sprintf("Get Jobconfig failed with status %v", response.Status()))
return nil, GetError(err, "GetJobConfig", response.Status())
}
var jobConfigResponse JobConfigResponse
if err := json.Unmarshal(response.Body(), &jobConfigResponse); err != nil {
logger.Errorf(ctx, "Unable to Unmarshal jobPlanResponse %v, err: %v", response, err)
return nil, err
return nil, GetError(err, "GetJobConfig", jsonUnmarshalError)
}
c.metrics.getJobConfigSuccessCounter.Inc(ctx)
return &jobConfigResponse, nil
Expand All @@ -123,19 +123,19 @@ func (c *FlinkJobManagerClient) GetClusterOverview(ctx context.Context, url stri
response, err := c.executeRequest(ctx, httpGet, url, nil)
if err != nil {
c.metrics.getClusterFailureCounter.Inc(ctx)
return nil, errors.Wrap(err, "GetClusterOverview API request failed")
return nil, GetError(err, "GetClusterOverview", globalFailure)
}
if response != nil && !response.IsSuccess() {
c.metrics.getClusterFailureCounter.Inc(ctx)
if response.StatusCode() != int(http.StatusNotFound) || response.StatusCode() != int(http.StatusServiceUnavailable) {
logger.Errorf(ctx, fmt.Sprintf("Get cluster overview failed with response %v", response))
}
return nil, errors.New(fmt.Sprintf("Get cluster overview failed with status %v", response.Status()))
return nil, GetError(err, "GetClusterOverview", response.Status())
}
var clusterOverviewResponse ClusterOverviewResponse
if err = json.Unmarshal(response.Body(), &clusterOverviewResponse); err != nil {
logger.Errorf(ctx, "Unable to Unmarshal clusterOverviewResponse %v, err: %v", response, err)
return nil, err
return nil, GetError(err, "GetClusterOverview", jsonUnmarshalError)
}
c.metrics.getClusterSuccessCounter.Inc(ctx)
return &clusterOverviewResponse, nil
Expand Down Expand Up @@ -174,17 +174,17 @@ func (c *FlinkJobManagerClient) CancelJobWithSavepoint(ctx context.Context, url
response, err := c.executeRequest(ctx, httpPost, url, cancelJobRequest)
if err != nil {
c.metrics.cancelJobFailureCounter.Inc(ctx)
return "", errors.Wrap(err, "Cancel job API request failed")
return "", GetError(err, "CancelJobWithSavepoint", globalFailure)
}
if response != nil && !response.IsSuccess() {
c.metrics.cancelJobFailureCounter.Inc(ctx)
logger.Errorf(ctx, fmt.Sprintf("Cancel job failed with response %v", response))
return "", errors.New(fmt.Sprintf("Cancel job failed with status %v", response.Status()))
return "", GetError(err, "CancelJobWithSavepoint", response.Status())
}
var cancelJobResponse CancelJobResponse
if err = json.Unmarshal(response.Body(), &cancelJobResponse); err != nil {
logger.Errorf(ctx, "Unable to Unmarshal cancelJobResponse %v, err: %v", response, err)
return "", err
return "", GetError(err, "CancelJobWithSavepoint", jsonUnmarshalError)
}
c.metrics.cancelJobSuccessCounter.Inc(ctx)
return cancelJobResponse.TriggerID, nil
Expand All @@ -198,12 +198,12 @@ func (c *FlinkJobManagerClient) ForceCancelJob(ctx context.Context, url string,
response, err := c.executeRequest(ctx, httpPatch, url, nil)
if err != nil {
c.metrics.forceCancelJobFailureCounter.Inc(ctx)
return errors.Wrap(err, "Force cancel job API request failed")
return GetError(err, "ForceCancelJob", globalFailure)
}
if response != nil && !response.IsSuccess() {
c.metrics.forceCancelJobFailureCounter.Inc(ctx)
logger.Errorf(ctx, fmt.Sprintf("Force cancel job failed with response %v", response))
return errors.New(fmt.Sprintf("Force cancel job failed with status %v", response.Status()))
return GetError(err, "ForceCancelJob", response.Status())
}

c.metrics.forceCancelJobFailureCounter.Inc(ctx)
Expand All @@ -217,18 +217,17 @@ func (c *FlinkJobManagerClient) SubmitJob(ctx context.Context, url string, jarID
response, err := c.executeRequest(ctx, httpPost, url, submitJobRequest)
if err != nil {
c.metrics.submitJobFailureCounter.Inc(ctx)
return nil, errors.Wrap(err, "Submit job API request failed")
return nil, GetError(err, "SubmitJob", globalFailure)
}
if response != nil && !response.IsSuccess() {
c.metrics.submitJobFailureCounter.Inc(ctx)
logger.Warnf(ctx, fmt.Sprintf("Job submission failed with response %v", response))
return nil, errors.New(fmt.Sprintf("Job submission failed with status %v\n%s",
response.Status(), string(response.Body())))
return nil, GetError(err, "SubmitJob", response.Status())
glaksh100 marked this conversation as resolved.
Show resolved Hide resolved
}
var submitJobResponse SubmitJobResponse
if err = json.Unmarshal(response.Body(), &submitJobResponse); err != nil {
logger.Errorf(ctx, "Unable to Unmarshal submitJobResponse %v, err: %v", response, err)
return nil, err
return nil, GetError(err, "SubmitJob", jsonUnmarshalError)
}

c.metrics.submitJobSuccessCounter.Inc(ctx)
Expand All @@ -242,17 +241,17 @@ func (c *FlinkJobManagerClient) CheckSavepointStatus(ctx context.Context, url st
response, err := c.executeRequest(ctx, httpGet, url, nil)
if err != nil {
c.metrics.checkSavepointFailureCounter.Inc(ctx)
return nil, errors.Wrap(err, "Check savepoint status API request failed")
return nil, GetError(err, "CheckSavepointStatus", globalFailure)
}
if response != nil && !response.IsSuccess() {
c.metrics.checkSavepointFailureCounter.Inc(ctx)
logger.Errorf(ctx, fmt.Sprintf("Check savepoint status failed with response %v", response))
return nil, errors.New(fmt.Sprintf("Check savepoint status failed with status %v", response.Status()))
return nil, GetError(err, "CheckSavepointStatus", response.Status())
}
var savepointResponse SavepointResponse
if err = json.Unmarshal(response.Body(), &savepointResponse); err != nil {
logger.Errorf(ctx, "Unable to Unmarshal savepointResponse %v, err: %v", response, err)
return nil, err
return nil, GetError(err, "CheckSavepointStatus", jsonUnmarshalError)
}
c.metrics.cancelJobSuccessCounter.Inc(ctx)
return &savepointResponse, nil
Expand All @@ -263,18 +262,18 @@ func (c *FlinkJobManagerClient) GetJobs(ctx context.Context, url string) (*GetJo
response, err := c.executeRequest(ctx, httpGet, url, nil)
if err != nil {
c.metrics.getJobsFailureCounter.Inc(ctx)
return nil, errors.Wrap(err, "Get jobs API request failed")
return nil, GetError(err, "GetJobs", globalFailure)
}
if response != nil && !response.IsSuccess() {
c.metrics.getJobsFailureCounter.Inc(ctx)
logger.Errorf(ctx, fmt.Sprintf("GetJobs failed with response %v", response))
return nil, errors.New(fmt.Sprintf("GetJobs request failed with status %v", response.Status()))
return nil, GetError(err, "GetJobs", response.Status())
}
var getJobsResponse GetJobsResponse
if err = json.Unmarshal(response.Body(), &getJobsResponse); err != nil {
logger.Errorf(ctx, "%v", getJobsResponse)
logger.Errorf(ctx, "Unable to Unmarshal getJobsResponse %v, err: %v", response, err)
return nil, err
return nil, GetError(err, "GetJobs", response.Status())
}
c.metrics.getJobsSuccessCounter.Inc(ctx)
return &getJobsResponse, nil
Expand All @@ -285,11 +284,11 @@ func (c *FlinkJobManagerClient) GetLatestCheckpoint(ctx context.Context, url str
response, err := c.executeRequest(ctx, httpGet, endpoint, nil)
if err != nil {
c.metrics.getCheckpointsFailureCounter.Inc(ctx)
return nil, errors.Wrap(err, "get checkpoints failed")
return nil, GetError(err, "GetLatestCheckpoint", globalFailure)
}
if response != nil && !response.IsSuccess() {
c.metrics.getCheckpointsFailureCounter.Inc(ctx)
return nil, errors.New(fmt.Sprintf("get checkpoints failed with response %v", response))
return nil, GetError(err, "GetLatestCheckpoint", response.Status())
}

var checkpointResponse CheckpointResponse
Expand All @@ -305,11 +304,11 @@ func (c *FlinkJobManagerClient) GetTaskManagers(ctx context.Context, url string)
endpoint := url + taskmanagersURL
response, err := c.executeRequest(ctx, httpGet, endpoint, nil)
if err != nil {
return nil, errors.Wrap(err, "get taskmanagers failed")
return nil, GetError(err, "GetTaskManagers", globalFailure)
}

if response != nil && !response.IsSuccess() {
return nil, errors.New(fmt.Sprintf("get taskmanagers failed with response %v", response))
return nil, GetError(err, "GetTaskManagers", response.Status())
}

var taskmanagerResponse TaskManagersResponse
Expand All @@ -326,11 +325,11 @@ func (c *FlinkJobManagerClient) GetCheckpointCounts(ctx context.Context, url str
response, err := c.executeRequest(ctx, httpGet, endpoint, nil)
if err != nil {
c.metrics.getCheckpointsFailureCounter.Inc(ctx)
return nil, errors.Wrap(err, "get checkpoints failed")
return nil, GetError(err, "GetCheckpointCounts", globalFailure)
}
if response != nil && !response.IsSuccess() {
c.metrics.getCheckpointsFailureCounter.Inc(ctx)
return nil, errors.New(fmt.Sprintf("get checkpoints failed with response %v", response))
return nil, GetError(err, "GetCheckpointCounts", response.Status())
}

var checkpointResponse CheckpointResponse
Expand All @@ -346,11 +345,11 @@ func (c *FlinkJobManagerClient) GetJobOverview(ctx context.Context, url string,
endpoint := fmt.Sprintf(url+getJobsOverviewURL, jobID)
response, err := c.executeRequest(ctx, httpGet, endpoint, nil)
if err != nil {
return nil, errors.Wrap(err, "get job overview failed")
return nil, GetError(err, "GetJobOverview", globalFailure)
}
if response != nil && !response.IsSuccess() {
c.metrics.getCheckpointsFailureCounter.Inc(ctx)
return nil, errors.New(fmt.Sprintf("get job overview failed with response %v", response))
return nil, GetError(err, "GetJobOverview", response.Status())
}

var jobOverviewResponse FlinkJobOverview
Expand Down
Loading