diff --git a/cmd/sdk-server/main.go b/cmd/sdk-server/main.go
index 3166160813..aa2bafa18b 100644
--- a/cmd/sdk-server/main.go
+++ b/cmd/sdk-server/main.go
@@ -25,8 +25,10 @@ import (
 	"strings"
 	"time"
 
+	agonesv1 "agones.dev/agones/pkg/apis/agones/v1"
 	gwruntime "github.com/grpc-ecosystem/grpc-gateway/v2/runtime"
 	"github.com/pkg/errors"
+	"github.com/sirupsen/logrus"
 	"github.com/spf13/pflag"
 	"github.com/spf13/viper"
 	"github.com/tmc/grpc-websocket-proxy/wsproxy"
@@ -71,6 +73,12 @@ var (
 
 func main() {
 	ctlConf := parseEnvFlags()
+	logLevel, err := logrus.ParseLevel(ctlConf.LogLevel)
+	if err != nil {
+		logrus.WithError(err).Warn("Invalid LOG_LEVEL value. Defaulting to 'info'.")
+		logLevel = logrus.InfoLevel
+	}
+	logger.Logger.SetLevel(logLevel)
 
 	logger.WithField("version", pkg.Version).WithField("featureGates", runtime.EncodeFeatures()).
 		WithField("ctlConf", ctlConf).Info("Starting sdk sidecar")
@@ -139,7 +147,7 @@ func main() {
 
 		var s *sdkserver.SDKServer
 		s, err = sdkserver.NewSDKServer(ctlConf.GameServerName, ctlConf.PodNamespace,
-			kubeClient, agonesClient)
+			kubeClient, agonesClient, logLevel)
 		if err != nil {
 			logger.WithError(err).Fatalf("Could not start sidecar")
 		}
@@ -261,6 +269,7 @@ func runGateway(ctx context.Context, grpcEndpoint string, mux *gwruntime.ServeMu
 // parseEnvFlags parses all the flags and environment variables and returns
 // a configuration structure
 func parseEnvFlags() config {
+	var gs agonesv1.GameServer
 	viper.AllowEmptyEnv(true)
 	viper.SetDefault(localFlag, false)
 	viper.SetDefault(fileFlag, "")
@@ -272,6 +281,7 @@ func parseEnvFlags() config {
 	viper.SetDefault(gracefulTerminationFlag, true)
 	viper.SetDefault(grpcPortFlag, defaultGRPCPort)
 	viper.SetDefault(httpPortFlag, defaultHTTPPort)
+	viper.SetDefault("logLevel", string(gs.Spec.SdkServer.LogLevel))
 
 	pflag.String(gameServerNameFlag, viper.GetString(gameServerNameFlag), "Optional flag to set GameServer name. Overrides value given from `GAMESERVER_NAME` environment variable.")
 	pflag.String(podNamespaceFlag, viper.GetString(gameServerNameFlag),
@@ -307,8 +317,8 @@ func parseEnvFlags() config {
 	runtime.Must(viper.BindEnv(grpcPortFlag))
 	runtime.Must(viper.BindEnv(httpPortFlag))
 	runtime.Must(viper.BindPFlags(pflag.CommandLine))
+	runtime.Must(viper.BindEnv("logLevel", "SDK_LOG_LEVEL"))
 	runtime.Must(runtime.FeaturesBindEnv())
-
 	runtime.Must(runtime.ParseFeaturesFromEnv())
 
 	return config{
@@ -325,6 +335,7 @@ func parseEnvFlags() config {
 		GracefulTermination: viper.GetBool(gracefulTerminationFlag),
 		GRPCPort:            viper.GetInt(grpcPortFlag),
 		HTTPPort:            viper.GetInt(httpPortFlag),
+		LogLevel:            viper.GetString("logLevel"),
 	}
 }
 
@@ -343,6 +354,7 @@ type config struct {
 	GracefulTermination bool
 	GRPCPort            int
 	HTTPPort            int
+	LogLevel            string
 }
 
 // healthCheckWrapper ensures that an http 400 response is returned
diff --git a/pkg/gameservers/controller.go b/pkg/gameservers/controller.go
index 0e6ffa9b24..79d78d1174 100644
--- a/pkg/gameservers/controller.go
+++ b/pkg/gameservers/controller.go
@@ -659,6 +659,10 @@ func (c *Controller) sidecar(gs *agonesv1.GameServer) corev1.Container {
 				Name:  "FEATURE_GATES",
 				Value: runtime.EncodeFeatures(),
 			},
+			{
+				Name:  "LOG_LEVEL",
+				Value: string(gs.Spec.SdkServer.LogLevel),
+			},
 		},
 		Resources: corev1.ResourceRequirements{},
 		LivenessProbe: &corev1.Probe{
diff --git a/pkg/gameservers/controller_test.go b/pkg/gameservers/controller_test.go
index 1e8dfc0dc3..e05bef7d33 100644
--- a/pkg/gameservers/controller_test.go
+++ b/pkg/gameservers/controller_test.go
@@ -1208,11 +1208,13 @@ func TestControllerCreateGameServerPod(t *testing.T) {
 		assert.Equal(t, sidecarContainer.Resources.Requests.Cpu(), &c.sidecarCPURequest)
 		assert.Equal(t, sidecarContainer.Resources.Limits.Memory(), &c.sidecarMemoryLimit)
 		assert.Equal(t, sidecarContainer.Resources.Requests.Memory(), &c.sidecarMemoryRequest)
-		assert.Len(t, sidecarContainer.Env, 3, "3 env vars")
+		assert.Len(t, sidecarContainer.Env, 4, "4 env vars")
 		assert.Equal(t, "GAMESERVER_NAME", sidecarContainer.Env[0].Name)
 		assert.Equal(t, fixture.ObjectMeta.Name, sidecarContainer.Env[0].Value)
 		assert.Equal(t, "POD_NAMESPACE", sidecarContainer.Env[1].Name)
 		assert.Equal(t, "FEATURE_GATES", sidecarContainer.Env[2].Name)
+		assert.Equal(t, "LOG_LEVEL", sidecarContainer.Env[3].Name)
+		assert.Equal(t, string(fixture.Spec.SdkServer.LogLevel), sidecarContainer.Env[3].Value)
 
 		gsContainer := pod.Spec.Containers[1]
 		assert.Equal(t, fixture.Spec.Ports[0].HostPort, gsContainer.Ports[0].HostPort)
diff --git a/pkg/sdkserver/sdkserver.go b/pkg/sdkserver/sdkserver.go
index bd64371224..7102d5a796 100644
--- a/pkg/sdkserver/sdkserver.go
+++ b/pkg/sdkserver/sdkserver.go
@@ -137,7 +137,7 @@ type SDKServer struct {
 // NewSDKServer creates a SDKServer that sets up an
 // InClusterConfig for Kubernetes
 func NewSDKServer(gameServerName, namespace string, kubeClient kubernetes.Interface,
-	agonesClient versioned.Interface) (*SDKServer, error) {
+	agonesClient versioned.Interface, logLevel logrus.Level) (*SDKServer, error) {
 	mux := http.NewServeMux()
 	resync := 30 * time.Second
 	if runtime.FeatureEnabled(runtime.FeatureDisableResyncOnSDKServer) {
@@ -182,6 +182,7 @@ func NewSDKServer(gameServerName, namespace string, kubeClient kubernetes.Interf
 
 	s.informerFactory = factory
 	s.logger = runtime.NewLoggerWithType(s).WithField("gsKey", namespace+"/"+gameServerName)
+	s.logger.Logger.SetLevel(logLevel)
 
 	_, _ = gameServers.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{
 		UpdateFunc: func(_, newObj interface{}) {
@@ -246,20 +247,6 @@ func (s *SDKServer) Run(ctx context.Context) error {
 		return err
 	}
 
-	logLevel := agonesv1.SdkServerLogLevelInfo
-	// grab configuration details
-	if gs.Spec.SdkServer.LogLevel != "" {
-		logLevel = gs.Spec.SdkServer.LogLevel
-	}
-	s.logger.WithField("logLevel", logLevel).Debug("Setting LogLevel configuration")
-	level, err := logrus.ParseLevel(strings.ToLower(string(logLevel)))
-	if err == nil {
-		s.logger.Logger.SetLevel(level)
-	} else {
-		s.logger.WithError(err).Warn("Specified wrong Logging.SdkServer. Setting default loglevel - Info")
-		s.logger.Logger.SetLevel(logrus.InfoLevel)
-	}
-
 	s.health = gs.Spec.Health
 	s.logger.WithField("health", s.health).Debug("Setting health configuration")
 	s.healthTimeout = time.Duration(gs.Spec.Health.PeriodSeconds) * time.Second
diff --git a/pkg/sdkserver/sdkserver_test.go b/pkg/sdkserver/sdkserver_test.go
index e463cff0a1..95b2713fd2 100644
--- a/pkg/sdkserver/sdkserver_test.go
+++ b/pkg/sdkserver/sdkserver_test.go
@@ -184,7 +184,7 @@ func TestSidecarRun(t *testing.T) {
 				return true, gs, nil
 			})
 
-			sc, err := NewSDKServer("test", "default", m.KubeClient, m.AgonesClient)
+			sc, err := NewSDKServer("test", "default", m.KubeClient, m.AgonesClient, logrus.DebugLevel)
 			stop := make(chan struct{})
 			defer close(stop)
 			ctx, cancel := context.WithCancel(context.Background())
@@ -442,7 +442,7 @@ func TestSidecarUnhealthyMessage(t *testing.T) {
 	t.Parallel()
 
 	m := agtesting.NewMocks()
-	sc, err := NewSDKServer("test", "default", m.KubeClient, m.AgonesClient)
+	sc, err := NewSDKServer("test", "default", m.KubeClient, m.AgonesClient, logrus.DebugLevel)
 	require.NoError(t, err)
 
 	m.AgonesClient.AddReactor("list", "gameservers", func(action k8stesting.Action) (bool, runtime.Object, error) {
@@ -591,7 +591,7 @@ func TestSidecarHealthy(t *testing.T) {
 
 func TestSidecarHTTPHealthCheck(t *testing.T) {
 	m := agtesting.NewMocks()
-	sc, err := NewSDKServer("test", "default", m.KubeClient, m.AgonesClient)
+	sc, err := NewSDKServer("test", "default", m.KubeClient, m.AgonesClient, logrus.DebugLevel)
 	require.NoError(t, err)
 
 	now := time.Now().Add(time.Hour).UTC()
@@ -2325,7 +2325,7 @@ func TestSDKServerGracefulTerminationGameServerStateChannel(t *testing.T) {
 }
 
 func defaultSidecar(m agtesting.Mocks) (*SDKServer, error) {
-	server, err := NewSDKServer("test", "default", m.KubeClient, m.AgonesClient)
+	server, err := NewSDKServer("test", "default", m.KubeClient, m.AgonesClient, logrus.DebugLevel)
 	if err != nil {
 		return server, err
 	}
diff --git a/site/content/en/docs/Advanced/scheduling-and-autoscaling.md b/site/content/en/docs/Advanced/scheduling-and-autoscaling.md
index 09ded81aa8..93bf7107d0 100644
--- a/site/content/en/docs/Advanced/scheduling-and-autoscaling.md
+++ b/site/content/en/docs/Advanced/scheduling-and-autoscaling.md
@@ -154,6 +154,16 @@ on bare metal, and the cluster size rarely changes, if at all.
 This attempts to distribute the load across the entire cluster as much as possible, to take advantage of the static
 size of the cluster.
 
+{{% alert title="Note" color="info" %}}
+`Distributed` scheduling does not set
+a [`PodAffinity`](https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#inter-pod-affinity-and-anti-affinity-beta-feature)
+on `GameServer` `Pods`, and instead assumes that the default scheduler for your cluster will distribute the
+`GameServer` `Pods` across the cluster by default.
+
+If your default scheduler does not do this, you may wish to set your own `PodAffinity` to spread the load across the
+cluster, or update the default scheduler to provide this functionality.
+{{% /alert %}}
+
 This affects Allocation Scheduling, Pod Scheduling and Fleet Scale Down Scheduling.
 
 #### Cluster Autoscaler
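
Reviewer note: the fallback behavior added to `cmd/sdk-server/main.go` above can be sanity-checked in isolation. Below is a minimal, self-contained sketch — `parseLogLevelOrInfo` and the sample inputs are illustrative, not part of this change — showing how `logrus.ParseLevel` handles the values the sidecar may now receive via `LOG_LEVEL`, with invalid input degrading to `info` rather than aborting startup:

```go
// A standalone sketch mirroring the fallback logic added in cmd/sdk-server/main.go.
package main

import (
	"fmt"

	"github.com/sirupsen/logrus"
)

// parseLogLevelOrInfo is a hypothetical helper: an unparseable level logs a
// warning and degrades to logrus.InfoLevel instead of failing sidecar startup.
func parseLogLevelOrInfo(value string) logrus.Level {
	level, err := logrus.ParseLevel(value)
	if err != nil {
		logrus.WithError(err).Warn("Invalid LOG_LEVEL value. Defaulting to 'info'.")
		return logrus.InfoLevel
	}
	return level
}

func main() {
	// logrus.ParseLevel lowercases its input, so "Info" parses; "not-a-level"
	// triggers the warning-and-default path.
	for _, v := range []string{"debug", "Info", "not-a-level"} {
		fmt.Printf("%q -> %s\n", v, parseLogLevelOrInfo(v))
	}
}
```

A design note worth calling out: parsing in `main` (rather than in `SDKServer.Run`, where the removed block lived) applies the level before the first log lines are emitted, and lets `NewSDKServer` receive an already-validated `logrus.Level` instead of re-parsing the `GameServer` spec.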
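On the docs change: the new note says `Distributed` scheduling relies on the default scheduler to spread `GameServer` `Pods`. For operators who do want to set their own `PodAffinity` as the note suggests, here is a hedged sketch of what a soft spreading rule could look like when built in Go against the Kubernetes API — the `role: gameserver` label, the weight, and the topology key are assumptions for illustration, not values Agones sets:

```go
// Package scheduling is a sketch only; it is not part of this change.
package scheduling

import (
	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// gameServerSpread returns a soft anti-affinity asking the scheduler to spread
// Pods carrying a hypothetical "role: gameserver" label across nodes.
func gameServerSpread() *corev1.Affinity {
	return &corev1.Affinity{
		PodAntiAffinity: &corev1.PodAntiAffinity{
			PreferredDuringSchedulingIgnoredDuringExecution: []corev1.WeightedPodAffinityTerm{{
				Weight: 100, // soft preference; Pods still schedule if it cannot be met
				PodAffinityTerm: corev1.PodAffinityTerm{
					LabelSelector: &metav1.LabelSelector{
						MatchLabels: map[string]string{"role": "gameserver"},
					},
					// Spread across nodes; use a zone key to spread across zones instead.
					TopologyKey: "kubernetes.io/hostname",
				},
			}},
		},
	}
}
```

Using `PreferredDuringSchedulingIgnoredDuringExecution` keeps this a soft preference, so `Pods` still schedule on a busy cluster rather than going `Pending`.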