diff --git a/.yamato/gym-interface-test.yml b/.yamato/gym-interface-test.yml
index a24d6d7232..2501b8751b 100644
--- a/.yamato/gym-interface-test.yml
+++ b/.yamato/gym-interface-test.yml
@@ -13,7 +13,7 @@ test_gym_interface_{{ editor.version }}:
commands:
- pip install pyyaml
- python -u -m ml-agents.tests.yamato.setup_venv
- - ./venv/bin/python ml-agents/tests/yamato/scripts/run_gym.py --env=Project/testPlayer-Basic
+ - ./venv/bin/python ml-agents/tests/yamato/scripts/run_gym.py --env=artifacts/testPlayer-Basic
dependencies:
- .yamato/standalone-build-test.yml#test_mac_standalone_{{ editor.version }}
triggers:
diff --git a/.yamato/protobuf-generation-test.yml b/.yamato/protobuf-generation-test.yml
index 5afd7748dc..9805fc9794 100644
--- a/.yamato/protobuf-generation-test.yml
+++ b/.yamato/protobuf-generation-test.yml
@@ -36,6 +36,6 @@ test_mac_protobuf_generation:
- "protobuf-definitions/*.md"
- "protobuf-definitions/**/*.md"
artifacts:
- dist:
+ patch:
paths:
- - "artifacts/*"
+ - "artifacts/*.*"
diff --git a/.yamato/python-ll-api-test.yml b/.yamato/python-ll-api-test.yml
index aec111225a..ab666b9331 100644
--- a/.yamato/python-ll-api-test.yml
+++ b/.yamato/python-ll-api-test.yml
@@ -15,7 +15,7 @@ test_mac_ll_api_{{ editor.version }}:
- python -u -m ml-agents.tests.yamato.setup_venv
- ./venv/bin/python ml-agents/tests/yamato/scripts/run_llapi.py
dependencies:
- - .yamato/standalone-build-test.yml#test_mac_standalone_{{ editor.version }} --env=Project/testPlayer
+ - .yamato/standalone-build-test.yml#test_mac_standalone_{{ editor.version }}
triggers:
cancel_old_ci: true
changes:
diff --git a/.yamato/standalone-build-test.yml b/.yamato/standalone-build-test.yml
index eb0fd5168b..58d07f5804 100644
--- a/.yamato/standalone-build-test.yml
+++ b/.yamato/standalone-build-test.yml
@@ -27,7 +27,10 @@ test_mac_standalone_{{ editor.version }}:
- "com.unity.ml-agents/*.md"
- "com.unity.ml-agents/**/*.md"
artifacts:
+ logs:
+ paths:
+ - "artifacts/standalone_build.txt"
standalonebuild:
paths:
- - "Project/testPlayer*/**"
+ - "artifacts/testPlayer*/**"
{% endfor %}
diff --git a/.yamato/training-int-tests.yml b/.yamato/training-int-tests.yml
index b39bb0fc02..e929ecdce8 100644
--- a/.yamato/training-int-tests.yml
+++ b/.yamato/training-int-tests.yml
@@ -17,8 +17,8 @@ test_mac_training_int_{{ editor.version }}:
# Backwards-compatibility tests.
# If we make a breaking change to the communication protocol, these will need
# to be disabled until the next release.
- - python -u -m ml-agents.tests.yamato.training_int_tests --python=0.15.0
- - python -u -m ml-agents.tests.yamato.training_int_tests --csharp=0.15.0
+ # - python -u -m ml-agents.tests.yamato.training_int_tests --python=0.15.0
+ # - python -u -m ml-agents.tests.yamato.training_int_tests --csharp=0.15.0
dependencies:
- .yamato/standalone-build-test.yml#test_mac_standalone_{{ editor.version }}
triggers:
@@ -35,7 +35,10 @@ test_mac_training_int_{{ editor.version }}:
- "com.unity.ml-agents/*.md"
- "com.unity.ml-agents/**/*.md"
artifacts:
- unit:
+ logs:
paths:
- - "artifacts/**"
+ - "artifacts/standalone_build.txt"
+ standalonebuild:
+ paths:
+ - "artifacts/testplayer*/**"
{% endfor %}
diff --git a/com.unity.ml-agents/CHANGELOG.md b/com.unity.ml-agents/CHANGELOG.md
index 5f2093e0ef..9d91c544fe 100755
--- a/com.unity.ml-agents/CHANGELOG.md
+++ b/com.unity.ml-agents/CHANGELOG.md
@@ -15,6 +15,8 @@ and this project adheres to [Semantic Versioning](http://semver.org/spec/v2.0.0.
- Removed the multi-agent gym option from the gym wrapper. For multi-agent scenarios, use the [Low Level Python API](Python-API.md).
- The low level Python API has changed. You can look at the document [Low Level Python API documentation](Python-API.md) for more information. If you use `mlagents-learn` for training, this should be a transparent change.
- Added ability to start training (initialize model weights) from a previous run ID. (#3710)
+ - The internal event `Academy.AgentSetStatus` was renamed to `Academy.AgentPreStep` and made public.
+ - The offset logic was removed from DecisionRequester.
### Minor Changes
- Format of console output has changed slightly and now matches the name of the model/summary directory. (#3630, #3616)
diff --git a/com.unity.ml-agents/Runtime/Academy.cs b/com.unity.ml-agents/Runtime/Academy.cs
index 9db61fbb14..3b6efc98d4 100644
--- a/com.unity.ml-agents/Runtime/Academy.cs
+++ b/com.unity.ml-agents/Runtime/Academy.cs
@@ -58,7 +58,7 @@ public class Academy : IDisposable
/// on each side, although we may allow some flexibility in the future.
/// This should be incremented whenever a change is made to the communication protocol.
///
- const string k_ApiVersion = "0.15.0";
+ const string k_ApiVersion = "0.16.0";
///
/// Unity package version of com.unity.ml-agents.
@@ -138,11 +138,14 @@ public bool IsCommunicatorOn
// This will mark the Agent as Done if it has reached its maxSteps.
internal event Action AgentIncrementStep;
- // Signals to all the agents at each environment step along with the
- // Academy's maxStepReached, done and stepCount values. The agents rely
- // on this event to update their own values of max step reached and done
- // in addition to aligning on the step count of the global episode.
- internal event Action<int> AgentSetStatus;
+
+ ///
+ /// Signals to all of the Agents that their step is about to begin.
+ /// This is a good time for an Agent to decide if it would like to
+ /// call Agent.RequestDecision or Agent.RequestAction
+ /// for this step. Any other pre-step setup could be done during this event as well.
+ ///
+ public event Action<int> AgentPreStep;
// Signals to all the agents at each environment step so they can send
// their state to their Policy if they have requested a decision.
@@ -347,7 +350,7 @@ void ResetActions()
{
DecideAction = () => {};
DestroyAction = () => {};
- AgentSetStatus = i => {};
+ AgentPreStep = i => {};
AgentSendState = () => {};
AgentAct = () => {};
AgentForceReset = () => {};
@@ -423,7 +426,7 @@ public void EnvironmentStep()
ForcedFullReset();
}
- AgentSetStatus?.Invoke(m_StepCount);
+ AgentPreStep?.Invoke(m_StepCount);
m_StepCount += 1;
m_TotalStepCount += 1;
diff --git a/com.unity.ml-agents/Runtime/DecisionRequester.cs b/com.unity.ml-agents/Runtime/DecisionRequester.cs
index 64a50fe1ba..3c2fab97d7 100644
--- a/com.unity.ml-agents/Runtime/DecisionRequester.cs
+++ b/com.unity.ml-agents/Runtime/DecisionRequester.cs
@@ -1,3 +1,4 @@
+using System;
using UnityEngine;
using UnityEngine.Serialization;
@@ -8,12 +9,12 @@ namespace MLAgents
/// at regular intervals.
///
[AddComponentMenu("ML Agents/Decision Requester", (int)MenuGroup.Default)]
- internal class DecisionRequester : MonoBehaviour
+ [RequireComponent(typeof(Agent))]
+ public class DecisionRequester : MonoBehaviour
{
///
/// The frequency with which the agent requests a decision. A DecisionPeriod of 5 means
- /// that the Agent will request a decision every 5 Academy steps.
- ///
+ /// that the Agent will request a decision every 5 Academy steps.
[Range(1, 20)]
[Tooltip("The frequency with which the agent requests a decision. A DecisionPeriod " +
"of 5 means that the Agent will request a decision every 5 Academy steps.")]
@@ -29,37 +30,32 @@ internal class DecisionRequester : MonoBehaviour
[FormerlySerializedAs("RepeatAction")]
public bool TakeActionsBetweenDecisions = true;
- ///
- /// Whether or not the Agent decisions should start at an offset (different for each agent).
- /// This does not affect . Turning this on will distribute
- /// the decision-making computations for all the agents across multiple Academy steps.
- /// This can be valuable in scenarios where you have many agents in the scene, particularly
- /// during the inference phase.
- ///
- [Tooltip("Whether or not Agent decisions should start at an offset.")]
- public bool offsetStep;
-
+ [NonSerialized]
Agent m_Agent;
- int m_Offset;
internal void Awake()
{
- m_Offset = offsetStep ? gameObject.GetInstanceID() : 0;
m_Agent = gameObject.GetComponent<Agent>();
- Academy.Instance.AgentSetStatus += MakeRequests;
+ Debug.Assert(m_Agent != null, "Agent component was not found on this gameObject and is required.");
+ Academy.Instance.AgentPreStep += MakeRequests;
}
void OnDestroy()
{
if (Academy.IsInitialized)
{
- Academy.Instance.AgentSetStatus -= MakeRequests;
+ Academy.Instance.AgentPreStep -= MakeRequests;
}
}
- void MakeRequests(int count)
+ ///
+ /// Method that hooks into the Academy in order to inform the Agent on whether or not it should request a
+ /// decision, and whether or not it should take actions between decisions.
+ ///
+ /// The current step count of the academy.
+ void MakeRequests(int academyStepCount)
{
- if ((count + m_Offset) % DecisionPeriod == 0)
+ if (academyStepCount % DecisionPeriod == 0)
{
m_Agent?.RequestDecision();
}
diff --git a/com.unity.ml-agents/Tests/Editor/PublicAPI/PublicApiValidation.cs b/com.unity.ml-agents/Tests/Editor/PublicAPI/PublicApiValidation.cs
index ab6e00b2b2..45f0c6173d 100644
--- a/com.unity.ml-agents/Tests/Editor/PublicAPI/PublicApiValidation.cs
+++ b/com.unity.ml-agents/Tests/Editor/PublicAPI/PublicApiValidation.cs
@@ -4,6 +4,7 @@
using MLAgents.Sensors;
using NUnit.Framework;
using UnityEngine;
+using UnityEngine.TestTools;
namespace MLAgentsExamples
{
@@ -71,106 +72,5 @@ public void CheckSetupRayPerceptionSensorComponent()
sensorComponent.CreateSensor();
}
-
- class PublicApiAgent : Agent
- {
- public int numHeuristicCalls;
-
- public override float[] Heuristic()
- {
- numHeuristicCalls++;
- return base.Heuristic();
- }
- }
-
- // Simple SensorComponent that sets up a StackingSensor
- class StackingComponent : SensorComponent
- {
- public SensorComponent wrappedComponent;
- public int numStacks;
-
- public override ISensor CreateSensor()
- {
- var wrappedSensor = wrappedComponent.CreateSensor();
- return new StackingSensor(wrappedSensor, numStacks);
- }
-
- public override int[] GetObservationShape()
- {
- int[] shape = (int[]) wrappedComponent.GetObservationShape().Clone();
- for (var i = 0; i < shape.Length; i++)
- {
- shape[i] *= numStacks;
- }
-
- return shape;
- }
- }
-
-
- [Test]
- public void CheckSetupAgent()
- {
- var gameObject = new GameObject();
-
- var behaviorParams = gameObject.AddComponent();
- behaviorParams.brainParameters.vectorObservationSize = 3;
- behaviorParams.brainParameters.numStackedVectorObservations = 2;
- behaviorParams.brainParameters.vectorActionDescriptions = new[] { "TestActionA", "TestActionB" };
- behaviorParams.brainParameters.vectorActionSize = new[] { 2, 2 };
- behaviorParams.brainParameters.vectorActionSpaceType = SpaceType.Discrete;
- behaviorParams.behaviorName = "TestBehavior";
- behaviorParams.TeamId = 42;
- behaviorParams.useChildSensors = true;
-
- var agent = gameObject.AddComponent();
- // Make sure we can set the behavior type correctly after the agent is added
- behaviorParams.behaviorType = BehaviorType.InferenceOnly;
- // Can't actually create an Agent with InferenceOnly and no model, so change back
- behaviorParams.behaviorType = BehaviorType.Default;
-
- // TODO - not internal yet
- // var decisionRequester = gameObject.AddComponent();
- // decisionRequester.DecisionPeriod = 2;
-
- var sensorComponent = gameObject.AddComponent();
- sensorComponent.sensorName = "ray3d";
- sensorComponent.detectableTags = new List { "Player", "Respawn" };
- sensorComponent.raysPerDirection = 3;
-
- // Make a StackingSensor that wraps the RayPerceptionSensorComponent3D
- // This isn't necessarily practical, just to ensure that it can be done
- var wrappingSensorComponent = gameObject.AddComponent();
- wrappingSensorComponent.wrappedComponent = sensorComponent;
- wrappingSensorComponent.numStacks = 3;
-
- // ISensor isn't set up yet.
- Assert.IsNull(sensorComponent.raySensor);
-
- agent.LazyInitialize();
- // Make sure we can set the behavior type correctly after the agent is initialized
- // (this creates a new policy).
- behaviorParams.behaviorType = BehaviorType.HeuristicOnly;
-
- // Initialization should set up the sensors
- Assert.IsNotNull(sensorComponent.raySensor);
-
- // Let's change the inference device
- var otherDevice = behaviorParams.inferenceDevice == InferenceDevice.CPU ? InferenceDevice.GPU : InferenceDevice.CPU;
- agent.SetModel(behaviorParams.behaviorName, behaviorParams.model, otherDevice);
-
- agent.AddReward(1.0f);
-
- agent.RequestAction();
- agent.RequestDecision();
-
- Academy.Instance.AutomaticSteppingEnabled = false;
- Academy.Instance.EnvironmentStep();
-
- var actions = agent.GetAction();
- // default Heuristic implementation should return zero actions.
- Assert.AreEqual(new[] {0.0f, 0.0f}, actions);
- Assert.AreEqual(1, agent.numHeuristicCalls);
- }
}
}
diff --git a/com.unity.ml-agents/Tests/Runtime/RuntimeAPITest.cs b/com.unity.ml-agents/Tests/Runtime/RuntimeAPITest.cs
new file mode 100644
index 0000000000..afe452d003
--- /dev/null
+++ b/com.unity.ml-agents/Tests/Runtime/RuntimeAPITest.cs
@@ -0,0 +1,129 @@
+#if UNITY_INCLUDE_TESTS
+using System.Collections;
+using System.Collections.Generic;
+using MLAgents;
+using MLAgents.Policies;
+using MLAgents.Sensors;
+using NUnit.Framework;
+using UnityEngine;
+using UnityEngine.TestTools;
+
+namespace Tests
+{
+
+ public class PublicApiAgent : Agent
+ {
+ public int numHeuristicCalls;
+
+ public override float[] Heuristic()
+ {
+ numHeuristicCalls++;
+ return base.Heuristic();
+ }
+ } // Simple SensorComponent that sets up a StackingSensor
+ public class StackingComponent : SensorComponent
+ {
+ public SensorComponent wrappedComponent;
+ public int numStacks;
+
+ public override ISensor CreateSensor()
+ {
+ var wrappedSensor = wrappedComponent.CreateSensor();
+ return new StackingSensor(wrappedSensor, numStacks);
+ }
+
+ public override int[] GetObservationShape()
+ {
+ int[] shape = (int[]) wrappedComponent.GetObservationShape().Clone();
+ for (var i = 0; i < shape.Length; i++)
+ {
+ shape[i] *= numStacks;
+ }
+
+ return shape;
+ }
+ }
+
+ public class RuntimeApiTest
+ {
+ [SetUp]
+ public static void Setup()
+ {
+ Academy.Instance.AutomaticSteppingEnabled = false;
+ }
+
+ [UnityTest]
+ public IEnumerator RuntimeApiTestWithEnumeratorPasses()
+ {
+ var gameObject = new GameObject();
+
+ var behaviorParams = gameObject.AddComponent<BehaviorParameters>();
+ behaviorParams.brainParameters.vectorObservationSize = 3;
+ behaviorParams.brainParameters.numStackedVectorObservations = 2;
+ behaviorParams.brainParameters.vectorActionDescriptions = new[] { "TestActionA", "TestActionB" };
+ behaviorParams.brainParameters.vectorActionSize = new[] { 2, 2 };
+ behaviorParams.brainParameters.vectorActionSpaceType = SpaceType.Discrete;
+ behaviorParams.behaviorName = "TestBehavior";
+ behaviorParams.TeamId = 42;
+ behaviorParams.useChildSensors = true;
+
+
+ // Can't actually create an Agent with InferenceOnly and no model, so change back
+ behaviorParams.behaviorType = BehaviorType.Default;
+
+ var sensorComponent = gameObject.AddComponent<RayPerceptionSensorComponent3D>();
+ sensorComponent.sensorName = "ray3d";
+ sensorComponent.detectableTags = new List<string> { "Player", "Respawn" };
+ sensorComponent.raysPerDirection = 3;
+
+ // Make a StackingSensor that wraps the RayPerceptionSensorComponent3D
+ // This isn't necessarily practical, just to ensure that it can be done
+ var wrappingSensorComponent = gameObject.AddComponent<StackingComponent>();
+ wrappingSensorComponent.wrappedComponent = sensorComponent;
+ wrappingSensorComponent.numStacks = 3;
+
+ // ISensor isn't set up yet.
+ Assert.IsNull(sensorComponent.raySensor);
+
+
+ // Make sure we can set the behavior type correctly after the agent is initialized
+ // (this creates a new policy).
+ behaviorParams.behaviorType = BehaviorType.HeuristicOnly;
+
+ // Agent needs to be added after everything else is setup.
+ var agent = gameObject.AddComponent<PublicApiAgent>();
+
+ // DecisionRequester has to be added after Agent.
+ var decisionRequester = gameObject.AddComponent<DecisionRequester>();
+ decisionRequester.DecisionPeriod = 2;
+ decisionRequester.TakeActionsBetweenDecisions = true;
+
+
+ // Initialization should set up the sensors
+ Assert.IsNotNull(sensorComponent.raySensor);
+
+ // Let's change the inference device
+ var otherDevice = behaviorParams.inferenceDevice == InferenceDevice.CPU ? InferenceDevice.GPU : InferenceDevice.CPU;
+ agent.SetModel(behaviorParams.behaviorName, behaviorParams.model, otherDevice);
+
+ agent.AddReward(1.0f);
+
+ // skip a frame.
+ yield return null;
+
+ Academy.Instance.EnvironmentStep();
+
+ var actions = agent.GetAction();
+ // default Heuristic implementation should return zero actions.
+ Assert.AreEqual(new[] {0.0f, 0.0f}, actions);
+ Assert.AreEqual(1, agent.numHeuristicCalls);
+
+ Academy.Instance.EnvironmentStep();
+ Assert.AreEqual(1, agent.numHeuristicCalls);
+
+ Academy.Instance.EnvironmentStep();
+ Assert.AreEqual(2, agent.numHeuristicCalls);
+ }
+ }
+}
+#endif
diff --git a/com.unity.ml-agents/Tests/Runtime/RuntimeAPITest.cs.meta b/com.unity.ml-agents/Tests/Runtime/RuntimeAPITest.cs.meta
new file mode 100644
index 0000000000..5f7821402b
--- /dev/null
+++ b/com.unity.ml-agents/Tests/Runtime/RuntimeAPITest.cs.meta
@@ -0,0 +1,11 @@
+fileFormatVersion: 2
+guid: 17878576e4ed14b09875e37394e5ad90
+MonoImporter:
+ externalObjects: {}
+ serializedVersion: 2
+ defaultReferences: []
+ executionOrder: 0
+ icon: {instanceID: 0}
+ userData:
+ assetBundleName:
+ assetBundleVariant:
diff --git a/com.unity.ml-agents/Tests/Runtime/SerializeTestScene.unity b/com.unity.ml-agents/Tests/Runtime/SerializeTestScene.unity
deleted file mode 100644
index 954c52ce66..0000000000
--- a/com.unity.ml-agents/Tests/Runtime/SerializeTestScene.unity
+++ /dev/null
@@ -1,429 +0,0 @@
-%YAML 1.1
-%TAG !u! tag:unity3d.com,2011:
---- !u!29 &1
-OcclusionCullingSettings:
- m_ObjectHideFlags: 0
- serializedVersion: 2
- m_OcclusionBakeSettings:
- smallestOccluder: 5
- smallestHole: 0.25
- backfaceThreshold: 100
- m_SceneGUID: 00000000000000000000000000000000
- m_OcclusionCullingData: {fileID: 0}
---- !u!104 &2
-RenderSettings:
- m_ObjectHideFlags: 0
- serializedVersion: 9
- m_Fog: 0
- m_FogColor: {r: 0.5, g: 0.5, b: 0.5, a: 1}
- m_FogMode: 3
- m_FogDensity: 0.01
- m_LinearFogStart: 0
- m_LinearFogEnd: 300
- m_AmbientSkyColor: {r: 0.212, g: 0.227, b: 0.259, a: 1}
- m_AmbientEquatorColor: {r: 0.114, g: 0.125, b: 0.133, a: 1}
- m_AmbientGroundColor: {r: 0.047, g: 0.043, b: 0.035, a: 1}
- m_AmbientIntensity: 1
- m_AmbientMode: 0
- m_SubtractiveShadowColor: {r: 0.42, g: 0.478, b: 0.627, a: 1}
- m_SkyboxMaterial: {fileID: 10304, guid: 0000000000000000f000000000000000, type: 0}
- m_HaloStrength: 0.5
- m_FlareStrength: 1
- m_FlareFadeSpeed: 3
- m_HaloTexture: {fileID: 0}
- m_SpotCookie: {fileID: 10001, guid: 0000000000000000e000000000000000, type: 0}
- m_DefaultReflectionMode: 0
- m_DefaultReflectionResolution: 128
- m_ReflectionBounces: 1
- m_ReflectionIntensity: 1
- m_CustomReflection: {fileID: 0}
- m_Sun: {fileID: 0}
- m_IndirectSpecularColor: {r: 0.44657898, g: 0.49641287, b: 0.5748173, a: 1}
- m_UseRadianceAmbientProbe: 0
---- !u!157 &3
-LightmapSettings:
- m_ObjectHideFlags: 0
- serializedVersion: 11
- m_GIWorkflowMode: 0
- m_GISettings:
- serializedVersion: 2
- m_BounceScale: 1
- m_IndirectOutputScale: 1
- m_AlbedoBoost: 1
- m_EnvironmentLightingMode: 0
- m_EnableBakedLightmaps: 1
- m_EnableRealtimeLightmaps: 1
- m_LightmapEditorSettings:
- serializedVersion: 10
- m_Resolution: 2
- m_BakeResolution: 40
- m_AtlasSize: 1024
- m_AO: 0
- m_AOMaxDistance: 1
- m_CompAOExponent: 1
- m_CompAOExponentDirect: 0
- m_Padding: 2
- m_LightmapParameters: {fileID: 0}
- m_LightmapsBakeMode: 1
- m_TextureCompression: 1
- m_FinalGather: 0
- m_FinalGatherFiltering: 1
- m_FinalGatherRayCount: 256
- m_ReflectionCompression: 2
- m_MixedBakeMode: 2
- m_BakeBackend: 1
- m_PVRSampling: 1
- m_PVRDirectSampleCount: 32
- m_PVRSampleCount: 500
- m_PVRBounces: 2
- m_PVRFilterTypeDirect: 0
- m_PVRFilterTypeIndirect: 0
- m_PVRFilterTypeAO: 0
- m_PVRFilteringMode: 1
- m_PVRCulling: 1
- m_PVRFilteringGaussRadiusDirect: 1
- m_PVRFilteringGaussRadiusIndirect: 5
- m_PVRFilteringGaussRadiusAO: 2
- m_PVRFilteringAtrousPositionSigmaDirect: 0.5
- m_PVRFilteringAtrousPositionSigmaIndirect: 2
- m_PVRFilteringAtrousPositionSigmaAO: 1
- m_ShowResolutionOverlay: 1
- m_LightingDataAsset: {fileID: 0}
- m_UseShadowmask: 1
---- !u!196 &4
-NavMeshSettings:
- serializedVersion: 2
- m_ObjectHideFlags: 0
- m_BuildSettings:
- serializedVersion: 2
- agentTypeID: 0
- agentRadius: 0.5
- agentHeight: 2
- agentSlope: 45
- agentClimb: 0.4
- ledgeDropHeight: 0
- maxJumpAcrossDistance: 0
- minRegionArea: 2
- manualCellSize: 0
- cellSize: 0.16666667
- manualTileSize: 0
- tileSize: 256
- accuratePlacement: 0
- debug:
- m_Flags: 0
- m_NavMeshData: {fileID: 0}
---- !u!1 &106586301
-GameObject:
- m_ObjectHideFlags: 0
- m_CorrespondingSourceObject: {fileID: 0}
- m_PrefabInstance: {fileID: 0}
- m_PrefabAsset: {fileID: 0}
- serializedVersion: 6
- m_Component:
- - component: {fileID: 106586304}
- - component: {fileID: 106586303}
- - component: {fileID: 106586302}
- m_Layer: 0
- m_Name: Agent
- m_TagString: Untagged
- m_Icon: {fileID: 0}
- m_NavMeshLayer: 0
- m_StaticEditorFlags: 0
- m_IsActive: 1
---- !u!114 &106586302
-MonoBehaviour:
- m_ObjectHideFlags: 0
- m_CorrespondingSourceObject: {fileID: 0}
- m_PrefabInstance: {fileID: 0}
- m_PrefabAsset: {fileID: 0}
- m_GameObject: {fileID: 106586301}
- m_Enabled: 1
- m_EditorHideFlags: 0
- m_Script: {fileID: 11500000, guid: c3d607733e457478885f15ee89725709, type: 3}
- m_Name:
- m_EditorClassIdentifier:
- agentParameters:
- maxStep: 5000
- hasUpgradedFromAgentParameters: 1
- maxStep: 5000
---- !u!114 &106586303
-MonoBehaviour:
- m_ObjectHideFlags: 0
- m_CorrespondingSourceObject: {fileID: 0}
- m_PrefabInstance: {fileID: 0}
- m_PrefabAsset: {fileID: 0}
- m_GameObject: {fileID: 106586301}
- m_Enabled: 1
- m_EditorHideFlags: 0
- m_Script: {fileID: 11500000, guid: 5d1c4e0b1822b495aa52bc52839ecb30, type: 3}
- m_Name:
- m_EditorClassIdentifier:
- m_BrainParameters:
- vectorObservationSize: 1
- numStackedVectorObservations: 1
- vectorActionSize: 01000000
- vectorActionDescriptions: []
- vectorActionSpaceType: 0
- m_Model: {fileID: 0}
- m_InferenceDevice: 0
- m_BehaviorType: 0
- m_BehaviorName: My Behavior
- m_TeamID: 0
- m_UseChildSensors: 1
---- !u!4 &106586304
-Transform:
- m_ObjectHideFlags: 0
- m_CorrespondingSourceObject: {fileID: 0}
- m_PrefabInstance: {fileID: 0}
- m_PrefabAsset: {fileID: 0}
- m_GameObject: {fileID: 106586301}
- m_LocalRotation: {x: 0, y: 0, z: 0, w: 1}
- m_LocalPosition: {x: 0, y: 0, z: 0}
- m_LocalScale: {x: 1, y: 1, z: 1}
- m_Children:
- - {fileID: 1471486645}
- m_Father: {fileID: 0}
- m_RootOrder: 2
- m_LocalEulerAnglesHint: {x: 0, y: 0, z: 0}
---- !u!1 &185701317
-GameObject:
- m_ObjectHideFlags: 0
- m_CorrespondingSourceObject: {fileID: 0}
- m_PrefabInstance: {fileID: 0}
- m_PrefabAsset: {fileID: 0}
- serializedVersion: 6
- m_Component:
- - component: {fileID: 185701319}
- - component: {fileID: 185701318}
- m_Layer: 0
- m_Name: Directional Light
- m_TagString: Untagged
- m_Icon: {fileID: 0}
- m_NavMeshLayer: 0
- m_StaticEditorFlags: 0
- m_IsActive: 1
---- !u!108 &185701318
-Light:
- m_ObjectHideFlags: 0
- m_CorrespondingSourceObject: {fileID: 0}
- m_PrefabInstance: {fileID: 0}
- m_PrefabAsset: {fileID: 0}
- m_GameObject: {fileID: 185701317}
- m_Enabled: 1
- serializedVersion: 8
- m_Type: 1
- m_Color: {r: 1, g: 0.95686275, b: 0.8392157, a: 1}
- m_Intensity: 1
- m_Range: 10
- m_SpotAngle: 30
- m_CookieSize: 10
- m_Shadows:
- m_Type: 2
- m_Resolution: -1
- m_CustomResolution: -1
- m_Strength: 1
- m_Bias: 0.05
- m_NormalBias: 0.4
- m_NearPlane: 0.2
- m_Cookie: {fileID: 0}
- m_DrawHalo: 0
- m_Flare: {fileID: 0}
- m_RenderMode: 0
- m_CullingMask:
- serializedVersion: 2
- m_Bits: 4294967295
- m_Lightmapping: 4
- m_LightShadowCasterMode: 0
- m_AreaSize: {x: 1, y: 1}
- m_BounceIntensity: 1
- m_ColorTemperature: 6570
- m_UseColorTemperature: 0
- m_ShadowRadius: 0
- m_ShadowAngle: 0
---- !u!4 &185701319
-Transform:
- m_ObjectHideFlags: 0
- m_CorrespondingSourceObject: {fileID: 0}
- m_PrefabInstance: {fileID: 0}
- m_PrefabAsset: {fileID: 0}
- m_GameObject: {fileID: 185701317}
- m_LocalRotation: {x: 0.40821788, y: -0.23456968, z: 0.10938163, w: 0.8754261}
- m_LocalPosition: {x: 0, y: 3, z: 0}
- m_LocalScale: {x: 1, y: 1, z: 1}
- m_Children: []
- m_Father: {fileID: 0}
- m_RootOrder: 1
- m_LocalEulerAnglesHint: {x: 50, y: -30, z: 0}
---- !u!1 &804630118
-GameObject:
- m_ObjectHideFlags: 0
- m_CorrespondingSourceObject: {fileID: 0}
- m_PrefabInstance: {fileID: 0}
- m_PrefabAsset: {fileID: 0}
- serializedVersion: 6
- m_Component:
- - component: {fileID: 804630121}
- - component: {fileID: 804630120}
- - component: {fileID: 804630119}
- m_Layer: 0
- m_Name: Main Camera
- m_TagString: MainCamera
- m_Icon: {fileID: 0}
- m_NavMeshLayer: 0
- m_StaticEditorFlags: 0
- m_IsActive: 1
---- !u!81 &804630119
-AudioListener:
- m_ObjectHideFlags: 0
- m_CorrespondingSourceObject: {fileID: 0}
- m_PrefabInstance: {fileID: 0}
- m_PrefabAsset: {fileID: 0}
- m_GameObject: {fileID: 804630118}
- m_Enabled: 1
---- !u!20 &804630120
-Camera:
- m_ObjectHideFlags: 0
- m_CorrespondingSourceObject: {fileID: 0}
- m_PrefabInstance: {fileID: 0}
- m_PrefabAsset: {fileID: 0}
- m_GameObject: {fileID: 804630118}
- m_Enabled: 1
- serializedVersion: 2
- m_ClearFlags: 1
- m_BackGroundColor: {r: 0.19215687, g: 0.3019608, b: 0.4745098, a: 0}
- m_projectionMatrixMode: 1
- m_SensorSize: {x: 36, y: 24}
- m_LensShift: {x: 0, y: 0}
- m_GateFitMode: 2
- m_FocalLength: 50
- m_NormalizedViewPortRect:
- serializedVersion: 2
- x: 0
- y: 0
- width: 1
- height: 1
- near clip plane: 0.3
- far clip plane: 1000
- field of view: 60
- orthographic: 0
- orthographic size: 5
- m_Depth: -1
- m_CullingMask:
- serializedVersion: 2
- m_Bits: 4294967295
- m_RenderingPath: -1
- m_TargetTexture: {fileID: 0}
- m_TargetDisplay: 0
- m_TargetEye: 3
- m_HDR: 1
- m_AllowMSAA: 1
- m_AllowDynamicResolution: 0
- m_ForceIntoRT: 0
- m_OcclusionCulling: 1
- m_StereoConvergence: 10
- m_StereoSeparation: 0.022
---- !u!4 &804630121
-Transform:
- m_ObjectHideFlags: 0
- m_CorrespondingSourceObject: {fileID: 0}
- m_PrefabInstance: {fileID: 0}
- m_PrefabAsset: {fileID: 0}
- m_GameObject: {fileID: 804630118}
- m_LocalRotation: {x: 0, y: 0, z: 0, w: 1}
- m_LocalPosition: {x: 0, y: 1, z: -10}
- m_LocalScale: {x: 1, y: 1, z: 1}
- m_Children: []
- m_Father: {fileID: 0}
- m_RootOrder: 0
- m_LocalEulerAnglesHint: {x: 0, y: 0, z: 0}
---- !u!1 &1471486644
-GameObject:
- m_ObjectHideFlags: 0
- m_CorrespondingSourceObject: {fileID: 0}
- m_PrefabInstance: {fileID: 0}
- m_PrefabAsset: {fileID: 0}
- serializedVersion: 6
- m_Component:
- - component: {fileID: 1471486645}
- - component: {fileID: 1471486648}
- - component: {fileID: 1471486647}
- - component: {fileID: 1471486646}
- m_Layer: 0
- m_Name: Cube
- m_TagString: Untagged
- m_Icon: {fileID: 0}
- m_NavMeshLayer: 0
- m_StaticEditorFlags: 0
- m_IsActive: 1
---- !u!4 &1471486645
-Transform:
- m_ObjectHideFlags: 0
- m_CorrespondingSourceObject: {fileID: 0}
- m_PrefabInstance: {fileID: 0}
- m_PrefabAsset: {fileID: 0}
- m_GameObject: {fileID: 1471486644}
- m_LocalRotation: {x: 0, y: 0, z: 0, w: 1}
- m_LocalPosition: {x: 0, y: 0, z: 0}
- m_LocalScale: {x: 1, y: 1, z: 1}
- m_Children: []
- m_Father: {fileID: 106586304}
- m_RootOrder: 0
- m_LocalEulerAnglesHint: {x: 0, y: 0, z: 0}
---- !u!65 &1471486646
-BoxCollider:
- m_ObjectHideFlags: 0
- m_CorrespondingSourceObject: {fileID: 0}
- m_PrefabInstance: {fileID: 0}
- m_PrefabAsset: {fileID: 0}
- m_GameObject: {fileID: 1471486644}
- m_Material: {fileID: 0}
- m_IsTrigger: 0
- m_Enabled: 1
- serializedVersion: 2
- m_Size: {x: 1, y: 1, z: 1}
- m_Center: {x: 0, y: 0, z: 0}
---- !u!23 &1471486647
-MeshRenderer:
- m_ObjectHideFlags: 0
- m_CorrespondingSourceObject: {fileID: 0}
- m_PrefabInstance: {fileID: 0}
- m_PrefabAsset: {fileID: 0}
- m_GameObject: {fileID: 1471486644}
- m_Enabled: 1
- m_CastShadows: 1
- m_ReceiveShadows: 1
- m_DynamicOccludee: 1
- m_MotionVectors: 1
- m_LightProbeUsage: 1
- m_ReflectionProbeUsage: 1
- m_RenderingLayerMask: 1
- m_RendererPriority: 0
- m_Materials:
- - {fileID: 10303, guid: 0000000000000000f000000000000000, type: 0}
- m_StaticBatchInfo:
- firstSubMesh: 0
- subMeshCount: 0
- m_StaticBatchRoot: {fileID: 0}
- m_ProbeAnchor: {fileID: 0}
- m_LightProbeVolumeOverride: {fileID: 0}
- m_ScaleInLightmap: 1
- m_PreserveUVs: 0
- m_IgnoreNormalsForChartDetection: 0
- m_ImportantGI: 0
- m_StitchLightmapSeams: 0
- m_SelectedEditorRenderState: 3
- m_MinimumChartSize: 4
- m_AutoUVMaxDistance: 0.5
- m_AutoUVMaxAngle: 89
- m_LightmapParameters: {fileID: 0}
- m_SortingLayerID: 0
- m_SortingLayer: 0
- m_SortingOrder: 0
---- !u!33 &1471486648
-MeshFilter:
- m_ObjectHideFlags: 0
- m_CorrespondingSourceObject: {fileID: 0}
- m_PrefabInstance: {fileID: 0}
- m_PrefabAsset: {fileID: 0}
- m_GameObject: {fileID: 1471486644}
- m_Mesh: {fileID: 10202, guid: 0000000000000000e000000000000000, type: 0}
diff --git a/com.unity.ml-agents/Tests/Runtime/Unity.ML-Agents.Runtime.Tests.asmdef b/com.unity.ml-agents/Tests/Runtime/Unity.ML-Agents.Runtime.Tests.asmdef
new file mode 100644
index 0000000000..16cbc07f52
--- /dev/null
+++ b/com.unity.ml-agents/Tests/Runtime/Unity.ML-Agents.Runtime.Tests.asmdef
@@ -0,0 +1,25 @@
+{
+ "name": "Tests",
+ "references": [
+ "Unity.ML-Agents",
+ "Barracuda",
+ "Unity.ML-Agents.CommunicatorObjects",
+ "Unity.ML-Agents.Editor"
+ ],
+ "optionalUnityReferences": [
+ "TestAssemblies"
+ ],
+ "includePlatforms": [],
+ "excludePlatforms": [],
+ "allowUnsafeCode": false,
+ "overrideReferences": true,
+ "precompiledReferences": [
+ "System.IO.Abstractions.dll",
+ "System.IO.Abstractions.TestingHelpers.dll",
+ "Google.Protobuf.dll"
+ ],
+ "autoReferenced": false,
+ "defineConstraints": [
+ "UNITY_INCLUDE_TESTS"
+ ]
+}
diff --git a/com.unity.ml-agents/Tests/Runtime/SerializeTestScene.unity.meta b/com.unity.ml-agents/Tests/Runtime/Unity.ML-Agents.Runtime.Tests.asmdef.meta
similarity index 59%
rename from com.unity.ml-agents/Tests/Runtime/SerializeTestScene.unity.meta
rename to com.unity.ml-agents/Tests/Runtime/Unity.ML-Agents.Runtime.Tests.asmdef.meta
index 5b95bd43f8..4fa9a793f6 100644
--- a/com.unity.ml-agents/Tests/Runtime/SerializeTestScene.unity.meta
+++ b/com.unity.ml-agents/Tests/Runtime/Unity.ML-Agents.Runtime.Tests.asmdef.meta
@@ -1,6 +1,6 @@
fileFormatVersion: 2
-guid: 60783bd849bd242eeb66243542762b23
-DefaultImporter:
+guid: d29014db7ebcd4cf4a14f537fbf02110
+AssemblyDefinitionImporter:
externalObjects: {}
userData:
assetBundleName:
diff --git a/ml-agents-envs/mlagents_envs/environment.py b/ml-agents-envs/mlagents_envs/environment.py
index d750cbe90d..70545c4dbf 100644
--- a/ml-agents-envs/mlagents_envs/environment.py
+++ b/ml-agents-envs/mlagents_envs/environment.py
@@ -58,7 +58,7 @@ class UnityEnvironment(BaseEnv):
# Currently we require strict equality between the communication protocol
# on each side, although we may allow some flexibility in the future.
# This should be incremented whenever a change is made to the communication protocol.
- API_VERSION = "0.15.0"
+ API_VERSION = "0.16.0"
# Default port that the editor listens on. If an environment executable
# isn't specified, this port will be used.
diff --git a/ml-agents/tests/yamato/scripts/run_llapi.py b/ml-agents/tests/yamato/scripts/run_llapi.py
index 1783ae9a00..a20f5f8553 100644
--- a/ml-agents/tests/yamato/scripts/run_llapi.py
+++ b/ml-agents/tests/yamato/scripts/run_llapi.py
@@ -89,6 +89,6 @@ def main(env_name):
if __name__ == "__main__":
parser = argparse.ArgumentParser()
- parser.add_argument("--env", default="Project/testPlayer")
+ parser.add_argument("--env", default="artifacts/testPlayer")
args = parser.parse_args()
main(args.env)
diff --git a/ml-agents/tests/yamato/standalone_build_tests.py b/ml-agents/tests/yamato/standalone_build_tests.py
index f71d8259f8..0a16224d20 100644
--- a/ml-agents/tests/yamato/standalone_build_tests.py
+++ b/ml-agents/tests/yamato/standalone_build_tests.py
@@ -15,7 +15,7 @@ def main(scene_path):
executable_name = "testPlayer-" + executable_name
returncode = run_standalone_build(
- base_path, verbose=True, output_path=executable_name, scene_path=scene_path
+ base_path, output_path=executable_name, scene_path=scene_path
)
if returncode == 0:
diff --git a/ml-agents/tests/yamato/training_int_tests.py b/ml-agents/tests/yamato/training_int_tests.py
index 7ff9074b06..5900e83920 100644
--- a/ml-agents/tests/yamato/training_int_tests.py
+++ b/ml-agents/tests/yamato/training_int_tests.py
@@ -6,6 +6,7 @@
from .yamato_utils import (
get_base_path,
+ get_base_output_path,
run_standalone_build,
init_venv,
override_config_file,
@@ -34,9 +35,12 @@ def run_training(python_version, csharp_version):
if csharp_version is not None:
# We can't rely on the old C# code recognizing the commandline argument to set the output
# So rename testPlayer (containing the most recent build) to something else temporarily
- full_player_path = os.path.join("Project", "testPlayer.app")
- temp_player_path = os.path.join("Project", "temp_testPlayer.app")
- final_player_path = os.path.join("Project", f"testPlayer_{csharp_version}.app")
+ artifact_path = get_base_output_path()
+ full_player_path = os.path.join(artifact_path, "testPlayer.app")
+ temp_player_path = os.path.join(artifact_path, "temp_testPlayer.app")
+ final_player_path = os.path.join(
+ artifact_path, f"testPlayer_{csharp_version}.app"
+ )
os.rename(full_player_path, temp_player_path)
@@ -67,7 +71,8 @@ def run_training(python_version, csharp_version):
)
mla_learn_cmd = (
- f"mlagents-learn override.yaml --train --env=Project/{standalone_player_path} "
+ f"mlagents-learn override.yaml --train --env="
+ f"{os.path.join(get_base_output_path(), standalone_player_path)} "
f"--run-id={run_id} --no-graphics --env-args -logFile -"
) # noqa
res = subprocess.run(
diff --git a/ml-agents/tests/yamato/yamato_utils.py b/ml-agents/tests/yamato/yamato_utils.py
index bcbe7abf9b..8a208b18e5 100644
--- a/ml-agents/tests/yamato/yamato_utils.py
+++ b/ml-agents/tests/yamato/yamato_utils.py
@@ -1,4 +1,5 @@
import os
+import shutil
import subprocess
import yaml
from typing import List, Optional
@@ -24,14 +25,23 @@ def get_base_path():
return os.getcwd()
+def get_base_output_path():
+ """"
+ Returns the artifact folder to use for yamato jobs.
+ """
+ return os.path.join(get_base_path(), "artifacts")
+
+
def run_standalone_build(
base_path: str,
verbose: bool = False,
output_path: str = None,
scene_path: str = None,
+ log_output_path: str = f"{get_base_output_path()}/standalone_build.txt",
) -> int:
"""
- Run BuildStandalonePlayerOSX test to produce a player. The location defaults to Project/testPlayer.
+ Run BuildStandalonePlayerOSX test to produce a player. The location defaults to
+ artifacts/standalone_build/testPlayer.
"""
unity_exe = get_unity_executable_path()
print(f"Running BuildStandalonePlayerOSX via {unity_exe}")
@@ -44,16 +54,33 @@ def run_standalone_build(
"-executeMethod",
"MLAgents.StandaloneBuildTest.BuildStandalonePlayerOSX",
]
- if verbose:
- test_args += ["-logfile", "-"]
+
+ os.makedirs(os.path.dirname(log_output_path), exist_ok=True)
+ subprocess.run(["touch", log_output_path])
+ test_args += ["-logfile", log_output_path]
+
if output_path is not None:
+ output_path = os.path.join(get_base_output_path(), output_path)
test_args += ["--mlagents-build-output-path", output_path]
+ os.makedirs(os.path.dirname(output_path), exist_ok=True)
if scene_path is not None:
test_args += ["--mlagents-build-scene-path", scene_path]
print(f"{' '.join(test_args)} ...")
timeout = 30 * 60 # 30 minutes, just in case
res: subprocess.CompletedProcess = subprocess.run(test_args, timeout=timeout)
+
+    # Move the default build output into the artifacts folder.
+ if output_path is None and res.returncode == 0:
+ shutil.move(
+ os.path.join(base_path, "Project", "testPlayer.app"),
+ os.path.join(get_base_output_path(), "testPlayer.app"),
+ )
+
+ # Print if we fail or want verbosity.
+ if verbose or res.returncode != 0:
+ subprocess.run(["cat", log_output_path])
+
return res.returncode
@@ -107,8 +134,10 @@ def checkout_csharp_version(csharp_version):
"""
if csharp_version is None:
return
+
csharp_dirs = ["com.unity.ml-agents", "Project"]
for csharp_dir in csharp_dirs:
+ subprocess.check_call(f"rm -rf {csharp_dir}", shell=True)
subprocess.check_call(
f"git checkout {csharp_version} -- {csharp_dir}", shell=True
)
@@ -120,6 +149,8 @@ def undo_git_checkout():
"""
subprocess.check_call("git reset HEAD .", shell=True)
subprocess.check_call("git checkout -- .", shell=True)
+ # Ensure the cache isn't polluted with old compiled assemblies.
+ subprocess.check_call(f"rm -rf Project/Library", shell=True)
def override_config_file(src_path, dest_path, **kwargs):