C++ CLI: Create results.json and handle analysis.json (create objectives.json) #5028

Merged · 10 commits · Nov 13, 2023
@@ -16,6 +16,12 @@ def arguments(model = nil)
return args
end #end the arguments method

def outputs
result = OpenStudio::Measure::OSOutputVector.new
result << OpenStudio::Measure::OSOutput.makeDoubleOutput('net_site_energy', false)
return result
end

#define what happens when the measure is run
def run(runner, user_arguments)
super(runner, user_arguments)
@@ -130,6 +136,9 @@ def run(runner, user_arguments)
end
end

runner.registerValue("net_site_energy", "Net Site Energy", sqlFile.netSiteEnergy.get, "GJ")
runner.registerValue("something!with.invalid_chars_", "Test Sanitizing", 1, "")

#closing the sql file
sqlFile.close()

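The second registerValue call above deliberately uses a key with invalid characters; the CLI sanitizes it before serializing (the pytest later in this PR expects something_with_invalid_chars). A minimal Python sketch of the rule those assertions imply — an inference, not the actual C++ implementation:

import re

def sanitize_key(name: str) -> str:
    # Runs of non-alphanumeric characters become a single underscore, and
    # leading/trailing underscores are trimmed (inferred from the test
    # expectations in this PR, not from the C++ source)
    return re.sub(r"[^a-zA-Z0-9]+", "_", name).strip("_")

assert sanitize_key("something!with.invalid_chars_") == "something_with_invalid_chars"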
14 changes: 14 additions & 0 deletions resources/Examples/compact_osw/update_seb_model.rb
@@ -0,0 +1,14 @@
# Just for convenience: having to version-translate (VT) the seb model, which is
# at 1.11.5, can be very time-consuming, especially on debug builds

require 'openstudio'

model_path = File.join(__dir__, 'files/seb.osm')
raise "#{model_path} not found" if !File.file?(model_path)

# Starting at 3.7.0 there is no need to explicitly call VT, but we still do it
# m = OpenStudio::Model::Model::load(model_path).get()
vt = OpenStudio::OSVersion::VersionTranslator.new
OpenStudio::Logger.instance.standardOutLogger.setLogLevel(OpenStudio::Debug)
m = vt.loadModel(model_path).get()
m.save(model_path, true)
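For reference, this script is wired up in src/cli/CMakeLists.txt below as the update_seb_osm_in_build_dir custom target, which runs it in the build tree via openstudio execute_ruby_script.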
50 changes: 50 additions & 0 deletions resources/Examples/with_analysis/analysis.json
@@ -0,0 +1,50 @@
{
"analysis": {
"display_name": "Test With Analysis JSON",
"name": "analysis_json",
"output_variables": [
{
"objective_function": true,
"name": "FakeReport.net_site_energy",
"objective_function_index": 0,
"objective_function_target": 0,
"objective_function_group": 1,
"scaling_factor": 1.0,
"display_name": "Net Site Energy, should be there",
"display_name_short": "net_site_energy",
"metadata_id": null,
"visualize": true,
"export": true,
"variable_type": "double"
},
{
"objective_function": false,
"name": "FakeReport.net_site_energy",
"objective_function_index": 1,
"objective_function_target": 0,
"objective_function_group": 1,
"scaling_factor": 1.0,
"display_name": "net_site_energy",
"display_name_short": "net_site_energy",
"metadata_id": null,
"visualize": true,
"export": true,
"variable_type": "double"
},
{
"objective_function": true,
"name": "IsNonExisting.NonExisting",
"objective_function_index": 2,
"objective_function_target": 0,
"objective_function_group": 1,
"scaling_factor": 1.0,
"display_name": "net_site_energy",
"display_name_short": "net_site_energy",
"metadata_id": null,
"visualize": true,
"export": true,
"variable_type": "double"
}
]
}
}
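Judging from the assertions in test_with_analysis.py further down, the CLI appears to turn the output_variables above into objectives.json roughly as follows. This is a hedged Python sketch of the mapping, not the actual C++ code:

DBL_MAX = 1.7976931348623157e308  # sentinel for an objective whose value was never registered

def build_objectives(analysis: dict, measure_values: dict) -> dict:
    # measure_values maps "MeasureName.value_name" to the registered value,
    # e.g. {"FakeReport.net_site_energy": 167.1}
    objectives = {}
    for var in analysis["analysis"]["output_variables"]:
        if not var["objective_function"]:
            continue  # the second entry above is skipped for exactly this reason
        n = var["objective_function_index"] + 1  # key suffixes appear to be 1-based
        if var["name"] in measure_values:
            objectives[f"objective_function_{n}"] = measure_values[var["name"]]
            objectives[f"objective_function_group_{n}"] = float(var["objective_function_group"])
            objectives[f"objective_function_target_{n}"] = float(var["objective_function_target"])
            objectives[f"scaling_factor_{n}"] = float(var["scaling_factor"])
        else:  # e.g. IsNonExisting.NonExisting
            objectives[f"objective_function_{n}"] = DBL_MAX
            objectives[f"objective_function_group_{n}"] = None
            objectives[f"objective_function_target_{n}"] = None
            objectives[f"scaling_factor_{n}"] = None
    return objectives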
@@ -0,0 +1,43 @@
require 'openstudio'

class FakeReport < OpenStudio::Measure::ReportingMeasure

def name
return "Fake Report"
end

#define the arguments that the user will input
def arguments(model = nil)
args = OpenStudio::Measure::OSArgumentVector.new

return args
end #end the arguments method

def outputs
result = OpenStudio::Measure::OSOutputVector.new
result << OpenStudio::Measure::OSOutput.makeDoubleOutput('net_site_energy', false)
return result
end

#define what happens when the measure is run
def run(runner, user_arguments)
super(runner, user_arguments)

#use the built-in error checking
if not runner.validateUserArguments(arguments(), user_arguments)
return false
end

# Register some constant values, so that this measure also works when we only
# do post-processing
runner.registerValue("net_site_energy", "Net Site Energy", 167.1, "GJ")
runner.registerValue("something!with.invalid_chars_", "Test Sanitizing", 1, "")
runner.registerFinalCondition("Goodbye.")

return true

end

end

FakeReport.new.registerWithApplication
@@ -0,0 +1,57 @@
<?xml version="1.0"?>
<measure>
<schema_version>3.1</schema_version>
<name>fake_report</name>
<uid>ca6ba362-ea57-4236-b803-17e37b0c0817</uid>
<version_id>32617b1d-91b1-4325-9a3a-0708b9853d29</version_id>
<version_modified>2023-11-13T17:08:41Z</version_modified>
<xml_checksum>B2AD275E</xml_checksum>
<class_name>FakeReport</class_name>
<display_name>Fake Report</display_name>
<description>Change me</description>
<modeler_description>Change me</modeler_description>
<arguments />
<outputs>
<output>
<name>net_site_energy</name>
<display_name>net_site_energy</display_name>
<short_name>net_site_energy</short_name>
<type>Double</type>
<model_dependent>false</model_dependent>
</output>
</outputs>
<provenances />
<tags>
<tag>Reporting.QAQC</tag>
</tags>
<attributes>
<attribute>
<name>Measure Type</name>
<value>ReportingMeasure</value>
<datatype>string</datatype>
</attribute>
<attribute>
<name>Measure Language</name>
<value>Ruby</value>
<datatype>string</datatype>
</attribute>
<attribute>
<name>Uses SketchUp API</name>
<value>false</value>
<datatype>boolean</datatype>
</attribute>
</attributes>
<files>
<file>
<version>
<software_program>OpenStudio</software_program>
<identifier>1.1.2</identifier>
<min_compatible>1.1.2</min_compatible>
</version>
<filename>measure.rb</filename>
<filetype>rb</filetype>
<usage_type>script</usage_type>
<checksum>4CFB5158</checksum>
</file>
</files>
</measure>
8 changes: 8 additions & 0 deletions resources/Examples/with_analysis/local/with_analysis.osw
@@ -0,0 +1,8 @@
{
"steps": [
{
"measure_dir_name": "FakeReport",
"arguments": {}
}
]
}
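The pytest below drives this workflow in post-processing mode: it writes a copy with run_directory set, then invokes openstudio run --postprocess_only -w with_analysis_<suffix>.osw, prepending the classic subcommand to also exercise the Ruby-based CLI.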
12 changes: 12 additions & 0 deletions src/cli/CMakeLists.txt
@@ -83,6 +83,12 @@ endif()

if(BUILD_TESTING)

# Just for convenience: having to version-translate (VT) the seb model which is at 1.11.5 can be very time-consuming, especially on debug builds
add_custom_target(update_seb_osm_in_build_dir
COMMAND $<TARGET_FILE:openstudio> execute_ruby_script "${PROJECT_BINARY_DIR}/resources/Examples/compact_osw/update_seb_model.rb"
DEPENDS openstudio
)

add_test(NAME OpenStudioCLI.help
COMMAND $<TARGET_FILE:openstudio> --help
)
@@ -242,6 +248,12 @@ if(BUILD_TESTING)
add_test(NAME OpenStudioCLI.test_loglevel
COMMAND ${Python_EXECUTABLE} -m pytest --verbose ${Pytest_XDIST_OPTS} --os-cli-path $<TARGET_FILE:openstudio> "${CMAKE_CURRENT_SOURCE_DIR}/test/test_loglevel.py"
)

# No Xdist on purpose here
add_test(NAME OpenStudioCLI.test_with_analysis
COMMAND ${Python_EXECUTABLE} -m pytest --verbose --os-cli-path $<TARGET_FILE:openstudio> "${CMAKE_CURRENT_SOURCE_DIR}/test/test_with_analysis.py"
WORKING_DIRECTORY "${PROJECT_BINARY_DIR}/resources/Examples/with_analysis/local/"
)
else()
# TODO: Remove. Fall back on these for now, as I don't know if CI has pytest installed
add_test(NAME OpenStudioCLI.Classic.test_logger_rb
87 changes: 87 additions & 0 deletions src/cli/test/test_with_analysis.py
@@ -0,0 +1,87 @@
import json
import subprocess
from pathlib import Path

import pytest


@pytest.mark.parametrize(
"is_labs",
[pytest.param(True, id="labs"), pytest.param(False, id="classic")],
)
def test_run_with_analysis(osclipath, is_labs: bool):
base_osw_path = Path("with_analysis.osw").resolve()
assert base_osw_path.is_file(), f"{base_osw_path=} is not found"

osw = json.loads(base_osw_path.read_text())
suffix = "labs" if is_labs else "classic"
osw_path = base_osw_path.parent / f"with_analysis_{suffix}.osw"
runDir = base_osw_path.parent / f"run_{suffix}"
osw["run_directory"] = str(runDir)
runDir.mkdir(exist_ok=True)
with open(osw_path, "w") as f:
json.dump(osw, fp=f, indent=2, sort_keys=True)

# Fake having an in.idf, or it won't run with the "classic" subcommand; do it for
# labs too so that it's less confusing
# if not is_labs:
with open(runDir / "in.idf", "w") as f:
f.write("Building,;")

command = [str(osclipath)]
if not is_labs:
command.append("classic")
command += ["run", "--postprocess_only", "-w", str(osw_path)]
lines = subprocess.check_output(command, encoding="utf-8").splitlines()

assert runDir.exists()
measure_attributes_path = runDir / "measure_attributes.json"
assert measure_attributes_path.is_file()
results_path = runDir / "results.json"
assert results_path.is_file()
objectives_path = runDir / "objectives.json"
assert objectives_path.is_file()

measure_attributes = json.loads(measure_attributes_path.read_text())
assert measure_attributes == {
"FakeReport": {"applicable": True, "net_site_energy": 167.1, "something_with_invalid_chars": 1}
}

results = json.loads(results_path.read_text())
assert results == {"FakeReport": {"applicable": True, "net_site_energy": 167.1, "something_with_invalid_chars": 1}}

objectives = json.loads(objectives_path.read_text())
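# 1.7976931348623157e308 is DBL_MAX: apparently the sentinel used when an objective's
# source value (here IsNonExisting.NonExisting) was never registered. Key suffixes are
# objective_function_index + 1; there is no suffix 2 because that entry has
# "objective_function": false in analysis.json.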
assert objectives == {
"objective_function_1": 167.1,
"objective_function_3": 1.7976931348623157e308,
"objective_function_group_1": 1.0,
"objective_function_group_3": None,
"objective_function_target_1": 0.0,
"objective_function_target_3": None,
"scaling_factor_1": 1.0,
"scaling_factor_3": None,
}

expected_files_in_run_dir = {
"data_point.zip",
"finished.job",
"in.idf",
"measure_attributes.json",
"objectives.json",
"results.json",
"run.log",
"started.job",
# TODO: see below
"data_point_out.json",
}
# TODO: I'm letting this test fail so it's obvious this needs to be addressed
if True: # not is_labs:
# We get the SAME exact info in measure_attributes.json, results.json and data_point_out.json...
# measure_attributes.json is flushed after each apply-measure step (ModelMeasures, EnergyPlusMeasures,
# ReportingMeasures); at the end of ReportingMeasures it is written once more, and results.json is spat out too.
# Do we really need data_point_out.json in addition to these?
# Seems like we could just write the results.json/data_point_out.json output once at the end of the
# workflow run instead
expected_files_in_run_dir.add("data_point_out.json")
Comment from the PR author:

@kbenne @brianlball Here is the TODO I want to address (with a deliberately failing test so it doesn't slip by).

Do we really need all three of these JSON files? If so, when?

assert set([x.name for x in runDir.glob("*")]) == expected_files_in_run_dir
22 changes: 19 additions & 3 deletions src/workflow/ApplyMeasure.cpp
@@ -86,6 +86,9 @@ void OSWorkflow::applyMeasures(MeasureType measureType, bool energyplus_output_r
runner.incrementStep();
result.setStepResult(StepResult::Skip);
}

// Technically here I would need to have gotten className from the measure to match workflow-gem, just to set applicable = false
output_attributes[step.name().value_or(measureDirName)]["applicable"] = openstudio::Variant(false);
}
continue;
}
@@ -303,16 +306,29 @@ end
// if doing output requests we are done now
if (!energyplus_output_requests) {
WorkflowStepResult result = runner.result();
if (auto stepResult_ = result.stepResult()) {
LOG(Debug, "Step Result: " << stepResult_->valueName());
}

// incrementStep must be called after run
runner.incrementStep();
if (auto errors = result.stepErrors(); !errors.empty()) {
ensureBlock(true);
throw std::runtime_error(fmt::format("Measure {} reported an error with [{}]", measureDirName, fmt::join(errors, "\n")));
}

const auto measureName = step.name().value_or(className);
auto& measureAttributes = output_attributes[measureName];
for (const auto& stepValue : result.stepValues()) {
measureAttributes[stepValue.name()] = stepValue.valueAsVariant();
}
auto stepResult_ = result.stepResult();
if (!stepResult_.has_value()) {
LOG_AND_THROW("Step Result not set for " << scriptPath_->generic_string());
}

// Add an applicability flag to all the measure results
const StepResult stepResult = std::move(*stepResult_);
LOG(Debug, "Step Result: " << stepResult.valueName());
measureAttributes["applicable"] = openstudio::Variant(!((stepResult == StepResult::NA) || (stepResult == StepResult::Skip)));

if (measureType == MeasureType::ModelMeasure) {
updateLastWeatherFileFromModel();
}
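A side note on the first hunk above (hedged; the skip path is not covered by the pytest in this PR): a skipped measure now also lands in output_attributes, so results.json should contain an entry like the following for a hypothetical skipped step named MySkippedMeasure (illustrative name only):

{
  "MySkippedMeasure": {
    "applicable": false
  }
}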
14 changes: 14 additions & 0 deletions src/workflow/CMakeLists.txt
@@ -26,3 +26,17 @@ add_library(openstudio_workflow
)

target_link_libraries(openstudio_workflow PRIVATE openstudiolib)

if(BUILD_TESTING)
set(openstudio_workflow_test_depends
openstudio_workflow
CONAN_PKG::boost # Maybe at some point simply replace this with openstudiolib
CONAN_PKG::fmt
)

set(openstudio_workflow_test_src
test/Util_GTest.cpp
)

CREATE_TEST_TARGETS(openstudio_workflow "${openstudio_workflow_test_src}" "${openstudio_workflow_test_depends}")
endif()