Feature: map input to unit range & output tensors from unit range in inference_engine_t infer procedure #139

Merged 5 commits on Apr 3, 2024
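In short, this change makes the inference_engine_t infer procedure map each raw input tensor component onto the unit interval, using stored per-component minima and maxima, before the forward pass, and then map the network's unit-range outputs back to physical units afterward. A minimal, library-independent sketch of that round trip (the numeric values below are illustrative and not taken from the PR):

! Illustrative sketch only -- not part of this diff; the values are made up.
program unit_range_round_trip
  implicit none
  real, parameter :: minima(3) = [0.0, 250.0, 1.0e4] ! per-component training minima
  real, parameter :: maxima(3) = [1.0, 320.0, 1.0e5] ! per-component training maxima
  real :: raw(3), unit_values(3), recovered(3)

  raw = [0.5, 287.0, 5.0e4]                          ! a raw, physical-units input tensor
  unit_values = (raw - minima)/(maxima - minima)     ! map_to_unit_range: each component lands in [0,1]
  recovered = minima + unit_values*(maxima - minima) ! map_from_unit_range: the inverse map
  print *, unit_values                               ! approximately 0.500, 0.529, 0.444
  print *, maxval(abs(recovered - raw))              ! round-trip error is ~0
end program

This pair of affine maps is what the tensor_range_t objects in the diff below encapsulate: one minimum and one maximum per tensor component, fixed from the training data.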
8 changes: 4 additions & 4 deletions cloud-microphysics/app/train-cloud-microphysics.f90
@@ -238,15 +238,15 @@ subroutine read_train_write(training_configuration, base_name, plot_unit, previo
else
close(network_unit)

print *,"Calculating inputs tensor component ranges."
print *,"Calculating input tensor component ranges."
input_range = tensor_range_t( &
layer = "inputs", &
minima = [minval(pressure_in), minval(potential_temperature_in), minval(temperature_in), &
minval(qv_in), minval(qc_in), minval(qr_in), minval(qs_in)], &
maxima = [maxval(pressure_in), maxval(potential_temperature_in), maxval(temperature_in), &
maxval(qv_in), maxval(qc_in), maxval(qr_in), maxval(qs_in)] &
)
print *,"Calculating outputs tensor component ranges."
print *,"Calculating output tensor component ranges."
output_range = tensor_range_t( &
layer = "outputs", &
minima = [minval(dpt_dt), minval(dqv_dt), minval(dqc_dt), minval(dqr_dt), minval(dqs_dt)], &
@@ -297,10 +297,10 @@ subroutine read_train_write(training_configuration, base_name, plot_unit, previo
] &
), lon = 1, size(qv_in,1))], lat = 1, size(qv_in,2))], level = 1, size(qv_in,3))], time = start_step, end_step, stride)]

print *,"Normalizing inputs tensors"
print *,"Normalizing input tensors"
inputs = input_range%map_to_unit_range(inputs)

print *,"Normalizing outputs tensors"
print *,"Normalizing output tensors"
outputs = output_range%map_to_unit_range(outputs)

print *, "Eliminating",int(100*(1.-keep)),"% of the grid points that have all-zero time derivatives"
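The hunks above are the training-side half of the feature: the app records per-component extremes of the training data (minval/maxval of each physics field) in tensor_range_t objects and normalizes its tensors through them before training. A small, library-independent sketch of the same idea, using a plain 2-D array as a stand-in for the batch of tensor_t values (the field names and numbers are illustrative only):

! Illustrative sketch only -- a plain-array stand-in for the tensor batch above.
program training_side_normalization
  implicit none
  integer, parameter :: num_components = 2, batch_size = 4
  real :: batch(num_components, batch_size), minima(num_components), maxima(num_components)
  integer :: i

  batch(1,:) = [95000., 98000., 101325., 90000.] ! e.g. pressure samples
  batch(2,:) = [250., 287., 300., 265.]          ! e.g. temperature samples

  minima = minval(batch, dim=2)                  ! per-component minima, as in the hunk above
  maxima = maxval(batch, dim=2)                  ! per-component maxima

  do i = 1, batch_size
    batch(:,i) = (batch(:,i) - minima)/(maxima - minima) ! map each sample to the unit range
  end do

  print *, minval(batch), maxval(batch)          ! 0. and 1.: the extremes land on the interval ends
end program

In the app itself, the extremes and a layer name go into tensor_range_t(layer=..., minima=..., maxima=...), and the same ranges are later written into the network's JSON file (see the to_json changes below) so inference can invert the map.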
4 changes: 2 additions & 2 deletions cloud-microphysics/fpm.toml
@@ -5,7 +5,7 @@ author = "Damian Rouson, Tan Nguyen, Jordan Welsman, David Torres, Brad Richards
maintainer = "rouson@lbl.gov"

[dependencies]
assert = {git = "https://github.com/sourceryinstitute/assert", tag = "1.5.0"}
sourcery = {git = "https://github.com/sourceryinstitute/sourcery", tag = "4.5.3"}
assert = {git = "https://github.com/sourceryinstitute/assert", tag = "1.6.0"}
sourcery = {git = "https://github.com/sourceryinstitute/sourcery", tag = "4.6.1"}
inference-engine = {path = "../"}
netcdf-interfaces = {git = "https://github.com/LKedward/netcdf-interfaces.git", rev = "d2bbb71ac52b4e346b62572b1ca1620134481096"}
6 changes: 3 additions & 3 deletions src/inference_engine/inference_engine_m_.f90
@@ -23,7 +23,7 @@ module inference_engine_m_
type inference_engine_t
!! Encapsulate the minimal information needed to perform inference
private
type(tensor_range_t) inputs_range_, outputs_range_
type(tensor_range_t) input_range_, output_range_
type(string_t) metadata_(size(key))
real(rkind), allocatable :: weights_(:,:,:), biases_(:,:)
integer, allocatable :: nodes_(:)
@@ -59,13 +59,13 @@ module inference_engine_m_

interface inference_engine_t

pure module function construct_from_padded_arrays(metadata, weights, biases, nodes, inputs_range, outputs_range) &
pure module function construct_from_padded_arrays(metadata, weights, biases, nodes, input_range, output_range) &
result(inference_engine)
implicit none
type(string_t), intent(in) :: metadata(:)
real(rkind), intent(in) :: weights(:,:,:), biases(:,:)
integer, intent(in) :: nodes(0:)
type(tensor_range_t), intent(in), optional :: inputs_range, outputs_range
type(tensor_range_t), intent(in), optional :: input_range, output_range
type(inference_engine_t) inference_engine
end function

57 changes: 31 additions & 26 deletions src/inference_engine/inference_engine_s.F90
@@ -36,9 +36,12 @@

associate(w => self%weights_, b => self%biases_, n => self%nodes_, output_layer => ubound(self%nodes_,1))


allocate(a(maxval(n), input_layer:output_layer))

a(1:n(input_layer),input_layer) = inputs%values()
associate(normalized_inputs => self%input_range_%map_to_unit_range(inputs))
a(1:n(input_layer),input_layer) = normalized_inputs%values()
end associate

feed_forward: &
do l = input_layer+1, output_layer
@@ -47,7 +50,9 @@
end associate
end do feed_forward

outputs = tensor_t(a(1:n(output_layer), output_layer))
associate(normalized_outputs => tensor_t(a(1:n(output_layer), output_layer)))
outputs = self%output_range_%map_from_unit_range(normalized_outputs)
end associate

end associate

@@ -128,22 +133,22 @@ pure subroutine set_activation_strategy(inference_engine)
block
integer i

if (present(inputs_range)) then
inference_engine%inputs_range_ = inputs_range
if (present(input_range)) then
inference_engine%input_range_ = input_range
else
associate(num_inputs => nodes(lbound(nodes,1)))
associate(default_minima => [(0., i=1,num_inputs)], default_maxima => [(1., i=1,num_inputs)])
inference_engine%inputs_range_ = tensor_range_t("inputs", default_minima, default_maxima)
inference_engine%input_range_ = tensor_range_t("inputs", default_minima, default_maxima)
end associate
end associate
end if

if (present(outputs_range)) then
inference_engine%outputs_range_ = outputs_range
if (present(output_range)) then
inference_engine%output_range_ = output_range
else
associate(num_outputs => nodes(ubound(nodes,1)))
associate(default_minima => [(0., i=1,num_outputs)], default_maxima => [(1., i=1,num_outputs)])
inference_engine%outputs_range_ = tensor_range_t("outputs", default_minima, default_maxima)
inference_engine%output_range_ = tensor_range_t("outputs", default_minima, default_maxima)
end associate
end associate
end if
@@ -157,7 +162,7 @@ pure subroutine set_activation_strategy(inference_engine)
module procedure construct_from_json

type(string_t), allocatable :: lines(:), metadata(:)
type(tensor_range_t) inputs_range, outputs_range
type(tensor_range_t) input_range, output_range
type(layer_t) hidden_layers, output_layer
type(neuron_t) output_neuron
real(rkind), allocatable :: hidden_weights(:,:,:)
@@ -189,9 +194,9 @@ pure subroutine set_activation_strategy(inference_engine)

associate(prototype => tensor_range_t("",[0.],[1.]))
associate(num_lines => size(prototype%to_json()))
inputs_range = tensor_range_t(lines(l:l+num_lines-1))
input_range = tensor_range_t(lines(l:l+num_lines-1))
l = l + num_lines
outputs_range = tensor_range_t(lines(l:l+num_lines-1))
output_range = tensor_range_t(lines(l:l+num_lines-1))
l = l + num_lines
end associate
end associate
@@ -342,7 +347,7 @@ function get_key_value(line) result(value_)
character(len=17) :: single_value
integer, parameter :: &
outer_object_braces = 2, hidden_layer_outer_brackets = 2, lines_per_neuron = 4, inner_brackets_per_layer = 2, &
output_layer_brackets = 2, metadata_outer_braces = 2, inputs_range_object = 5, outputs_range_object = 5
output_layer_brackets = 2, metadata_outer_braces = 2, input_range_object = 5, output_range_object = 5

call assert_consistency(self)

@@ -360,7 +365,7 @@ function get_key_value(line) result(value_)
associate(num_lines => &
outer_object_braces &
+ metadata_outer_braces + size(key) &
+ inputs_range_object + outputs_range_object &
+ input_range_object + output_range_object &
+ hidden_layer_outer_brackets + (num_hidden_layers)*(inner_brackets_per_layer + neurons_per_layer*lines_per_neuron) &
+ output_layer_brackets + num_outputs*lines_per_neuron &
)
@@ -392,24 +397,24 @@ function get_key_value(line) result(value_)
lines(line) = string_t(' },')

block
type(string_t), allocatable :: inputs_range_json(:), outputs_range_json(:)
type(string_t), allocatable :: input_range_json(:), output_range_json(:)

line = line + 1
inputs_range_json = self%inputs_range_%to_json()
associate(last_line => ubound(inputs_range_json,1))
call assert(last_line==inputs_range_object, "inference_engine_s(to_json): inputs_range object line count")
inputs_range_json(last_line) = inputs_range_json(last_line) // ","
lines(line:line+inputs_range_object-1) = inputs_range_json
line = line + inputs_range_object-1
input_range_json = self%input_range_%to_json()
associate(last_line => ubound(input_range_json,1))
call assert(last_line==input_range_object, "inference_engine_s(to_json): input_range object line count")
input_range_json(last_line) = input_range_json(last_line) // ","
lines(line:line+input_range_object-1) = input_range_json
line = line + input_range_object-1
end associate

line = line + 1
outputs_range_json = self%outputs_range_%to_json()
associate(last_line => ubound(outputs_range_json,1))
call assert(last_line==outputs_range_object, "inference_engine_s(to_json): outputs_range object line count")
outputs_range_json(last_line) = outputs_range_json(last_line) // ","
lines(line:line+outputs_range_object-1) = outputs_range_json
line = line + inputs_range_object-1
output_range_json = self%output_range_%to_json()
associate(last_line => ubound(output_range_json,1))
call assert(last_line==output_range_object, "inference_engine_s(to_json): output_range object line count")
output_range_json(last_line) = output_range_json(last_line) // ","
lines(line:line+output_range_object-1) = output_range_json
line = line + input_range_object-1
end associate
end block

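One consequence of the constructor changes above: when input_range and output_range are omitted, the defaults use minima of 0 and maxima of 1 for every component, which makes both maps the identity, so engines constructed without ranges infer exactly as before. A tiny library-independent check of that claim:

! Illustrative check only: with default minima = 0 and maxima = 1, both maps are identities.
program default_range_is_identity
  implicit none
  real :: x(3)
  x = [0.12, 0.5, 0.98]       ! arbitrary component values
  print *, (x - 0.)/(1. - 0.) ! map_to_unit_range with the defaults: unchanged
  print *, 0. + x*(1. - 0.)   ! map_from_unit_range with the defaults: unchanged
end program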
8 changes: 0 additions & 8 deletions src/inference_engine/tensor_range_s.f90
@@ -73,23 +73,15 @@

module procedure map_to_unit_range
associate(tensor_values => tensor%values())
call assert(all(tensor_values>=self%minima_) .and. all(tensor_values<=self%maxima_), &
"tensor_range_s(map_to_unit_range): unnormalized range")
associate(normalized_values => (tensor_values - self%minima_)/(self%maxima_ - self%minima_))
call assert(all(normalized_values>=0.) .and. all(normalized_values<=1.), &
"tensor_range_s(map_to_unit_range): normalized range")
normalized_tensor = tensor_t(normalized_values)
end associate
end associate
end procedure

module procedure map_from_unit_range
associate(tensor_values => tensor%values())
call assert(all(tensor_values>=0.).and.all(tensor_values<=1.), &
"tensor_range_s(map_from_unit_range): normalized input")
associate(unnormalized_values => self%minima_ + tensor_values*(self%maxima_ - self%minima_))
call assert(all([unnormalized_values>=self%minima_, unnormalized_values<=self%maxima_]), &
"tensor_range_s(map_to_unit_range): unnormalized range")
unnormalized_tensor = tensor_t(unnormalized_values)
end associate
end associate
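The deletions above remove the range assertions from both maps, so a component that falls outside the stored training extremes no longer aborts the run when assertions are enabled; it simply maps linearly to a value outside [0,1]. A small library-independent illustration:

! Illustrative sketch only: out-of-range values now extrapolate rather than trip an assert.
program out_of_range_extrapolates
  implicit none
  real, parameter :: minimum = 250., maximum = 320. ! stored training extremes for one component
  real, parameter :: unseen = 350.                  ! hotter than anything in the training data
  print *, (unseen - minimum)/(maximum - minimum)   ! about 1.43: outside [0,1], but no failure
end program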
8 changes: 6 additions & 2 deletions src/inference_engine/trainable_engine_s.F90
@@ -68,7 +68,9 @@

allocate(a(maxval(n), input_layer:output_layer)) ! Activations

a(1:n(input_layer),input_layer) = inputs%values()
associate(normalized_inputs => self%input_range_%map_to_unit_range(inputs))
a(1:n(input_layer),input_layer) = normalized_inputs%values()
end associate

feed_forward: &
do l = 1,output_layer
@@ -77,7 +79,9 @@
)
end do feed_forward

outputs = tensor_t(a(1:n(output_layer),output_layer))
associate(normalized_outputs => tensor_t(a(1:n(output_layer), output_layer)))
outputs = self%output_range_%map_from_unit_range(normalized_outputs)
end associate

end associate
