rename autorewarded to auto_rewarded in rewards table #2498

Merged (2 commits, Jul 18, 2022)
allensdk/brain_observatory/behavior/behavior_session.py (1 addition, 1 deletion)

@@ -937,7 +937,7 @@ def rewards(self) -> pd.DataFrame:
             0.007 if earned reward, 0.005 if auto reward.
         timestamps: (float)
             time in seconds
-        autorewarded: (bool)
+        auto_rewarded: (bool)
             True if free reward was delivered for that trial.
             Occurs during the first 5 trials of a session and
             throughout as needed
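For context, the docstring above documents the table returned by BehaviorSession.rewards. A minimal sketch of what a consumer sees after the rename (the NWB path is hypothetical, and from_nwb_path is assumed from the BehaviorSession API rather than confirmed by this diff):

# Sketch only: the file path is hypothetical; from_nwb_path is assumed
# to exist on BehaviorSession as in recent AllenSDK releases.
from allensdk.brain_observatory.behavior.behavior_session import BehaviorSession

session = BehaviorSession.from_nwb_path("behavior_session.nwb")
rewards = session.rewards           # pd.DataFrame described by the docstring above
print(rewards.columns.tolist())     # expect ['volume', 'timestamps', 'auto_rewarded']
free = rewards[rewards["auto_rewarded"]]  # trials that received a free reward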
allensdk/brain_observatory/behavior/data_objects/rewards.py (4 additions, 4 deletions)

@@ -38,7 +38,7 @@ def from_stimulus_file(
         data = stimulus_file.data
 
         trial_df = pd.DataFrame(data["items"]["behavior"]["trial_log"])
-        rewards_dict = {"volume": [], "timestamps": [], "autorewarded": []}
+        rewards_dict = {"volume": [], "timestamps": [], "auto_rewarded": []}
         for idx, trial in trial_df.iterrows():
             rewards = trial["rewards"]
             # as i write this there can only ever be one reward per trial
@@ -47,7 +47,7 @@
                 rewards_dict["timestamps"].append(
                     stimulus_timestamps.value[rewards[0][2]])
                 auto_rwrd = trial["trial_params"]["auto_reward"]
-                rewards_dict["autorewarded"].append(auto_rwrd)
+                rewards_dict["auto_rewarded"].append(auto_rwrd)
 
         df = pd.DataFrame(rewards_dict)
         return cls(rewards=df)
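Both hunks above sit inside from_stimulus_file, which accumulates one list per column while walking the trial log and builds the DataFrame once at the end. A self-contained sketch of that pattern (all names and values below are illustrative stand-ins, not the AllenSDK API):

import pandas as pd

# Made-up trial log mimicking the structure consumed above: each trial
# carries at most one (volume, _, frame_index) reward tuple.
trial_log = [
    {"rewards": [(0.005, 0.0, 12)], "trial_params": {"auto_reward": True}},
    {"rewards": [(0.007, 0.0, 98)], "trial_params": {"auto_reward": False}},
    {"rewards": [], "trial_params": {"auto_reward": False}},  # unrewarded trial
]
timestamps = [i * 0.01 for i in range(200)]  # stand-in for stimulus timestamps

rewards_dict = {"volume": [], "timestamps": [], "auto_rewarded": []}
for trial in trial_log:
    rewards = trial["rewards"]
    if len(rewards) > 0:  # at most one reward per trial
        rewards_dict["volume"].append(rewards[0][0])
        rewards_dict["timestamps"].append(timestamps[rewards[0][2]])
        rewards_dict["auto_rewarded"].append(trial["trial_params"]["auto_reward"])

df = pd.DataFrame(rewards_dict)
print(df)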
@@ -67,7 +67,7 @@ def from_nwb(cls, nwbfile: NWBFile) -> Optional["Rewards"]:
             df = pd.DataFrame({
                 'volume': volume,
                 'timestamps': time,
-                'autorewarded': autorewarded})
+                'auto_rewarded': autorewarded})
             return cls(rewards=df)
 
     def to_nwb(self, nwbfile: NWBFile) -> NWBFile:
@@ -87,7 +87,7 @@ def to_nwb(self, nwbfile: NWBFile) -> NWBFile:
 
         autorewarded_ts = TimeSeries(
             name='autorewarded',
-            data=self.value['autorewarded'].values,
+            data=self.value['auto_rewarded'].values,
             timestamps=reward_volume_ts.timestamps,
             unit='mL'
         )
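Worth noting: to_nwb still writes the TimeSeries under its on-disk name 'autorewarded'; only the in-memory DataFrame column becomes auto_rewarded, and from_nwb performs the remapping. A sketch of reading the raw series back with pynwb (the file path and the 'rewards' processing-module layout are assumptions inferred from the methods above, not verified against the repo):

# Sketch: on disk the series is assumed to stay 'autorewarded'; the
# DataFrame column is 'auto_rewarded'. Path and module layout are assumed.
import pandas as pd
from pynwb import NWBHDF5IO

with NWBHDF5IO("behavior_session.nwb", "r") as io:
    nwbfile = io.read()
    rewards_module = nwbfile.processing["rewards"]      # assumed module name
    autorewarded_ts = rewards_module["autorewarded"]    # on-disk name unchanged
    df = pd.DataFrame({
        "volume": rewards_module["volume"].data[:],     # assumed companion series
        "timestamps": autorewarded_ts.timestamps[:],
        "auto_rewarded": autorewarded_ts.data[:],       # renamed in-memory column
    })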
Binary file not shown.
@@ -89,7 +89,7 @@ def _create_dummy_stimulus_file():
 
     expected_dict = {'volume': [0.001, 0.002],
                      'timestamps': [0.04, 0.1],
-                     'autorewarded': [True, False]}
+                     'auto_rewarded': [True, False]}
     expected_df = pd.DataFrame(expected_dict)
     expected_df = expected_df
     assert expected_df.equals(rewards.value)
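The test above relies on pd.DataFrame.equals, which is strict: values, column order, and dtypes must all match. A small standalone illustration of that strictness:

import pandas as pd

# Same expected frame as the test above.
expected = pd.DataFrame({'volume': [0.001, 0.002],
                         'timestamps': [0.04, 0.1],
                         'auto_rewarded': [True, False]})

assert expected.equals(expected.copy())  # identical copy passes

# Same values but a different column dtype fails the comparison.
as_object = expected.astype({'auto_rewarded': object})
assert not expected.equals(as_object)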
@@ -77,7 +77,7 @@ def test_visbeh_ophys_data_set():
     assert len(data_set.licks) == 2421 and set(data_set.licks.columns) \
         == set(['timestamps', 'frame'])
     assert len(data_set.rewards) == 85 and set(data_set.rewards.columns) == \
-        set(['timestamps', 'volume', 'autorewarded'])
+        set(['timestamps', 'volume', 'auto_rewarded'])
     assert len(data_set.corrected_fluorescence_traces) == 258 and \
         set(data_set.corrected_fluorescence_traces.columns) == \
         set(['cell_roi_id', 'corrected_fluorescence'])
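Since the rename breaks downstream code that indexes rewards['autorewarded'], a compatibility shim (purely illustrative, not part of this PR) could smooth the transition across AllenSDK versions:

import pandas as pd

def normalize_rewards_columns(rewards: pd.DataFrame) -> pd.DataFrame:
    """Accept rewards tables from AllenSDK versions before and after
    this rename by mapping the old column name onto the new one.
    Hypothetical helper, shown only to illustrate the migration."""
    if "autorewarded" in rewards.columns and "auto_rewarded" not in rewards.columns:
        rewards = rewards.rename(columns={"autorewarded": "auto_rewarded"})
    return rewards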