Epicsarch qs #378

Merged 21 commits on Apr 4, 2024
Commits
af4d09c
ENH: Adding --update feature to epicsarch-qs (in progress)
c-tsoi Oct 5, 2023
a256c15
changed the helper function to return updated dictionary to a list
c-tsoi Oct 27, 2023
a1612e4
able to update entries with matching aliases and PVs, and check qs fo…
c-tsoi Nov 13, 2023
20eca00
Refining with exceptions
c-tsoi Nov 15, 2023
1c27469
Able to identify duplicates and write correctly to the archfile
c-tsoi Dec 7, 2023
139c817
Able to identify duplicates and write correctly to the archfil…
c-tsoi Dec 7, 2023
e7dd922
Adding a method to pull all user objects from the cds tab in the ques…
c-tsoi Jan 23, 2024
52e54c1
Adding softlink feature; written but still needs testing. Also, move…
c-tsoi Feb 8, 2024
bfaeb73
adding comments for better readability
c-tsoi Feb 8, 2024
cf40af7
tested running from xppopr and soft link feature
c-tsoi Feb 21, 2024
2cbcf40
testing cds items
c-tsoi Mar 13, 2024
9992d9c
Fixing pre-commit errors and updating files
c-tsoi Mar 13, 2024
d4aa2ea
Cleaned test_print_dry and marked with xfail_2.
c-tsoi Mar 19, 2024
fe4c640
Update hutch_python/epics_arch.py
c-tsoi Mar 21, 2024
a537f74
Update hutch_python/epics_arch.py
c-tsoi Mar 21, 2024
cb551d2
Update hutch_python/epics_arch.py
c-tsoi Mar 25, 2024
3e83fbc
Update hutch_python/qs_load.py
c-tsoi Mar 25, 2024
f3a0a9c
Update hutch_python/epics_arch.py
c-tsoi Mar 25, 2024
e5f7037
Update hutch_python/epics_arch.py
c-tsoi Mar 25, 2024
91b2840
Fixing issues addressed in the PR.
c-tsoi Mar 27, 2024
29fa3ce
Removing a single debugging message. Adding more information to help …
c-tsoi Apr 4, 2024
303 changes: 290 additions & 13 deletions hutch_python/epics_arch.py
@@ -2,11 +2,32 @@
import argparse
import logging
import os
import re
import subprocess
import sys
from dataclasses import dataclass

from prettytable import PrettyTable

from .constants import EPICS_ARCH_FILE_PATH
from .qs_load import get_qs_client

try:
import psdm_qs_cli
from psdm_qs_cli import QuestionnaireClient
except ImportError:
psdm_qs_cli = None
QuestionnaireClient = None


# Dataclass used as a simple struct to organize CDS objects for the PrettyTable output
@dataclass
class QStruct:
alias: str
pvbase: str
pvtype: str


logger = logging.getLogger(__name__)


@@ -28,7 +49,16 @@ def _create_parser():

parser.add_argument('--dry-run', action='store_true', default=False,
help='Print to stdout what would be written in the '
'archFIle.')
'archFile.')

parser.add_argument('--level', '-l', required=False, type=str, default="INFO",
help='Change the logging level, e.g. DEBUG to show the debug logging stream')

parser.add_argument('--cds-items', nargs=2, action='store', default=None,
help="Pulls all data from CDS tab in the form of a dictionary. E.g.: xppx1003221 --cds-items run_xx experiment_name, where xx is the run number. This option will not automatically update the archfile.")

parser.add_argument('--softlink', '-sl', action='store_true', default=None, help="Create softlink for experiment. This is run after the archfile has been updated/created.")
parser.add_argument('--link-path', '-sl_path', action='store', default=EPICS_ARCH_FILE_PATH, help="Provide user with option to supply custom path for softlink. Defaults to: /cds/group/pcds/dist/pds/{}/misc/.")
return parser
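
For orientation, a minimal sketch of how the new options parse; the experiment name and run number are placeholders, and the positional experiment argument is assumed to be defined in the elided part of _create_parser():

parser = _create_parser()
args = parser.parse_args(['xppx1003221', '--cds-items', 'run21', 'xppx1003221',
                          '--level', 'DEBUG'])
# args.cds_items == ['run21', 'xppx1003221']; passing vars(args) to
# create_arch_file() then short-circuits into pull_cds_data() and returns
# without touching the archfile, as the --cds-items help string states.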


@@ -37,10 +67,18 @@ def main():
parser = _create_parser()
parsed_args = parser.parse_args()
kwargs = vars(parsed_args)
logger_setup(parsed_args)
create_arch_file(**kwargs)


def create_arch_file(experiment, hutch=None, path=None, dry_run=False):
def logger_setup(args):
# Setting up the logger, to show the level when enabled
logging.getLogger().addHandler(logging.NullHandler())
logging.basicConfig(level="INFO")
logger.setLevel(args.level)
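
An illustrative check of the behaviour of logger_setup above, assuming the module imports cleanly in your environment; the Namespace stands in for the parsed CLI arguments:

import argparse
import logging

from hutch_python.epics_arch import logger_setup

logger_setup(argparse.Namespace(level='DEBUG'))
# This module's logger honours the --level flag, so its debug stream shows up,
# while other loggers still inherit INFO from the root configuration.
logging.getLogger('hutch_python.epics_arch').debug('module debug stream now visible')
logging.getLogger('some.other.module').debug('still hidden: inherits INFO from the root logger')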


def create_arch_file(experiment, level=None, hutch=None, path=None, dry_run=False, cds_items=None, softlink=None, link_path=None):
"""
Create an epicsArch file for the experiment.

@@ -56,6 +94,10 @@ def create_arch_file(experiment, hutch=None, path=None, dry_run=False):
dry_run : bool
To indicate if only print to stdout the data that would be stored
in the epicsArch file and not create the file.
link_path : str
Path to overwrite softlink to experiment specific arch file.
cds_items : str
Pulls cds tab data in the form of a dictionary and prints this to Pretty Table.

Examples
--------
@@ -87,6 +129,9 @@ def create_arch_file(experiment, hutch=None, path=None, dry_run=False):
file_path = None
if experiment and not dry_run:
# set the path to write the epicsArch file to.
if cds_items:
pull_cds_data(experiment, cds_items)
return
if path:
if path and not os.path.exists(path):
raise OSError('Invalid path: %s' % path)
@@ -95,11 +140,203 @@ def create_arch_file(experiment, hutch=None, path=None, dry_run=False):
file_path = EPICS_ARCH_FILE_PATH.format(hutch.lower())
else:
file_path = EPICS_ARCH_FILE_PATH.format(experiment[0:3])
create_file(exp_name=experiment, path=file_path)
update_file(exp_name=experiment, path=file_path)
if softlink:
create_softlink(experiment, link_path)
elif dry_run:
print_dry_run(experiment)


def pull_cds_data(exp, run):
"""
Gather all user objects from the CDS tab in the questionnaire.
Parse objects and separate them based on type.
Display them in the console via PrettyTable.

Parameters
----------
exp: ``str``
The experiment's name e.g. xppx1003221
run: ``list``
The two --cds-items values, e.g. the run number (run21) and the experiment name

Outputs
-------
PrettyTable visualization of cds objects


"""
"""
pull run data from questionnaire api, then take the data and sort it
create Pretty Table instance and if the values from the run data contain pcdssetup
then put them into a seperate dictionary as they are cds items
"""
logger.debug('pull_cds_items(%s)', exp)
client = QuestionnaireClient()
logger.debug("in cds items, run numb:", str(run[1]))
runDetails_Dict = client.getProposalDetailsForRun(str(run[0]), str(run[1]))
sorted_runDetails_Dict = dict(sorted(runDetails_Dict.items()))
cds_dict = {}
myTable = PrettyTable(["Alias", "PV Base", "Type"])
for keys, vals in sorted_runDetails_Dict.items():
if "pcdssetup" in keys:
cds_dict[keys] = vals

"""
names are as follows:
pcdssetup-motors, pcdssetup-areadet, pcdssetup-ao, pcdssetup-devs
pcdssetup-ps, pcdssetup-trig, pcdssetup-vacuum, pcdssetup-temp

iterate through all CDS items and label them by type,
using the QStruct fields (alias, pvbase, pvtype) to identify them
"""
displayList = []
for k, v in cds_dict.items():
if re.match('pcdssetup-motors.*-name', k):
pv = cds_dict.get(re.sub('name', 'pvbase', k), '')
displayList.append(QStruct(v, pv, "motors"))
elif re.match('pcdssetup-areadet.*-name', k):
pv = cds_dict.get(re.sub('name', 'pvbase', k), '')
displayList.append(QStruct(v, pv, "areadet"))
elif re.match('pcdssetup-ao.*-name', k):
pv = cds_dict.get(re.sub('name', 'pvbase', k), '')
displayList.append(QStruct(v, pv, "analog output"))
elif re.match('pcdssetup-devs.*-name', k):
pv = cds_dict.get(re.sub('name', 'pvbase', k), '')
displayList.append(QStruct(v, pv, "other devices"))
elif re.match('pcdssetup-ps.*-name', k):
pv = cds_dict.get(re.sub('name', 'pvname', k), '')
displayList.append(QStruct(v, pv, "power supplies"))
elif re.match('pcdssetup-trig.*-name', k):
pv = cds_dict.get(re.sub('name', 'pvbase', k), '')
displayList.append(QStruct(v, pv, "triggers"))
elif re.match('pcdssetup-vacuum.*-name', k):
pv = cds_dict.get(re.sub('name', 'pvbase', k), '')
displayList.append(QStruct(v, pv, "vacuum"))
elif re.match('pcdssetup-temp.*-name', k):
pv = cds_dict.get(re.sub('name', 'pvbase', k), '')
displayList.append(QStruct(v, pv, "temperature"))

for struct in displayList:
myTable.add_row([struct.alias, struct.pvbase, struct.pvtype])
print(myTable)
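
A compact, self-contained illustration of the classification step above, using made-up questionnaire keys and PV names; real keys come from getProposalDetailsForRun():

import re
from dataclasses import dataclass


@dataclass
class QStruct:
    alias: str
    pvbase: str
    pvtype: str


# Each '-name' key is paired with its matching '-pvbase' key.
cds_dict = {
    'pcdssetup-motors-1-name': 'sample_x',
    'pcdssetup-motors-1-pvbase': 'XPP:USR:MMS:01',
    'pcdssetup-trig-1-name': 'laser_trig',
    'pcdssetup-trig-1-pvbase': 'XPP:USR:EVR:TRIG0',
}

rows = []
for k, v in cds_dict.items():
    if re.match('pcdssetup-motors.*-name', k):
        rows.append(QStruct(v, cds_dict.get(re.sub('name', 'pvbase', k), ''), 'motors'))
    elif re.match('pcdssetup-trig.*-name', k):
        rows.append(QStruct(v, cds_dict.get(re.sub('name', 'pvbase', k), ''), 'triggers'))
# rows now holds one QStruct per device, ready to feed PrettyTable.add_row().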


def create_softlink(experiment, link_path):
"""
Remove the existing softlink in /cds/group/pcds/dist/pds/{}/misc/ and replace it with one pointing to the new active experiment's archfile.

"""

# Defaults new softlink in /cds/group/pcds/dist/pds/{}/misc/
if not os.path.exists(link_path):
raise OSError('Path does not exist path: %s' % link_path)

subprocess.run(['ln', '-sf', link_path.format(experiment[0:3]) + 'epicsArch_' + experiment + '.txt', link_path.format(experiment[0:3]) + 'epicsArch_' + experiment[0:3].upper() + '_exp_specific.txt'])
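
For reference, a pure-Python sketch equivalent to the 'ln -sf' call above; make_exp_softlink is a hypothetical helper, not part of this PR, and the default template mirrors the path quoted in the --link-path help:

import os


def make_exp_softlink(experiment, link_path='/cds/group/pcds/dist/pds/{}/misc/'):
    """Point epicsArch_<HUTCH>_exp_specific.txt at the experiment's archfile."""
    hutch_dir = link_path.format(experiment[0:3])
    target = os.path.join(hutch_dir, 'epicsArch_' + experiment + '.txt')
    link = os.path.join(hutch_dir, 'epicsArch_' + experiment[0:3].upper() + '_exp_specific.txt')
    if os.path.lexists(link):
        os.remove(link)  # os.symlink has no '-f'; drop any stale link first
    os.symlink(target, link)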


def check_for_duplicates(qs_data, af_data):
"""
Check for duplicate PVs in the questionnaire, the code already throws a warning for duplicate aliases.
If duplicates (PV or Alias) are found in the questionnaire throw error and prompt user to fix and re-run. If they are found in the epicsArch file then step through each match and update accordingly.

Parameters
----------
qs_data : list
af_data : list

Examples
--------
>>> epicsarch-qs xpplv6818 --dry-run

Returns
-------
updated_arch_list : list
Updated list containing sorted alias, PVs.
"""

"""
Part 1: Parse Data from the questionnaire and the archfile
Part 2: Check the questionnaire for pv duplicates
"""

# PART 1

# Convert the lists to dictionaries so they can be sorted as key-value pairs, while also removing any whitespace in the aliases.

# Questionnaire Data, removing whitespaces and newline chars
qsDict = dict(zip(qs_data[::2], qs_data[1::2]))
qsDict = {k.replace(" ", ""): v for k, v in qsDict.items()}
qsDict = {k.replace("\n", ""): v for k, v in qsDict.items()}
qsDict = {k: v.replace(" ", "") for k, v in qsDict.items()}
qsDict = {k: v.replace("\n", "") for k, v in qsDict.items()}
sorted_qsDict = dict(sorted(qsDict.items()))

# If the archfile is not empty, clean it; otherwise skip.
if len(af_data) > 0:
# ArchFile Data, removing whitespaces and newline chars
afDict = dict(zip(af_data[::2], af_data[1::2]))
afDict = {k.replace(" ", ""): v for k, v in afDict.items()}
afDict = {k.replace("\n", ""): v for k, v in afDict.items()}
afDict = {k: v.replace(" ", "") for k, v in afDict.items()}
afDict = {k: v.replace("\n", "") for k, v in afDict.items()}
sorted_afDict = dict(sorted(afDict.items()))
else:
afDict = {}
sorted_afDict = {}

# PART 2

# Check the questionnaire for duplicate PVs
# Making reverse multidict to help identify duplicate values in questionnaire.
rev_keyDict = {}
for key, value in sorted_qsDict.items():
rev_keyDict.setdefault(value, list()).append(key)

pvDuplicate = [key for key, values in rev_keyDict.items() if len(values) > 1]
# Look for duplicate PVs in the questionnaire
# and print the alias for each; warn the operator to remove the duplicates and re-run
for dup in pvDuplicate:
logger.debug("!Duplicate PV in questionnaire!:" + str(dup))
for value in rev_keyDict[dup][1:]:
logger.debug("Found PV duplicate(s) from questionnaire: " + value + ", " + sorted_qsDict[value])
raise ValueError("Please remove duplicates and re-run script!")

# Check to see if the archfile has any data in it
if len(af_data) == 0:
logger.debug("CFD: Case: no archfile given, returning cleaned questionnaire data.")
cleaned_qs_data = [x for item in sorted_qsDict.items() for x in item]
return cleaned_qs_data

# Once the questionnaire is free of duplicates, move on to updating values according to which field matches.

# Checking for matching PVs in questionnaire and archfile
# if the PV matches update the alias by removing the old key and making a new one
for (k, val) in sorted_qsDict.items():
# this looks up the key in the af Dictionary by finding the value
foundKey = get_key(val, sorted_afDict)
if k in sorted_afDict:
logger.debug("!Alias Match in questionnaire and archfile! Updating PV: " + k + ", " + sorted_qsDict[k])
sorted_afDict[k] = sorted_qsDict[k]
elif foundKey:
del sorted_afDict[foundKey]
sorted_afDict[k] = val
logger.debug("!PV Match in questionnaire and archfile! Updating Alias: " + k + ", " + val)

sorted_afDict = dict(sorted(sorted_afDict.items()))
updated_arch_list = [x for item in sorted_afDict.items() for x in item]
logger.debug("\nUpdated Arch List:\n")
logger.debug(updated_arch_list)
return updated_arch_list
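
A small worked example of the merge behaviour described above; the aliases and PVs are made up (the real lists come from get_questionnaire_data and read_archfile, alternating alias, PV, alias, PV, ...):

qs_data = ['* sample_x\n', 'XPP:USR:MMS:01\n', '* delay_stage\n', 'XPP:USR:MMS:02\n']
af_data = ['* sample_x\n', 'XPP:USR:MMS:99\n', '* delay\n', 'XPP:USR:MMS:02\n']

updated = check_for_duplicates(qs_data, af_data)
# '*sample_x' matches on alias, so its PV is refreshed to XPP:USR:MMS:01;
# 'XPP:USR:MMS:02' matches on PV, so its alias is renamed to '*delay_stage'.
# updated == ['*delay_stage', 'XPP:USR:MMS:02', '*sample_x', 'XPP:USR:MMS:01']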


def read_archfile(exp_path):
if os.path.exists(exp_path):
with open(exp_path, "r") as experiment:
lines = experiment.readlines()
return lines


def print_dry_run(exp_name):
"""
Print to stdout the data that would be stored in the epicsArch file.
@@ -113,9 +350,29 @@ def print_dry_run(exp_name):
--------
>>> epicsarch-qs xpplv6818 --dry-run
"""
data = get_questionnaire_data(exp_name)
for item in data:
print(item)

qs_data = get_questionnaire_data(exp_name)

"""
Updating experiment file.
"""

af_path = EPICS_ARCH_FILE_PATH.format(exp_name[0:3]) + 'epicsArch_' + exp_name + '.txt'
if not os.path.exists(af_path):
raise OSError('print_dry_run, invalid path: %s' % af_path)
af_data = read_archfile(af_path)
updated_archFile = check_for_duplicates(qs_data, af_data)

for item in updated_archFile:
print(item)


def get_key(val, my_dict):
for k, v in my_dict.items():
if val == v:
return k
return None


def get_questionnaire_data(exp_name):
@@ -171,7 +428,7 @@ def get_items(exp_name):
return items


def create_file(exp_name, path):
def update_file(exp_name, path):
"""
Create or update a file with aliases and PVs from the questionnaire.

@@ -182,16 +439,36 @@ def create_file(exp_name, path):
path : str
Directory where to create the epicsArch file.
"""
data_list = get_questionnaire_data(exp_name)
if not os.path.exists(path):
raise OSError('Invalid path: %s' % path)
qs_data = get_questionnaire_data(exp_name)

path = str(path)
exp_name = str(exp_name)
file_path = ''.join((path, 'epicsArch_', exp_name, '.txt'))

logger.info('Creating epicsArch file for experiment: %s', exp_name)
logger.debug("UpdateFile: qs_data:\n" + str(qs_data))
logger.debug("\nPath: " + path)

af_path = path + "epicsArch_" + str(exp_name) + ".txt"

logger.debug("\nAF Path: " + af_path)

file_path = ''.join((path, 'epicsArch_', exp_name, '.txt'))
if not os.path.exists(path):
raise OSError('Invalid path: %s' % path)
# if the path exists but the archfile does not, create the archfile from the questionnaire data
elif os.path.exists(path) and not os.path.exists(af_path):
logger.debug("UpdateFile: Path is valid, creating archfile\n")
logger.debug('Creating epicsArch file for experiment: %s', exp_name)
cleaned_data = check_for_duplicates(qs_data, {})

# if the path and archfile both exist, update the archfile with the questionnaire data
elif os.path.exists(path) and os.path.exists(af_path):
logger.debug("UpdateFile: Path exists and archfile exists\n")
af_data = read_archfile(af_path)
cleaned_data = check_for_duplicates(qs_data, af_data)

# Write updates to the corresponding file
with open(file_path, 'w') as f:
for data in data_list:
for data in cleaned_data:
try:
f.write(f'{data}\n')
except OSError as ex: