diff --git a/Accelerators/GANs/gan.py b/Accelerators/GANs/gan.py
index 771f9744..58bc6d58 100644
--- a/Accelerators/GANs/gan.py
+++ b/Accelerators/GANs/gan.py
@@ -122,7 +122,7 @@ def train(output_path, max_epoch, to_restore):
for i in range(sess.run(global_step), max_epoch):
for j in range(int(60000 // batch_size)):
- print("epoch:%s, iter:%s" % (i, j))
+ print(f"epoch:{i}, iter:{j}")
x_value, _ = mnist.train.next_batch(batch_size)
x_value = 2 * x_value.astype(np.float32) - 1
z_value = generate_prior(batch_size, z_size)
diff --git a/C/Tree_2k/examples/visualization/visualization.py b/C/Tree_2k/examples/visualization/visualization.py
index c01be64a..d50cde67 100755
--- a/C/Tree_2k/examples/visualization/visualization.py
+++ b/C/Tree_2k/examples/visualization/visualization.py
@@ -13,7 +13,7 @@ def compute_segments(line):
if len(data) != 4:
print("### error: can only plot 2D data", file=sys.stderr)
sys.exit(1)
- center = data[0:2]
+ center = data[:2]
extent = data[2:4]
ur = (center[0] + extent[0], center[1] + extent[1])
ul = (center[0] - extent[0], center[1] + extent[1])
diff --git a/CPlusPlus/Tbb/Tree/format_timings.py b/CPlusPlus/Tbb/Tree/format_timings.py
index f19a9696..d9544abd 100755
--- a/CPlusPlus/Tbb/Tree/format_timings.py
+++ b/CPlusPlus/Tbb/Tree/format_timings.py
@@ -17,10 +17,10 @@
for line in file:
match = re.match(r'^time: ([^s]+)\s+s$', line)
if match is not None:
- time_avg = float(match.group(1))
+ time_avg = float(match[1])
nr_timings += 1
continue
match = re.match('^procs: (\d+)$', line)
if match is not None:
- nr_procs = int(match.group(1))
+ nr_procs = int(match[1])
print('{0:d} {1:.6f}'.format(nr_procs, time_avg/nr_timings))
diff --git a/DataStorage/NetCDF/PythonSamples/write_netcdf.py b/DataStorage/NetCDF/PythonSamples/write_netcdf.py
index 5e7167d4..93ed4339 100644
--- a/DataStorage/NetCDF/PythonSamples/write_netcdf.py
+++ b/DataStorage/NetCDF/PythonSamples/write_netcdf.py
@@ -17,10 +17,7 @@
arg_parser.add_argument('-v', dest='version', choices=['3', '4'],
default='3', help='NetCDF version to create')
options = arg_parser.parse_args()
- if options.version == '3':
- version = 'NETCDF3_CLASSIC'
- else:
- version = 'NETCDF4'
+ version = 'NETCDF3_CLASSIC' if options.version == '3' else 'NETCDF4'
rootgrp = Dataset(options.file, 'w', format='NETCDF3_CLASSIC')
x_dim = rootgrp.createDimension('x', options.x)
y_dim = rootgrp.createDimension('y', options.y)
diff --git a/Debugging/Arithmetic/dna_generator.py b/Debugging/Arithmetic/dna_generator.py
index 136c5e87..69d75613 100755
--- a/Debugging/Arithmetic/dna_generator.py
+++ b/Debugging/Arithmetic/dna_generator.py
@@ -10,8 +10,7 @@ def available_nucl(nucl_left):
def get_nucl(nucl_left):
while True:
- avail_nucls = available_nucl(nucl_left)
- if avail_nucls:
+ if avail_nucls := available_nucl(nucl_left):
nucl = random.choice(avail_nucls)
nucl_left[nucl] -= 1
yield nucl
diff --git a/Debugging/CompilerFlags/FloatEqual/on_circle_driver.py b/Debugging/CompilerFlags/FloatEqual/on_circle_driver.py
index 99741542..db237002 100755
--- a/Debugging/CompilerFlags/FloatEqual/on_circle_driver.py
+++ b/Debugging/CompilerFlags/FloatEqual/on_circle_driver.py
@@ -13,8 +13,8 @@ def compute_y(x, radius):
[0.0, 0.0, 2.0, 1.0, compute_y(1.0, 2.0)],
]
+cmd = './on_circle_c.exe'
for case in cases:
- cmd = './on_circle_c.exe'
args = ['{0:.15f}'.format(x) for x in case]
print(cmd, ' '.join(args))
output = check_output([cmd, *args])
diff --git a/Python/Biopython/align_seqs.py b/Python/Biopython/align_seqs.py
index 2ab28564..8de5ea69 100755
--- a/Python/Biopython/align_seqs.py
+++ b/Python/Biopython/align_seqs.py
@@ -17,9 +17,10 @@
arg_parser.add_argument('--show', action='store_true',
help='show MUSCLE output')
options = arg_parser.parse_args()
- seqs = {}
- for seq_record in SeqIO.parse(options.file, options.format):
- seqs[seq_record.id] = seq_record.seq
+ seqs = {
+ seq_record.id: seq_record.seq
+ for seq_record in SeqIO.parse(options.file, options.format)
+ }
if options.alignment:
with open(options.alignment, 'r') as alignment_file:
stdout = alignment_file.read()
diff --git a/Python/Biopython/entrez_db_info.py b/Python/Biopython/entrez_db_info.py
index f89b8539..cf39179d 100755
--- a/Python/Biopython/entrez_db_info.py
+++ b/Python/Biopython/entrez_db_info.py
@@ -18,8 +18,8 @@
record = Entrez.read(handle)
db_info = record['DbInfo']
print(db_info['Description'])
- print('Count: {}'.format(db_info['Count']))
- print('Last update: {}'.format(db_info['LastUpdate']))
+ print(f"Count: {db_info['Count']}")
+ print(f"Last update: {db_info['LastUpdate']}")
if options.fields:
print('Fields:')
fmt_str = '{Name} ({FullName}): {Description}'
diff --git a/Python/Biopython/read_seq.py b/Python/Biopython/read_seq.py
index cd77d8cc..6651be84 100755
--- a/Python/Biopython/read_seq.py
+++ b/Python/Biopython/read_seq.py
@@ -23,9 +23,10 @@ def compute_stats(seq):
arg_parser.add_argument('file', help='sequence file to parse')
arg_parser.add_argument('--format', default='fasta', help='file format')
options = arg_parser.parse_args()
- seqs = {}
- for seq_record in SeqIO.parse(options.file, options.format):
- seqs[seq_record.id] = seq_record.seq
+ seqs = {
+ seq_record.id: seq_record.seq
+ for seq_record in SeqIO.parse(options.file, options.format)
+ }
fmt_str = ('id: {id}\n\t'
'length: {stats.length}\n\t'
'gc: {stats.gc}\n\t'
diff --git a/Python/Biopython/search_entrez.py b/Python/Biopython/search_entrez.py
index ff9cfaee..f5d006de 100755
--- a/Python/Biopython/search_entrez.py
+++ b/Python/Biopython/search_entrez.py
@@ -26,9 +26,9 @@
if options.summary:
handle = Entrez.esummary(db='nucleotide', id=seq_id)
summary = Entrez.read(handle)
- print('ID {}:'.format(seq_id))
- print(' {}'.format(summary[0]['Title']))
- print(' Updated: {}'.format(summary[0]['UpdateDate']))
+ print(f'ID {seq_id}:')
+ print(f" {summary[0]['Title']}")
+ print(f" Updated: {summary[0]['UpdateDate']}")
else:
file_name = os.path.join('Data', '{0}.gbk'.format(seq_id))
if not os.path.isfile(file_name):
diff --git a/Python/Biopython/seq_info.py b/Python/Biopython/seq_info.py
index c55c044a..7be2ee1b 100755
--- a/Python/Biopython/seq_info.py
+++ b/Python/Biopython/seq_info.py
@@ -5,7 +5,7 @@
import textwrap
def print_seq_record(seq_record, indent_with=' ', indent=''):
- print('Sequence ID: {}'.format(seq_record.id))
+ print(f'Sequence ID: {seq_record.id}')
indent += indent_with
print('{0}Name: {1}'.format(indent, seq_record.name))
print('{0}Description: {1}'.format(indent, seq_record.description))
diff --git a/Python/Birdsong/create_signal.py b/Python/Birdsong/create_signal.py
index 6cafc20b..4d5af92b 100755
--- a/Python/Birdsong/create_signal.py
+++ b/Python/Birdsong/create_signal.py
@@ -42,9 +42,7 @@ def normalize(signal, ampl):
arg_parser.add_argument('--out', action='store_true',
help='print signal to standard output')
options = arg_parser.parse_args()
- if options.specs_file:
- pass
- else:
+ if not options.specs_file:
if len(options.freqs) != len(options.ampls):
msg = '# error: {0:d} frequencies for {1:d} amplitudes\n'
sys.stderr.write(msg.format(len(options.freqs),
diff --git a/Python/CodeCoverage/functions.py b/Python/CodeCoverage/functions.py
index abcd32ed..ae31f6da 100644
--- a/Python/CodeCoverage/functions.py
+++ b/Python/CodeCoverage/functions.py
@@ -1,8 +1,5 @@
def fac_r(n):
- if n < 2:
- return 1
- else:
- return n*fac_r(n - 1)
+ return 1 if n < 2 else n*fac_r(n - 1)
def fac_i(n):
result = 1
diff --git a/Python/CodeEvaluation/fac.py b/Python/CodeEvaluation/fac.py
index 8c66954a..e4403275 100644
--- a/Python/CodeEvaluation/fac.py
+++ b/Python/CodeEvaluation/fac.py
@@ -1,5 +1,2 @@
def fac(n):
- if n < 2:
- return 1
- else:
- return n*fac(n-1)
+ return 1 if n < 2 else n*fac(n-1)
diff --git a/Python/CodeEvaluation/fib.py b/Python/CodeEvaluation/fib.py
index da4b04b2..0486ae2c 100644
--- a/Python/CodeEvaluation/fib.py
+++ b/Python/CodeEvaluation/fib.py
@@ -1,5 +1,2 @@
def fib(n):
- if n == 0 or n == 1:
- return 1
- else:
- return fib(n-1) + fib(n-2)
+ return 1 if n in [0, 1] else fib(n-1) + fib(n-2)
diff --git a/Python/CodeTesting/Asserts/fac.py b/Python/CodeTesting/Asserts/fac.py
index 7572aa64..9fde6b82 100755
--- a/Python/CodeTesting/Asserts/fac.py
+++ b/Python/CodeTesting/Asserts/fac.py
@@ -4,10 +4,7 @@ def fac(n):
'''compute the factorial of given number'''
assert type(n) == int, 'argument must be integer'
assert n >= 0, 'argument must be positive'
- if n > 1:
- return n*fac(n - 1)
- else:
- return 1
+ return n*fac(n - 1) if n > 1 else 1
if __name__ == '__main__':
for i in range(5):
diff --git a/Python/CodeTesting/constant_db.py b/Python/CodeTesting/constant_db.py
index 18b595ac..f3db0c8c 100644
--- a/Python/CodeTesting/constant_db.py
+++ b/Python/CodeTesting/constant_db.py
@@ -41,11 +41,10 @@ def get_value(self, name):
WHERE name = ?''', (name, ))
rows = cursor.fetchall()
cursor.close()
- if not rows:
- msg = "constant '{0}' is undefined".format(name)
- raise UnknownConstantError(msg)
- else:
+ if rows:
return rows[0][0]
+ msg = "constant '{0}' is undefined".format(name)
+ raise UnknownConstantError(msg)
def get_names(self):
cursor = self._conn.cursor()
diff --git a/Python/CommandLineArgs/ArgParse/partial_parse.py b/Python/CommandLineArgs/ArgParse/partial_parse.py
index f9af4252..723e74ec 100755
--- a/Python/CommandLineArgs/ArgParse/partial_parse.py
+++ b/Python/CommandLineArgs/ArgParse/partial_parse.py
@@ -19,5 +19,5 @@
options.resoure_specs))
print('resources: ' + ', '.join(specs))
if options.account:
- print('account: ' + options.account)
+ print(f'account: {options.account}')
print('unparsed: ' + ', '.join(unparsed))
diff --git a/Python/CommandLineArgs/ArgParse/two_stage_parse.py b/Python/CommandLineArgs/ArgParse/two_stage_parse.py
index 3ff15330..5600692f 100755
--- a/Python/CommandLineArgs/ArgParse/two_stage_parse.py
+++ b/Python/CommandLineArgs/ArgParse/two_stage_parse.py
@@ -7,7 +7,7 @@
def parse_job_script(file_name):
- args = list()
+ args = []
with open(file_name) as file:
for line in file:
if line.lstrip().startswith('#PBS '):
diff --git a/Python/ConfigParser/config_reader.py b/Python/ConfigParser/config_reader.py
index 1ac31be4..af46e602 100755
--- a/Python/ConfigParser/config_reader.py
+++ b/Python/ConfigParser/config_reader.py
@@ -5,10 +5,7 @@
def main():
- if len(sys.argv) > 1:
- cfg_file = sys.argv[1]
- else:
- cfg_file = 'defaults.conf'
+ cfg_file = sys.argv[1] if len(sys.argv) > 1 else 'defaults.conf'
cfg_parser = SafeConfigParser()
cfg_parser.read(cfg_file)
print('Sections:')
diff --git a/Python/ContextManager/context.py b/Python/ContextManager/context.py
index 0c4b0f41..84e99c34 100755
--- a/Python/ContextManager/context.py
+++ b/Python/ContextManager/context.py
@@ -37,10 +37,9 @@ def main():
print('in context {0}'.format(c2._context_nr))
with label('foo') as foo, label('bar') as bar:
print(foo, bar)
- with ContextTest(1) as c1, ContextTest(2) as c2:
+ with (ContextTest(1) as c1, ContextTest(2) as c2):
print('in context {0}'.format(c1._context_nr))
raise Exception()
- print('in context {0}'.format(c2._context_nr))
return 0
if __name__ == '__main__':
diff --git a/Python/Coroutines/stats.py b/Python/Coroutines/stats.py
index d8cc3815..7510e7cf 100755
--- a/Python/Coroutines/stats.py
+++ b/Python/Coroutines/stats.py
@@ -26,15 +26,12 @@ def process_line(line, mode=float):
if not line:
return None
items = [item.strip() for item in line.split(',')]
- if mode == float:
- return [float(item) for item in items]
- else:
- return items
+ return [float(item) for item in items] if mode == float else items
if __name__ == '__main__':
line = sys.stdin.readline()
names = process_line(line, mode='text')
- averagers = [stats() for name in names]
+ averagers = [stats() for _ in names]
for averager in averagers:
next(averager)
for line in sys.stdin:
diff --git a/Python/Cython/Numpy/compute_sums.py b/Python/Cython/Numpy/compute_sums.py
index b5a56693..3f411bdd 100755
--- a/Python/Cython/Numpy/compute_sums.py
+++ b/Python/Cython/Numpy/compute_sums.py
@@ -25,7 +25,7 @@ def py_sum(a):
for func in [array_sum, np.sum, py_sum]:
total = 0.0
start_time = timeit.default_timer()
- for iter_nr in range(options.iter):
+ for _ in range(options.iter):
total += func(a)
total_time = timeit.default_timer() - start_time
print('{0:s}: {1:.6f} s ({2})'.format(func.__name__, total_time,
diff --git a/Python/Cython/Primes/primes_vanilla.py b/Python/Cython/Primes/primes_vanilla.py
index 9907294a..04b94d91 100644
--- a/Python/Cython/Primes/primes_vanilla.py
+++ b/Python/Cython/Primes/primes_vanilla.py
@@ -4,17 +4,16 @@
def primes(kmax):
p = array('i', [0]*1000)
result = []
- if kmax > 1000:
- kmax = 1000
+ kmax = min(kmax, 1000)
k = 0
n = 2
while k < kmax:
i = 0
while i < k and n % p[i] != 0:
- i = i + 1
+ i += 1
if i == k:
p[k] = n
- k = k + 1
+ k += 1
result.append(n)
n = n + 1
return result
diff --git a/Python/Dask/create_csv_data.py b/Python/Dask/create_csv_data.py
index b3af9524..18c51ec3 100755
--- a/Python/Dask/create_csv_data.py
+++ b/Python/Dask/create_csv_data.py
@@ -10,8 +10,10 @@
def write_file(args):
file_name, rows, curr_time, delta_time, curr_vals, delta_val = args
- fieldnames = ['timestamp']
- fieldnames.extend(['C{0:d}'.format(i + 1) for i in range(len(curr_vals))])
+ fieldnames = [
+ 'timestamp',
+ *['C{0:d}'.format(i + 1) for i in range(len(curr_vals))],
+ ]
with open(file_name, 'w', newline='') as csv_file:
writer = csv.DictWriter(csv_file, fieldnames=fieldnames)
writer.writeheader()
diff --git a/Python/Dask/dask_distr_test.py b/Python/Dask/dask_distr_test.py
index f34bae15..949f81ee 100755
--- a/Python/Dask/dask_distr_test.py
+++ b/Python/Dask/dask_distr_test.py
@@ -39,7 +39,7 @@ def get_hostname(i):
if options.verbose:
print('task placement:')
print('\t' + '\n\t'.join(process_locations))
- count = dict()
+ count = {}
for process_location in process_locations:
_, _, hostname = process_location.split()
if hostname not in count:
diff --git a/Python/DataFormats/Vcd/vcd_parser.py b/Python/DataFormats/Vcd/vcd_parser.py
index 4f874fc0..8dba30e6 100755
--- a/Python/DataFormats/Vcd/vcd_parser.py
+++ b/Python/DataFormats/Vcd/vcd_parser.py
@@ -15,7 +15,7 @@ def parse_config_line(meta_data, line):
meta_data[symbol] = demangle_name(name)
def parse_config(vcd_file):
- meta_data = dict()
+ meta_data = {}
for line in vcd_file:
line = line.strip()
if line == '$end':
@@ -37,16 +37,15 @@ def update_buffer(buffer, line, meta_data):
buffer[key] = value
def init_data(meta_data):
- data = dict()
- data['time'] = list()
+ data = {'time': []}
for var in meta_data:
- data[meta_data[var]] = list()
+ data[meta_data[var]] = []
return data
def parse_data(vcd_file, meta_data):
data = init_data(meta_data)
time_stamp = None
- buffer = dict()
+ buffer = {}
for line in vcd_file:
line = line.strip()
if line.startswith('#'):
@@ -68,9 +67,7 @@ def write_vcd_data_structure(out_file, data, sep=' '):
columns = list(data.keys())
out_file.write(sep.join(columns) + '\n')
for time_step in range(len(data['time'])):
- data_line = list()
- for var in columns:
- data_line.append(data[var][time_step])
+ data_line = [data[var][time_step] for var in columns]
out_file.write(sep.join(str(data_item) for data_item in data_line))
out_file.write('\n')
diff --git a/Python/DataFormats/agt_parser.py b/Python/DataFormats/agt_parser.py
index 7c396dd6..d3336fae 100755
--- a/Python/DataFormats/agt_parser.py
+++ b/Python/DataFormats/agt_parser.py
@@ -106,7 +106,7 @@ def _parse_data(self, agt_file):
if not match:
msg = "line {0:d}: invalid number of measurements '{1}'"
raise AgtDataError(msg.format(self._current_line, nr_lines_str))
- nr_lines = int(match.group(1))
+ nr_lines = int(match[1])
self._current_line += 1
# ignore header line
agt_file.readline()
diff --git a/Python/DataFormats/data_generator.py b/Python/DataFormats/data_generator.py
index 59288602..06c00092 100755
--- a/Python/DataFormats/data_generator.py
+++ b/Python/DataFormats/data_generator.py
@@ -47,11 +47,10 @@ def __iter__(self):
return self
def __next__(self):
- if self._current < self.n:
- self._current += 1
- return self._distr(*self._params)
- else:
+ if self._current >= self.n:
raise StopIteration()
+ self._current += 1
+ return self._distr(*self._params)
class DistributionCreator(object):
@@ -108,9 +107,9 @@ def __init__(self, file_name, table_name, col_defs):
self._row = self._table.row
def _create_table(self, table_name, col_defs):
- description = {}
- for col_def in col_defs:
- description[col_def['name']] = self._typemap[col_def['type']]
+ description = {
+ col_def['name']: self._typemap[col_def['type']] for col_def in col_defs
+ }
return self._file.create_table('/', table_name, description)
def set_headers(self, headers):
diff --git a/Python/DataFormats/read_csv.py b/Python/DataFormats/read_csv.py
index b14ab739..3989bf8a 100755
--- a/Python/DataFormats/read_csv.py
+++ b/Python/DataFormats/read_csv.py
@@ -21,7 +21,7 @@ def main():
print('{name} --- {weight}'.format(name=row['name'],
weight=row['weight']))
sum += float(row['weight'])
- print('sum = {}'.format(sum))
+ print(f'sum = {sum}')
if __name__ == '__main__':
main()
diff --git a/Python/DataFormats/read_variable_length_array.py b/Python/DataFormats/read_variable_length_array.py
index a84ea9ff..c3ba07a6 100755
--- a/Python/DataFormats/read_variable_length_array.py
+++ b/Python/DataFormats/read_variable_length_array.py
@@ -13,10 +13,8 @@ def read_array(data_file, length):
arg_parser.add_argument('file', help='binary file to read')
options = arg_parser.parse_args()
with open(options.file, 'rb') as data_file:
- buffer = data_file.read(4);
- while buffer:
+ while buffer := data_file.read(4):
length = unpack('I', buffer)[0]
values = read_array(data_file, length)
value_str = ' '.join(f'{x:.2f}' for x in values)
print(f'{length:d}: {value_str:s}')
- buffer = data_file.read(4)
diff --git a/Python/DataFormats/read_xml.py b/Python/DataFormats/read_xml.py
index 12547614..6ea3d56a 100755
--- a/Python/DataFormats/read_xml.py
+++ b/Python/DataFormats/read_xml.py
@@ -55,9 +55,7 @@ def startDocument(self):
def startElement(self, name, attrs):
if name == 'block':
logging.info('start of {0}'.format(attrs.getValue('name')))
- parent_name = ''
- if self._stack:
- parent_name = self._stack[-1].name + '/'
+ parent_name = f'{self._stack[-1].name}/' if self._stack else ''
block = Block(parent_name + attrs.getValue('name'))
self._stack.append(block)
elif name == 'item':
@@ -65,8 +63,7 @@ def startElement(self, name, attrs):
def characters(self, contents):
if self.in_item:
- contents = contents.strip()
- if contents:
+ if contents := contents.strip():
data = float(contents.strip())
logging.info("found '{0}'".format(data))
self._stack[-1].add_data(data)
diff --git a/Python/DataStructures/priority_queue.py b/Python/DataStructures/priority_queue.py
index e6f4a298..e1e983f9 100755
--- a/Python/DataStructures/priority_queue.py
+++ b/Python/DataStructures/priority_queue.py
@@ -7,10 +7,7 @@
def create_jobs():
names = ['beast', 'fluent', 'beast', 'gromacs', 'amrvac']
- jobs = []
- for name in names:
- jobs.append((random.random(), name))
- return jobs
+ return [(random.random(), name) for name in names]
if __name__ == '__main__':
random.seed(143545)
diff --git a/Python/DbAccess/ExperimentDB/shell.py b/Python/DbAccess/ExperimentDB/shell.py
index 89139ed8..66d9b2a4 100755
--- a/Python/DbAccess/ExperimentDB/shell.py
+++ b/Python/DbAccess/ExperimentDB/shell.py
@@ -35,7 +35,7 @@ def parse_show_arg(arg_str):
'samples': Sample
}
args = shlex.split(arg_str)
- if len(args) != 1 and len(args) != 3:
+ if len(args) not in [1, 3]:
msg = 'Expecting show <class> [for <id>]'
raise SyntaxException(msg)
cls_str = args[0]
@@ -68,19 +68,18 @@ def do_show(self, arg_str):
return
if not item_id:
items = self._db_session.query(cls).all()
- else:
- if cls == Experiment:
- items = self._db_session.\
+ elif cls == Experiment:
+ items = self._db_session.\
query(Experiment).\
join(staff_assignments).\
filter_by(researcher_id=item_id).all()
- elif cls == Researcher:
- items = self._db_session.\
+ elif cls == Researcher:
+ items = self._db_session.\
query(Researcher).\
join(staff_assignments).\
filter_by(experiment_id=item_id).all()
- elif cls == Sample:
- items = self._db_session.\
+ elif cls == Sample:
+ items = self._db_session.\
query(Sample).\
filter_by(experiment_id=item_id).all()
for item in items:
diff --git a/Python/DbAccess/ExperimentDB/view.py b/Python/DbAccess/ExperimentDB/view.py
index 2122e012..ad617f49 100755
--- a/Python/DbAccess/ExperimentDB/view.py
+++ b/Python/DbAccess/ExperimentDB/view.py
@@ -17,7 +17,7 @@
print('\t{0}, {1}'.format(researcher.last_name,
researcher.first_name))
for sample in experiment.samples:
- print('\t{}'.format(sample.description))
+ print(f'\t{sample.description}')
samples = db_session.query(Sample).all()
for sample in samples:
print(sample.description)
@@ -26,4 +26,4 @@
for researcher in researchers:
print(researcher)
for experiment in researcher.experiments:
- print('\t{}'.format(experiment.description))
+ print(f'\t{experiment.description}')
diff --git a/Python/DbAccess/StraightSql/fill_db.py b/Python/DbAccess/StraightSql/fill_db.py
index 8761feba..8b6e1af3 100755
--- a/Python/DbAccess/StraightSql/fill_db.py
+++ b/Python/DbAccess/StraightSql/fill_db.py
@@ -8,11 +8,10 @@
def generate_city_codes(nr_cities, code_length=4):
- cities = []
- for city_nr in range(nr_cities):
- cities.append(''.join([random.choice(string.letters)
- for i in range(code_length)]))
- return cities
+ return [
+ ''.join([random.choice(string.letters) for _ in range(code_length)])
+ for _ in range(nr_cities)
+ ]
def convert_date(date_str):
diff --git a/Python/Decorators/decorator.py b/Python/Decorators/decorator.py
index d9adc3d9..4b8b483b 100755
--- a/Python/Decorators/decorator.py
+++ b/Python/Decorators/decorator.py
@@ -37,10 +37,7 @@ def wrapped(n):
@check_min
def fact(n):
'''compute factorial of given number'''
- if n == 0:
- return 1
- else:
- return n*fact(n - 1)
+ return 1 if n == 0 else n*fact(n - 1)
if __name__ == '__main__':
import sys
diff --git a/Python/Decorators/memoize.py b/Python/Decorators/memoize.py
index c825a0ba..2d791625 100755
--- a/Python/Decorators/memoize.py
+++ b/Python/Decorators/memoize.py
@@ -16,32 +16,21 @@ def wrapper(n):
@memoize
def fib_memoized(n):
- if n < 2:
- return 1
- else:
- return fib_memoized(n - 1) + fib_memoized(n - 2)
+ return 1 if n < 2 else fib_memoized(n - 1) + fib_memoized(n - 2)
@lru_cache(100)
def fib_lru_cache(n):
- if n < 2:
- return 1
- else:
- return fib_lru_cache(n - 1) + fib_lru_cache(n - 2)
+ return 1 if n < 2 else fib_lru_cache(n - 1) + fib_lru_cache(n - 2)
def fib(n):
- if n < 2:
- return 1
- else:
- return fib(n - 1) + fib(n - 2)
+ return 1 if n < 2 else fib(n - 1) + fib(n - 2)
def execute(func, n_max):
- values = []
start = datetime.now()
- for n in range(n_max):
- values.append(func(n))
+ values = [func(n) for n in range(n_max)]
delta = datetime.now() - start
for n in range(n_max):
print('{0}({1}) = {2}'.format(func.__name__, n, values[n]))
diff --git a/Python/Descriptors/typed_property.py b/Python/Descriptors/typed_property.py
index 4e38c5f2..948a19e0 100755
--- a/Python/Descriptors/typed_property.py
+++ b/Python/Descriptors/typed_property.py
@@ -4,7 +4,7 @@
class TypedProperty(object):
def __init__(self, name, type, default=None):
- self._name = '-' + name
+ self._name = f'-{name}'
self._type = type
self._default = default if default else type()
diff --git a/Python/Exercises/Colorize/colorize.py b/Python/Exercises/Colorize/colorize.py
index 86e167f1..fecf55e3 100755
--- a/Python/Exercises/Colorize/colorize.py
+++ b/Python/Exercises/Colorize/colorize.py
@@ -25,22 +25,21 @@ def colorize(text, style='OKGREEN'):
'''
import os
terminals = ['xterm', 'rxvt', 'color']
- if any(map(lambda x: x in os.environ['TERM'], terminals)):
- styles = {
- 'HEADER': '\033[95m',
- 'OKBLUE': '\033[94m',
- 'OKGREEN': '\033[92m',
- 'WARNING': '\033[93m',
- 'FAIL': '\033[91m',
- 'ENDC': '\033[0m',
- 'BOLD': '\033[1m',
- 'UNDERLINE': '\033[4m',
- }
- if style not in styles:
- raise ValueError('unknown style {}'.format(style))
- return styles[style] + text + styles['ENDC']
- else:
+ if not any(map(lambda x: x in os.environ['TERM'], terminals)):
return text
+ styles = {
+ 'HEADER': '\033[95m',
+ 'OKBLUE': '\033[94m',
+ 'OKGREEN': '\033[92m',
+ 'WARNING': '\033[93m',
+ 'FAIL': '\033[91m',
+ 'ENDC': '\033[0m',
+ 'BOLD': '\033[1m',
+ 'UNDERLINE': '\033[4m',
+ }
+ if style not in styles:
+ raise ValueError(f'unknown style {style}')
+ return styles[style] + text + styles['ENDC']
if __name__ == '__main__':
diff --git a/Python/Exercises/ComputingStatistics/stats-1.0.py b/Python/Exercises/ComputingStatistics/stats-1.0.py
index 2159e37f..fe468985 100755
--- a/Python/Exercises/ComputingStatistics/stats-1.0.py
+++ b/Python/Exercises/ComputingStatistics/stats-1.0.py
@@ -6,13 +6,11 @@
def main():
names_str = sys.stdin.readline().rstrip()
- names = []
- for name in names_str.split(','):
- names.append(name.strip())
+ names = [name.strip() for name in names_str.split(',')]
nr_cols = len(names)
sums = []
sums2 = []
- for i in range(nr_cols):
+ for _ in range(nr_cols):
sums.append(0.0)
sums2.append(0.0)
nr_rows = 0
diff --git a/Python/Exercises/ConcatFiles/concat_files.py b/Python/Exercises/ConcatFiles/concat_files.py
index 03e99dcd..a4ffe5fd 100755
--- a/Python/Exercises/ConcatFiles/concat_files.py
+++ b/Python/Exercises/ConcatFiles/concat_files.py
@@ -22,9 +22,8 @@ def main():
if not is_header_written:
output_file.write(line)
is_header_written = True
- else:
- if line.strip():
- output_file.write(line)
+ elif line.strip():
+ output_file.write(line)
return 0
if __name__ == '__main__':
diff --git a/Python/Exercises/ConvertingTime/seconds2time.py b/Python/Exercises/ConvertingTime/seconds2time.py
index 934f417e..584848ec 100755
--- a/Python/Exercises/ConvertingTime/seconds2time.py
+++ b/Python/Exercises/ConvertingTime/seconds2time.py
@@ -10,7 +10,7 @@ def main():
idx = 0
while total_seconds > 0:
parts[idx] = total_seconds//units[idx]
- total_seconds = total_seconds % units[idx]
+ total_seconds %= units[idx]
idx += 1
print(':'.join('{:02d}'.format(x) for x in parts))
return 0
diff --git a/Python/Exercises/ConvertingTime/time2seconds.py b/Python/Exercises/ConvertingTime/time2seconds.py
index 61d282ec..5d2163f2 100755
--- a/Python/Exercises/ConvertingTime/time2seconds.py
+++ b/Python/Exercises/ConvertingTime/time2seconds.py
@@ -7,7 +7,7 @@ def main():
units = [1, 60, 3600]
parts = [int(x) for x in sys.argv[1].split(':')]
parts.reverse()
- result = sum([x*y for x, y in zip(parts, units)])
+ result = sum(x*y for x, y in zip(parts, units))
print(result)
return 0
diff --git a/Python/Exercises/DomainSizes/count-domains.py b/Python/Exercises/DomainSizes/count-domains.py
index d9324388..280b9b7f 100755
--- a/Python/Exercises/DomainSizes/count-domains.py
+++ b/Python/Exercises/DomainSizes/count-domains.py
@@ -4,19 +4,16 @@
def read_lattice():
- lattice = []
- for line in sys.stdin:
- lattice.append(line.strip())
- return lattice
+ return [line.strip() for line in sys.stdin]
def compute_neighbouts(coord, size):
neighbours = []
- if coord[0] - 1 >= 0:
+ if coord[0] >= 1:
neighbours.append((coord[0] - 1, coord[1]))
if coord[0] + 1 < size:
neighbours.append((coord[0] + 1, coord[1]))
- if coord[1] - 1 >= 0:
+ if coord[1] >= 1:
neighbours.append((coord[0], coord[1] - 1))
if coord[1] + 1 < size:
neighbours.append((coord[0], coord[1] + 1))
@@ -26,7 +23,7 @@ def compute_neighbouts(coord, size):
def find_domain(lattice, domains, cd, domain_nr):
queue = [cd]
domains[cd[0]][cd[1]] = domain_nr
- while len(queue) > 0:
+ while queue:
cd = queue.pop()
for nb in compute_neighbouts(cd, len(lattice)):
if (domains[nb[0]][nb[1]] == -1 and
@@ -36,9 +33,7 @@ def find_domain(lattice, domains, cd, domain_nr):
def identify_domains(lattice):
- domains = []
- for _ in range(len(lattice)):
- domains.append([-1] * len(lattice))
+ domains = [[-1] * len(lattice) for _ in range(len(lattice))]
domain_nr = 0
for i in range(len(lattice)):
for j in range(len(lattice)):
diff --git a/Python/Exercises/DomainSizes/generate_domain_file.py b/Python/Exercises/DomainSizes/generate_domain_file.py
index bf3ffd45..851efdfb 100755
--- a/Python/Exercises/DomainSizes/generate_domain_file.py
+++ b/Python/Exercises/DomainSizes/generate_domain_file.py
@@ -5,10 +5,7 @@
def main():
- if len(sys.argv) > 1:
- n = int(sys.argv[1])
- else:
- n = 10
+ n = int(sys.argv[1]) if len(sys.argv) > 1 else 10
for _ in range(n):
for _ in range(n):
c = random.choice(['0', '1'])
diff --git a/Python/Exercises/Fibonacci/fib.py b/Python/Exercises/Fibonacci/fib.py
index 2cfa7a88..d14133b5 100755
--- a/Python/Exercises/Fibonacci/fib.py
+++ b/Python/Exercises/Fibonacci/fib.py
@@ -8,7 +8,7 @@ def all_fibonacci():
n_2 = 1
n_1 = 1
for n in itertools.count(1):
- if n == 1 or n == 2:
+ if n in [1, 2]:
yield 1
else:
value = n_1 + n_2
@@ -19,9 +19,7 @@ def all_fibonacci():
def main():
nr_fib = 0
- max_nr_fib = None
- if len(sys.argv) > 1:
- max_nr_fib = int(sys.argv[1])
+ max_nr_fib = int(sys.argv[1]) if len(sys.argv) > 1 else None
for n in all_fibonacci():
if max_nr_fib and nr_fib >= max_nr_fib:
break
diff --git a/Python/Exercises/HumanTime/human_time2seconds.py b/Python/Exercises/HumanTime/human_time2seconds.py
index 837ab790..e3a488eb 100755
--- a/Python/Exercises/HumanTime/human_time2seconds.py
+++ b/Python/Exercises/HumanTime/human_time2seconds.py
@@ -12,12 +12,12 @@ def convert2seconds(time_str):
match = re.match(h_expr + r'\s*' + m_expr + r'\s*' + s_expr + r'\s*$',
time_str, re.I)
if match is not None:
- if match.group(1) is not None:
- hours = int(match.group(1))
- if match.group(2) is not None:
- minutes = int(match.group(2))
- if match.group(3) is not None:
- seconds = int(match.group(3))
+ if match[1] is not None:
+ hours = int(match[1])
+ if match[2] is not None:
+ minutes = int(match[2])
+ if match[3] is not None:
+ seconds = int(match[3])
else:
sys.stderr.write('### error: invalid time string\n')
sys.exit(2)
diff --git a/Python/Exercises/Humanize/dehumanize.py b/Python/Exercises/Humanize/dehumanize.py
index 3c2949cb..1da75946 100755
--- a/Python/Exercises/Humanize/dehumanize.py
+++ b/Python/Exercises/Humanize/dehumanize.py
@@ -43,9 +43,24 @@ def dehumanize(str_value, base=10, digits=1):
'1024.00'
'''
import re
- if base != 2 and base != 10:
+ if base not in [2, 10]:
raise ValueError('base should be 2 or 10, not {:d}'.format(base))
thousands = 10**3 if base == 10 else 2**10
+ if not (
+ match := re.match(
+ r'^\s*((?:\+|-)?(?:\d+\.?\d*)|(?:\d*\.\d+))\s*([ \w]*)$', str_value
+ )
+ ):
+ raise ValueError("'{0:s}' is not a valid quantity".format(str_value))
+ n = float(match.group(1))
+ if match.group(2):
+ order = match.group(2)[0].lower()
+ units = match.group(2)[1:] if len(match.group(2)) > 1 else ''
+ if units and not units.startswith(' '):
+ units = f' {units}'
+ else:
+ order, units = '', ''
+ fmt_str = '{{0:.{}f}}{{1:s}}'.format(digits)
orders = {
'': 0,
'k': 1,
@@ -54,21 +69,7 @@ def dehumanize(str_value, base=10, digits=1):
't': 4,
'p': 5,
}
- match = re.match(r'^\s*((?:\+|-)?(?:\d+\.?\d*)|(?:\d*\.\d+))\s*([ \w]*)$',
- str_value)
- if match:
- n = float(match.group(1))
- if match.group(2):
- order = match.group(2)[0].lower()
- units = match.group(2)[1:] if len(match.group(2)) > 1 else ''
- if units and not units.startswith(' '):
- units = ' ' + units
- else:
- order, units = '', ''
- fmt_str = '{{0:.{}f}}{{1:s}}'.format(digits)
- return fmt_str.format(n*thousands**orders[order], units)
- else:
- raise ValueError("'{0:s}' is not a valid quantity".format(str_value))
+ return fmt_str.format(n*thousands**orders[order], units)
if __name__ == '__main__':
diff --git a/Python/Exercises/Humanize/humanize.py b/Python/Exercises/Humanize/humanize.py
index 1fd3b3ac..808f7a09 100755
--- a/Python/Exercises/Humanize/humanize.py
+++ b/Python/Exercises/Humanize/humanize.py
@@ -42,7 +42,7 @@ def humanize(n, base=10, digits=1, unit=''):
'12.3 m'
'''
import math
- if base != 2 and base != 10:
+ if base not in [2, 10]:
raise ValueError('base should be 2 or 10, not {:d}'.format(base))
thousands = 3 if base == 10 else 10
orders = {
diff --git a/Python/Exercises/Phonenumbers/mapper-1.0.py b/Python/Exercises/Phonenumbers/mapper-1.0.py
index 5c36614e..d5cd6a63 100755
--- a/Python/Exercises/Phonenumbers/mapper-1.0.py
+++ b/Python/Exercises/Phonenumbers/mapper-1.0.py
@@ -31,23 +31,17 @@ def map(self, phone_number):
words = ['']
for digit in phone_number:
words = self.extend(words, digit)
- real_words = []
- for word_candidate in words:
- if self.is_word(word_candidate):
- real_words.append(word_candidate)
- return real_words
+ return [
+ word_candidate
+ for word_candidate in words
+ if self.is_word(word_candidate)
+ ]
def is_prefix(self, word_prefix):
- for word in self._dictionary:
- if word.startswith(word_prefix):
- return True
- return False
+ return any(word.startswith(word_prefix) for word in self._dictionary)
def is_word(self, word_candidate):
- for word in self._dictionary:
- if word == word_candidate:
- return True
- return False
+ return any(word == word_candidate for word in self._dictionary)
def extend(self, words, digit):
new_words = []
diff --git a/Python/Exercises/Phonenumbers/mapper-2.0.py b/Python/Exercises/Phonenumbers/mapper-2.0.py
index e8aafc73..1586335c 100755
--- a/Python/Exercises/Phonenumbers/mapper-2.0.py
+++ b/Python/Exercises/Phonenumbers/mapper-2.0.py
@@ -21,7 +21,7 @@ def __init__(self, dict_file_name):
def read_dict(self, dict_file_name):
dictionary = []
with open(dict_file_name) as dict_file:
- for line in dict_file.readlines():
+ for line in dict_file:
word = line.strip()
if word.isalpha():
dictionary.append(word.lower())
diff --git a/Python/Exercises/Phonenumbers/mapper-3.0.py b/Python/Exercises/Phonenumbers/mapper-3.0.py
index 7f88444f..9f9884ce 100755
--- a/Python/Exercises/Phonenumbers/mapper-3.0.py
+++ b/Python/Exercises/Phonenumbers/mapper-3.0.py
@@ -24,23 +24,16 @@ def __init__(self, dict_file_name):
def read_dict(self, dict_file_name):
dictionary = []
with open(dict_file_name) as dict_file:
- for line in dict_file.readlines():
+ for line in dict_file:
word = line.strip()
if word.isalpha():
dictionary.append(word.lower())
return dictionary
def map(self, phone_number):
- regex_str = r''
- for digit in phone_number:
- regex_str += self._mapping[digit]
- regex_str += r'\w*$'
+ regex_str = r''.join(self._mapping[digit] for digit in phone_number) + r'\w*$'
regex = re.compile(regex_str)
- words = []
- for word in self._dictionary:
- if regex.match(word) is not None:
- words.append(word)
- return words
+ return [word for word in self._dictionary if regex.match(word) is not None]
def main():
diff --git a/Python/Exercises/Scramble/scramble.py b/Python/Exercises/Scramble/scramble.py
index 20482c32..5fbfbda9 100755
--- a/Python/Exercises/Scramble/scramble.py
+++ b/Python/Exercises/Scramble/scramble.py
@@ -7,8 +7,7 @@
def tokenize(line):
- tokens = re.split(r'([A-Za-z]+)', line)
- return tokens
+ return re.split(r'([A-Za-z]+)', line)
def neighbour_positions(length, nr_swaps):
diff --git a/Python/Exercises/SplitFile/split_file.py b/Python/Exercises/SplitFile/split_file.py
index 3191771b..34bd402d 100755
--- a/Python/Exercises/SplitFile/split_file.py
+++ b/Python/Exercises/SplitFile/split_file.py
@@ -15,8 +15,7 @@ def main():
continue
match = re.match(r'\s*size\s*=\s*(\d+)', line)
if match is not None:
- out_file = open('{0}_{1}.txt'.format(out_file_name_prefix,
- match.group(1)), 'w')
+ out_file = open('{0}_{1}.txt'.format(out_file_name_prefix, match[1]), 'w')
continue
if out_file is None:
sys.stderr.write('### error: no size in file before first data\n')
diff --git a/Python/Exercises/TreeChecker/check_tree_1.0.py b/Python/Exercises/TreeChecker/check_tree_1.0.py
index e8bbe9a1..31a6685a 100755
--- a/Python/Exercises/TreeChecker/check_tree_1.0.py
+++ b/Python/Exercises/TreeChecker/check_tree_1.0.py
@@ -16,18 +16,15 @@ def is_tree_ok(tree):
break
if nr_open_brackets == 0 and nr_characters < len(tree):
return False
- if nr_open_brackets != 0:
- return False
- return True
+ return nr_open_brackets == 0
def main():
tree = ''.join([line.strip() for line in sys.stdin.readlines()])
- if not is_tree_ok(tree):
- print('### error in tree')
- return 1
- else:
+ if is_tree_ok(tree):
return 0
+ print('### error in tree')
+ return 1
if __name__ == '__main__':
status = main()
diff --git a/Python/Exercises/TreeChecker/check_tree_2.0.py b/Python/Exercises/TreeChecker/check_tree_2.0.py
index 857bec02..6cc32d8f 100755
--- a/Python/Exercises/TreeChecker/check_tree_2.0.py
+++ b/Python/Exercises/TreeChecker/check_tree_2.0.py
@@ -51,10 +51,10 @@ def check_tree(tree):
bracket_positions.pop()
else:
raise MissingLBError(position)
- if len(bracket_positions) == 0:
+ if not bracket_positions:
break
position += 1
- if len(bracket_positions) == 0 and position < len(tree) - 1:
+ if not bracket_positions and position < len(tree) - 1:
raise TrailingCharsError(position + 1)
elif len(bracket_positions) > 0:
raise MissingRBError(bracket_positions.pop())
diff --git a/Python/Exercises/UnusedExercises/Downsampling/scale.py b/Python/Exercises/UnusedExercises/Downsampling/scale.py
index 41db6322..070fa1a5 100755
--- a/Python/Exercises/UnusedExercises/Downsampling/scale.py
+++ b/Python/Exercises/UnusedExercises/Downsampling/scale.py
@@ -14,17 +14,14 @@ def read_info(img_file):
def get_properties(img_file):
- prop = {}
- prop['type'] = read_info(img_file)
+ prop = {'type': read_info(img_file)}
prop['width'] = int(read_info(img_file))
prop['height'] = int(read_info(img_file))
return prop
def get_image(img_file, width, height):
- img = []
- for _ in range(height):
- img.append([0] * width)
+ img = [[0] * width for _ in range(height)]
bytes = width//8
if width % 8 != 0:
bytes += 1
@@ -43,9 +40,7 @@ def get_image(img_file, width, height):
def compute_black_fraction(img):
- nr_blacks = 0
- for row in img:
- nr_blacks += sum(row)
+ nr_blacks = sum(sum(row) for row in img)
return nr_blacks/(len(img)*len(img[0]))
@@ -78,9 +73,7 @@ def scale_image(img, threshold=2):
height = len(img)
new_width = width//2
new_height = height//2
- new_img = []
- for _ in range(new_height):
- new_img.append([0] * new_width)
+ new_img = [[0] * new_width for _ in range(new_height)]
for i in range(new_height):
for j in range(new_width):
avg = (img[2*i][2*j] + img[2*i][2*j + 1] +
diff --git a/Python/FiniteStateParser/basic_fs_parser.py b/Python/FiniteStateParser/basic_fs_parser.py
index 3f5dc277..094ba9b8 100755
--- a/Python/FiniteStateParser/basic_fs_parser.py
+++ b/Python/FiniteStateParser/basic_fs_parser.py
@@ -3,6 +3,7 @@
in begin/end pairs, the script will print the sorted values in the block,
with the block's name prepended."""
+
import re
import sys
@@ -13,63 +14,56 @@
print("### error: no file specified")
exit(1)
-# open file, specified on command line
-block_file = open(sys.argv[1], 'r')
-
-# compile the regular expressions to be used for performance reasons
-comment_pattern = re.compile(r"\s*#.*")
-block_start_pattern = re.compile(r"\s*begin\s+(\w+)")
-block_end_pattern = re.compile(r"\s*end\s+(\w+)")
+with open(sys.argv[1], 'r') as block_file:
+ # compile the regular expressions to be used for performance reasons
+ comment_pattern = re.compile(r"\s*#.*")
+ block_start_pattern = re.compile(r"\s*begin\s+(\w+)")
+ block_end_pattern = re.compile(r"\s*end\s+(\w+)")
-# current_block holds the name of the block that is being parsed, its
-# value is None when outside a block, note that it doubles as state
-# variable
-current_block = None
+ # current_block holds the name of the block that is being parsed, its
+ # value is None when outside a block, note that it doubles as state
+ # variable
+ current_block = None
-# dictionary to hold the blocks' content
-block_content = {}
-for line in block_file:
- # remove leading/triailing spaces, and comments (i.e., everything
- # following a '#'
- line = comment_pattern.sub("", line.strip())
- # ignore blank lines
- if len(line) == 0:
- continue
+ # dictionary to hold the blocks' content
+ block_content = {}
+ for line in block_file:
+ # remove leading/trailing spaces, and comments (i.e., everything
+ # following a '#')
+ line = comment_pattern.sub("", line.strip())
+ # ignore blank lines
+ if len(line) == 0:
+ continue
# check for begin block
- match = block_start_pattern.match(line)
- if match is not None:
- # if current_block is not None, we are already in a block,
- # raise error
- if current_block is not None:
- print(("### error: block %s is not close when opening %s" %
- (current_block, match.group(1))))
- exit(2)
+ match = block_start_pattern.match(line)
+ if match is not None:
+ # if current_block is not None, we are already in a block,
+ # raise error
+ if current_block is not None:
+ print(f"### error: block {current_block} is not closed when opening {match[1]}")
+ exit(2)
# now in a block, so change state
- current_block = match.group(1)
- block_content[current_block] = []
- continue
+ current_block = match[1]
+ block_content[current_block] = []
+ continue
# check for end block
- match = block_end_pattern.match(line)
- if match is not None:
- # if the end block name is not the current block, raise error
- if match.group(1) != current_block:
- print(("### error: block %s is closed with %s" %
- (match.group(1), current_block)))
- exit(3)
+ match = block_end_pattern.match(line)
+ if match is not None:
+ # if the end block name is not the current block, raise error
+ if match[1] != current_block:
+ print(f"### error: block {match[1]} is closed with {current_block}")
+ exit(3)
# now out of a block, so change state
- current_block = None
- continue
+ current_block = None
+ continue
# if not in a block, ignore the line
- if current_block is None:
- continue
+ if current_block is None:
+ continue
# store block data
- block_content[current_block].append(line)
-
-# now print the data in the new format
-for key in list(block_content.keys()):
- block_content[key].sort()
- for value in block_content[key]:
- print("%s: '%s'" % (key, value))
+ block_content[current_block].append(line)
-# close the file
-block_file.close()
+ # now print the data in the new format
+ for key in list(block_content.keys()):
+ block_content[key].sort()
+ for value in block_content[key]:
+ print(f"{key}: '{value}'")
diff --git a/Python/FiniteStateParser/block_generator.py b/Python/FiniteStateParser/block_generator.py
index 059577f3..79ad2703 100755
--- a/Python/FiniteStateParser/block_generator.py
+++ b/Python/FiniteStateParser/block_generator.py
@@ -88,10 +88,7 @@ def _generate_value(self, data_block):
elif data_block.get_type() == 'int':
return random.randint(-1000, 1000)
else:
- rand_str = ''
- for char_nr in range(0, 5):
- rand_str += chr(random.randint(ord('A'), ord('Z')))
- return rand_str
+ return ''.join(chr(random.randint(ord('A'), ord('Z'))) for _ in range(0, 5))
def _create_block(self):
'''Create a new Block object'''
diff --git a/Python/FiniteStateParser/fs_parser.py b/Python/FiniteStateParser/fs_parser.py
index 1b56aa77..1e344e05 100755
--- a/Python/FiniteStateParser/fs_parser.py
+++ b/Python/FiniteStateParser/fs_parser.py
@@ -19,57 +19,48 @@ def parse(file_name):
"""function that takes a file name, and returns a dictionary of blocks,
the keys are the names of the blocks, the values are lists of their
content"""
-# open file, specified on command line
- block_file = open(file_name, 'r')
+ with open(file_name, 'r') as block_file:
# compile the regular expressions to be used for performance reasons
- comment_pattern = re.compile(r"\s*#.*")
- block_start_pattern = re.compile(r"\s*begin\s+(\w+)")
- block_end_pattern = re.compile(r"\s*end\s+(\w+)")
+ comment_pattern = re.compile(r"\s*#.*")
+ block_start_pattern = re.compile(r"\s*begin\s+(\w+)")
+ block_end_pattern = re.compile(r"\s*end\s+(\w+)")
# current_block holds the name of the block that is being parsed, its
# value is None when outside a block, note that it doubles as state
# variable
- current_block = None
+ current_block = None
# dictionary to hold the blocks' content
- block_content = {}
- for line in block_file:
- # remove leading/triailing spaces, and comments (i.e., everything
- # following a '#'
- line = comment_pattern.sub("", line.strip())
- # ignore blank lines
- if len(line) == 0:
- continue
- # check for begin block
- match = block_start_pattern.match(line)
- if match:
- # if current_block is not None, we are already in a block,
- # raise error
- if current_block:
- msg = "block %s is not close when opening %s" % \
- (current_block, match.group(1))
- raise ParseError(msg)
+ block_content = {}
+ for line in block_file:
+ # remove leading/trailing spaces, and comments (i.e., everything
+ # following a '#')
+ line = comment_pattern.sub("", line.strip())
+ # ignore blank lines
+ if len(line) == 0:
+ continue
+ if match := block_start_pattern.match(line):
+ # if current_block is not None, we are already in a block,
+ # raise error
+ if current_block:
+ msg = f"block {current_block} is not closed when opening {match[1]}"
+ raise ParseError(msg)
# now in a block, so change state
- current_block = match.group(1)
- block_content[current_block] = []
- continue
-# check for end block
- match = block_end_pattern.match(line)
- if match:
- # if the end block name is not the current block, raise error
- if match.group(1) != current_block:
- msg = "block %s is closed with %s" % \
- (match.group(1), current_block)
- raise ParseError(msg)
+ current_block = match[1]
+ block_content[current_block] = []
+ continue
+ if match := block_end_pattern.match(line):
+ # if the end block name is not the current block, raise error
+ if match[1] != current_block:
+ msg = f"block {match[1]} is closed with {current_block}"
+ raise ParseError(msg)
# now out of a block, so change stateatch.group(1), current_block)
- current_block = None
- continue
+ current_block = None
+ continue
# if not in a block, ignore the line
- if current_block is None:
- continue
- else:
- # store block data
- block_content[current_block].append(line)
-# close the file
- block_file.close()
+ if current_block is None:
+ continue
+ else:
+ # store block data
+ block_content[current_block].append(line)
# now sort the data
for key in list(block_content.keys()):
block_content[key].sort()
@@ -86,7 +77,7 @@ def main():
content = parse(sys.argv[1])
for block_name in content:
for value in content[block_name]:
- print("%s: '%s'" % (block_name, value))
+ print(f"{block_name}: '{value}'")
if __name__ == "__main__":
main()
diff --git a/Python/FiniteStateParser/non_typed_fs_parser.py b/Python/FiniteStateParser/non_typed_fs_parser.py
index e51673a2..25911b97 100755
--- a/Python/FiniteStateParser/non_typed_fs_parser.py
+++ b/Python/FiniteStateParser/non_typed_fs_parser.py
@@ -20,9 +20,8 @@ def main():
parser.set_verbosity(args.verbosity)
print('verbosity: {0}'.format(parser._verbose))
try:
- block_file = open(args.file[0], 'r')
- blocks = parser.parse(block_file)
- block_file.close()
+ with open(args.file[0], 'r') as block_file:
+ blocks = parser.parse(block_file)
except IOError as exception:
print('### error: {0}'.format(exception))
exit(2)
diff --git a/Python/FiniteStateParser/oo_fs_parser.py b/Python/FiniteStateParser/oo_fs_parser.py
index a6968ef0..ba031755 100755
--- a/Python/FiniteStateParser/oo_fs_parser.py
+++ b/Python/FiniteStateParser/oo_fs_parser.py
@@ -21,57 +21,58 @@ def parse(file_name):
'''function that takes a file name, and returns a list of blocks,
instances of class Block.'''
from block import Block
-# open file, specified on command line
- block_file = open(file_name, 'r')
+ with open(file_name, 'r') as block_file:
# compile the regular expressions to be used for performance reasons
- comment_pattern = re.compile(r'\s*#.*')
- block_start_pattern = re.compile(r'\s*begin\s+(\w+)')
- block_end_pattern = re.compile(r'\s*end\s+(\w+)')
+ comment_pattern = re.compile(r'\s*#.*')
+ block_start_pattern = re.compile(r'\s*begin\s+(\w+)')
+ block_end_pattern = re.compile(r'\s*end\s+(\w+)')
# current_block holds an instance of Block for the block that is being
# parsed, its # value is None when outside a block, note that it doubles
# as state variable
- current_block = None
+ current_block = None
# list to hold the blocks
- blocks = []
- for line in block_file:
- # remove leading/triailing spaces, and comments (i.e., everything
- # following a '#'
- line = comment_pattern.sub('', line.strip())
+ blocks = []
+ for line in block_file:
+ # remove leading/trailing spaces, and comments (i.e., everything
+ # following a '#')
+ line = comment_pattern.sub('', line.strip())
# ignore blank lines
- if len(line) == 0:
- continue
+ if len(line) == 0:
+ continue
# check for begin block
- match = block_start_pattern.match(line)
- if match is not None:
- # if current_block is not None, we are already in a block,
- # raise error
- if current_block is not None:
- raise ParseError(
- 'block {0} is not close when opening {1}'.format(
- current_block.get_name(), match.group(1)))
+ match = block_start_pattern.match(line)
+ if match is not None:
+ # if current_block is not None, we are already in a block,
+ # raise error
+ if current_block is not None:
+ raise ParseError(
+ 'block {0} is not closed when opening {1}'.format(
+ current_block.get_name(), match[1]
+ )
+ )
# now in a block, so change state
- current_block = Block(match.group(1))
- continue
+ current_block = Block(match[1])
+ continue
# check for end block
- match = block_end_pattern.match(line)
- if match is not None:
- # if the end block name is not the current block, raise error
- if match.group(1) != current_block.get_name():
- raise ParseError(
- 'block {0} is closed with {1}'.format(
- match.group(1), current_block.get_name()))
+ match = block_end_pattern.match(line)
+ if match is not None:
+ # if the end block name is not the current block, raise error
+ if match[1] != current_block.get_name():
+ raise ParseError(
+ 'block {0} is closed with {1}'.format(
+ match[1], current_block.get_name()
+ )
+ )
# now out of a block, add current block to the list, and change state
- blocks.append(current_block)
- current_block = None
- continue
+ blocks.append(current_block)
+ current_block = None
+ continue
# if not in a block, ignore the line
- if current_block is None:
- continue
- else:
- # store block data
- current_block.add_data(line)
-# close the file
- block_file.close()
+ if current_block is None:
+ continue
+ else:
+ # store block data
+ current_block.add_data(line)
return blocks
diff --git a/Python/FiniteStateParser/parser.py b/Python/FiniteStateParser/parser.py
index 2c1240c3..4ab41ca5 100644
--- a/Python/FiniteStateParser/parser.py
+++ b/Python/FiniteStateParser/parser.py
@@ -154,11 +154,10 @@ def parse_iter(self, block_file):
raise err.DanglingEndBlockError(self)
elif self._is_in_state('in_block'):
if self._is_end_block(line):
- if self._end_matches_begin():
- yield self._finish_block()
- self._set_state('not_in_block')
- else:
+ if not self._end_matches_begin():
raise err.NonMatchingBlockDelimitersError(self)
+ yield self._finish_block()
+ self._set_state('not_in_block')
elif self._is_begin_block(line):
raise err.NestedBlocksError(self)
elif self._is_data(line):
@@ -403,11 +402,9 @@ def test_typed_parse_iter_blocks(self):
parser = TypedBlockParser()
blocks = parser.parse(block_file)
block_file.seek(0)
- block_nr = 0
- for parsed_block in parser.parse_iter(block_file):
+ for block_nr, parsed_block in enumerate(parser.parse_iter(block_file)):
self.assertEqual(str(blocks[block_nr]),
str(parsed_block))
- block_nr += 1
if __name__ == '__main__':
diff --git a/Python/FiniteStateParser/pyparsing_block_parser.py b/Python/FiniteStateParser/pyparsing_block_parser.py
index c47b2fbf..0fc438e0 100755
--- a/Python/FiniteStateParser/pyparsing_block_parser.py
+++ b/Python/FiniteStateParser/pyparsing_block_parser.py
@@ -55,7 +55,7 @@ def _define_grammar(self):
data_value = Combine(OneOrMore(CharsNotIn('#\n\r')))
data_line = (LineStart() + white + Optional(data_value) +
Optional(comment) + eol)
- block_name = Word(alphas, alphanums + '_')
+ block_name = Word(alphas, f'{alphanums}_')
begin_block = (LineStart() + begin + block_name +
Optional(comment) + eol)
end_block = LineStart() + end + block_name + Optional(comment) + eol
diff --git a/Python/FiniteStateParser/pyparsing_block_parser_script.py b/Python/FiniteStateParser/pyparsing_block_parser_script.py
index 50d960fb..add367b2 100755
--- a/Python/FiniteStateParser/pyparsing_block_parser_script.py
+++ b/Python/FiniteStateParser/pyparsing_block_parser_script.py
@@ -41,7 +41,7 @@ def handle_data(token):
comment = (Literal('#') + restOfLine).suppress()
data_value = Combine(OneOrMore(CharsNotIn('#\n\r')))
data = LineStart() + Optional(data_value) + Optional(comment) + eol
-block_name = Word(alphas, alphanums + '_')
+block_name = Word(alphas, f'{alphanums}_')
begin_block = LineStart() + begin + block_name + Optional(comment) + eol
end_block = LineStart() + end + block_name + Optional(comment) + eol
junk = ZeroOrMore(LineStart() + NotAny(begin) + restOfLine + eol).suppress()
diff --git a/Python/FiniteStateParser/simple_parser.py b/Python/FiniteStateParser/simple_parser.py
index 7504d466..851e0b0b 100755
--- a/Python/FiniteStateParser/simple_parser.py
+++ b/Python/FiniteStateParser/simple_parser.py
@@ -149,11 +149,10 @@ def parse(self, block_file):
if self._is_begin_block(line):
raise err.NestedBlocksError(self)
elif self._is_end_block(line):
- if self._end_matches_begin():
- self._finish_block()
- self._set_state('not_in_block')
- else:
+ if not self._end_matches_begin():
raise err.NonMatchingBlockDelimitersError(self)
+ self._finish_block()
+ self._set_state('not_in_block')
elif self._is_data(line):
self._add_data(line)
elif self._is_in_state('not_in_block'):
diff --git a/Python/FiniteStateParser/struct_fs_parser.py b/Python/FiniteStateParser/struct_fs_parser.py
index 6d330fac..e1ec519e 100755
--- a/Python/FiniteStateParser/struct_fs_parser.py
+++ b/Python/FiniteStateParser/struct_fs_parser.py
@@ -70,8 +70,7 @@ def check_is_not_in_block():
def check_end_matches_begin():
global current_block, last_match
if last_match.group(1) != current_block:
- msg = "block %s is closed with %s" % \
- (last_match.group(1), current_block)
+ msg = f"block {last_match.group(1)} is closed with {current_block}"
raise ParseError(msg)
def store_data(line):
@@ -82,22 +81,20 @@ def sort_block_data():
global block_content
for key in list(block_content.keys()):
block_content[key].sort()
-# open file, specified on command line
- block_file = open(file_name, 'r')
+
+ with open(file_name, 'r') as block_file:
# iterate over the lines in the file and process
- for line in block_file:
- line = filter_line(line)
- if not is_blank(line):
- if is_block_begin(line):
- check_is_not_in_block()
- set_current_block()
- elif is_block_end(line):
- check_end_matches_begin()
- current_block = None
- elif is_in_block():
- store_data(line)
-# close the file
- block_file.close()
+ for line in block_file:
+ line = filter_line(line)
+ if not is_blank(line):
+ if is_block_begin(line):
+ check_is_not_in_block()
+ set_current_block()
+ elif is_block_end(line):
+ check_end_matches_begin()
+ current_block = None
+ elif is_in_block():
+ store_data(line)
sort_block_data()
return block_content
@@ -112,7 +109,7 @@ def main():
content = parse(sys.argv[1])
for block_name in list(content.keys()):
for value in content[block_name]:
- print("%s: '%s'" % (block_name, value))
+ print(f"{block_name}: '{value}'")
if __name__ == "__main__":
main()
diff --git a/Python/Flask/fib.py b/Python/Flask/fib.py
index ead0fc2b..d50ec478 100755
--- a/Python/Flask/fib.py
+++ b/Python/Flask/fib.py
@@ -14,16 +14,15 @@ def reset():
@app.route('/')
def start():
- if 'number' in session:
- session['number'] += 1
- fib = session['fib']
- session['fib'] += session['fib_prev']
- session['fib_prev'] = fib
- return render_template('fib.html',
- number=session['number'],
- fibonacci=session['fib'])
- else:
+ if 'number' not in session:
return reset()
+ session['number'] += 1
+ fib = session['fib']
+ session['fib'] += session['fib_prev']
+ session['fib_prev'] = fib
+ return render_template('fib.html',
+ number=session['number'],
+ fibonacci=session['fib'])
if __name__ == '__main__':
app.run(threaded=True)
diff --git a/Python/Flask/sum.py b/Python/Flask/sum.py
index 3e0b9c75..74bb735e 100755
--- a/Python/Flask/sum.py
+++ b/Python/Flask/sum.py
@@ -6,13 +6,12 @@
@app.route('/', methods=['GET', 'POST'])
def start():
- if request.method == 'POST':
- op1 = float(request.form['op1'])
- op2 = float(request.form['op2'])
- result = op1 + op2
- return render_template('sum.html', op1=op1, op2=op2, result=result)
- else:
+ if request.method != 'POST':
return render_template('sum.html')
+ op1 = float(request.form['op1'])
+ op2 = float(request.form['op2'])
+ result = op1 + op2
+ return render_template('sum.html', op1=op1, op2=op2, result=result)
if __name__ == '__main__':
app.run()
diff --git a/Python/Fundamentals/class_attributes.py b/Python/Fundamentals/class_attributes.py
index bd41cbcb..cafac27a 100755
--- a/Python/Fundamentals/class_attributes.py
+++ b/Python/Fundamentals/class_attributes.py
@@ -21,12 +21,11 @@ def distance(self, p):
return math.sqrt((self.x - p.x)**2 + (self.y - p.y)**2)
def on_line(self, p, q, tol=1.0e-6):
- if math.fabs(p.x - q.x) > tol:
- a = (q.x - p.x)/(q.y - p.y)
- b = p.y - a*p.x
- return math.fabs(self.y - a*self.x - b) < tol
- else:
+ if math.fabs(p.x - q.x) <= tol:
return math.fabs(self.x - p.x) < tol
+ a = (q.x - p.x)/(q.y - p.y)
+ b = p.y - a*p.x
+ return math.fabs(self.y - a*self.x - b) < tol
def __str__(self):
return '({x}, {y})'.format(x=self.x, y=self.y)
diff --git a/Python/Fundamentals/inheritance.py b/Python/Fundamentals/inheritance.py
index a5b8d4cd..bbbae0d8 100755
--- a/Python/Fundamentals/inheritance.py
+++ b/Python/Fundamentals/inheritance.py
@@ -42,19 +42,19 @@ def child_var(self):
return self.__new_obj_var
def __str__(self):
- return super().__str__() + ' ' + str(self.child_var)
+ return f'{super().__str__()} {str(self.child_var)}'
if __name__ == '__main__':
p = Parent('abc')
c = Child('bde', 'efg')
- print('Parent: {}'.format(Parent.get_class_var()))
- print('Child: {}'.format(Child.get_class_var()))
+ print(f'Parent: {Parent.get_class_var()}')
+ print(f'Child: {Child.get_class_var()}')
print('setting Child class variable')
Child.set_class_var(15)
- print('Parent: {}'.format(Parent.get_class_var()))
- print('Child: {}'.format(Child.get_class_var()))
+ print(f'Parent: {Parent.get_class_var()}')
+ print(f'Child: {Child.get_class_var()}')
print('setting Parent class variable')
Parent.set_class_var(25)
- print('Parent: {}'.format(Parent.get_class_var()))
- print('Child: {}'.format(Child.get_class_var()))
+ print(f'Parent: {Parent.get_class_var()}')
+ print(f'Child: {Child.get_class_var()}')
diff --git a/Python/Fundamentals/io_switch.py b/Python/Fundamentals/io_switch.py
index d7205580..f7e1bbeb 100755
--- a/Python/Fundamentals/io_switch.py
+++ b/Python/Fundamentals/io_switch.py
@@ -10,10 +10,7 @@
str_io = StringIO()
for line in ['abc', 'def', 'ghi']:
str_io.write(line + '\n')
- if options.output:
- output = open(options.output, 'w')
- else:
- output = sys.stdout
+ output = open(options.output, 'w') if options.output else sys.stdout
output.write(str_io.getvalue())
if options.output:
output.close()
diff --git a/Python/Fundamentals/new_point.py b/Python/Fundamentals/new_point.py
index 5dcc6759..c6746273 100644
--- a/Python/Fundamentals/new_point.py
+++ b/Python/Fundamentals/new_point.py
@@ -34,19 +34,15 @@ def distance(self, p):
return math.sqrt((self.x - p.x)**2 + (self.y - p.y)**2)
def on_line(self, p, q, tol=1.0e-6):
- if math.fabs(p.x - q.x) > tol:
- a = (q.x - p.x)/(q.y - p.y)
- b = p.y - a*p.x
- return math.fabs(self.y - a*self.x - b) < tol
- else:
+ if math.fabs(p.x - q.x) <= tol:
return math.fabs(self.x - p.x) < tol
+ a = (q.x - p.x)/(q.y - p.y)
+ b = p.y - a*p.x
+ return math.fabs(self.y - a*self.x - b) < tol
@staticmethod
def all_on_line(p, q, *points):
- for r in points:
- if not r.on_line(p, q):
- return False
- return True
+ return all(r.on_line(p, q) for r in points)
def __str__(self):
return '{id}: ({x}, {y})'.format(x=self.x, y=self.y, id=self.id)
diff --git a/Python/Fundamentals/point.py b/Python/Fundamentals/point.py
index 0d53c190..c434534d 100644
--- a/Python/Fundamentals/point.py
+++ b/Python/Fundamentals/point.py
@@ -28,19 +28,15 @@ def distance(self, p):
return math.sqrt((self.x - p.x)**2 + (self.y - p.y)**2)
def on_line(self, p, q, tol=1.0e-6):
- if not math.isclose(p.x, q.x, tol):
- a = (q.y - p.y)/(q.x - p.x)
- b = p.y - a*p.x
- return math.isclose(self.y, a*self.x + b, tol)
- else:
+ if math.isclose(p.x, q.x, tol):
return math.isclose(self.x, p.x, tol)
+ a = (q.y - p.y)/(q.x - p.x)
+ b = p.y - a*p.x
+ return math.isclose(self.y, a*self.x + b, tol)
@staticmethod
def all_on_line(p, q, *points):
- for r in points:
- if not r.on_line(p, q):
- return False
- return True
+ return all(r.on_line(p, q) for r in points)
def __str__(self):
return '({x}, {y})'.format(x=self.x, y=self.y)
@@ -52,10 +48,7 @@ class PointMass(Point):
def __init__(self, x, y, mass=None):
super().__init__(x, y)
- if mass:
- self._mass = float(mass)
- else:
- self._mass = self.__class__.get_default_mass()
+ self._mass = float(mass) if mass else self.__class__.get_default_mass()
@property
def mass(self):
diff --git a/Python/Fundamentals/point_2.py b/Python/Fundamentals/point_2.py
index 262d9d55..0c947d11 100644
--- a/Python/Fundamentals/point_2.py
+++ b/Python/Fundamentals/point_2.py
@@ -31,12 +31,11 @@ def distance(self, p):
return math.sqrt((self.x - p.x)**2 + (self.y - p.y)**2)
def on_line(self, p, q, tol=1.0e-6):
- if math.fabs(p.x - q.x) > tol:
- a = (q.y - p.y)/(q.x - p.x)
- b = p.y - a*p.x
- return math.fabs(self.y - a*self.x - b) < tol
- else:
+ if math.fabs(p.x - q.x) <= tol:
return math.fabs(self.x - p.x) < tol
+ a = (q.y - p.y)/(q.x - p.x)
+ b = p.y - a*p.x
+ return math.fabs(self.y - a*self.x - b) < tol
@staticmethod
def all_on_line(p, q, *points):
diff --git a/Python/Fundamentals/primes.py b/Python/Fundamentals/primes.py
index 665c9455..bbc2b2b6 100755
--- a/Python/Fundamentals/primes.py
+++ b/Python/Fundamentals/primes.py
@@ -4,10 +4,7 @@
def is_prime(n):
- for i in range(2, round(math.sqrt(n)) + 1):
- if n % i == 0:
- return False
- return True
+ return all(n % i != 0 for i in range(2, round(math.sqrt(n)) + 1))
def next_prime():
diff --git a/Python/Fundamentals/primes_iter.py b/Python/Fundamentals/primes_iter.py
index f2352ecf..3548b6c9 100755
--- a/Python/Fundamentals/primes_iter.py
+++ b/Python/Fundamentals/primes_iter.py
@@ -4,10 +4,7 @@
def is_prime(n):
- for i in range(2, int(math.sqrt(n)) + 1):
- if n % i == 0:
- return False
- return True
+ return all(n % i != 0 for i in range(2, int(math.sqrt(n)) + 1))
def next_prime():
diff --git a/Python/Fundamentals/primes_itertools.py b/Python/Fundamentals/primes_itertools.py
index 38c2a6c8..5c665acc 100755
--- a/Python/Fundamentals/primes_itertools.py
+++ b/Python/Fundamentals/primes_itertools.py
@@ -9,10 +9,7 @@ def is_prime(n):
'''returns True if n is prime, False otherwise'''
if n <= 2:
return False
- for i in range(2, 1 + int(math.sqrt(n))):
- if n % i == 0:
- return False
- return True
+ return all(n % i != 0 for i in range(2, 1 + int(math.sqrt(n))))
def main():
diff --git a/Python/Fundamentals/sophisticated_counting.py b/Python/Fundamentals/sophisticated_counting.py
index ee17a350..8dbed14a 100755
--- a/Python/Fundamentals/sophisticated_counting.py
+++ b/Python/Fundamentals/sophisticated_counting.py
@@ -8,12 +8,10 @@ def main():
threshold = 1.0e-7
categories = {
'negative': lambda x: x < -threshold,
- 'zero': lambda x: -threshold <= x and x <= threshold,
+ 'zero': lambda x: -threshold <= x <= threshold,
'positive': lambda x: threshold < x,
}
- counter = {}
- for name in categories.keys():
- counter[name] = 0
+ counter = {name: 0 for name in categories}
for line in sys.stdin:
value = float(line.rstrip('\r\n').split()[2])
for name, cond in categories.items():
diff --git a/Python/Fundamentals/validator.py b/Python/Fundamentals/validator.py
index e8e318d3..230acd0c 100755
--- a/Python/Fundamentals/validator.py
+++ b/Python/Fundamentals/validator.py
@@ -4,11 +4,10 @@
def validate(case_nr, dim_nr, temp):
- if dim_nr == 3 and temp < 0.0:
- sys.stderr.write('# error in case {0}\n'.format(case_nr))
- return False
- else:
+ if dim_nr != 3 or temp >= 0.0:
return True
+ sys.stderr.write('# error in case {0}\n'.format(case_nr))
+ return False
def parse_line(line):
diff --git a/Python/ImageProcessing/analyze.py b/Python/ImageProcessing/analyze.py
index ed5957dc..f214b3e4 100755
--- a/Python/ImageProcessing/analyze.py
+++ b/Python/ImageProcessing/analyze.py
@@ -13,22 +13,21 @@
counter = 0
try:
densities = []
- while (True):
+ while True:
status, frame = capture.read()
- if status:
- counter += 1
- nr_pixels = frame.shape[0]*frame.shape[1]
- red_channel = frame[:, :, 0]
- green_channel = frame[:, :, 1]
- blue_channel = frame[:, :, 2]
- densities.append((counter,
- red_channel.sum()/nr_pixels,
- green_channel.sum()/nr_pixels,
- blue_channel.sum()/nr_pixels))
- cv2.imshow('frame', frame)
- if cv2.waitKey(1) & 0xFF == ord('q'):
- break
- else:
+ if not status:
+ break
+ counter += 1
+ nr_pixels = frame.shape[0]*frame.shape[1]
+ red_channel = frame[:, :, 0]
+ green_channel = frame[:, :, 1]
+ blue_channel = frame[:, :, 2]
+ densities.append((counter,
+ red_channel.sum()/nr_pixels,
+ green_channel.sum()/nr_pixels,
+ blue_channel.sum()/nr_pixels))
+ cv2.imshow('frame', frame)
+ if cv2.waitKey(1) & 0xFF == ord('q'):
break
capture.release()
cv2.destroyAllWindows()
diff --git a/Python/ImageProcessing/capture.py b/Python/ImageProcessing/capture.py
index 9d7466a9..d3f21fa7 100755
--- a/Python/ImageProcessing/capture.py
+++ b/Python/ImageProcessing/capture.py
@@ -25,15 +25,14 @@
try:
while True:
status, frame = capture.read()
- if status:
- counter += 1
- # frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
- if options.output:
- output.write(frame)
- cv2.imshow('frame', frame)
- if cv2.waitKey(1) & 0xFF == ord('q'):
- break
- else:
+ if not status:
+ break
+ counter += 1
+ # frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
+ if options.output:
+ output.write(frame)
+ cv2.imshow('frame', frame)
+ if cv2.waitKey(1) & 0xFF == ord('q'):
break
except KeyboardInterrupt:
capture.release()
diff --git a/Python/ImageProcessing/follow_ball.py b/Python/ImageProcessing/follow_ball.py
index 40d5eee7..69408778 100755
--- a/Python/ImageProcessing/follow_ball.py
+++ b/Python/ImageProcessing/follow_ball.py
@@ -39,7 +39,7 @@
(frame.shape[1], frame.shape[0]))
else:
output = None
-
+
track_window = (options.col, options.row, options.width, options.height)
hsv_frame = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
mask = cv2.inRange(hsv_frame,
@@ -52,24 +52,23 @@
nr_frames = 1
while True:
status, frame = capture.read()
- if status == True:
- nr_frames += 1
- hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
- dst = cv2.calcBackProject([hsv], [0], roi_hist, [0, 50], 1)
- status, track_window = cv2.meanShift(dst, track_window,
- termination_cond)
- if not status:
- continue
- x, y, w, h = track_window
- img = cv2.rectangle(frame, (x, y), (x + w, y + h), 255, 2)
- cv2.imshow('image', img)
- if output:
- output.write(img)
- if cv2.waitKey(1) % 0xFF == ord('q'):
- break
- else:
+        if not status:
break
+ nr_frames += 1
+ hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
+ dst = cv2.calcBackProject([hsv], [0], roi_hist, [0, 50], 1)
+ status, track_window = cv2.meanShift(dst, track_window,
+ termination_cond)
+ if not status:
+ continue
+ x, y, w, h = track_window
+ img = cv2.rectangle(frame, (x, y), (x + w, y + h), 255, 2)
+ cv2.imshow('image', img)
+ if output:
+ output.write(img)
+        if cv2.waitKey(1) & 0xFF == ord('q'):
+ break
print(f'{nr_frames} frames processed')
cv2.destroyAllWindows()
if output:
diff --git a/Python/ImageProcessing/split_scenes.py b/Python/ImageProcessing/split_scenes.py
index b9ae2206..0ed86b46 100755
--- a/Python/ImageProcessing/split_scenes.py
+++ b/Python/ImageProcessing/split_scenes.py
@@ -49,27 +49,26 @@ def write_movie(base_name, extension, scene_nr, frames, fps):
prev_rgb = avg_rgb(frame)
scene_nr = 1
saved_scenes = 0
- while (True):
+ while True:
status, frame = capture.read()
- if status:
- counter += 1
- cv2.imshow('frame', frame)
- rgb = avg_rgb(frame)
- if is_new_scene(prev_rgb, rgb, options.threshold):
- if len(frames) >= options.min_frames:
- saved_scenes += 1
- write_movie(base_name, extension, saved_scenes,
- frames, options.fps)
- print(f'scene {scene_nr}: {len(frames)} frames')
- scene_nr += 1
- counter = 1
- frames = [frame]
- else:
- frames.append(frame)
- prev_rgb = rgb
- if cv2.waitKey(1) & 0xFF == ord('q'):
- break
+ if not status:
+ break
+ counter += 1
+ cv2.imshow('frame', frame)
+ rgb = avg_rgb(frame)
+ if is_new_scene(prev_rgb, rgb, options.threshold):
+ if len(frames) >= options.min_frames:
+ saved_scenes += 1
+ write_movie(base_name, extension, saved_scenes,
+ frames, options.fps)
+ print(f'scene {scene_nr}: {len(frames)} frames')
+ scene_nr += 1
+ counter = 1
+ frames = [frame]
else:
+ frames.append(frame)
+ prev_rgb = rgb
+ if cv2.waitKey(1) & 0xFF == ord('q'):
break
if frames:
if len(frames) >= options.min_frames:
diff --git a/Python/Introspection/parameterized.py b/Python/Introspection/parameterized.py
index 880ceff0..e87a55c1 100644
--- a/Python/Introspection/parameterized.py
+++ b/Python/Introspection/parameterized.py
@@ -22,10 +22,10 @@ def __init__(self, name, ptype, value):
not of the specified type.
'''
if type(name) is not str:
- raise TypeError(f'name should be a str')
+ raise TypeError('name should be a str')
self._name = name
if type(ptype) is not type:
- raise TypeError(f'type should be a Python type')
+ raise TypeError('type should be a Python type')
self._type = ptype
self._value = self._type(value)
@@ -148,7 +148,7 @@ def _add_parameter(self, parameter):
parameter: Parameter object
'''
if '__parameters' not in self.__dict__:
- self.__dict__['__parameters'] = dict()
+ self.__dict__['__parameters'] = {}
self.__dict__['__parameters'][parameter.name] = parameter
def __getattr__(self, name):
@@ -198,4 +198,4 @@ def get_parameter_names(self):
if hasattr(self, '__parameters'):
return list(self.__dict__['__parameters'].keys())
else:
- return list()
+ return []
diff --git a/Python/Ising/averager.py b/Python/Ising/averager.py
index a1f64a1f..4a0684d0 100644
--- a/Python/Ising/averager.py
+++ b/Python/Ising/averager.py
@@ -43,22 +43,21 @@ def quantities(self):
return self._quantities.keys()
def get(self, quantity):
- if quantity in self._quantities:
- result = {}
- if type(self._quantities[quantity][0]) == dict:
- for dictionary in self._quantities[quantity]:
- for key in dictionary:
- if key not in result:
- result[key] = 0
- result[key] += dictionary[key]
- for key in result:
- result[key] /= float(len(self._quantities[quantity]))
- else:
- result['mean'] = np.mean(self._quantities[quantity])
- result['std'] = np.std(self._quantities[quantity])
- result['min'] = np.min(self._quantities[quantity])
- result['max'] = np.max(self._quantities[quantity])
- result['N'] = len(self._quantities[quantity])
- return result
- else:
+ if quantity not in self._quantities:
raise UnknownQuantityError(quantity)
+ result = {}
+ if type(self._quantities[quantity][0]) == dict:
+ for dictionary in self._quantities[quantity]:
+ for key in dictionary:
+ if key not in result:
+ result[key] = 0
+ result[key] += dictionary[key]
+ for key in result:
+ result[key] /= float(len(self._quantities[quantity]))
+ else:
+ result['mean'] = np.mean(self._quantities[quantity])
+ result['std'] = np.std(self._quantities[quantity])
+ result['min'] = np.min(self._quantities[quantity])
+ result['max'] = np.max(self._quantities[quantity])
+ result['N'] = len(self._quantities[quantity])
+ return result
diff --git a/Python/Ising/domain_counter.py b/Python/Ising/domain_counter.py
index 9cf465ae..e3ac0b01 100644
--- a/Python/Ising/domain_counter.py
+++ b/Python/Ising/domain_counter.py
@@ -5,11 +5,11 @@
def compute_neighbouts(coord, size):
neighbours = []
- if coord[0] - 1 >= 0:
+ if coord[0] >= 1:
neighbours.append((coord[0] - 1, coord[1]))
if coord[0] + 1 < size:
neighbours.append((coord[0] + 1, coord[1]))
- if coord[1] - 1 >= 0:
+ if coord[1] >= 1:
neighbours.append((coord[0], coord[1] - 1))
if coord[1] + 1 < size:
neighbours.append((coord[0], coord[1] + 1))
@@ -19,7 +19,7 @@ def compute_neighbouts(coord, size):
def find_domain(ising, domains, cd, domain_nr):
queue = [cd]
domains[cd] = domain_nr
- while len(queue) > 0:
+ while queue:
cd = queue.pop()
for nb in compute_neighbouts(cd, ising.N()):
if domains[nb] == -1 and ising.s(*cd) == ising.s(*nb):
diff --git a/Python/Ising/ising_demo.py b/Python/Ising/ising_demo.py
index ad4d981b..f137ce18 100755
--- a/Python/Ising/ising_demo.py
+++ b/Python/Ising/ising_demo.py
@@ -38,10 +38,7 @@
help='show progress information')
options = arg_parser.parse_args()
ising = IsingSystem(options.N, options.J, options.H, options.T)
- if options.seed:
- seed = options.seed
- else:
- seed = random.randint(0, 1000000000)
+ seed = options.seed if options.seed else random.randint(0, 1000000000)
ising.init_random(seed)
if options.mode == 'single_run':
runner = SingleRunner(ising, is_verbose=options.verbose)
diff --git a/Python/Ising/ising_sim.py b/Python/Ising/ising_sim.py
index 480aca66..0f69739d 100755
--- a/Python/Ising/ising_sim.py
+++ b/Python/Ising/ising_sim.py
@@ -42,53 +42,49 @@
arg_parser.add_argument('--python', action='store_true',
help='use pure Python implementation')
options = arg_parser.parse_args()
- magn_file = open('{0}-magn.txt'.format(options.file), 'w')
- domain_file = open('{0}-domains.txt'.format(options.file), 'w')
- if options.seed is None:
- seed = random.randint(0, 1000000000)
- else:
- seed = options.seed
- if options.python:
- ising_module = importlib.import_module('ising')
- else:
- ising_module = importlib.import_module('ising_cxx')
- hdr_line_fmt = 'T {M:s} {E:s} {deltaE2:s}\n'
- hdr_fmt = '{0:s} {0:s}_std {0:s}_min {0:s}_max'
- val_line_fmt = '{T:.4f} {M:s} {E:s} {deltaE2:s}\n'
- val_fmt = '{mean:.5e} {std:.5e} {min:.5e} {max:.5e}'
- M_hdr = hdr_fmt.format('M')
- E_hdr = hdr_fmt.format('E')
- deltaE2_hdr = hdr_fmt.format('deltaE^2')
- magn_file.write(hdr_line_fmt.format(M=M_hdr, E=E_hdr,
- deltaE2=deltaE2_hdr))
- print_options(magn_file, options)
- domain_file.write('T domain_sizes\n')
- print_options(domain_file, options)
- for T in (float(T_str) for T_str in options.T.split(',')):
- if options.verbose > 0:
- sys.stderr.write('# computing T = {0:.4f}\n'.format(T))
- ising = ising_module.IsingSystem(options.N, options.J, options.H, T)
- ising.init_random(seed)
- runner = DomainSizeRunner(ising=None, steps=options.steps,
- is_verbose=options.verbose - 2,
- burn_in=options.burn_in,
- sample_period=options.sample_period,
- window=options.window)
- averager = Averager(runner, ising, is_verbose=options.verbose - 1)
- averager.average(options.runs)
- M_values = averager.get('M mean')
- M_str = val_fmt.format(**M_values)
- E_values = averager.get('E mean')
- E_str = val_fmt.format(**E_values)
- deltaE2_values = averager.get('deltaE^2')
- deltaE2_str = val_fmt.format(**deltaE2_values)
- magn_file.write(val_line_fmt.format(T=T, M=M_str, E=E_str,
- deltaE2=deltaE2_str))
- magn_file.flush()
- domains = averager.get('domains')
- distrubtion = ','.join(['{0:d}:{1:.8e}'.format(k, v)
- for k, v in domains.items()])
- domain_file.write('{0:.4f} {1:s}\n'.format(T, distrubtion))
- domain_file.flush()
- magn_file.close()
+    with open('{0}-magn.txt'.format(options.file), 'w') as magn_file, \
+            open('{0}-domains.txt'.format(options.file), 'w') as domain_file:
+ seed = random.randint(0, 1000000000) if options.seed is None else options.seed
+ if options.python:
+ ising_module = importlib.import_module('ising')
+ else:
+ ising_module = importlib.import_module('ising_cxx')
+ hdr_line_fmt = 'T {M:s} {E:s} {deltaE2:s}\n'
+ hdr_fmt = '{0:s} {0:s}_std {0:s}_min {0:s}_max'
+ val_line_fmt = '{T:.4f} {M:s} {E:s} {deltaE2:s}\n'
+ val_fmt = '{mean:.5e} {std:.5e} {min:.5e} {max:.5e}'
+ M_hdr = hdr_fmt.format('M')
+ E_hdr = hdr_fmt.format('E')
+ deltaE2_hdr = hdr_fmt.format('deltaE^2')
+ magn_file.write(hdr_line_fmt.format(M=M_hdr, E=E_hdr,
+ deltaE2=deltaE2_hdr))
+ print_options(magn_file, options)
+ domain_file.write('T domain_sizes\n')
+ print_options(domain_file, options)
+ for T in (float(T_str) for T_str in options.T.split(',')):
+ if options.verbose > 0:
+ sys.stderr.write('# computing T = {0:.4f}\n'.format(T))
+ ising = ising_module.IsingSystem(options.N, options.J, options.H, T)
+ ising.init_random(seed)
+ runner = DomainSizeRunner(ising=None, steps=options.steps,
+ is_verbose=options.verbose - 2,
+ burn_in=options.burn_in,
+ sample_period=options.sample_period,
+ window=options.window)
+ averager = Averager(runner, ising, is_verbose=options.verbose - 1)
+ averager.average(options.runs)
+ M_values = averager.get('M mean')
+ M_str = val_fmt.format(**M_values)
+ E_values = averager.get('E mean')
+ E_str = val_fmt.format(**E_values)
+ deltaE2_values = averager.get('deltaE^2')
+ deltaE2_str = val_fmt.format(**deltaE2_values)
+ magn_file.write(val_line_fmt.format(T=T, M=M_str, E=E_str,
+ deltaE2=deltaE2_str))
+ magn_file.flush()
+ domains = averager.get('domains')
+            distribution = ','.join(['{0:d}:{1:.8e}'.format(k, v)
+                                     for k, v in domains.items()])
+            domain_file.write('{0:.4f} {1:s}\n'.format(T, distribution))
+ domain_file.flush()
domain_file.close()
diff --git a/Python/Ising/runner.py b/Python/Ising/runner.py
index 9dcaa7c8..01706b10 100644
--- a/Python/Ising/runner.py
+++ b/Python/Ising/runner.py
@@ -126,10 +126,7 @@ def clone(self):
self._is_verbose)
def _prologue(self):
- if self._file_name:
- self._file = open(self._file_name, 'w')
- else:
- self._file = sys.stdout
+ self._file = open(self._file_name, 'w') if self._file_name else sys.stdout
self._file.write('t M E\n')
self._file.write('# T = {0:.3f}\n'.format(self._ising.T()))
self._file.write('# N = {0:d}\n'.format(self._ising.N()))
diff --git a/Python/Iterators/count_downs.py b/Python/Iterators/count_downs.py
index d7eacc43..626e37d8 100755
--- a/Python/Iterators/count_downs.py
+++ b/Python/Iterators/count_downs.py
@@ -23,12 +23,11 @@ def __iter__(self):
def __next__(self):
'''Returns the next value, and changes state, called in each
iteration'''
- if self._current >= 0:
- value = self._current
- self._current -= 1
- return value
- else:
+ if self._current < 0:
raise StopIteration()
+ value = self._current
+ self._current -= 1
+ return value
def __str__(self):
return 'count down at {c:d} from {n:d}'.format(self._current,
diff --git a/Python/Iterators/dataset.py b/Python/Iterators/dataset.py
index c801e626..60a59981 100755
--- a/Python/Iterators/dataset.py
+++ b/Python/Iterators/dataset.py
@@ -75,10 +75,7 @@ def nr_columns(self):
@property
def column_defs(self):
'''retrieve the column definitions of the dataset'''
- col_defs = []
- for header in self._headers:
- col_defs.append(ColumnDef(header, self._type_map[header]))
- return col_defs
+ return [ColumnDef(header, self._type_map[header]) for header in self._headers]
def __len__(self):
'''retrieve the length of the data set'''
diff --git a/Python/Iterators/event_generator.py b/Python/Iterators/event_generator.py
index 91af3f6b..f49306f5 100755
--- a/Python/Iterators/event_generator.py
+++ b/Python/Iterators/event_generator.py
@@ -58,10 +58,7 @@ def __next__(self):
def event_key(event):
- if event[2] == 'on':
- return (event[0], event[1], 0)
- else:
- return (event[0], event[1], 1)
+ return (event[0], event[1], 0) if event[2] == 'on' else (event[0], event[1], 1)
if __name__ == '__main__':
from argparse import ArgumentParser
@@ -81,12 +78,11 @@ def main():
event_list = []
for event_type in options.events:
for event in EventIter(event_type, start):
- if event.start < stop:
- event_list.append(event.begin())
- if event.stop < stop:
- event_list.append(event.end())
- else:
+ if event.start >= stop:
break
+ event_list.append(event.begin())
+ if event.stop < stop:
+ event_list.append(event.end())
event_list.sort(key=event_key)
for event in event_list:
print('{0};{1} {2}'.format(str(event[0]), event[1], event[2]))
diff --git a/Python/Iterators/primes.py b/Python/Iterators/primes.py
index cd23e670..27e1289a 100755
--- a/Python/Iterators/primes.py
+++ b/Python/Iterators/primes.py
@@ -5,10 +5,7 @@
def is_prime(n):
- for i in range(2, int(math.sqrt(n)) + 1):
- if n % i == 0:
- return False
- return True
+ return all(n % i != 0 for i in range(2, int(math.sqrt(n)) + 1))
def next_prime():
@@ -21,10 +18,7 @@ def next_prime():
def main():
- if len(sys.argv) > 1:
- max_nr = int(sys.argv[1])
- else:
- max_nr = None
+ max_nr = int(sys.argv[1]) if len(sys.argv) > 1 else None
for n in next_prime():
if max_nr and n > max_nr:
break
diff --git a/Python/Iterators/primes_itertools.py b/Python/Iterators/primes_itertools.py
index 5818a607..4d24c6c8 100755
--- a/Python/Iterators/primes_itertools.py
+++ b/Python/Iterators/primes_itertools.py
@@ -11,10 +11,7 @@ def is_prime(n):
return False
elif n == 2:
return True
- for i in range(2, 1 + int(math.sqrt(n))):
- if n % i == 0:
- return False
- return True
+ return all(n % i != 0 for i in range(2, 1 + int(math.sqrt(n))))
def main():
diff --git a/Python/Iterators/primes_multiple_calls.py b/Python/Iterators/primes_multiple_calls.py
index e894b50a..396ac100 100755
--- a/Python/Iterators/primes_multiple_calls.py
+++ b/Python/Iterators/primes_multiple_calls.py
@@ -5,10 +5,7 @@
def is_prime(n):
- for i in range(2, int(math.sqrt(n)) + 1):
- if n % i == 0:
- return False
- return True
+ return all(n % i != 0 for i in range(2, int(math.sqrt(n)) + 1))
def next_prime():
diff --git a/Python/Joblib/pi_joblib.py b/Python/Joblib/pi_joblib.py
index 28246e4e..c33c7525 100755
--- a/Python/Joblib/pi_joblib.py
+++ b/Python/Joblib/pi_joblib.py
@@ -27,8 +27,9 @@ def try_hits(nr_trials):
arg_parser.add_argument('-p', dest='nr_packets', type=int, default=5,
help='number of work packets')
options = arg_parser.parse_args()
- Parallel(n_jobs=-1)(delayed(try_hits)(options.nr_trials)
- for i in range(options.nr_packets))
+ Parallel(n_jobs=-1)(
+ delayed(try_hits)(options.nr_trials) for _ in range(options.nr_packets)
+ )
pi = 4.0*float(nr_hits)/(options.nr_trials*options.nr_packets)
print('{0:.10f}'.format(pi))
sys.exit(0)
diff --git a/Python/Keras/Flatland/generate_images.py b/Python/Keras/Flatland/generate_images.py
index 955ac770..d9296475 100755
--- a/Python/Keras/Flatland/generate_images.py
+++ b/Python/Keras/Flatland/generate_images.py
@@ -66,11 +66,11 @@
x_data = np.empty((options.n, options.width, options.height),
dtype=np.uint8)
y_data = np.zeros((options.n, len(generators)), dtype=np.uint8)
- labels = list()
+ labels = []
for i in range(options.n):
data = np.zeros((options.width, options.height))
nr_objects = np.random.randint(1, options.max_objects + 1)
- for j in range(nr_objects):
+ for _ in range(nr_objects):
object_id = np.random.randint(0, len(generators))
fig = generators[object_id].create()
transformer.transform(fig)
diff --git a/Python/Logging/log_it_all.py b/Python/Logging/log_it_all.py
index 840b506c..94a20530 100755
--- a/Python/Logging/log_it_all.py
+++ b/Python/Logging/log_it_all.py
@@ -30,14 +30,8 @@ def main():
help='number of times to do stuff')
options = arg_parser.parse_args()
format_str = '%(asctime)s:%(levelname)s:%(message)s'
- if options.info:
- level = logging.INFO
- else:
- level = logging.WARNING
- if options.new_log:
- filemode = 'w'
- else:
- filemode = 'a'
+ level = logging.INFO if options.info else logging.WARNING
+ filemode = 'w' if options.new_log else 'a'
if options.log_file:
exists = os.path.exists(options.log_file)
logging.basicConfig(level=level, filename=options.log_file,
diff --git a/Python/Matlab/functions.py b/Python/Matlab/functions.py
index 25347baf..236edcb4 100644
--- a/Python/Matlab/functions.py
+++ b/Python/Matlab/functions.py
@@ -1,5 +1,2 @@
def fib(n):
- if n < 2:
- return 1
- else:
- return fib(n - 1) + fib(n - 2)
+ return 1 if n < 2 else fib(n - 1) + fib(n - 2)
diff --git a/Python/Matrices/numpy_matmul.py b/Python/Matrices/numpy_matmul.py
index bf56d913..7b569ec9 100755
--- a/Python/Matrices/numpy_matmul.py
+++ b/Python/Matrices/numpy_matmul.py
@@ -6,11 +6,11 @@
def init_matrix(n, value=None):
- if value is None:
- m = np.random.uniform(0.0, 1.0, (n, n))
- else:
- m = np.fromfunction(lambda i, j: value, (n, n))
- return m
+ return (
+ np.random.uniform(0.0, 1.0, (n, n))
+ if value is None
+ else np.fromfunction(lambda i, j: value, (n, n))
+ )
def main():
diff --git a/Python/Matrices/pure_python_matmul.py b/Python/Matrices/pure_python_matmul.py
index ddd904cb..e0363794 100755
--- a/Python/Matrices/pure_python_matmul.py
+++ b/Python/Matrices/pure_python_matmul.py
@@ -9,7 +9,7 @@ def init_matrix(n, value=None):
m = []
for i in range(n):
m.append([])
- for j in range(n):
+ for _ in range(n):
if value is not None:
m[i].append(value)
else:
diff --git a/Python/Mpi4py/exchange.py b/Python/Mpi4py/exchange.py
index 05ef4908..ff19aef3 100755
--- a/Python/Mpi4py/exchange.py
+++ b/Python/Mpi4py/exchange.py
@@ -37,23 +37,17 @@ def send_right(comm, msg, is_verbose=False):
def isend_right(comm, msg, is_verbose=False):
rank = comm.rank
size = comm.size
- left_msg = None
if rank < size - 1:
comm.isend(msg, dest=rank + 1)
- if rank > 0:
- left_msg = comm.recv(source=rank - 1)
- return left_msg
+ return comm.recv(source=rank - 1) if rank > 0 else None
def isend_left(comm, msg, is_verbose=False):
rank = comm.rank
size = comm.size
- right_msg = None
if rank > 0:
comm.isend(msg, dest=rank - 1)
- if rank < size - 1:
- right_msg = comm.recv(source=rank + 1)
- return right_msg
+ return comm.recv(source=rank + 1) if rank < size - 1 else None
def main():
@@ -81,12 +75,10 @@ def main():
comm.barrier()
msg_in = None
if rank == 0:
- msg_out = 'hello from 0'
- msg_in = comm.sendrecv(sendobj=msg_out, dest=1, source=1)
+ msg_in = comm.sendrecv(sendobj='hello from 0', dest=1, source=1)
print('rank {0}: {1}'.format(rank, msg_in))
- if rank == 1:
- msg_out = 'hello from 1'
- msg_in = comm.sendrecv(sendobj=msg_out, dest=0, source=0)
+ elif rank == 1:
+ msg_in = comm.sendrecv(sendobj='hello from 1', dest=0, source=0)
print('rank {0}: {1}'.format(rank, msg_in))
return 0
diff --git a/Python/Mpi4py/hello.py b/Python/Mpi4py/hello.py
index eb1675d4..e9e6b54e 100755
--- a/Python/Mpi4py/hello.py
+++ b/Python/Mpi4py/hello.py
@@ -39,8 +39,8 @@
if rank == 0:
print('# core-level placement')
comm.Barrier()
+ msg = 'rank {0:02d} running on core(s) {1:s} of {2:d}'
for _ in range(nr_iters):
cores = process.cpu_affinity()
- msg = 'rank {0:02d} running on core(s) {1:s} of {2:d}'
print(msg.format(rank, '.'.join(str(c) for c in cores), nr_cores))
time.sleep(sleep_time)
diff --git a/Python/Mpi4py/reduce.py b/Python/Mpi4py/reduce.py
index ed494630..9804f066 100755
--- a/Python/Mpi4py/reduce.py
+++ b/Python/Mpi4py/reduce.py
@@ -12,10 +12,7 @@
root = 0
send_buffer = (rank + 1)*np.linspace(0.0, 1.0, nr_points)
- if rank == root:
- recv_buffer = np.zeros(nr_points)
- else:
- recv_buffer = None
+ recv_buffer = np.zeros(nr_points) if rank == root else None
comm.Reduce(send_buffer, recv_buffer,
op=MPI.SUM, root=root)
if rank == root:
diff --git a/Python/NetworkX/compute_leaf_path_lengths.py b/Python/NetworkX/compute_leaf_path_lengths.py
index a2417f98..a81be2e7 100755
--- a/Python/NetworkX/compute_leaf_path_lengths.py
+++ b/Python/NetworkX/compute_leaf_path_lengths.py
@@ -11,10 +11,7 @@ def is_leaf(tree, node):
def leaf_path_lengths(tree):
path_lengths = nx.shortest_path_length(tree, '1', weight='weight')
- remove = []
- for node in path_lengths:
- if not is_leaf(tree, node):
- remove.append(node)
+ remove = [node for node in path_lengths if not is_leaf(tree, node)]
for node in remove:
del path_lengths[node]
return path_lengths
diff --git a/Python/NetworkX/generate_random_tree.py b/Python/NetworkX/generate_random_tree.py
index 22388e46..1b0b620e 100755
--- a/Python/NetworkX/generate_random_tree.py
+++ b/Python/NetworkX/generate_random_tree.py
@@ -17,7 +17,7 @@ def random_subtree(G, root, max_branch, max_height):
if root:
nr_branches = random.randrange(0, max_branch + 1)
for i in range(1, nr_branches + 1):
- node = root + '.' + str(i)
+ node = f'{root}.{str(i)}'
G.add_edge(root, node)
random_subtree(G, node, max_branch, max_height - 1)
else:
diff --git a/Python/NetworkX/par_compute_leaf_lengths.py b/Python/NetworkX/par_compute_leaf_lengths.py
index 2114dcb8..2a48c894 100755
--- a/Python/NetworkX/par_compute_leaf_lengths.py
+++ b/Python/NetworkX/par_compute_leaf_lengths.py
@@ -15,18 +15,18 @@ def _leaf_path_lengths(tree, node):
global pool
if _is_leaf(tree, node):
return {node: 0.0}
- else:
- path_lengths = {}
- results = {}
- for child in tree.neighbors_iter(node):
- results[child] = _leaf_path_lengths(tree, child)
- for child in tree.neighbors_iter(node):
- weight = tree[node][child]['weight']
+ path_lengths = {}
+ results = {
+ child: _leaf_path_lengths(tree, child)
+ for child in tree.neighbors_iter(node)
+ }
+ for child in tree.neighbors_iter(node):
+ weight = tree[node][child]['weight']
# lengths = results[child].get()
- lengths = results[child]
- for leaf in lengths:
- path_lengths[leaf] = lengths[leaf] + weight
- return path_lengths
+ lengths = results[child]
+ for leaf in lengths:
+ path_lengths[leaf] = lengths[leaf] + weight
+ return path_lengths
def leaf_path_lengths(tree):
diff --git a/Python/Numba/Primes/primes_numba.py b/Python/Numba/Primes/primes_numba.py
index 1218bf0a..117d7c9e 100644
--- a/Python/Numba/Primes/primes_numba.py
+++ b/Python/Numba/Primes/primes_numba.py
@@ -5,17 +5,16 @@
def primes(kmax):
p = np.zeros(10000)
result = []
- if kmax > 10000:
- kmax = 10000
+ kmax = min(kmax, 10000)
k = 0
n = 2
while k < kmax:
i = 0
while i < k and n % p[i] != 0:
- i = i + 1
+ i += 1
if i == k:
p[k] = n
- k = k + 1
+ k += 1
result.append(n)
n = n + 1
return result
diff --git a/Python/Numba/Primes/primes_numba_array.py b/Python/Numba/Primes/primes_numba_array.py
index f0b69273..a87fd0a1 100644
--- a/Python/Numba/Primes/primes_numba_array.py
+++ b/Python/Numba/Primes/primes_numba_array.py
@@ -5,17 +5,16 @@
def primes(kmax):
p = array('i', [0]*10000)
result = []
- if kmax > 10000:
- kmax = 10000
+ kmax = min(kmax, 10000)
k = 0
n = 2
while k < kmax:
i = 0
while i < k and n % p[i] != 0:
- i = i + 1
+ i += 1
if i == k:
p[k] = n
- k = k + 1
+ k += 1
result.append(n)
n = n + 1
return result
diff --git a/Python/Numba/Primes/primes_vanilla.py b/Python/Numba/Primes/primes_vanilla.py
index 5656e4bd..93052007 100644
--- a/Python/Numba/Primes/primes_vanilla.py
+++ b/Python/Numba/Primes/primes_vanilla.py
@@ -4,17 +4,16 @@
def primes(kmax):
p = np.zeros(10000)
result = []
- if kmax > 10000:
- kmax = 10000
+ kmax = min(kmax, 10000)
k = 0
n = 2
while k < kmax:
i = 0
while i < k and n % p[i] != 0:
- i = i + 1
+ i += 1
if i == k:
p[k] = n
- k = k + 1
+ k += 1
result.append(n)
n = n + 1
return result
diff --git a/Python/Numba/Primes/primes_vanilla_array.py b/Python/Numba/Primes/primes_vanilla_array.py
index 1275326c..ac6a118c 100644
--- a/Python/Numba/Primes/primes_vanilla_array.py
+++ b/Python/Numba/Primes/primes_vanilla_array.py
@@ -4,17 +4,16 @@
def primes(kmax):
p = array('i', [0]*10000)
result = []
- if kmax > 10000:
- kmax = 10000
+ kmax = min(kmax, 10000)
k = 0
n = 2
while k < kmax:
i = 0
while i < k and n % p[i] != 0:
- i = i + 1
+ i += 1
if i == k:
p[k] = n
- k = k + 1
+ k += 1
result.append(n)
n = n + 1
return result
diff --git a/Python/Numpy/data_writer.py b/Python/Numpy/data_writer.py
index a089555b..51a1485c 100755
--- a/Python/Numpy/data_writer.py
+++ b/Python/Numpy/data_writer.py
@@ -28,10 +28,7 @@ def linear(x, a=0.4, b=1.2):
f = np.vectorize(linear)
y = f(x, options.a, options.b) + options.sigma*np.random.randn(len(x))
-if options.out:
- out = open(options.out, 'w')
-else:
- out = sys.stdout
+out = open(options.out, 'w') if options.out else sys.stdout
out.write(names + '\n')
for i in range(len(x)):
out.write('{x:.7e},{y:.7e}\n'.format(x=x[i], y=y[i]))
diff --git a/Python/OsFileSystem/list_files.py b/Python/OsFileSystem/list_files.py
index d6868f95..6551cf05 100755
--- a/Python/OsFileSystem/list_files.py
+++ b/Python/OsFileSystem/list_files.py
@@ -16,8 +16,7 @@
options = arg_parser.parse_args()
for directory, _, files in os.walk(options.dir):
if options.verbose:
- print("### checking directory '{}'".format(directory),
- file=sys.stderr)
+ print(f"### checking directory '{directory}'", file=sys.stderr)
for file_name in files:
_, ext = os.path.splitext(file_name)
if ext == options.ext:
diff --git a/Python/ParameterOptimization/JuliaJobs/julia_optimize.py b/Python/ParameterOptimization/JuliaJobs/julia_optimize.py
index 10246db7..5c03c155 100755
--- a/Python/ParameterOptimization/JuliaJobs/julia_optimize.py
+++ b/Python/ParameterOptimization/JuliaJobs/julia_optimize.py
@@ -14,8 +14,7 @@ def function(params):
chunk = int(chunk)
# ppn ranges from 0 to 35 (inclusive)
ppn = 1 + int(ppn)
- omp_env = (f'schedule={schedule},chunk={int(chunk)},' +
- f'OMP_NUM_THREADS={ppn}')
+ omp_env = (f'schedule={schedule},chunk={chunk},' + f'OMP_NUM_THREADS={ppn}')
cmd = ['qsub', '-l', f'nodes=1:ppn={ppn}:haswell',
'-v', omp_env, 'julia.pbs']
process = subprocess.run(cmd, stdout=subprocess.PIPE,
@@ -26,10 +25,9 @@ def function(params):
while not output_file.exists():
time.sleep(3)
print(f'### info: job {job_id} finished', file=sys.stderr)
- runtimes = list()
+ runtimes = []
with open(f'julia.pbs.time{job_id}', 'r') as time_file:
- for line in time_file:
- runtimes.append(float(time_file.readline()))
+        runtimes.extend(float(line) for line in time_file)
runtime = sum(runtimes)/len(runtimes)
return {
'loss': runtime, 'schedule': schedule, 'chunk': chunk,
diff --git a/Python/ParameterOptimization/JuliaProcesses/julia_optimize.py b/Python/ParameterOptimization/JuliaProcesses/julia_optimize.py
index afbbd983..2099f7d2 100755
--- a/Python/ParameterOptimization/JuliaProcesses/julia_optimize.py
+++ b/Python/ParameterOptimization/JuliaProcesses/julia_optimize.py
@@ -18,7 +18,7 @@ def function(params):
environ['OMP_SCHEDULE'] = f'{schedule},{chunk}'
environ['OMP_NUM_THREADS'] = f'{ppn}'
cmd = ['./julia_omp.exe 4096']
- runtimes = list()
+ runtimes = []
for _ in range(3):
process = subprocess.run(cmd, stderr=subprocess.PIPE,
stdout=subprocess.PIPE,
diff --git a/Python/Paramiko/ls.py b/Python/Paramiko/ls.py
index 8e4c27b6..4ee8ae02 100755
--- a/Python/Paramiko/ls.py
+++ b/Python/Paramiko/ls.py
@@ -24,7 +24,7 @@ def connect(host, user):
ssh = connect(options.host, options.user)
cmd = 'ls -l'
if options.dir:
- cmd += ' ' + options.dir
+ cmd += f' {options.dir}'
_, stdout, stderr = ssh.exec_command(cmd)
for line in stdout:
print(line.rstrip())
diff --git a/Python/PhraseIndexing/indexer.py b/Python/PhraseIndexing/indexer.py
index 80949694..7226a136 100755
--- a/Python/PhraseIndexing/indexer.py
+++ b/Python/PhraseIndexing/indexer.py
@@ -37,8 +37,7 @@ def parse(self, text_file_name, phrases, show_progress=False):
self._phrases = phrases
with open(text_file_name, 'r', newline='') as text_file:
word = ''
- buffer = text_file.read(self._read_length)
- while buffer:
+ while buffer := text_file.read(self._read_length):
for character in buffer:
if character.isalpha():
word += character
@@ -55,7 +54,6 @@ def parse(self, text_file_name, phrases, show_progress=False):
self._line_nr % self._progress_line_nr == 0):
msg = 'parsing line {0:d}\n'
sys.stderr.write(msg.format(self._line_nr))
- buffer = text_file.read(self._read_length)
return self._phrases
if __name__ == '__main__':
@@ -79,8 +77,7 @@ def parse(self, text_file_name, phrases, show_progress=False):
max_phrase_len = 0
with open(options.phrases, 'r') as phrases_file:
for line in phrases_file:
- phrase = line.strip()
- if phrase:
+ if phrase := line.strip():
phrases[line.strip()] = []
max_phrase_len = max(len(line.split(' ')), max_phrase_len)
if options.verbose:
diff --git a/Python/Plotly/add_data_stream_plot.py b/Python/Plotly/add_data_stream_plot.py
index 9a5e585f..3602fee4 100755
--- a/Python/Plotly/add_data_stream_plot.py
+++ b/Python/Plotly/add_data_stream_plot.py
@@ -11,12 +11,9 @@
stream = py.Stream(stream_id)
stream.open()
-i = 0
N = 200
-while i < N:
- i += 1
-
+for i in range(1, N + 1):
x = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')
y = math.sin(2.0*math.pi*i/15.0) + random.gauss(0.0, 0.2)
stream.write(dict(x=x, y=y))
diff --git a/Python/Profiling/primes_cprof.py b/Python/Profiling/primes_cprof.py
index 61342416..3b4ff77a 100755
--- a/Python/Profiling/primes_cprof.py
+++ b/Python/Profiling/primes_cprof.py
@@ -8,17 +8,16 @@ def primes(kmax):
max_size = 1000000
p = array('i', [0]*max_size)
result = []
- if kmax > max_size:
- kmax = max_size
+ kmax = min(kmax, max_size)
k = 0
n = 2
while k < kmax:
i = 0
while i < k and n % p[i] != 0:
- i = i + 1
+ i += 1
if i == k:
p[k] = n
- k = k + 1
+ k += 1
result.append(n)
n = n + 1
return result
diff --git a/Python/Profiling/primes_lprof.py b/Python/Profiling/primes_lprof.py
index e571d7a5..6e0c3fa4 100755
--- a/Python/Profiling/primes_lprof.py
+++ b/Python/Profiling/primes_lprof.py
@@ -9,17 +9,16 @@ def primes(kmax):
max_size = 1000000
p = array('i', [0]*max_size)
result = []
- if kmax > max_size:
- kmax = max_size
+ kmax = min(kmax, max_size)
k = 0
n = 2
while k < kmax:
i = 0
while i < k and n % p[i] != 0:
- i = i + 1
+ i += 1
if i == k:
p[k] = n
- k = k + 1
+ k += 1
result.append(n)
n = n + 1
return result
diff --git a/Python/PyParsing/macro_expander.py b/Python/PyParsing/macro_expander.py
index 0c15b33b..0237a650 100755
--- a/Python/PyParsing/macro_expander.py
+++ b/Python/PyParsing/macro_expander.py
@@ -83,10 +83,10 @@ def main():
expander.add_macro(macro_def.__name__, macro_def)
print(expander.expand(text))
except UndefinedMacroError as error:
- sys.stderr.write('### error: ' + str(error) + '\n')
+ sys.stderr.write(f'### error: {error}\n')
sys.exit(2)
except Exception as error:
- sys.stderr.write('### error: ' + str(error) + '\n')
+ sys.stderr.write(f'### error: {error}\n')
sys.exit(1)
if __name__ == '__main__':
diff --git a/Python/PyParsing/macro_tests.py b/Python/PyParsing/macro_tests.py
index 60627c98..d1ef28e0 100755
--- a/Python/PyParsing/macro_tests.py
+++ b/Python/PyParsing/macro_tests.py
@@ -13,7 +13,7 @@ def setUp(self):
self._expander.add_macro('lower', lambda x: x.lower())
self._expander.add_macro('gjb', lambda: 'Geert Jan Bex')
self._expander.add_macro('repeat', lambda x, n: int(n)*x)
- self._expander.add_macro('cat', lambda x, y: x + '-' + y)
+ self._expander.add_macro('cat', lambda x, y: f'{x}-{y}')
self._expander.add_macro('add', lambda x, y: str(int(x) + int(y)))
def test_upper(self):
diff --git a/Python/PyParsing/newick_converter.py b/Python/PyParsing/newick_converter.py
index 058a25ee..f2f959a1 100755
--- a/Python/PyParsing/newick_converter.py
+++ b/Python/PyParsing/newick_converter.py
@@ -38,15 +38,10 @@ class RelationalNewickWriter(RelationalWriter):
def node_attr(self, node):
'''formats node attributes, if any'''
- attr = ''
- if node.label is not None:
- attr += '\t{0}'.format(node.label)
- else:
- attr += '\tNone'
- if node.length is not None:
- attr += '\t{0}'.format(node.length)
- else:
- attr += '\tNone'
+ attr = '' + (
+ '\t{0}'.format(node.label) if node.label is not None else '\tNone'
+ )
+ attr += '\t{0}'.format(node.length) if node.length is not None else '\tNone'
return attr
diff --git a/Python/PyParsing/node_utils.py b/Python/PyParsing/node_utils.py
index f715deb9..581713ac 100755
--- a/Python/PyParsing/node_utils.py
+++ b/Python/PyParsing/node_utils.py
@@ -17,26 +17,23 @@ def depth(node):
def depth_first_iterator(node):
'''returns an depth-first itreator over the node and its children'''
- if node is not None:
- node_stack = [(node, -1)]
- while len(node_stack) > 0:
- node, child_index = node_stack.pop()
- if child_index == -1:
- if not node.is_leaf():
- node_stack.append((node, child_index + 1))
- yield node
- elif child_index < node.nr_children():
+ if node is None:
+ return
+ node_stack = [(node, -1)]
+ while node_stack:
+ node, child_index = node_stack.pop()
+ if child_index == -1:
+ if not node.is_leaf():
node_stack.append((node, child_index + 1))
- node_stack.append((node.child(child_index), -1))
+ yield node
+ elif child_index < node.nr_children():
+ node_stack.append((node, child_index + 1))
+ node_stack.append((node.child(child_index), -1))
def nr_leaf_nodes(start_node):
'''returns the number of leaf nodes starting form the given node'''
- nr = 0
- for node in depth_first_iterator(start_node):
- if node.is_leaf():
- nr += 1
- return nr
+ return sum(1 for node in depth_first_iterator(start_node) if node.is_leaf())
class DepthTest(unittest.TestCase):
@@ -72,36 +69,28 @@ def test_empty_tree(self):
parser = NodeParser()
parser.parse('()')
tree = parser.node()
- nodes = []
- for node in depth_first_iterator(tree):
- nodes.append(node.name)
+ nodes = [node.name for node in depth_first_iterator(tree)]
self.assertEqual(nodes, [])
def test_single_node(self):
parser = NodeParser()
parser.parse('(c1)')
tree = parser.node()
- nodes = []
- for node in depth_first_iterator(tree):
- nodes.append(node.name)
+ nodes = [node.name for node in depth_first_iterator(tree)]
self.assertEqual(nodes, ['c1'])
def test_tree(self):
parser = NodeParser()
parser.parse('(c1 ((c2) (c3 ((c4) (c5))) (c6)))')
tree = parser.node()
- nodes = []
- for node in depth_first_iterator(tree):
- nodes.append(node.name)
+ nodes = [node.name for node in depth_first_iterator(tree)]
self.assertEqual(nodes, ['c1', 'c2', 'c3', 'c4', 'c5', 'c6'])
def test_deep_tree(self):
parser = NodeParser()
parser.parse('(c1 ((c2 ((c3 ((c4)))))))')
tree = parser.node()
- nodes = []
- for node in depth_first_iterator(tree):
- nodes.append(node.name)
+ nodes = [node.name for node in depth_first_iterator(tree)]
self.assertEqual(nodes, ['c1', 'c2', 'c3', 'c4'])
diff --git a/Python/PyParsing/tree_converter.py b/Python/PyParsing/tree_converter.py
index d9637fef..37619efc 100755
--- a/Python/PyParsing/tree_converter.py
+++ b/Python/PyParsing/tree_converter.py
@@ -55,7 +55,7 @@ def write(self, node):
repr = '{0}\t{1}\t{2}'.format(id_dict[node], None, node.name)
repr += self.node_attr(node) + '\n'
queue = [node]
- while len(queue) > 0:
+ while queue:
current = queue.pop(0)
for child in current.children():
current_id += 1
diff --git a/Python/SentenceCounter/count_sentences.py b/Python/SentenceCounter/count_sentences.py
index a40b565f..effb580d 100755
--- a/Python/SentenceCounter/count_sentences.py
+++ b/Python/SentenceCounter/count_sentences.py
@@ -5,11 +5,12 @@
in a tehcnical sense.
"""
+
from argparse import ArgumentParser
import os.path
import sys
-terminators = set(['.', '?', '!'])
+terminators = {'.', '?', '!'}
def extract_prefix(file_name, start_pos, end_pos):
@@ -44,11 +45,10 @@ def extract_suffix(file_name, start_pos, end_pos):
c = file.read(1)
if c in terminators:
break
- else:
- suffix_str = c + suffix_str
- current_pos = file.tell()
- file.seek(current_pos - 2)
- end_pos -= 1
+ suffix_str = c + suffix_str
+ current_pos = file.tell()
+ file.seek(current_pos - 2)
+ end_pos -= 1
return (suffix_str, end_pos)
diff --git a/Python/SentenceCounter/count_sentences_par.py b/Python/SentenceCounter/count_sentences_par.py
index 499e9e18..ede939ee 100755
--- a/Python/SentenceCounter/count_sentences_par.py
+++ b/Python/SentenceCounter/count_sentences_par.py
@@ -5,12 +5,13 @@
in a technical sense.
"""
+
from argparse import ArgumentParser
import os.path
import sys
from mpi4py import MPI
-terminators = set(['.', '?', '!'])
+terminators = {'.', '?', '!'}
def extract_prefix(file_name, start_pos, end_pos):
@@ -45,11 +46,10 @@ def extract_suffix(file_name, start_pos, end_pos):
c = file.read(1)
if c in terminators:
break
- else:
- suffix_str = c + suffix_str
- current_pos = file.tell()
- file.seek(current_pos - 2)
- end_pos -= 1
+ suffix_str = c + suffix_str
+ current_pos = file.tell()
+ file.seek(current_pos - 2)
+ end_pos -= 1
return (suffix_str, end_pos)
@@ -82,10 +82,7 @@ def main():
file_size = os.path.getsize(options.file)
chunck_size = file_size//size
start_pos = chunck_size*rank
- if rank + 1 < size:
- end_pos = start_pos + chunck_size - 1
- else:
- end_pos = file_size - 1
+ end_pos = start_pos + chunck_size - 1 if rank + 1 < size else file_size - 1
if options.is_verbose:
msg = "rank {0} reading '{1}' from {2} to {3}\n"
sys.stderr.write(msg.format(rank, options.file, start_pos, end_pos))
diff --git a/Python/SystemsProgramming/config_args.py b/Python/SystemsProgramming/config_args.py
index b68dfea1..815fe966 100755
--- a/Python/SystemsProgramming/config_args.py
+++ b/Python/SystemsProgramming/config_args.py
@@ -9,13 +9,13 @@ def dump_options(settings):
print(f'{key} = {value}')
if __name__ == '__main__':
- config_parser = ConfigParser()
+ config_parser = ConfigParser()
sys_conf_path = Path('system.conf')
if sys_conf_path.exists():
config_parser.read('system.conf')
options = dict(config_parser.items('DEFAULT'))
else:
- options = dict()
+ options = {}
arg_parser = ArgumentParser(description='experiment with '
'configuration files and '
'command line arguments')
@@ -24,7 +24,7 @@ def dump_options(settings):
if argv.conf:
config_parser = ConfigParser()
config_parser.read(argv.conf)
- options.update(dict(config_parser.items('DEFAULT')))
+ options |= dict(config_parser.items('DEFAULT'))
arg_parser.set_defaults(**options)
arg_parser.add_argument('--action', help='action to do')
arg_parser.add_argument('--name', help='name for action')
diff --git a/Python/Typing/correct.py b/Python/Typing/correct.py
index 4bc8e2d4..8d927b14 100755
--- a/Python/Typing/correct.py
+++ b/Python/Typing/correct.py
@@ -4,10 +4,7 @@
def fib(n: int) -> int:
- if n == 0 or n == 1:
- return 1
- else:
- return fib(n - 1) + fib(n - 2)
+ return 1 if n in {0, 1} else fib(n - 1) + fib(n - 2)
if __name__ == '__main__':
n = int(sys.argv[1]) # type: int
diff --git a/Python/Typing/dict_correct.py b/Python/Typing/dict_correct.py
index 8b73c952..3793b5ce 100755
--- a/Python/Typing/dict_correct.py
+++ b/Python/Typing/dict_correct.py
@@ -6,7 +6,7 @@
def word_count(text: str) -> Dict[str, int]:
- counts = dict() # type: Dict[str, int]
+ counts = {}
words = re.split(r'\W+', text)
for word in words:
word = word.lower()
diff --git a/Python/Typing/dict_incorrect_01.py b/Python/Typing/dict_incorrect_01.py
index e205ecbf..0e603253 100755
--- a/Python/Typing/dict_incorrect_01.py
+++ b/Python/Typing/dict_incorrect_01.py
@@ -6,7 +6,7 @@
def word_count(text: str) -> Dict[str, int]:
- counts = dict() # type: Dict[str, int]
+ counts = {}
words = re.split(r'\W+', text)
nr_words = 0
for word in words:
@@ -16,8 +16,8 @@ def word_count(text: str) -> Dict[str, int]:
counts[word] += 1
nr_words += 1
nr_words -= counts.pop('')
- for word, count in counts.items():
- counts[word] /= nr_words
+ for word in counts:
+ counts[word] /= nr_words
return counts
if __name__ == '__main__':
diff --git a/Python/Typing/incorrect_01.py b/Python/Typing/incorrect_01.py
index 29901863..9208b2a6 100644
--- a/Python/Typing/incorrect_01.py
+++ b/Python/Typing/incorrect_01.py
@@ -4,10 +4,7 @@
def fib(n: int) -> int:
- if n == 0 or n == 1:
- return 1
- else:
- return fib(n - 1) + fib(n - 2)
+ return 1 if n in {0, 1} else fib(n - 1) + fib(n - 2)
if __name__ == '__main__':
n = sys.argv[1]
diff --git a/Python/Typing/incorrect_02.py b/Python/Typing/incorrect_02.py
index 8e9cfd98..345404bb 100644
--- a/Python/Typing/incorrect_02.py
+++ b/Python/Typing/incorrect_02.py
@@ -4,10 +4,7 @@
def fib(n: int) -> int:
- if n == 0 or n == 1:
- return 1
- else:
- return fib(n - 1) + fib(n - 2)
+ return 1 if n in {0, 1} else fib(n - 1) + fib(n - 2)
if __name__ == '__main__':
n = int(sys.argv[1]) # type: str
diff --git a/Python/Typing/incorrect_03.py b/Python/Typing/incorrect_03.py
index eddc65d0..41c732a3 100755
--- a/Python/Typing/incorrect_03.py
+++ b/Python/Typing/incorrect_03.py
@@ -4,10 +4,7 @@
def fib(n: int) -> int:
- if n == 0 or n == 1:
- return 1
- else:
- return fib(n - 1) + fib(n - 2)
+ return 1 if n in {0, 1} else fib(n - 1) + fib(n - 2)
if __name__ == '__main__':
n = int(sys.argv[1])
diff --git a/Python/Typing/people_incorrect.py b/Python/Typing/people_incorrect.py
index 13919ccb..73e9d6a9 100755
--- a/Python/Typing/people_incorrect.py
+++ b/Python/Typing/people_incorrect.py
@@ -27,9 +27,7 @@ def average_age(people: List[Person]) -> float:
if __name__ == '__main__':
- people: List[Person] = list()
- people.append(Person('alice', 43))
- people.append('bob')
+ people: List[Person] = [Person('alice', 43), 'bob']
people.append(Person('carol', '17'))
for person in people:
print(person)
diff --git a/Python/Unittest/Fixtures/tests.py b/Python/Unittest/Fixtures/tests.py
index d0205b5c..9e834000 100755
--- a/Python/Unittest/Fixtures/tests.py
+++ b/Python/Unittest/Fixtures/tests.py
@@ -110,9 +110,7 @@ def test_projects_start_date(self):
ORDER BY project_name ASC;''',
('2014-11-01', )
)
- projects = []
- for row in self._cursor:
- projects.append(row['project_name'])
+ projects = [row['project_name'] for row in self._cursor]
self.assertListEqual(expected_projects, projects)
def test_unassigned_researchers(self):
@@ -127,9 +125,7 @@ def test_unassigned_researchers(self):
EXCEPT SELECT researcher_id
FROM staff_assignments);'''
)
- researchers = []
- for row in self._cursor:
- researchers.append(row['first_name'])
+ researchers = [row['first_name'] for row in self._cursor]
self.assertListEqual(expected_researchers, researchers)
def test_assigned_projects(self):
@@ -160,7 +156,7 @@ def test_samples_per_project(self):
for row in self._cursor:
self.assertEqual(len(expected_samples[row['project_name']]),
row['nr_samples'])
- for project_name in expected_samples:
+ for project_name, value in expected_samples.items():
self._cursor.execute(
'''SELECT s.organism AS organism
FROM projects AS p, samples AS s
@@ -168,10 +164,8 @@ def test_samples_per_project(self):
p.project_id = s.project_id;''',
(project_name, )
)
- samples = set()
- for row in self._cursor:
- samples.add(row['organism'])
- self.assertSetEqual(expected_samples[project_name], samples)
+ samples = {row['organism'] for row in self._cursor}
+ self.assertSetEqual(value, samples)
class ConstraintsTest(unittest.TestCase):
@@ -256,16 +250,12 @@ def test_researcher_delete_trigger(self):
self._cursor.execute(
'''SELECT COUNT(*) FROM staff_assignments;'''
)
- nr_rows = 0
- for row in self._cursor:
- nr_rows += 1
+ nr_rows = sum(1 for _ in self._cursor)
self.assertEqual(expected_nr_rows, nr_rows)
self._cursor.execute(
'''SELECT COUNT(*) FROM project_staffing;'''
)
- nr_rows = 0
- for row in self._cursor:
- nr_rows += 1
+ nr_rows = sum(1 for _ in self._cursor)
self.assertEqual(expected_nr_rows, nr_rows)
def test_project_delete_trigger(self):
@@ -283,9 +273,7 @@ def test_project_delete_trigger(self):
FROM projects AS p, staff_assignments AS s
WHERE p.project_id = s.project_id;'''
)
- staffed_projects = set()
- for row in self._cursor:
- staffed_projects.add(row['project_name'])
+ staffed_projects = {row['project_name'] for row in self._cursor}
self.assertSetEqual(expected_staffed_projects, staffed_projects)
def test_sample_update_trigger(self):
diff --git a/Python/Unittest/PyTest/Fixtures/test_wc.py b/Python/Unittest/PyTest/Fixtures/test_wc.py
index 470f3bc7..070c4bc1 100644
--- a/Python/Unittest/PyTest/Fixtures/test_wc.py
+++ b/Python/Unittest/PyTest/Fixtures/test_wc.py
@@ -7,7 +7,7 @@ def text_file():
file_path = Path('my_text.txt')
with open(file_path, 'w') as out_file:
for line_nr in range(1, 11):
- for word_nr in range(1, line_nr + 1):
+ for _ in range(1, line_nr + 1):
print('bla', file=out_file, end=' ')
print('', file=out_file)
yield file_path
@@ -22,8 +22,6 @@ def test_wc_l(text_file):
def test_wc_w(text_file):
output = check_output(['wc', '-w', text_file])
nr_lines = 10
- nr_words = 0
- for words in range(1, nr_lines + 1):
- nr_words += words
+ nr_words = sum(range(1, nr_lines + 1))
lines, _ = output.decode(encoding='utf-8').split()
assert int(lines) == nr_words
diff --git a/Python/Unittest/PyTest/Simple/fac.py b/Python/Unittest/PyTest/Simple/fac.py
index f58a03ed..08609674 100755
--- a/Python/Unittest/PyTest/Simple/fac.py
+++ b/Python/Unittest/PyTest/Simple/fac.py
@@ -1,14 +1,11 @@
#!/usr/bin/env python
def bad_fac(n):
- if n < 2:
- return 1
- else:
- return n*fac(n - 1)
+ return 1 if n < 2 else n*fac(n - 1)
def fac(n):
- if n == 0 or n == 1:
+ if n in {0, 1}:
return 1;
elif n >= 2:
result = 1
diff --git a/Python/Unittest/Simple/func_lib.py b/Python/Unittest/Simple/func_lib.py
index 24170625..c35f237c 100755
--- a/Python/Unittest/Simple/func_lib.py
+++ b/Python/Unittest/Simple/func_lib.py
@@ -39,12 +39,7 @@ def is_prime(n):
'''returns True when the given number of prime, false otherwise'''
if n < 0:
raise InvalidArgumentException('number should be positive')
- factor = 2
- while factor <= int(math.sqrt(n)):
- if n % factor == 0:
- return False
- factor += 1
- return True
+ return all(n % factor != 0 for factor in range(2, int(math.sqrt(n)) + 1))
# correct implementation for n >= 0
# return n > 1
# for n < 0, an exception might be thrown
@@ -52,11 +47,7 @@ def is_prime(n):
def primes(n):
'''returns a list of primes less than or equal to the given nuber'''
- prime_list = []
- for i in range(n + 1):
- if is_prime(i):
- prime_list.append(i)
- return prime_list
+ return [i for i in range(n + 1) if is_prime(i)]
if __name__ == '__main__':
diff --git a/Python/WebScraping/link_web.py b/Python/WebScraping/link_web.py
index f6614a86..1d80961f 100755
--- a/Python/WebScraping/link_web.py
+++ b/Python/WebScraping/link_web.py
@@ -12,33 +12,33 @@ def show_links(soup, out=sys.stderr):
print("Opened start page '{0}'".format(soup.title.string), file=out)
links = soup.find_all("a")
for link in links:
- href = link.get('href')
- if href:
+ if href := link.get('href'):
print('\t{0}'.format(href), file=out)
def process_page(pages_to_do, pages_done, max_level, graph, verbose=False):
- if pages_to_do:
- page_url, level = pages_to_do.popleft()
- if level <= max_level:
- if verbose:
- print('{0}: {1}'.format(page_url, level))
- pages_done.add(page_url)
- try:
- page = urllib.request.urlopen(page_url)
- soup = BeautifulSoup(page, 'html5lib')
- links = soup.find_all("a")
- for link in links:
- href = link.get('href')
- if href and href.startswith('http'):
- if href not in pages_done:
- pages_to_do.append((href, level + 1))
- graph.add_edge(page_url, href)
- except urllib.error.HTTPError:
- print('# warning: can not handle {0}'.format(page_url),
- file=sys.stderr)
- except urllib.error.URLError:
- print('# warning: can not handle {0}'.format(page_url),
- file=sys.stderr)
+ if not pages_to_do:
+ return
+ page_url, level = pages_to_do.popleft()
+ if level <= max_level:
+ if verbose:
+ print('{0}: {1}'.format(page_url, level))
+ pages_done.add(page_url)
+ try:
+ page = urllib.request.urlopen(page_url)
+ soup = BeautifulSoup(page, 'html5lib')
+ links = soup.find_all("a")
+ for link in links:
+ href = link.get('href')
+ if href and href.startswith('http'):
+ if href not in pages_done:
+ pages_to_do.append((href, level + 1))
+ graph.add_edge(page_url, href)
+ except urllib.error.HTTPError:
+ print('# warning: can not handle {0}'.format(page_url),
+ file=sys.stderr)
+ except urllib.error.URLError:
+ print('# warning: can not handle {0}'.format(page_url),
+ file=sys.stderr)
if __name__ == '__main__':
arg_parser = ArgumentParser(description='create graph of hyperlinks')
diff --git a/Python/XmlGenerator/gen_xml.py b/Python/XmlGenerator/gen_xml.py
index 10ef38e7..d37613db 100755
--- a/Python/XmlGenerator/gen_xml.py
+++ b/Python/XmlGenerator/gen_xml.py
@@ -54,10 +54,7 @@ def random(self):
for _ in range(self._size):
tag = self._tags.random()
element = doc.createElement(tag)
- if node_list:
- parent = random.choice(node_list)
- else:
- parent = doc
+ parent = random.choice(node_list) if node_list else doc
parent.appendChild(element)
node_list.append(element)
for element in node_list:
diff --git a/Visualization/Synthetic/create_dataset.py b/Visualization/Synthetic/create_dataset.py
index bdb24243..ff5873c5 100755
--- a/Visualization/Synthetic/create_dataset.py
+++ b/Visualization/Synthetic/create_dataset.py
@@ -158,7 +158,7 @@ def compute_vector_field(h5file, centers, xs, ys, zs, max_field=50.0,
options.file)
xdmf.create_centers()
base_name, _ = os.path.splitext(options.file)
- xdmf_file_name = base_name + '_centers.xdmf'
+ xdmf_file_name = f'{base_name}_centers.xdmf'
xdmf.to_xml(xdmf_file_name)
xdmf.to_xml(xdmf_file_name)
if options.particle_data:
@@ -166,7 +166,7 @@ def compute_vector_field(h5file, centers, xs, ys, zs, max_field=50.0,
options.file)
xdmf.create_particles()
base_name, _ = os.path.splitext(options.file)
- xdmf_file_name = base_name + '_particles.xdmf'
+ xdmf_file_name = f'{base_name}_particles.xdmf'
xdmf.to_xml(xdmf_file_name)
if options.scalar_field_data:
xdmf = Xdmf(options.centers, options.particles, options.points,
@@ -174,7 +174,7 @@ def compute_vector_field(h5file, centers, xs, ys, zs, max_field=50.0,
xdmf.create_field_geometry()
xdmf.create_scalar_field()
base_name, _ = os.path.splitext(options.file)
- xdmf_file_name = base_name + '_scalar_field.xdmf'
+ xdmf_file_name = f'{base_name}_scalar_field.xdmf'
xdmf.to_xml(xdmf_file_name)
if options.vector_field_data:
xdmf = Xdmf(options.centers, options.particles, options.points,
@@ -182,5 +182,5 @@ def compute_vector_field(h5file, centers, xs, ys, zs, max_field=50.0,
xdmf.create_field_geometry()
xdmf.create_vector_field()
base_name, _ = os.path.splitext(options.file)
- xdmf_file_name = base_name + '_vector_field.xdmf'
+ xdmf_file_name = f'{base_name}_vector_field.xdmf'
xdmf.to_xml(xdmf_file_name)