
Commit

fixing style refs #12
namdre committed Dec 22, 2024
1 parent 10ed703 commit be6d06d
Showing 8 changed files with 23 additions and 23 deletions.
1 change: 1 addition & 0 deletions tests/netedit/viewPositions.py
@@ -45,6 +45,7 @@ class externLaneTop:
x = 42
y = 136


class overlappedTest:
x = 500
y = 218
11 changes: 5 additions & 6 deletions tools/assign/duaIterate_routeCosts.py
@@ -34,9 +34,9 @@
>>> import duaIterate_routeCosts as rc
>>> r = rc.load('.', range(47, 50)) # load iteration numbers 47,48,49
>>> f = rc.filter(r, via=['531478184','25483415']) # filter routes that pass both edges
47 Costs: count 1124, min 896.14 (126868_8), max 1960.17 (225725_1), mean 1355.41, Q1 1257.53, median 1325.86, Q3 1434.97
48 Costs: count 1124, min 896.47 (126868_8.1), max 1993.74 (225725_1), mean 1355.02, Q1 1257.32, median 1323.68, Q3 1434.09
49 Costs: count 1124, min 898.51 (126868_8.1), max 1958.68 (225725_1), mean 1355.00, Q1 1257.93, median 1323.39, Q3 1434.92
47 Costs: count 1124, min 896.14 (veh0), max 1960.17 (veh1), mean 1355.41, Q1 1257.53, median 1325.86, Q3 1434.97
48 Costs: count 1124, min 896.47 (veh0), max 1993.74 (veh2), mean 1355.02, Q1 1257.32, median 1323.68, Q3 1434.09
49 Costs: count 1124, min 898.51 (veh3), max 1958.68 (veh1), mean 1355.00, Q1 1257.93, median 1323.39, Q3 1434.92
Implementation Note:
edgeIDs are mapped to numbers in a numpy array to conserve memory
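The note above describes storing routes as integer arrays rather than lists of edge-ID strings. A minimal sketch of that idea, for illustration only; the helper name edge2num and the dtype are assumptions, not the module's actual internals:

import numpy as np

edge2num = {}  # maps edge-ID strings to small integers

def edgeToNum(edgeID):
    # assign the next free integer to an edge ID seen for the first time
    return edge2num.setdefault(edgeID, len(edge2num))

# a route then becomes a compact integer array instead of a list of strings
route = np.array([edgeToNum(e) for e in ['531478184', '25483415']], dtype=np.int32)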
@@ -72,7 +72,7 @@ def hasSequence(array, via):
i = npindex(array, via[0])
for edge in via[1:]:
i = npindex(array, edge)
if i == None:
if i is None:
return False
return True
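The change from "== None" to "is None" above follows the usual PEP 8 recommendation: "==" dispatches to a class's __eq__ and can give surprising answers, while "is" checks identity against the None singleton. A minimal illustration with a hypothetical class (not from this file):

class AlwaysEqual:
    def __eq__(self, other):
        return True  # pathological but legal __eq__

obj = AlwaysEqual()
print(obj == None)  # True, although obj is clearly not None
print(obj is None)  # False, the identity check is reliable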

@@ -92,7 +92,6 @@ def numberToString(n):
def load(baseDir, iterations, suffix="gz"):
    """iterations is an iterable that gives the iteration numbers to load
    """
iteration = set(iterations)
result = []
files = glob.glob(os.path.join(baseDir, "**/*.rou.alt.%s" % suffix))
files = [(int(os.path.basename(os.path.dirname(f))), f) for f in files]
@@ -152,7 +151,7 @@ def filter(stepRoutes, origin=None, dest=None, via=None, forbidden=None, cutVia=
iStart = 0
iEnd = iStart
elif dest:
iEnd = edges.size - 1
iEnd = r.edges.size - 1
else:
iEnd = iStart
else:
2 changes: 1 addition & 1 deletion tools/build_config/schemaCheck.py
@@ -68,7 +68,7 @@ def validate(root, f):
print(normalized + s, file=sys.stderr)
except Exception as e:
print("Error on parsing '%s'!" % normalized, file=sys.stderr)
if haveLxml and type(e) == etree.XMLSyntaxError:
if haveLxml and type(e) is etree.XMLSyntaxError:
# we expect to encounter such errors and don't need a full stack trace
print(e, file=sys.stderr)
else:
3 changes: 2 additions & 1 deletion tools/randomTrips.py
@@ -756,7 +756,8 @@ def generate_one_flow(label, combined_attrs, departureTime, arrivalTime, period,
fouttrips.write((' <flow id="%s" begin="%s" end="%s" period="%s"%s/>\n') % (
label, departureTime, arrivalTime, intIfPossible(period * options.flows), combined_attrs))

def generate_one_personflow(label, combined_attrs, attrFrom, attrTo, arrivalPos, departureTime, arrivalTime, period, options, timeIdx):
def generate_one_personflow(label, combined_attrs, attrFrom, attrTo, arrivalPos,
departureTime, arrivalTime, period, options, timeIdx):
if len(options.period) > 1:
label = label + "#%s" % timeIdx
if options.binomial:
8 changes: 1 addition & 7 deletions tools/routeSampler.py
@@ -248,13 +248,6 @@ def sample(self, rng, n, begin, end):
scale = right - left
r = [left + v * scale for v in rng.random(n)]

#print("cdf_x", self.cdf_x)
#print("cdf_y", self.cdf_y)
#print("begin", begin, "end", end)
#print("icdf_x", icdf_x)
#print("icdf_y", icdf_y)
#print("scale", scale, "left", left, "right", right)

# evaluate icdf
return np.interp(r, icdf_x, icdf_y)
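For context on the surrounding code (unchanged by this commit, only the commented-out debug prints were removed): sample() draws uniform values in [left, right) and maps them through a piecewise-linear inverse CDF with numpy.interp. A self-contained sketch with made-up breakpoints, not the tool's actual distribution:

import numpy as np

rng = np.random.RandomState(42)
icdf_x = [0.0, 0.5, 1.0]    # cumulative probabilities (assumed toy values)
icdf_y = [0.0, 2.0, 10.0]   # corresponding sample values
left, right = 0.0, 1.0      # probability range to sample from
scale = right - left
r = [left + v * scale for v in rng.random(3)]  # uniform draws in [left, right)
print(np.interp(r, icdf_x, icdf_y))            # evaluate the inverse CDF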

@@ -803,6 +796,7 @@ def initTotalCounts(options, routes, intervals, b, e):
" or match the number of data intervals (%s)" % len(intervals))
sys.exit()


def main(options):
rng = np.random.RandomState(options.seed)

10 changes: 6 additions & 4 deletions tools/sumolib/miscutils.py
@@ -40,19 +40,21 @@


_BLACKLIST = type, ModuleType, FunctionType


def get_size(obj):
"""sum size of object & members.
lifted from https://stackoverflow.com/a/30316760
"""
if isinstance(obj, (_BLACKLIST)):
raise TypeError('getsize() does not take argument of type: '+ str(type(obj)))
raise TypeError('getsize() does not take argument of type: ' + str(type(obj)))
seen_ids = set()
size = 0
objects = [obj]
while objects:
need_referents = []
for obj in objects:
if not isinstance(obj, BLACKLIST) and id(obj) not in seen_ids:
if not isinstance(obj, _BLACKLIST) and id(obj) not in seen_ids:
seen_ids.add(id(obj))
size += sys.getsizeof(obj)
need_referents.append(obj)
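A hypothetical usage sketch for the get_size helper shown above; the import path is assumed from the file location tools/sumolib/miscutils.py and requires sumolib on the Python path:

from sumolib.miscutils import get_size

data = {"edges": ["a", "b", "c"], "costs": [1.0, 2.0, 3.0]}
print(get_size(data))  # recursively summed size in bytes, including members
# passing a module, class or function raises TypeError, as guarded above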
@@ -76,6 +78,7 @@ def benchmark_wrapper(*args, **kwargs):
return result
return benchmark_wrapper


class Benchmarker:
"""
class for benchmarking a function using a "with"-statement.
@@ -95,6 +98,7 @@ def __exit__(self, *args):
duration = time.time() - self.started
print("%s finished after %s" % (self.description, humanReadableTime(duration)))


class working_dir:
"""
temporarily change working directory using 'with' statement
@@ -384,5 +388,3 @@ def short_names(filenames, noEmpty):
base = os.path.basename(prefix)
shortened = [base + f for f in shortened]
return shortened


9 changes: 6 additions & 3 deletions tools/visualization/plotXMLAttributes.py
@@ -25,9 +25,11 @@
@INDEX: the index of the other value within the input file is used.
@RANK: the index of the other value within the sorted (descending) list of values is used
@COUNT: the number of occurrences of the other value is used.
Together with option --barplot or -hbarplot this gives a histogram. Binning size can be set via options --xbin and --ybin.
Together with option --barplot or -hbarplot this gives a histogram.
Binning size can be set via options --xbin and --ybin.
@DENSITY: the number of occurrences of the other value is used, normalized by the total number of values.
@BOX: one or more box plots of the other value are drawn. The --idattr is used for grouping and there will be one box plot per id
@BOX: one or more box plots of the other value are drawn.
The --idattr is used for grouping and there will be one box plot per id
@FILE: the (shortened) input file name is used (useful when plotting one value per file)
Individual trajectories can be clicked in interactive mode to print the data Id on the console.
@@ -220,6 +222,7 @@ def onpick(event):
mevent = event.mouseevent
print("dataID=%s x=%d y=%d" % (event.artist.get_label(), mevent.xdata, mevent.ydata))


def makeSplitter(splitx, otherIsIndex, ds_fun):
def splitter(file):
for dataID, x, y in ds_fun(file):
@@ -235,9 +238,9 @@ def splitter(file):
yield dataID, i, y2
else:
yield dataID, x, y2

return splitter


def getDataStream(options):
# determine elements and nesting for the given attributes
# by reading from the first file
2 changes: 1 addition & 1 deletion tools/xml/xml2csv.py
@@ -129,7 +129,7 @@ def startElement(self, name, attrs):
return
# collect attributes
for a in sorted(list(attrs.keys())):
if self.keepAttrs is not None and not a in self.keepAttrs:
if self.keepAttrs is not None and a not in self.keepAttrs:
continue
if a not in self.tagAttrs[name] and ":" not in a:
self.tagAttrs[name][a] = xsd.XmlAttribute(a)
