Revert the issue #1105 and create an update_NEW() function
nicolargo committed Jan 18, 2018
1 parent f81ccfc commit 229e8f3
Showing 2 changed files with 145 additions and 0 deletions.
1 change: 1 addition & 0 deletions glances/amps_list.py
@@ -106,6 +106,7 @@ def update(self):
"""Update the command result attributed."""
# Search application monitored processes by a regular expression
processlist = glances_processes.getalllist()
logger.info(processlist)
# Iterate over the AMPs dict
for k, v in iteritems(self.get()):
if not v.enable():
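For context on the amps_list.py change: the added logger.info(processlist) call logs the full list returned by glances_processes.getalllist() before each AMP's regular expression is matched against it. Below is a minimal sketch of that kind of regex-based process matching, assuming each entry is a dict carrying a 'cmdline' list of strings as psutil returns them; match_amp_processes is a hypothetical helper name, not the Glances API.

import re

def match_amp_processes(processlist, pattern):
    # Keep only the processes whose command line matches the AMP regex.
    # Each entry is assumed to be a dict with a 'cmdline' list of strings.
    regex = re.compile(pattern)
    return [p for p in processlist
            if p.get('cmdline') and regex.search(' '.join(p['cmdline']))]

# Example with a hand-built process list
processes = [{'pid': 1234, 'cmdline': ['nginx: worker process']},
             {'pid': 4321, 'cmdline': ['python', 'app.py']}]
print(match_amp_processes(processes, 'nginx'))  # -> the nginx entry only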
144 changes: 144 additions & 0 deletions glances/processes.py
@@ -402,6 +402,150 @@ def update(self):
self.processlist = []
self.reset_processcount()

# Do not process if disable tag is set
if self.disable_tag:
return

# Get the time since last update
time_since_update = getTimeSinceLastUpdate('process_disk')

# Reset the max dict
self.reset_max_values()

# Update the maximum process ID (pid) number
self.processcount['pid_max'] = self.pid_max

# Build an internal dict with only mandatory stats (sort keys)
processdict = {}
excluded_processes = set()
for proc in psutil.process_iter():
# Ignore kernel threads if needed
if self.no_kernel_threads and not WINDOWS and is_kernel_thread(proc):
continue

# If self.max_processes is None: Only retrieve mandatory stats
# Else: retrieve mandatory and standard stats
s = self.__get_process_stats(proc,
mandatory_stats=True,
standard_stats=self.max_processes is None)
# Check if s is not None (issue #879)
# ignore the 'idle' process on Windows and *BSD
# ignore the 'kernel_task' process on macOS
# waiting for upstream patch from psutil
if (s is None or
BSD and s['name'] == 'idle' or
WINDOWS and s['name'] == 'System Idle Process' or
MACOS and s['name'] == 'kernel_task'):
continue
# Continue to the next process if it has to be filtered
if self._filter.is_filtered(s):
excluded_processes.add(proc)
continue

# OK, add the process to the list
processdict[proc] = s
# Update processcount (global statistics)
try:
self.processcount[str(proc.status())] += 1
except KeyError:
# Key did not exist, create it
try:
self.processcount[str(proc.status())] = 1
except psutil.NoSuchProcess:
pass
except psutil.NoSuchProcess:
pass
else:
self.processcount['total'] += 1
# Update thread number (global statistics)
try:
self.processcount['thread'] += proc.num_threads()
except Exception:
pass

if self._enable_tree:
self.process_tree = ProcessTreeNode.build_tree(processdict,
self.sort_key,
self.sort_reverse,
self.no_kernel_threads,
excluded_processes)

for i, node in enumerate(self.process_tree):
# Only retrieve stats for visible processes (max_processes)
if self.max_processes is not None and i >= self.max_processes:
break

# add standard stats
new_stats = self.__get_process_stats(node.process,
mandatory_stats=False,
standard_stats=True,
extended_stats=False)
if new_stats is not None:
node.stats.update(new_stats)

# Add a specific time_since_update stats for bitrate
node.stats['time_since_update'] = time_since_update

else:
# Process optimization
# Only retrieve stats for visible processes (max_processes)
if self.max_processes is not None:
# Sort the internal dict and cut the top N (Return a list of tuple)
# tuple=key (proc), dict (returned by __get_process_stats)
try:
processiter = sorted(iteritems(processdict),
key=lambda x: x[1][self.sort_key],
reverse=self.sort_reverse)
except (KeyError, TypeError) as e:
logger.error("Cannot sort process list by {}: {}".format(self.sort_key, e))
logger.error('{}'.format(listitems(processdict)[0]))
# Fall back to all processes (issue #423)
processloop = iteritems(processdict)
first = False
else:
processloop = processiter[0:self.max_processes]
first = True
else:
# Get all processes stats
processloop = iteritems(processdict)
first = False

for i in processloop:
# Already existing mandatory stats
procstat = i[1]
if self.max_processes is not None:
# Update with standard stats
# and extended stats but only for TOP (first) process
s = self.__get_process_stats(i[0],
mandatory_stats=False,
standard_stats=True,
extended_stats=first)
if s is None:
continue
procstat.update(s)
# Add a specific time_since_update stats for bitrate
procstat['time_since_update'] = time_since_update
# Update process list
self.processlist.append(procstat)
# Next...
first = False

# Build the all processes list used by the AMPs
self.allprocesslist = [p for p in itervalues(processdict)]

# Clean internals caches if timeout is reached
if self.cache_timer.finished():
self.username_cache = {}
self.cmdline_cache = {}
# Restart the timer
self.cache_timer.reset()

def update_NEW(self):
"""Update the processes stats."""
# Reset the stats
self.processlist = []
self.reset_processcount()

# Do not process if disable tag is set
if self.disable_tag:
return
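The heart of the non-tree branch in update() above is the sort-and-slice step: the internal dict is ordered by the current sort key and cut down to the top max_processes entries, falling back to the full, unsorted list when the key cannot be compared (issue #423). A self-contained sketch of that logic follows; top_n_processes is a hypothetical helper name used here for illustration only.

def top_n_processes(processdict, sort_key, max_processes, reverse=True):
    # Sort (process, stats) pairs by the chosen key and keep the top N.
    # On a missing or uncomparable key, fall back to the whole list,
    # mirroring the issue #423 fallback in the diff above.
    try:
        ordered = sorted(processdict.items(),
                         key=lambda item: item[1][sort_key],
                         reverse=reverse)
    except (KeyError, TypeError):
        return list(processdict.items())
    return ordered[:max_processes]

# Example with two fake processes sorted by CPU usage
stats = {'proc-a': {'cpu_percent': 5.0}, 'proc-b': {'cpu_percent': 42.0}}
print(top_n_processes(stats, 'cpu_percent', 1))  # -> [('proc-b', {...})]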

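The remainder of the new update_NEW() body falls outside this hunk. As general background only, and not the actual __get_process_stats() implementation, here is a minimal psutil sketch of collecting a few mandatory-style stats per process; get_basic_stats is a hypothetical helper name.

import psutil

def get_basic_stats(proc):
    # Collect a handful of per-process stats in one pass; return None if
    # the process vanished or cannot be read, as update() does above.
    try:
        with proc.oneshot():
            return {'pid': proc.pid,
                    'name': proc.name(),
                    'status': str(proc.status()),
                    'cpu_percent': proc.cpu_percent(interval=None),
                    'memory_percent': proc.memory_percent()}
    except (psutil.NoSuchProcess, psutil.AccessDenied):
        return None

stats = [s for s in (get_basic_stats(p) for p in psutil.process_iter()) if s]
print('{} processes collected'.format(len(stats)))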