first commit

This commit is contained in:
Ayxan
2022-05-23 00:16:32 +04:00
commit d660f2a4ca
24786 changed files with 4428337 additions and 0 deletions

View File

@@ -0,0 +1 @@
__version__ = '1.0.1'  # package version string (single source of truth)

File diff suppressed because it is too large. Load Diff

View File

@@ -0,0 +1,62 @@
"""
Generate charts from gathered data.
Requires **matplotlib**.
"""
from pympler.classtracker_stats import Stats
try:
    import matplotlib
    matplotlib.use('Agg')
    import matplotlib.pyplot as plt

    def tracker_timespace(filename: str, stats: Stats) -> None:
        """
        Create a time-space chart from a ``Stats`` instance and save it to
        `filename`.

        Classes whose peak share of total memory exceeds 3% are drawn as
        stacked filled areas; total and tracked memory are overlaid as
        dashed lines.
        """
        classlist = sorted(stats.index.keys())

        for snapshot in stats.snapshots:
            stats.annotate_snapshot(snapshot)

        timestamps = [fp.timestamp for fp in stats.snapshots]
        offsets = [0] * len(stats.snapshots)
        poly_labels = []
        polys = []
        for clsname in classlist:
            pct = [fp.classes[clsname]['pct'] for fp in stats.snapshots
                   if fp.classes and clsname in fp.classes]
            # Guard against an empty list (class absent from every annotated
            # snapshot) -- max() on an empty sequence raises ValueError.
            if pct and max(pct) > 3.0:
                sizes = [fp.classes[clsname]['sum'] for fp in stats.snapshots
                         if fp.classes and clsname in fp.classes]
                sizes = [float(x) / (1024 * 1024) for x in sizes]
                # Stack this class on top of the classes drawn before it.
                sizes = [offset + size
                         for offset, size in zip(offsets, sizes)]
                polys.append((timestamps, offsets, sizes, clsname))
                poly_labels.append(clsname)
                offsets = sizes

        fig = plt.figure(figsize=(10, 4))
        axis = fig.add_subplot(111)

        axis.set_title("Snapshot Memory")
        axis.set_xlabel("Execution Time [s]")
        axis.set_ylabel("Virtual Memory [MiB]")

        totals = [float(x.asizeof_total) / (1024 * 1024)
                  for x in stats.snapshots]
        axis.plot(timestamps, totals, 'r--', label='Total')
        tracked = [float(x.tracked_total) / (1024 * 1024)
                   for x in stats.snapshots]
        axis.plot(timestamps, tracked, 'b--', label='Tracked total')

        # ``matplotlib.mlab.poly_between`` was removed from matplotlib;
        # ``fill_between`` draws the same stacked polygons directly.
        for (xvals, lower, upper, label) in polys:
            axis.fill_between(xvals, lower, upper, label=label)
        axis.legend(loc=2)  # TODO fill legend

        fig.savefig(filename)

except ImportError:
    def tracker_timespace(filename: str, stats: Stats) -> None:
        """No-op fallback used when matplotlib is not installed."""
        pass

View File

@@ -0,0 +1,590 @@
"""
The `ClassTracker` is a facility delivering insight into the memory
distribution of a Python program. It can introspect memory consumption of
certain classes and objects. Facilities are provided to track and size
individual objects or all instances of certain classes. Tracked objects are
sized recursively to provide an overview of memory distribution between the
different tracked objects.
"""
from typing import Any, Callable, Dict, IO, List, Optional, Tuple
from collections import defaultdict
from functools import partial
from inspect import stack, isclass
from threading import Thread, Lock
from time import sleep, time
from weakref import ref as weakref_ref
from pympler.classtracker_stats import ConsoleStats
from pympler.util.stringutils import safe_repr
import pympler.asizeof as asizeof
import pympler.process
__all__ = ["ClassTracker"]
# Fixpoint for program start relative time stamp: wall-clock time captured at
# module import; `_get_time` reports seconds elapsed since this moment.
_local_start = time()
class _ClassObserver(object):
"""
Stores options for tracked classes.
The observer also keeps the original constructor of the observed class.
"""
__slots__ = ('init', 'name', 'detail', 'keep', 'trace')
def __init__(self, init: Callable, name: str, detail: int, keep: bool,
trace: bool):
self.init = init
self.name = name
self.detail = detail
self.keep = keep
self.trace = trace
def modify(self, name: str, detail: int, keep: bool, trace: bool) -> None:
self.name = name
self.detail = detail
self.keep = keep
self.trace = trace
def _get_time() -> float:
    """Return the number of seconds elapsed since the program started."""
    return time() - _local_start
class TrackedObject(object):
    """
    Stores size and lifetime information of a tracked object. A weak reference
    is attached to monitor the object without preventing its deletion.
    """
    __slots__ = ("ref", "id", "repr", "name", "birth", "death", "trace",
                 "snapshots", "_resolution_level", "__dict__")

    def __init__(self, instance: Any, name: str, resolution_level: int = 0,
                 trace: bool = False, on_delete: Optional[Callable] = None):
        """
        Create a weak reference for 'instance' to observe an object but which
        won't prevent its deletion (which is monitored by the finalize
        callback). The size of the object is recorded in 'snapshots' as
        (timestamp, size) tuples.

        :param instance: object to track; must be weakly referenceable
        :param name: name under which the object is registered
        :param resolution_level: recursion depth for sizing referents
        :param trace: if True, save the instantiation stack trace
        :param on_delete: optional callback invoked from `finalize`
        """
        self.ref = weakref_ref(instance, self.finalize)
        self.id = id(instance)
        self.repr = ''
        self.name = name
        self.birth = _get_time()
        self.death = None  # type: Optional[float]
        self._resolution_level = resolution_level
        self.trace = None  # type: Optional[List[Tuple]]
        if trace:
            self._save_trace()
        # Record an initial (birth, size) sample based on the object's basic
        # size so every tracked object has at least one snapshot entry.
        initial_size = asizeof.basicsize(instance) or 0
        size = asizeof.Asized(initial_size, initial_size)
        self.snapshots = [(self.birth, size)]
        # `on_delete` is not listed in __slots__; it lands in the instance
        # __dict__ (which __slots__ explicitly allows above).
        self.on_delete = on_delete

    def __getstate__(self) -> Dict:
        """
        Make the object serializable for dump_stats. Read the available slots
        and store the values in a dictionary. Derived values (stored in the
        dict) are not pickled as those can be reconstructed based on the other
        data. References cannot be serialized, ignore 'ref' as well.
        """
        state = {}
        for name in getattr(TrackedObject, '__slots__', ()):
            if hasattr(self, name) and name not in ['ref', '__dict__']:
                state[name] = getattr(self, name)
        return state

    def __setstate__(self, state: Dict) -> None:
        """
        Restore the state from pickled data. Needed because a slotted class is
        used.
        """
        for key, value in list(state.items()):
            setattr(self, key, value)

    def _save_trace(self) -> None:
        """
        Save current stack trace as formatted string.
        """
        stack_trace = stack()
        try:
            self.trace = []
            for frm in stack_trace[5:]:  # eliminate our own overhead
                # Drop the frame object (frm[0]) and store frames
                # oldest-first.
                self.trace.insert(0, frm[1:])
        finally:
            # Break reference cycles created by holding frame records.
            del stack_trace

    def track_size(self, ts: float, sizer: asizeof.Asizer) -> None:
        """
        Store timestamp and current size for later evaluation.
        The 'sizer' is a stateful sizing facility that excludes other tracked
        objects.

        :param ts: timestamp to associate with the measurement
        :param sizer: Asizer instance used to size the object
        """
        obj = self.ref()
        self.snapshots.append(
            (ts, sizer.asized(obj, detail=self._resolution_level))
        )
        # Refresh the clipped repr only while the object is still alive.
        if obj is not None:
            self.repr = safe_repr(obj, clip=128)

    def get_max_size(self) -> int:
        """
        Get the maximum of all sampled sizes.
        """
        return max([s.size for (_, s) in self.snapshots])

    def get_size_at_time(self, timestamp: float) -> int:
        """
        Get the size of the object at a specific time (snapshot).
        If the object was not alive/sized at that instant, return 0.
        """
        size = 0
        for (t, s) in self.snapshots:
            if t == timestamp:
                size = s.size
        return size

    def set_resolution_level(self, resolution_level: int) -> None:
        """
        Set resolution level to a new value. The next size estimation will
        respect the new value. This is useful to set different levels for
        different instances of tracked classes.
        """
        self._resolution_level = resolution_level

    def finalize(self, ref: weakref_ref) -> None:
        """
        Mark the reference as dead and remember the timestamp. It would be
        great if we could measure the pre-destruction size. Unfortunately, the
        object is gone by the time the weakref callback is called. However,
        weakref callbacks are useful to be informed when tracked objects died
        without the need of destructors.

        If the object is destroyed at the end of the program execution, it's
        not possible to import modules anymore. Hence, the finalize callback
        just does nothing (self.death stays None).
        """
        try:
            self.death = _get_time()
            if self.on_delete:
                self.on_delete()
        except Exception:  # pragma: no cover
            pass
def track_object_creation(time_series: List[Tuple[float, int]]) -> None:
    """Append a (timestamp, instance-count) sample after an instantiation."""
    if time_series:
        count = time_series[-1][1]
    else:
        count = 0
    time_series.append((_get_time(), count + 1))
def track_object_deletion(time_series: List[Tuple[float, int]]) -> None:
    """Append a (timestamp, instance-count) sample after a finalization."""
    count = time_series[-1][1]
    time_series.append((_get_time(), count - 1))
class PeriodicThread(Thread):
    """
    Background thread that takes tracker snapshots at a fixed interval.
    """

    def __init__(self, tracker: 'ClassTracker', interval: float, *args: Any,
                 **kwargs: Any):
        """
        Create a thread bound to `tracker` that, once started, snapshots
        every `interval` seconds.
        """
        self.interval = interval
        self.tracker = tracker
        # Setting `stop` to True makes `run` exit after the current cycle.
        self.stop = False
        super(PeriodicThread, self).__init__(*args, **kwargs)

    def run(self) -> None:
        """
        Take snapshots periodically until the stop flag is set.
        """
        self.stop = False
        while True:
            if self.stop:
                break
            self.tracker.create_snapshot()
            sleep(self.interval)
class Snapshot(object):
    """Sample of object and process sizes taken at a single instant."""

    def __init__(self, timestamp: float, description: str = '') -> None:
        """Record process-wide size information at `timestamp`."""
        self.tracked_total = 0
        self.asizeof_total = 0
        self.overhead = 0
        self.timestamp = timestamp
        self.system_total = pympler.process.ProcessMemoryInfo()
        self.desc = description
        self.classes = None  # type: Optional[Dict[str, Dict[str, Any]]]

    @property
    def total(self) -> int:
        """
        Return the total (virtual) size of the process in bytes. If process
        information is not available, get the best number available, even if
        it is a poor approximation of reality.
        """
        if self.system_total.available:
            return self.system_total.vsz
        if self.asizeof_total:  # pragma: no cover
            return self.asizeof_total
        return self.tracked_total  # pragma: no cover

    @property
    def label(self) -> str:
        """Return timestamped label for this snapshot, or a raw timestamp."""
        if self.desc:
            return "%s (%.3fs)" % (self.desc, self.timestamp)
        return "%.3fs" % self.timestamp
class ClassTracker(object):
    """
    Track the memory consumption of instances of registered classes.

    Constructors of tracked classes are instrumented so that new instances
    are registered automatically; snapshots capture their sizes over time.
    """

    def __init__(self, stream: Optional[IO] = None):
        """
        Creates a new `ClassTracker` object.

        :param stream: Output stream to use when printing statistics via
            ``stats``.
        """
        # Dictionaries of TrackedObject objects associated with the actual
        # objects that are tracked. 'index' uses the class name as the key
        # and associates a list of tracked objects. It contains all
        # TrackedObject instances, including those of dead objects.
        self.index = defaultdict(list)  # type: Dict[str, List[TrackedObject]]

        # 'objects' uses the id (address) as the key and associates the
        # tracked object with it. TrackedObject's referring to dead objects
        # are replaced lazily, i.e. when the id is recycled by another
        # tracked object.
        self.objects = {}  # type: Dict[int, Any]

        # List of `Snapshot` objects.
        self.snapshots = []  # type: List[Snapshot]

        # Time series of instance count for each tracked class.
        self.history = \
            defaultdict(list)  # type: Dict[str, List[Tuple[float, int]]]

        # Keep objects alive by holding a strong reference.
        self._keepalive = []  # type: List[Any]

        # Dictionary of class observers identified by classname.
        self._observers = {}  # type: Dict[type, _ClassObserver]

        # Thread object responsible for background monitoring.
        self._periodic_thread = None  # type: Optional[PeriodicThread]

        self._stream = stream

    @property
    def stats(self) -> ConsoleStats:
        """
        Return a ``ConsoleStats`` instance initialized with the current state
        of the class tracker.
        """
        return ConsoleStats(tracker=self, stream=self._stream)

    def _tracker(self, _observer_: _ClassObserver, _self_: Any, *args: Any,
                 **kwds: Any) -> None:
        """
        Injected constructor for tracked classes.
        Call the actual constructor of the object and track the object.
        Attach to the object before calling the constructor to track the
        object with the parameters of the most specialized class.
        """
        self.track_object(_self_,
                          name=_observer_.name,
                          resolution_level=_observer_.detail,
                          keep=_observer_.keep,
                          trace=_observer_.trace)
        _observer_.init(_self_, *args, **kwds)

    def _inject_constructor(self, cls: type, func: Callable, name: str,
                            resolution_level: int, keep: bool, trace: bool,
                            ) -> None:
        """
        Modifying Methods in Place - after the recipe 15.7 in the Python
        Cookbook by Ken Seehof. The original constructors may be restored
        later.
        """
        try:
            constructor = cls.__init__  # type: ignore
        except AttributeError:
            def constructor(self: Any, *_args: Any, **_kwargs: Any) -> None:
                pass

        # Possible name clash between keyword arguments of the tracked class'
        # constructor and the curried arguments of the injected constructor.
        # Therefore, the additional argument has a 'magic' name to make it
        # less likely that an argument name clash occurs.
        observer = _ClassObserver(constructor,
                                  name,
                                  resolution_level,
                                  keep,
                                  trace)
        self._observers[cls] = observer

        def new_constructor(*args: Any, **kwargs: Any) -> None:
            return func(observer, *args, **kwargs)

        cls.__init__ = new_constructor  # type: ignore

    def _is_tracked(self, cls: type) -> bool:
        """
        Determine if the class is tracked.
        """
        return cls in self._observers

    def _track_modify(self, cls: type, name: str, detail: int, keep: bool,
                      trace: bool) -> None:
        """
        Modify settings of a tracked class
        """
        self._observers[cls].modify(name, detail, keep, trace)

    def _restore_constructor(self, cls: type) -> None:
        """
        Restore the original constructor, lose track of class.
        """
        cls.__init__ = self._observers[cls].init  # type: ignore
        del self._observers[cls]

    def track_change(self, instance: Any, resolution_level: int = 0) -> None:
        """
        Change tracking options for the already tracked object 'instance'.
        If instance is not tracked, a KeyError will be raised.
        """
        tobj = self.objects[id(instance)]
        tobj.set_resolution_level(resolution_level)

    def track_object(self, instance: Any, name: Optional[str] = None,
                     resolution_level: int = 0, keep: bool = False,
                     trace: bool = False) -> None:
        """
        Track object 'instance' and sample size and lifetime information. Not
        all objects can be tracked; trackable objects are class instances and
        other objects that can be weakly referenced. When an object cannot be
        tracked, a `TypeError` is raised.

        :param resolution_level: The recursion depth up to which referents
            are sized individually. Resolution level 0 (default) treats the
            object as an opaque entity, 1 sizes all direct referents
            individually, 2 also sizes the referents of the referents and so
            forth.
        :param keep: Prevent the object's deletion by keeping a (strong)
            reference to the object.
        """
        # Check if object is already tracked. This happens if track_object is
        # called multiple times for the same object or if an object inherits
        # from multiple tracked classes. In the latter case, the most
        # specialized class wins. To detect id recycling, the weak reference
        # is checked. If it is 'None' a tracked object is dead and another
        # one takes the same 'id'.
        if id(instance) in self.objects and \
                self.objects[id(instance)].ref() is not None:
            return

        name = name if name else instance.__class__.__name__

        track_object_creation(self.history[name])
        on_delete = partial(track_object_deletion, self.history[name])

        tobj = TrackedObject(instance,
                             name,
                             resolution_level=resolution_level,
                             trace=trace,
                             on_delete=on_delete)

        self.index[name].append(tobj)
        self.objects[id(instance)] = tobj

        if keep:
            self._keepalive.append(instance)

    def track_class(self, cls: type, name: Optional[str] = None,
                    resolution_level: int = 0, keep: bool = False,
                    trace: bool = False) -> None:
        """
        Track all objects of the class `cls`. Objects of that type that
        already exist are *not* tracked. If `track_class` is called for a
        class already tracked, the tracking parameters are modified.
        Instantiation traces can be generated by setting `trace` to True.
        A constructor is injected to begin instance tracking on creation
        of the object. The constructor calls `track_object` internally.

        :param cls: class to be tracked, may be an old-style or a new-style
            class
        :param name: reference the class by a name, default is the
            concatenation of module and class name
        :param resolution_level: The recursion depth up to which referents
            are sized individually. Resolution level 0 (default) treats the
            object as an opaque entity, 1 sizes all direct referents
            individually, 2 also sizes the referents of the referents and so
            forth.
        :param keep: Prevent the object's deletion by keeping a (strong)
            reference to the object.
        :param trace: Save instantiation stack trace for each instance
        """
        if not isclass(cls):
            raise TypeError("only class objects can be tracked")
        if name is None:
            name = cls.__module__ + '.' + cls.__name__
        if self._is_tracked(cls):
            self._track_modify(cls, name, resolution_level, keep, trace)
        else:
            self._inject_constructor(cls, self._tracker, name,
                                     resolution_level, keep, trace)

    def detach_class(self, cls: type) -> None:
        """
        Stop tracking class 'cls'. Any new objects of that type are not
        tracked anymore. Existing objects are still tracked.
        """
        self._restore_constructor(cls)

    def detach_all_classes(self) -> None:
        """
        Detach from all tracked classes.
        """
        classes = list(self._observers.keys())
        for cls in classes:
            self.detach_class(cls)

    def detach_all(self) -> None:
        """
        Detach from all tracked classes and objects.
        Restore the original constructors and cleanse the tracking lists.
        """
        self.detach_all_classes()
        self.objects.clear()
        self.index.clear()
        self._keepalive[:] = []

    def clear(self) -> None:
        """
        Clear all gathered data and detach from all tracked objects/classes.
        """
        self.detach_all()
        self.snapshots[:] = []

    def close(self) -> None:
        """
        Detach from tracked classes by removing injected constructors. Makes
        it possible to use ClassTracker in `contextlib.closing` to safely
        remove profiling hooks when the tracker goes out of scope::

            import contextlib
            with contextlib.closing(ClassTracker()) as tracker:
                tracker.track_class(Foo)
        """
        self.detach_all_classes()

    #
    # Background Monitoring
    #

    def start_periodic_snapshots(self, interval: float = 1.0) -> None:
        """
        Start a thread which takes snapshots periodically. The `interval`
        specifies the time in seconds the thread waits between taking
        snapshots. The thread is started as a daemon allowing the program to
        exit. If periodic snapshots are already active, the interval is
        updated.
        """
        if not self._periodic_thread:
            self._periodic_thread = PeriodicThread(self, interval,
                                                   name='BackgroundMonitor')
            # `Thread.setDaemon` is deprecated since Python 3.10 (removed in
            # 3.13); assign the `daemon` attribute instead.
            self._periodic_thread.daemon = True
            self._periodic_thread.start()
        else:
            self._periodic_thread.interval = interval

    def stop_periodic_snapshots(self) -> None:
        """
        Post a stop signal to the thread that takes the periodic snapshots.
        The function waits for the thread to terminate which can take some
        time depending on the configured interval.
        """
        if self._periodic_thread and self._periodic_thread.is_alive():
            self._periodic_thread.stop = True
            self._periodic_thread.join()
            self._periodic_thread = None

    #
    # Snapshots
    #

    snapshot_lock = Lock()

    def create_snapshot(self, description: str = '',
                        compute_total: bool = False) -> None:
        """
        Collect current per instance statistics and saves total amount of
        memory associated with the Python process.

        If `compute_total` is `True`, the total consumption of all objects
        known to *asizeof* is computed. The latter might be very slow if many
        objects are mapped into memory at the time the snapshot is taken.
        Therefore, `compute_total` is set to `False` by default.

        The overhead of the `ClassTracker` structure is also computed.

        Snapshots can be taken asynchronously. The function is protected with
        a lock to prevent race conditions.
        """
        # TODO: It is not clear what happens when memory is allocated or
        # released while this function is executed but it will likely lead
        # to inconsistencies. Either pause all other threads or don't size
        # individual objects in asynchronous mode.
        with self.snapshot_lock:
            timestamp = _get_time()
            sizer = asizeof.Asizer()
            objs = [tobj.ref() for tobj in list(self.objects.values())]
            sizer.exclude_refs(*objs)

            # The objects need to be sized in a deterministic order. Sort
            # the objects by its creation date which should at least work
            # for non-parallel execution. The "proper" fix would be to
            # handle shared data separately.
            tracked_objects = list(self.objects.values())
            tracked_objects.sort(key=lambda x: x.birth)
            for tobj in tracked_objects:
                tobj.track_size(timestamp, sizer)

            snapshot = Snapshot(timestamp, str(description))
            snapshot.tracked_total = sizer.total
            if compute_total:
                snapshot.asizeof_total = asizeof.asizeof(all=True, code=True)

            # Compute overhead of all structures, use sizer to exclude
            # tracked objects(!)
            snapshot.overhead = 0
            if snapshot.tracked_total:
                snapshot.overhead = sizer.asizeof(self)
                if snapshot.asizeof_total:
                    snapshot.asizeof_total -= snapshot.overhead

            self.snapshots.append(snapshot)

View File

@@ -0,0 +1,780 @@
"""
Provide saving, loading and presenting gathered `ClassTracker` statistics.
"""
from typing import (
Any, Dict, IO, Iterable, List, Optional, Tuple, TYPE_CHECKING, Union
)
import os
import pickle
import sys
from copy import deepcopy
from pympler.util.stringutils import trunc, pp, pp_timestamp
from pympler.asizeof import Asized
if TYPE_CHECKING:
from .classtracker import TrackedObject, ClassTracker, Snapshot
__all__ = ["Stats", "ConsoleStats", "HtmlStats"]
def _ref2key(ref: Asized) -> str:
    """Return the part of the referent's name before the first colon."""
    return ref.name.partition(':')[0]
def _merge_asized(base: Asized, other: Asized, level: int = 0) -> None:
    """
    Merge **Asized** instances `base` and `other` into `base`.
    """
    base.size += other.size
    base.flat += other.flat
    if level > 0:
        base.name = _ref2key(base)
    # Add refs from other to base. Any new refs are appended.
    base.refs = list(base.refs)  # we may need to append items
    known = {_ref2key(ref): ref for ref in base.refs}
    for other_ref in other.refs:
        key = _ref2key(other_ref)
        if key in known:
            _merge_asized(known[key], other_ref, level=level + 1)
        else:
            # Don't modify existing Asized instances => deepcopy
            clone = deepcopy(other_ref)
            clone.name = key
            base.refs.append(clone)
def _merge_objects(tref: float, merged: Asized, obj: 'TrackedObject') -> None:
    """
    Merge the snapshot size information of multiple tracked objects. The
    tracked object `obj` is scanned for size information at time `tref`.
    The sizes are merged into **Asized** instance `merged`.
    """
    snapshot_size = None
    # Keep the last sample recorded at `tref` if several exist.
    for stamp, measured in obj.snapshots:
        if stamp == tref:
            snapshot_size = measured
    if snapshot_size:
        _merge_asized(merged, snapshot_size)
def _format_trace(trace: List[Tuple]) -> str:
"""
Convert the (stripped) stack-trace to a nice readable format. The stack
trace `trace` is a list of frame records as returned by
**inspect.stack** but without the frame objects.
Returns a string.
"""
lines = []
for fname, lineno, func, src, _ in trace:
if src:
for line in src:
lines.append(' ' + line.strip() + '\n')
lines.append(' %s:%4d in %s\n' % (fname, lineno, func))
return ''.join(lines)
class Stats(object):
    """
    Presents the memory statistics gathered by a `ClassTracker` based on user
    preferences.
    """

    def __init__(self, tracker: 'Optional[ClassTracker]' = None,
                 filename: Optional[str] = None,
                 stream: Optional[IO] = None):
        """
        Initialize the data log structures either from a `ClassTracker`
        instance (argument `tracker`) or a previously dumped file (argument
        `filename`).

        :param tracker: ClassTracker instance
        :param filename: filename of previously dumped statistics
        :param stream: where to print statistics, defaults to ``sys.stdout``
        """
        if stream:
            self.stream = stream
        else:
            self.stream = sys.stdout
        self.tracker = tracker
        self.index = {}  # type: Dict[str, List[TrackedObject]]
        self.snapshots = []  # type: List[Snapshot]
        if tracker:
            # Share the tracker's live structures (no copy) so later
            # snapshots are visible here as well.
            self.index = tracker.index
            self.snapshots = tracker.snapshots
            self.history = tracker.history
        self.sorted = []  # type: List[TrackedObject]
        if filename:
            self.load_stats(filename)

    def load_stats(self, fdump: Union[str, IO[bytes]]) -> None:
        """
        Load the data from a dump file.
        The argument `fdump` can be either a filename or an open file object
        that requires read access.
        """
        close_file = False
        if isinstance(fdump, str):
            fdump = open(fdump, 'rb')
            close_file = True
        try:
            self.index = pickle.load(fdump)
            self.snapshots = pickle.load(fdump)
            self.sorted = []
        finally:
            # Close the file if it was opened here; previously it leaked.
            if close_file:
                fdump.close()

    def dump_stats(self, fdump: Union[str, IO[bytes]], close: bool = True
                   ) -> None:
        """
        Dump the logged data to a file.
        The argument `fdump` can be either a filename or an open file object
        that requires write access. `close` controls if the file is closed
        before leaving this method (the default behaviour).
        """
        if self.tracker:
            self.tracker.stop_periodic_snapshots()
        if isinstance(fdump, str):
            fdump = open(fdump, 'wb')
        pickle.dump(self.index, fdump, protocol=pickle.HIGHEST_PROTOCOL)
        pickle.dump(self.snapshots, fdump, protocol=pickle.HIGHEST_PROTOCOL)
        if close:
            fdump.close()

    def _init_sort(self) -> None:
        """
        Prepare the data to be sorted.
        If not yet sorted, import all tracked objects from the tracked index.
        Extend the tracking information by implicit information to make
        sorting easier (DSU pattern).
        """
        if not self.sorted:
            # Identify the snapshot that tracked the largest amount of
            # memory.
            tmax = None
            maxsize = 0
            for snapshot in self.snapshots:
                if snapshot.tracked_total > maxsize:
                    tmax = snapshot.timestamp
                    # Fix: remember the size as well -- without this update
                    # `tmax` ended up at the *last* non-empty snapshot
                    # instead of the largest one.
                    maxsize = snapshot.tracked_total
            for key in list(self.index.keys()):
                for tobj in self.index[key]:
                    tobj.classname = key  # type: ignore
                    tobj.size = tobj.get_max_size()  # type: ignore
                    tobj.tsize = tobj.get_size_at_time(tmax)  # type: ignore
                self.sorted.extend(self.index[key])

    def sort_stats(self, *args: str) -> 'Stats':
        """
        Sort the tracked objects according to the supplied criteria. The
        argument is a string identifying the basis of a sort (example: 'size'
        or 'classname'). When more than one key is provided, then additional
        keys are used as secondary criteria when there is equality in all
        keys selected before them. For example, ``sort_stats('name', 'size')``
        will sort all the entries according to their class name, and resolve
        all ties (identical class names) by sorting by size. The criteria are
        fields in the tracked object instances. Results are stored in the
        ``self.sorted`` list which is used by ``Stats.print_stats()`` and
        other methods. The fields available for sorting are:

        'classname'
            the name with which the class was registered
        'name'
            the classname
        'birth'
            creation timestamp
        'death'
            destruction timestamp
        'size'
            the maximum measured size of the object
        'tsize'
            the measured size during the largest snapshot
        'repr'
            string representation of the object

        Note that sorts on size are in descending order (placing most memory
        consuming items first), whereas name, repr, and creation time
        searches are in ascending order (alphabetical).

        The function returns self to allow calling functions on the result::

            stats.sort_stats('size').reverse_order().print_stats()
        """
        criteria = ('classname', 'tsize', 'birth', 'death',
                    'name', 'repr', 'size')

        if not set(criteria).issuperset(set(args)):
            raise ValueError("Invalid sort criteria")
        if not args:
            args = criteria

        def args_to_tuple(obj: 'TrackedObject') -> Tuple[Any, ...]:
            # Build a DSU key tuple; sizes are negated so that an ascending
            # tuple sort places the largest objects first.
            keys = []  # type: List[Any]
            for attr in args:
                attribute = getattr(obj, attr, '')
                if attr in ('tsize', 'size'):
                    attribute = -int(attribute)
                keys.append(attribute)
            return tuple(keys)

        self._init_sort()
        self.sorted.sort(key=args_to_tuple)

        return self

    def reverse_order(self) -> 'Stats':
        """
        Reverse the order of the tracked instance index `self.sorted`.
        """
        self._init_sort()
        self.sorted.reverse()
        return self

    def annotate(self) -> None:
        """
        Annotate all snapshots with class-based summaries.
        """
        for snapshot in self.snapshots:
            self.annotate_snapshot(snapshot)

    def annotate_snapshot(self, snapshot: 'Snapshot'
                          ) -> Dict[str, Dict[str, Any]]:
        """
        Store additional statistical data in snapshot.

        Returns the per-class summary dict (also cached on the snapshot,
        which makes repeated calls cheap).
        """
        if snapshot.classes is not None:
            return snapshot.classes

        snapshot.classes = {}

        for classname in list(self.index.keys()):
            total = 0
            active = 0
            merged = Asized(0, 0)
            for tobj in self.index[classname]:
                _merge_objects(snapshot.timestamp, merged, tobj)
                total += tobj.get_size_at_time(snapshot.timestamp)
                if (tobj.birth < snapshot.timestamp and
                        (tobj.death is None or
                         tobj.death > snapshot.timestamp)):
                    active += 1

            try:
                pct = total * 100.0 / snapshot.total
            except ZeroDivisionError:  # pragma: no cover
                pct = 0
            try:
                avg = total / active
            except ZeroDivisionError:
                avg = 0

            snapshot.classes[classname] = dict(sum=total,
                                               avg=avg,
                                               pct=pct,
                                               active=active)
            snapshot.classes[classname]['merged'] = merged

        return snapshot.classes

    @property
    def tracked_classes(self) -> List[str]:
        """Return a list of all tracked classes occurring in any snapshot."""
        return sorted(list(self.index.keys()))
class ConsoleStats(Stats):
    """
    Presentation layer for `Stats` to be used in text-based consoles.
    """

    def _print_refs(self, refs: Iterable[Asized], total: int,
                    prefix: str = ' ', level: int = 1, minsize: int = 0,
                    minpct: float = 0.1) -> None:
        """
        Print individual referents recursively, largest first. Entries of at
        most `minsize` bytes or whose share of `total` does not exceed
        `minpct` percent are omitted.
        """
        ordered = list(refs)
        ordered.sort(key=lambda x: x.size)
        ordered.reverse()
        for ref in ordered:
            share = ref.size * 100.0 / total
            if ref.size > minsize and share > minpct:
                self.stream.write('%-50s %-14s %3d%% [%d]\n' % (
                    trunc(prefix + str(ref.name), 50),
                    pp(ref.size),
                    int(share),
                    level
                ))
                self._print_refs(ref.refs, total, prefix=prefix + ' ',
                                 level=level + 1)

    def print_object(self, tobj: 'TrackedObject') -> None:
        """
        Print the gathered information of object `tobj` in human-readable
        format.
        """
        if tobj.death:
            self.stream.write('%-32s ( free ) %-35s\n' % (
                trunc(tobj.name, 32, left=True), trunc(tobj.repr, 35)))
        else:
            self.stream.write('%-32s 0x%08x %-35s\n' % (
                trunc(tobj.name, 32, left=True),
                tobj.id,
                trunc(tobj.repr, 35)
            ))
        if tobj.trace:
            self.stream.write(_format_trace(tobj.trace))
        for stamp, measured in tobj.snapshots:
            self.stream.write(' %-30s %s\n' % (
                pp_timestamp(stamp), pp(measured.size)
            ))
            self._print_refs(measured.refs, measured.size)
        if tobj.death is not None:
            self.stream.write(' %-30s finalize\n' % (
                pp_timestamp(tobj.death),
            ))

    def print_stats(self, clsname: Optional[str] = None, limit: float = 1.0
                    ) -> None:
        """
        Write tracked objects to stdout. The output can be filtered and
        pruned. Only objects are printed whose classname contain the
        substring supplied by the `clsname` argument. The output can be
        pruned by passing a `limit` value.

        :param clsname: Only print objects whose classname contain the given
            substring.
        :param limit: If `limit` is a float smaller than one, only the
            supplied percentage of the total tracked data is printed. If
            `limit` is bigger than one, this number of tracked objects are
            printed. Tracked objects are first filtered, and then pruned (if
            specified).
        """
        if self.tracker:
            self.tracker.stop_periodic_snapshots()

        if not self.sorted:
            self.sort_stats()

        selection = self.sorted
        if clsname:
            selection = [tobj for tobj in selection
                         if clsname in tobj.classname]  # type: ignore

        if limit < 1.0:
            limit = max(1, int(len(self.sorted) * limit))
        selection = selection[:int(limit)]

        # Emit per-instance data
        for tobj in selection:
            self.print_object(tobj)

    def print_summary(self) -> None:
        """
        Print per-class summary for each snapshot.
        """
        # Emit class summaries for each snapshot
        classlist = self.tracked_classes
        fobj = self.stream

        fobj.write('---- SUMMARY ' + '-' * 66 + '\n')
        for snapshot in self.snapshots:
            classes = self.annotate_snapshot(snapshot)
            fobj.write('%-35s %11s %12s %12s %5s\n' % (
                trunc(snapshot.desc, 35),
                'active',
                pp(snapshot.asizeof_total),
                'average',
                'pct'
            ))
            for classname in classlist:
                info = classes[classname]
                fobj.write(' %-33s %11d %12s %12s %4d%%\n' % (
                    trunc(classname, 33),
                    info['active'],
                    pp(info['sum']),
                    pp(info['avg']),
                    info['pct']
                ))
        fobj.write('-' * 79 + '\n')
class HtmlStats(Stats):
    """
    Output the `ClassTracker` statistics as HTML pages and graphs.
    """

    # CSS embedded verbatim into every generated page (see `header`).
    style = """<style type="text/css">
table { width:100%; border:1px solid #000; border-spacing:0px; }
td, th { border:0px; }
div { width:200px; padding:10px; background-color:#FFEECC; }
#nb { border:0px; }
#tl { margin-top:5mm; margin-bottom:5mm; }
#p1 { padding-left: 5px; }
#p2 { padding-left: 50px; }
#p3 { padding-left: 100px; }
#p4 { padding-left: 150px; }
#p5 { padding-left: 200px; }
#p6 { padding-left: 210px; }
#p7 { padding-left: 220px; }
#hl { background-color:#FFFFCC; }
#r1 { background-color:#BBBBBB; }
#r2 { background-color:#CCCCCC; }
#r3 { background-color:#DDDDDD; }
#r4 { background-color:#EEEEEE; }
#r5,#r6,#r7 { background-color:#FFFFFF; }
#num { text-align:right; }
</style>
"""

    # Fallback markup emitted when matplotlib/pylab cannot be imported.
    nopylab_msg = """<div color="#FFCCCC">Could not generate %s chart!
Install <a href="http://matplotlib.sourceforge.net/">Matplotlib</a>
to generate charts.</div>\n"""

    chart_tag = '<img src="%s">\n'
    header = "<html><head><title>%s</title>%s</head><body>\n"
    tableheader = '<table border="1">\n'
    tablefooter = '</table>\n'
    footer = '</body></html>\n'

    # Row template for referent tables; `level` picks the CSS ids that
    # indent and shade by nesting depth.
    refrow = """<tr id="r%(level)d">
<td id="p%(level)d">%(name)s</td>
<td id="num">%(size)s</td>
<td id="num">%(pct)3.1f%%</td></tr>"""

    def _print_refs(self, fobj: IO, refs: Iterable[Asized], total: int,
                    level: int = 1, minsize: int = 0, minpct: float = 0.1
                    ) -> None:
        """
        Print individual referents recursively.

        :param fobj: open output stream
        :param refs: `Asized` referent records to render
        :param total: size used as denominator for the percentage column
        :param level: nesting depth, drives CSS-based indentation
        :param minsize: suppress referents at or below this byte size
        :param minpct: suppress referents at or below this share of `total`
        """
        lrefs = list(refs)
        lrefs.sort(key=lambda x: x.size)
        lrefs.reverse()
        if level == 1:
            fobj.write('<table>\n')
        for ref in lrefs:
            if ref.size > minsize and (ref.size * 100.0 / total) > minpct:
                data = dict(level=level,
                            name=trunc(str(ref.name), 128),
                            size=pp(ref.size),
                            pct=ref.size * 100.0 / total)
                fobj.write(self.refrow % data)
                # NOTE(review): the recursive call does not forward
                # minsize/minpct, so deeper levels fall back to the
                # defaults -- confirm this is intended.
                self._print_refs(fobj, ref.refs, total, level=level + 1)
        if level == 1:
            fobj.write("</table>\n")

    class_summary = """<p>%(cnt)d instances of %(cls)s were registered. The
average size is %(avg)s, the minimal size is %(min)s, the maximum size
is %(max)s.</p>\n"""
    class_snapshot = '''<h3>Snapshot: %(name)s, %(total)s occupied by instances
of class %(cls)s</h3>\n'''

    def print_class_details(self, fname: str, classname: str) -> None:
        """
        Print detailed statistics and instances for the class `classname`. All
        data will be written to the file `fname`.
        """
        # NOTE(review): the file handle leaks if any write below raises;
        # a `with open(...)` block would be safer.
        fobj = open(fname, "w")
        fobj.write(self.header % (classname, self.style))
        fobj.write("<h1>%s</h1>\n" % (classname))
        sizes = [tobj.get_max_size() for tobj in self.index[classname]]
        total = 0
        for s in sizes:
            total += s
        # NOTE(review): raises ZeroDivisionError/ValueError when no
        # instances of `classname` were recorded -- confirm callers
        # guarantee a non-empty index entry.
        data = {'cnt': len(self.index[classname]), 'cls': classname}
        data['avg'] = pp(total / len(sizes))
        data['max'] = pp(max(sizes))
        data['min'] = pp(min(sizes))
        fobj.write(self.class_summary % data)
        # Lifetime chart tag (or fallback text) prepared by create_html.
        fobj.write(self.charts[classname])
        fobj.write("<h2>Coalesced Referents per Snapshot</h2>\n")
        for snapshot in self.snapshots:
            if snapshot.classes and classname in snapshot.classes:
                merged = snapshot.classes[classname]['merged']
                fobj.write(self.class_snapshot % {
                    'name': snapshot.desc,
                    'cls': classname,
                    'total': pp(merged.size),
                })
                if merged.refs:
                    self._print_refs(fobj, merged.refs, merged.size)
                else:
                    fobj.write('<p>No per-referent sizes recorded.</p>\n')
        fobj.write("<h2>Instances</h2>\n")
        for tobj in self.index[classname]:
            fobj.write('<table id="tl" width="100%" rules="rows">\n')
            fobj.write('<tr><td id="hl" width="140px">Instance</td>' +
                       '<td id="hl">%s at 0x%08x</td></tr>\n' %
                       (tobj.name, tobj.id))
            if tobj.repr:
                fobj.write("<tr><td>Representation</td>" +
                           "<td>%s&nbsp;</td></tr>\n" % tobj.repr)
            fobj.write("<tr><td>Lifetime</td><td>%s - %s</td></tr>\n" %
                       (pp_timestamp(tobj.birth), pp_timestamp(tobj.death)))
            if tobj.trace:
                trace = "<pre>%s</pre>" % (_format_trace(tobj.trace))
                fobj.write("<tr><td>Instantiation</td><td>%s</td></tr>\n" %
                           trace)
            # One row per recorded sizing; expand referents when present.
            for (timestamp, size) in tobj.snapshots:
                fobj.write("<tr><td>%s</td>" % pp_timestamp(timestamp))
                if not size.refs:
                    fobj.write("<td>%s</td></tr>\n" % pp(size.size))
                else:
                    fobj.write("<td>%s" % pp(size.size))
                    self._print_refs(fobj, size.refs, size.size)
                    fobj.write("</td></tr>\n")
            fobj.write("</table>\n")
        fobj.write(self.footer)
        fobj.close()

    snapshot_cls_header = """<tr>
<th id="hl">Class</th>
<th id="hl" align="right">Instance #</th>
<th id="hl" align="right">Total</th>
<th id="hl" align="right">Average size</th>
<th id="hl" align="right">Share</th></tr>\n"""

    snapshot_cls = """<tr>
<td>%(cls)s</td>
<td align="right">%(active)d</td>
<td align="right">%(sum)s</td>
<td align="right">%(avg)s</td>
<td align="right">%(pct)3.2f%%</td></tr>\n"""

    snapshot_summary = """<p>Total virtual memory assigned to the program
at that time was %(sys)s, which includes %(overhead)s profiling
overhead. The ClassTracker tracked %(tracked)s in total. The measurable
objects including code objects but excluding overhead have a total size
of %(asizeof)s.</p>\n"""

    def relative_path(self, filepath: str, basepath: Optional[str] = None
                      ) -> str:
        """
        Convert the filepath path to a relative path against basepath. By
        default basepath is self.basedir.
        """
        if basepath is None:
            basepath = self.basedir
        if not basepath:
            return filepath
        if filepath.startswith(basepath):
            filepath = filepath[len(basepath):]
            # Drop the separator left over after stripping the prefix.
            if filepath and filepath[0] == os.sep:
                filepath = filepath[1:]
        return filepath

    def create_title_page(self, filename: str, title: str = '') -> None:
        """
        Output the title page.
        """
        fobj = open(filename, "w")
        fobj.write(self.header % (title, self.style))
        fobj.write("<h1>%s</h1>\n" % title)
        fobj.write("<h2>Memory distribution over time</h2>\n")
        fobj.write(self.charts['snapshots'])
        fobj.write("<h2>Snapshots statistics</h2>\n")
        fobj.write('<table id="nb">\n')
        classlist = list(self.index.keys())
        classlist.sort()
        for snapshot in self.snapshots:
            fobj.write('<tr><td>\n')
            fobj.write('<table id="tl" rules="rows">\n')
            fobj.write("<h3>%s snapshot at %s</h3>\n" % (
                snapshot.desc or 'Untitled',
                pp_timestamp(snapshot.timestamp)
            ))
            data = {}
            data['sys'] = pp(snapshot.system_total.vsz)
            data['tracked'] = pp(snapshot.tracked_total)
            data['asizeof'] = pp(snapshot.asizeof_total)
            data['overhead'] = pp(getattr(snapshot, 'overhead', 0))
            fobj.write(self.snapshot_summary % data)
            if snapshot.tracked_total:
                fobj.write(self.snapshot_cls_header)
                for classname in classlist:
                    if snapshot.classes:
                        info = snapshot.classes[classname].copy()
                        path = self.relative_path(self.links[classname])
                        info['cls'] = '<a href="%s">%s</a>' % (path, classname)
                        info['sum'] = pp(info['sum'])
                        info['avg'] = pp(info['avg'])
                        fobj.write(self.snapshot_cls % info)
            fobj.write('</table>')
            fobj.write('</td><td>\n')
            if snapshot.tracked_total:
                # Pie chart tag (or '') prepared by create_html.
                fobj.write(self.charts[snapshot])
            fobj.write('</td></tr>\n')
        fobj.write("</table>\n")
        fobj.write(self.footer)
        fobj.close()

    def create_lifetime_chart(self, classname: str, filename: str = '') -> str:
        """
        Create chart that depicts the lifetime of the instance registered with
        `classname`. The output is written to `filename`.
        """
        try:
            from pylab import figure, title, xlabel, ylabel, plot, savefig
        except ImportError:
            return HtmlStats.nopylab_msg % (classname + " lifetime")
        # Build a step series of the live-instance count: +1 at each birth,
        # -1 at each death, then accumulate.
        cnt = []
        for tobj in self.index[classname]:
            cnt.append([tobj.birth, 1])
            if tobj.death:
                cnt.append([tobj.death, -1])
        cnt.sort()
        for i in range(1, len(cnt)):
            cnt[i][1] += cnt[i - 1][1]
        x = [t for [t, c] in cnt]
        y = [c for [t, c] in cnt]
        figure()
        xlabel("Execution time [s]")
        ylabel("Instance #")
        title("%s instances" % classname)
        plot(x, y, 'o')
        savefig(filename)
        return self.chart_tag % (os.path.basename(filename))

    def create_snapshot_chart(self, filename: str = '') -> str:
        """
        Create chart that depicts the memory allocation over time apportioned
        to the tracked classes.
        """
        try:
            from pylab import (figure, title, xlabel, ylabel, plot, fill,
                               legend, savefig)
            import matplotlib.mlab as mlab
        except ImportError:
            return self.nopylab_msg % ("memory allocation")
        classlist = self.tracked_classes
        times = [snapshot.timestamp for snapshot in self.snapshots]
        base = [0.0] * len(self.snapshots)
        poly_labels = []
        polys = []
        # Stack one filled polygon per class that ever exceeds 3% share.
        for cn in classlist:
            pct = [snapshot.classes[cn]['pct'] for snapshot in self.snapshots
                   if snapshot.classes is not None]
            if pct and max(pct) > 3.0:
                sz = [float(fp.classes[cn]['sum']) / (1024 * 1024)
                      for fp in self.snapshots
                      if fp.classes is not None]
                sz = [sx + sy for sx, sy in zip(base, sz)]
                # NOTE(review): mlab.poly_between was removed from modern
                # matplotlib releases -- confirm the pinned version.
                xp, yp = mlab.poly_between(times, base, sz)
                polys.append(((xp, yp), {'label': cn}))
                poly_labels.append(cn)
                base = sz
        figure()
        title("Snapshot Memory")
        xlabel("Execution Time [s]")
        ylabel("Virtual Memory [MiB]")
        sizes = [float(fp.asizeof_total) / (1024 * 1024)
                 for fp in self.snapshots]
        plot(times, sizes, 'r--', label='Total')
        sizes = [float(fp.tracked_total) / (1024 * 1024)
                 for fp in self.snapshots]
        plot(times, sizes, 'b--', label='Tracked total')
        for (args, kwds) in polys:
            fill(*args, **kwds)
        legend(loc=2)
        savefig(filename)
        return self.chart_tag % (self.relative_path(filename))

    def create_pie_chart(self, snapshot: 'Snapshot', filename: str = '') -> str:
        """
        Create a pie chart that depicts the distribution of the allocated
        memory for a given `snapshot`. The chart is saved to `filename`.
        """
        try:
            from pylab import figure, title, pie, axes, savefig
            from pylab import sum as pylab_sum
        except ImportError:
            return self.nopylab_msg % ("pie_chart")
        # Don't bother illustrating a pie without pieces.
        if not snapshot.tracked_total or snapshot.classes is None:
            return ''
        # Classes above a 3% share get their own slice; the remainder is
        # aggregated into an 'Other' slice inserted first.
        classlist = []
        sizelist = []
        for k, v in list(snapshot.classes.items()):
            if v['pct'] > 3.0:
                classlist.append(k)
                sizelist.append(v['sum'])
        sizelist.insert(0, snapshot.asizeof_total - pylab_sum(sizelist))
        classlist.insert(0, 'Other')
        # NOTE(review): title() is called before figure(), so the title is
        # applied to the previous current figure and is lost on the new
        # one -- likely these two calls should be swapped.
        title("Snapshot (%s) Memory Distribution" % (snapshot.desc))
        figure(figsize=(8, 8))
        axes([0.1, 0.1, 0.8, 0.8])
        pie(sizelist, labels=classlist)
        savefig(filename, dpi=50)
        return self.chart_tag % (self.relative_path(filename))

    def create_html(self, fname: str, title: str = "ClassTracker Statistics"
                    ) -> None:
        """
        Create HTML page `fname` and additional files in a directory derived
        from `fname`.
        """
        # Create a folder to store the charts and additional HTML files.
        self.basedir = os.path.dirname(os.path.abspath(fname))
        self.filesdir = os.path.splitext(fname)[0] + '_files'
        if not os.path.isdir(self.filesdir):
            os.mkdir(self.filesdir)
        self.filesdir = os.path.abspath(self.filesdir)
        self.links = {}  # type: Dict[str, str]
        # Annotate all snapshots in advance
        self.annotate()
        # Create charts. The tags to show the images are returned and stored in
        # the self.charts dictionary. This allows to return alternative text if
        # the chart creation framework is not available.
        self.charts = {}  # type: Dict[Union[str, Snapshot], str]
        fn = os.path.join(self.filesdir, 'timespace.png')
        self.charts['snapshots'] = self.create_snapshot_chart(fn)
        for fp, idx in zip(self.snapshots, list(range(len(self.snapshots)))):
            fn = os.path.join(self.filesdir, 'fp%d.png' % (idx))
            self.charts[fp] = self.create_pie_chart(fp, fn)
        for cn in list(self.index.keys()):
            fn = os.path.join(self.filesdir, cn.replace('.', '_') + '-lt.png')
            self.charts[cn] = self.create_lifetime_chart(cn, fn)
        # Create HTML pages first for each class and then the index page.
        for cn in list(self.index.keys()):
            fn = os.path.join(self.filesdir, cn.replace('.', '_') + '.html')
            self.links[cn] = fn
            self.print_class_details(fn, cn)
        self.create_title_page(fname, title=title)

View File

@@ -0,0 +1,80 @@
from pympler.refgraph import ReferenceGraph
from pympler.util.stringutils import trunc, pp
import sys
import gc
__all__ = ['GarbageGraph', 'start_debug_garbage', 'end_debug_garbage']
class GarbageGraph(ReferenceGraph):
    """
    A ``ReferenceGraph`` specialised to illustrate objects that form
    reference cycles. The garbage collector is switched to debug mode (all
    identified garbage is kept in `gc.garbage`) and a collection is
    triggered. The collected objects are then rendered as a directed graph.

    Large graphs can be reduced to the actual cycles by passing
    ``reduce=True`` to the constructor.

    It is recommended to disable the garbage collector while a
    ``GarbageGraph`` is in use.

    >>> from pympler.garbagegraph import GarbageGraph, start_debug_garbage
    >>> start_debug_garbage()
    >>> l = []
    >>> l.append(l)
    >>> del l
    >>> gb = GarbageGraph()
    >>> gb.render('garbage.eps')
    True
    """

    def __init__(self, reduce=False, collectable=True):
        """
        Capture the objects reported by the garbage collector.

        If `collectable` is true, every reference cycle is recorded;
        otherwise only uncollectable objects are reported.
        """
        gc.set_debug(gc.DEBUG_SAVEALL if collectable else 0)
        gc.collect()
        ReferenceGraph.__init__(self, gc.garbage, reduce)

    def print_stats(self, stream=None):
        """
        Log annotated garbage objects to console or file.

        :param stream: open file, uses sys.stdout if not given
        """
        out = stream or sys.stdout  # pragma: no cover
        # Largest objects first.
        self.metadata.sort(key=lambda node: -node.size)
        out.write('%-10s %8s %-12s %-46s\n' % ('id', 'size', 'type',
                                               'representation'))
        for node in self.metadata:
            out.write('0x%08x %8d %-12s %-46s\n' % (node.id, node.size,
                                                    trunc(node.type, 12),
                                                    trunc(node.str, 46)))
        out.write('Garbage: %8d collected objects (%s in cycles): %12s\n' %
                  (self.count, self.num_in_cycles, pp(self.total_size)))
def start_debug_garbage():
    """
    Turn off garbage collector to analyze *collectable* reference cycles.
    """
    # Flush objects that are already garbage before disabling collection.
    gc.collect()
    gc.disable()
def end_debug_garbage():
    """
    Turn garbage collection on and disable debug output.
    """
    # Clear all debug flags first, then re-enable automatic collection.
    gc.set_debug(0)
    gc.enable()

View File

@@ -0,0 +1,97 @@
"""
Memory usage profiler for Python.
"""
import inspect
import sys
from pympler import muppy
class MProfiler(object):
    """A memory usage profiler class.

    Memory data for each function is stored as a 3-element list in the
    dictionary self.memories. The index is always a codepoint (see below).
    The following are the definitions of the members:

    [0] = The number of times this function was called
    [1] = Minimum memory consumption when this function was measured.
    [2] = Maximum memory consumption when this function was measured.

    A codepoint is a 3-tuple of the type
    (filename, functionname, linenumber). You can omit either element, which
    will cause the profiling to be triggered if any of the other criteria
    match. E.g.

    - (None, foo, None), will profile any foo function,
    - (bar, foo, None) will profile only the foo function from the bar file,
    - (bar, foo, 17) will profile only line 17 of the foo function defined
      in the file bar.

    Additionally, you can define on what events you want the profiling be
    triggered. Possible events are defined in
    http://docs.python.org/lib/debugger-hooks.html.

    If you do not define either codepoints or events, the profiler will
    record the memory usage at every codepoint and event.
    """

    def __init__(self, codepoints=None, events=None):
        """
        keyword arguments:
        codepoints -- a list of points in code to monitor (defaults to all
                      codepoints)
        events -- a list of events to monitor (defaults to all events)
        """
        # codepoint -> [call count, min size, max size]
        self.memories = {}
        self.codepoints = codepoints
        self.events = events

    def codepoint_included(self, codepoint):
        """Check if codepoint matches any of the defined codepoints.

        A None element in a configured codepoint acts as a wildcard for
        the corresponding position.
        """
        if self.codepoints is None:
            return True
        for cp in self.codepoints:
            if all(want is None or want == got
                   for want, got in zip(cp, codepoint)):
                return True
        return False

    def profile(self, frame, event, arg):  # arg req to match signature
        """Profiling method used to profile matching codepoints and events."""
        if (self.events is None) or (event in self.events):
            frame_info = inspect.getframeinfo(frame)
            # getframeinfo returns (filename, lineno, function, ...);
            # reorder into (filename, function, lineno).
            cp = (frame_info[0], frame_info[2], frame_info[1])
            if self.codepoint_included(cp):
                objects = muppy.get_objects()
                size = muppy.get_size(objects)
                if cp not in self.memories:
                    # BUG FIX: was initialized as a 4-element [0, 0, 0, 0]
                    # with a dead trailing slot, contradicting the documented
                    # 3-element layout [count, min, max].
                    self.memories[cp] = [1, size, size]
                else:
                    entry = self.memories[cp]
                    entry[0] += 1
                    entry[1] = min(entry[1], size)
                    entry[2] = max(entry[2], size)

    def run(self, cmd):
        """Profile `cmd` with this profiler installed and return self.

        NOTE: `cmd` is executed via exec -- only pass trusted code.
        """
        sys.setprofile(self.profile)
        try:
            exec(cmd)
        finally:
            # Always uninstall the profile hook, even if `cmd` raises.
            sys.setprofile(None)
        return self
if __name__ == "__main__":
    # Demo: profile a trivial statement and dump the collected
    # per-codepoint memory metrics.
    p = MProfiler()
    p.run("print('hello')")
    print(p.memories)

View File

@@ -0,0 +1,275 @@
from typing import Any, Callable, Dict, List, Optional, Set, Tuple
import gc
from pympler import summary
from pympler.util import compat
from inspect import isframe, stack
from sys import getsizeof
from pympler.asizeof import _Py_TPFLAGS_HAVE_GC
def ignore_object(obj: Any) -> bool:
    """Return True if `obj` should be excluded from object listings.

    Frame objects are excluded; objects whose inspection raises a
    ReferenceError (e.g. dead weak proxies) are excluded as well.
    """
    try:
        return isframe(obj)
    except ReferenceError:
        # The referent died while we were looking at it -- ignore it.
        return True
def get_objects(remove_dups: bool = True, include_frames: bool = False
                ) -> List[Any]:
    """Return a list of all known objects excluding frame objects.

    If (outer) frame objects shall be included, pass `include_frames=True`. In
    order to prevent building reference cycles, the current frame object (of
    the caller of get_objects) is ignored. This will not prevent creating
    reference cycles if the object list is passed up the call-stack. Therefore,
    frame objects are not included by default.

    Keyword arguments:
    remove_dups -- if True, all duplicate objects will be removed.
    include_frames -- if True, includes frame objects.
    """
    gc.collect()
    # Do not initialize local variables before calling gc.get_objects or those
    # will be included in the list. Furthermore, ignore frame objects to
    # prevent reference cycles.
    tmp = gc.get_objects()
    tmp = [o for o in tmp if not ignore_object(o)]
    res = []
    for o in tmp:
        # gc.get_objects returns only container objects, but we also want
        # the objects referenced by them
        refs = get_referents(o)
        for ref in refs:
            if not gc.is_tracked(ref):
                # we already got the container objects, now we only add
                # non-container objects
                res.append(ref)
    res.extend(tmp)
    if remove_dups:
        # Deduplication is by object identity (see _remove_duplicates).
        res = _remove_duplicates(res)
    if include_frames:
        # stack()[2:] skips this frame and the immediate caller's frame.
        for sf in stack()[2:]:
            res.append(sf[0])
    return res
def get_size(objects: List[Any]) -> int:
    """Compute the total (shallow) size in bytes of all elements in objects."""
    total = 0
    for item in objects:
        try:
            total += getsizeof(item)
        except AttributeError:
            # Some objects fail getsizeof; report and skip them.
            print("IGNORING: type=%s; o=%s" % (str(type(item)), str(item)))
    return total
def get_diff(left: List[Any], right: List[Any]) -> Dict[str, List[Any]]:
    """Get the difference of both lists.

    The result will be a dict with this form {'+': [], '-': []}.
    Items listed in '+' exist only in the right list,
    items listed in '-' exist only in the left list.
    """
    res = {'+': [], '-': []}  # type: Dict[str, List[Any]]

    def partition(objects: List[Any]) -> Dict[type, List[Any]]:
        """Partition the passed object list by object type."""
        # Note: this `res` intentionally shadows the outer `res`.
        res = {}  # type: Dict[type, List[Any]]
        for o in objects:
            t = type(o)
            if type(o) not in res:
                res[t] = []
            res[t].append(o)
        return res

    def get_not_included(foo: List[Any], bar: Dict[type, List[Any]]
                         ) -> List[Any]:
        """Compare objects from foo with objects defined in the values of
        bar (set of partitions).
        Returns a list of all objects included in list, but not dict values.
        """
        # compat.object_in_list presumably compares by identity -- TODO
        # confirm against pympler.util.compat.
        res = []  # type: List[Any]
        for o in foo:
            if not compat.object_in_list(type(o), bar):
                res.append(o)
            elif not compat.object_in_list(o, bar[type(o)]):
                res.append(o)
        return res

    # Create partitions of both lists. This will reduce the time required for
    # the comparison
    left_objects = partition(left)
    right_objects = partition(right)
    # and then do the diff
    res['+'] = get_not_included(right, left_objects)
    res['-'] = get_not_included(left, right_objects)
    return res
def sort(objects: List[Any]) -> List[Any]:
    """Return a new list with the objects sorted by size in bytes
    (ascending)."""
    return sorted(objects, key=getsizeof)
def filter(objects: List[Any], Type: Optional[type] = None, min: int = -1,
           max: int = -1) -> List[Any]:
    """Filter objects.

    The filter can be by type, minimum size, and/or maximum size.

    Keyword arguments:
    Type -- object type to filter by
    min -- minimum object size (exclusive bound)
    max -- maximum object size (exclusive bound)

    Raises ValueError if both bounds are given and min exceeds max.
    """
    # -1 means "no bound"; a bounded max below min is contradictory.
    if -1 < max < min:
        raise ValueError("minimum must be smaller than maximum")
    selected = objects
    if Type is not None:
        selected = [obj for obj in selected if isinstance(obj, Type)]
    if min > -1:
        selected = [obj for obj in selected if getsizeof(obj) > min]
    if max > -1:
        selected = [obj for obj in selected if getsizeof(obj) < max]
    return selected
def get_referents(object: Any, level: int = 1) -> List[Any]:
    """Get all referents of an object up to a certain level.

    The referents will not be returned in a specific order and
    will not contain duplicate objects. Duplicate objects will be removed.

    Keyword arguments:
    level -- level of indirection to which referents considered.

    This function is recursive.
    """
    res = gc.get_referents(object)
    level -= 1
    if level > 0:
        # NOTE(review): `res` is extended while it is being iterated, so
        # items appended during the loop are themselves expanded too --
        # this can descend deeper than `level` suggests. Confirm this is
        # intended before changing it.
        for o in res:
            res.extend(get_referents(o, level))
    # Deduplicate by identity.
    res = _remove_duplicates(res)
    return res
def _get_usage(function: Callable, *args: Any) -> Optional[List]:
    """Test if more memory is used after the function has been called.

    The function will be invoked twice and only the second measurement will be
    considered. Thus, memory used in initialisation (e.g. loading modules)
    will not be included in the result. The goal is to identify memory leaks
    caused by functions which use more and more memory.

    Any arguments next to the function will be passed on to the function
    on invocation.

    Note that this function is currently experimental, because it is not
    tested thoroughly and performs poorly.

    Returns the swept summary diff, or None if no additional usage was
    detected.
    """
    # The usage of a function is calculated by creating one summary of all
    # objects before the function is invoked and afterwards. These summaries
    # are compared and the diff is returned.
    # This function works in a 2-steps process. Before the actual function is
    # invoked an empty dummy function is measured to identify the overhead
    # involved in the measuring process. This overhead then is subtracted from
    # the measurement performed on the passed function. The result reflects the
    # actual usage of a function call.
    # Also, a measurement is performed twice, allowing the adjustment to
    # initializing things, e.g. modules
    res = None

    def _get_summaries(function: Callable, *args: Any) -> Tuple:
        """Get a 2-tuple containing one summary from before, and one summary
        from after the function has been invoked.
        """
        s_before = summary.summarize(get_objects())
        function(*args)
        s_after = summary.summarize(get_objects())
        return (s_before, s_after)

    def _get_usage(function: Callable, *args: Any) -> List:
        """Get the usage of a function call.
        This function is to be used only internally. The 'real' get_usage
        function is a wrapper around _get_usage, but the workload is done
        here.
        """
        # init before calling
        (s_before, s_after) = _get_summaries(function, *args)
        # ignore all objects used for the measurement
        ignore = []
        if s_before != s_after:
            ignore.append(s_before)
        for row in s_before:
            # ignore refs from summary and frame (loop)
            if len(gc.get_referrers(row)) == 2:
                ignore.append(row)
            for item in row:
                # ignore refs from summary and frame (loop)
                if len(gc.get_referrers(item)) == 2:
                    ignore.append(item)
        for o in ignore:
            s_after = summary._subtract(s_after, o)
        res = summary.get_diff(s_before, s_after)
        return summary._sweep(res)

    # calibrate; twice for initialization
    def noop() -> None:
        pass
    offset = _get_usage(noop)
    offset = _get_usage(noop)
    # perform operation twice to handle objects possibly used in
    # initialisation
    tmp = _get_usage(function, *args)
    tmp = _get_usage(function, *args)
    # Subtract the measurement overhead established by the noop runs.
    tmp = summary.get_diff(offset, tmp)
    tmp = summary._sweep(tmp)
    if len(tmp) != 0:
        res = tmp
    return res
def _is_containerobject(o: Any) -> bool:
    """Is the passed object a container object (tracked by the GC)."""
    type_flags = getattr(type(o), '__flags__', 0)
    return bool(type_flags & _Py_TPFLAGS_HAVE_GC)
def _remove_duplicates(objects: List[Any]) -> List[Any]:
    """Remove duplicate objects, keeping the first occurrence of each.

    Duplicates are detected by object identity (id), not equality.
    Inspired by http://www.peterbe.com/plog/uniqifiers-benchmark
    """
    seen_ids = set()  # type: Set[int]
    unique = []
    for obj in objects:
        obj_id = id(obj)
        if obj_id not in seen_ids:
            seen_ids.add(obj_id)
            unique.append(obj)
    return unique
def print_summary() -> None:
    """Print a summary of all known objects."""
    # Snapshot every reachable object, aggregate per type via the summary
    # module, and render the table to stdout.
    summary.print_(summary.summarize(get_objects()))

View File

@@ -0,0 +1,115 @@
"""
Expose a memory-profiling panel to the Django Debug toolbar.
Shows process memory information (virtual size, resident set size) and model
instances for the current request.
Requires Django and Django Debug toolbar:
https://github.com/jazzband/django-debug-toolbar
Pympler adds a memory panel as a third party addon (not included in the
django-debug-toolbar). It can be added by overriding the `DEBUG_TOOLBAR_PANELS`
setting in the Django project settings::
DEBUG_TOOLBAR_PANELS = (
'debug_toolbar.panels.timer.TimerDebugPanel',
'pympler.panels.MemoryPanel',
)
Pympler also needs to be added to the `INSTALLED_APPS` in the Django settings::
INSTALLED_APPS = INSTALLED_APPS + ('debug_toolbar', 'pympler')
"""
from pympler.classtracker import ClassTracker
from pympler.process import ProcessMemoryInfo
from pympler.util.stringutils import pp
try:
    from debug_toolbar.panels import Panel
    from django.apps import apps
    from django.template import Context, Template
    from django.template.loader import render_to_string
    from django.http.request import HttpRequest
    from django.http.response import HttpResponse
except ImportError:
    # Django or django-debug-toolbar is not installed: provide inert
    # placeholders so importing this module does not fail. The panel is
    # unusable in that case. NOTE(review): `apps` and `render_to_string`
    # have no fallbacks here, so MemoryPanel methods would raise NameError
    # -- confirm that is acceptable.
    class Panel(object):  # type: ignore
        pass

    class Template(object):  # type: ignore
        pass

    class Context(object):  # type: ignore
        pass

    class HttpRequest(object):  # type: ignore
        pass

    class HttpResponse(object):  # type: ignore
        pass
class MemoryPanel(Panel):
    """
    Debug-toolbar panel reporting process memory usage and tracked class
    instance counts for the current request.
    """

    # Identifiers used by django-debug-toolbar.
    name = 'pympler'
    title = 'Memory'
    template = 'memory_panel.html'
    # Template machinery classes tracked in addition to the installed models.
    classes = [Context, Template]

    def process_request(self, request: HttpRequest) -> HttpResponse:
        """Snapshot memory before and after the wrapped request and record
        the measurements and annotated tracker statistics."""
        self._tracker = ClassTracker()
        for cls in apps.get_models() + self.classes:
            self._tracker.track_class(cls)
        self._tracker.create_snapshot('before')
        self.record_stats({'before': ProcessMemoryInfo()})
        response = super(MemoryPanel, self).process_request(request)
        self.record_stats({'after': ProcessMemoryInfo()})
        self._tracker.create_snapshot('after')
        stats = self._tracker.stats
        stats.annotate()
        self.record_stats({'stats': stats})
        return response

    def enable_instrumentation(self) -> None:
        # Start tracking all installed models plus the template machinery.
        self._tracker = ClassTracker()
        for cls in apps.get_models() + self.classes:
            self._tracker.track_class(cls)

    def disable_instrumentation(self) -> None:
        # Stop tracking everything registered in enable_instrumentation.
        self._tracker.detach_all_classes()

    def nav_subtitle(self) -> str:
        """Build the toolbar subtitle from the after-request RSS and, when
        memory grew, the delta relative to before the request."""
        context = self.get_stats()
        before = context['before']
        after = context['after']
        rss = after.rss
        delta = rss - before.rss
        # Only show the delta when memory grew during the request.
        delta = ('(+%s)' % pp(delta)) if delta > 0 else ''
        return "%s %s" % (pp(rss), delta)

    @property
    def content(self) -> str:
        """Render the panel body: process metrics and per-class counts."""
        context = self.get_stats()
        before = context['before']
        after = context['after']
        stats = context['stats']
        rows = [('Resident set size', after.rss),
                ('Virtual size', after.vsz),
                ]
        rows.extend(after - before)
        rows = [(key, pp(value)) for key, value in rows]
        rows.extend(after.os_specific)
        classes = []
        snapshot = stats.snapshots[-1]
        for model in stats.tracked_classes:
            history = [cnt for _, cnt in stats.history[model]]
            size = snapshot.classes.get(model, {}).get('sum', 0)
            # Only report classes that still have live instances.
            if history and history[-1] > 0:
                classes.append((model, history, pp(size)))
        context.update({'rows': rows, 'classes': classes})
        return render_to_string(self.template, context)

View File

@@ -0,0 +1,238 @@
"""
This module queries process memory allocation metrics from the operating
system. It provides a platform independent layer to get the amount of virtual
and physical memory allocated to the Python process.
Different mechanisms are implemented: Either the process stat file is read
(Linux), the `ps` command is executed (BSD/OSX/Solaris) or the resource module
is queried (Unix fallback). On Windows try to use the win32 module if
available. If all fails, return 0 for each attribute.
Windows without the win32 module is not supported.
>>> from pympler.process import ProcessMemoryInfo
>>> pmi = ProcessMemoryInfo()
>>> print ("Virtual size [Byte]: " + str(pmi.vsz)) # doctest: +ELLIPSIS
Virtual size [Byte]: ...
"""
from typing import Iterable, List, Tuple
import logging
import threading
from mmap import PAGESIZE # type: ignore
from os import getpid
from subprocess import Popen, PIPE
from pympler.util.stringutils import pp
class _ProcessMemoryInfo(object):
    """Stores information about various process-level memory metrics. The
    virtual size is stored in attribute `vsz`, the physical memory allocated to
    the process in `rss`, and the number of (major) pagefaults in `pagefaults`.
    On Linux, `data_segment`, `code_segment`, `shared_segment` and
    `stack_segment` contain the number of Bytes allocated for the respective
    segments. This is an abstract base class which needs to be overridden by
    operating system specific implementations. This is done when importing the
    module.
    """

    pagesize = PAGESIZE

    def __init__(self) -> None:
        self.pid = getpid()
        # All metrics default to zero until a platform-specific update()
        # fills them in.
        self.rss = 0
        self.vsz = 0
        self.pagefaults = 0
        self.os_specific = []  # type: List[Tuple[str, str]]
        self.data_segment = 0
        self.code_segment = 0
        self.shared_segment = 0
        self.stack_segment = 0
        # Must run last: update() reads the attributes initialised above.
        self.available = self.update()

    def __repr__(self) -> str:
        return "<%s vsz=%d rss=%d>" % (self.__class__.__name__,
                                       self.vsz, self.rss)

    def update(self) -> bool:
        """
        Refresh the information using platform instruments. Returns true if
        this operation yields useful values on the current platform.
        """
        # The abstract base has no platform instruments.
        return False  # pragma: no cover

    def __sub__(self, other: '_ProcessMemoryInfo') -> Iterable[Tuple[str, int]]:
        """Return labelled deltas of the headline metrics against `other`."""
        rss_delta = self.rss - other.rss
        vsz_delta = self.vsz - other.vsz
        return [('Resident set size (delta)', rss_delta),
                ('Virtual size (delta)', vsz_delta),
                ]
# Default to the abstract fallback; replaced below with a platform-specific
# implementation if one proves functional at import time.
ProcessMemoryInfo = _ProcessMemoryInfo  # type: type
def is_available() -> bool:
    """
    Convenience function to check if the current platform is supported by this
    module.
    """
    # ProcessMemoryInfo is bound at import time to the first implementation
    # whose update() succeeded; probe it once more here.
    return ProcessMemoryInfo().update()
class _ProcessMemoryInfoPS(_ProcessMemoryInfo):

    def update(self) -> bool:
        """
        Get virtual and resident size of current process via 'ps'.
        This should work for MacOS X, Solaris, Linux. Returns true if it was
        successful.
        """
        try:
            proc = Popen(['/bin/ps', '-p%s' % self.pid, '-o', 'rss,vsz'],
                         stdout=PIPE, stderr=PIPE)
        except OSError:  # pragma: no cover
            return False
        # Output is a header line plus one data line; the last two tokens
        # are the rss and vsz columns in KiB.
        fields = proc.communicate()[0].split()
        if proc.returncode == 0 and len(fields) >= 2:  # pragma: no branch
            self.vsz = int(fields[-1]) * 1024
            self.rss = int(fields[-2]) * 1024
            return True
        return False  # pragma: no cover
class _ProcessMemoryInfoProc(_ProcessMemoryInfo):

    # Map /proc/<pid>/status keys to human readable descriptions for the
    # os_specific report.
    key_map = {
        'VmPeak': 'Peak virtual memory size',
        'VmSize': 'Virtual memory size',
        'VmLck': 'Locked memory size',
        'VmHWM': 'Peak resident set size',
        'VmRSS': 'Resident set size',
        'VmStk': 'Size of stack segment',
        'VmData': 'Size of data segment',
        'VmExe': 'Size of code segment',
        'VmLib': 'Shared library code size',
        'VmPTE': 'Page table entries size',
    }

    def update(self) -> bool:
        """
        Get virtual size of current process by reading the process' stat file.
        This should work for Linux.

        Returns True on success, False if the /proc files are unreadable.
        """
        # Hoisted out of the loop -- was redefined on every iteration.
        def size_in_bytes(value: str) -> int:
            # /proc/self/status values look like "1234 kB".
            return int(value.split()[0]) * 1024

        # BUG FIX: the original left both file handles open if parsing
        # raised; context managers guarantee they are closed.
        try:
            with open('/proc/self/stat') as stat:
                stats = stat.read().split()
            with open('/proc/self/status') as status:
                status_lines = status.readlines()
        except IOError:  # pragma: no cover
            return False
        # Field indices per proc(5): 22=vsize (bytes), 23=rss (pages),
        # 11=majflt.
        self.vsz = int(stats[22])
        self.rss = int(stats[23]) * self.pagesize
        self.pagefaults = int(stats[11])
        for entry in status_lines:
            try:
                key, value = entry.split(':', 1)
            except ValueError:
                # Skip malformed lines.
                continue
            value = value.strip()
            if key == 'VmData':
                self.data_segment = size_in_bytes(value)
            elif key == 'VmExe':
                self.code_segment = size_in_bytes(value)
            elif key == 'VmLib':
                self.shared_segment = size_in_bytes(value)
            elif key == 'VmStk':
                self.stack_segment = size_in_bytes(value)
            key = self.key_map.get(key, '')
            if key:
                self.os_specific.append((key, pp(size_in_bytes(value))))
        return True
try:
    from resource import getrusage, RUSAGE_SELF

    class _ProcessMemoryInfoResource(_ProcessMemoryInfo):
        def update(self) -> bool:
            """
            Get memory metrics of current process through `getrusage`. Only
            available on Unix, on Linux most of the fields are not set,
            and on BSD units are used that are not very helpful, see:
            http://www.perlmonks.org/?node_id=626693
            Furthermore, getrusage only provides accumulated statistics (e.g.
            max rss vs current rss).
            """
            usage = getrusage(RUSAGE_SELF)
            self.rss = usage.ru_maxrss * 1024
            self.data_segment = usage.ru_idrss * 1024  # TODO: ticks?
            self.shared_segment = usage.ru_ixrss * 1024  # TODO: ticks?
            self.stack_segment = usage.ru_isrss * 1024  # TODO: ticks?
            self.vsz = (self.data_segment + self.shared_segment +
                        self.stack_segment)
            self.pagefaults = usage.ru_majflt
            return self.rss != 0

    # Pick the first implementation that yields useful values on this host.
    if _ProcessMemoryInfoProc().update():  # pragma: no branch
        ProcessMemoryInfo = _ProcessMemoryInfoProc
    elif _ProcessMemoryInfoPS().update():  # pragma: no cover
        ProcessMemoryInfo = _ProcessMemoryInfoPS
    elif _ProcessMemoryInfoResource().update():  # pragma: no cover
        ProcessMemoryInfo = _ProcessMemoryInfoResource
except ImportError:
    # No `resource` module -- most likely Windows.
    try:
        # Requires pywin32
        from win32process import GetProcessMemoryInfo
        from win32api import GetCurrentProcess, GlobalMemoryStatusEx
    except ImportError:
        # BUG FIX: logging.warn is a deprecated alias of logging.warning.
        logging.warning("Please install pywin32 when using pympler "
                        "on Windows.")
    else:
        class _ProcessMemoryInfoWin32(_ProcessMemoryInfo):
            def update(self) -> bool:
                process_handle = GetCurrentProcess()
                meminfo = GetProcessMemoryInfo(process_handle)
                memstatus = GlobalMemoryStatusEx()
                # Virtual size = total address space minus what is free.
                self.vsz = (memstatus['TotalVirtual'] -
                            memstatus['AvailVirtual'])
                self.rss = meminfo['WorkingSetSize']
                self.pagefaults = meminfo['PageFaultCount']
                return True

        ProcessMemoryInfo = _ProcessMemoryInfoWin32
class ThreadInfo(object):
    """Collect information about an active thread."""

    def __init__(self, thread: threading.Thread):
        # Snapshot the identifying attributes so this record remains
        # usable even after the thread has terminated.
        self.ident = thread.ident
        self.name = thread.name
        self.daemon = thread.daemon
def get_current_threads() -> Iterable[ThreadInfo]:
    """Return a `ThreadInfo` snapshot for every currently active thread."""
    return list(map(ThreadInfo, threading.enumerate()))
def get_current_thread_id() -> int:
    """Return the thread identifier of the calling thread."""
    ident = threading.get_ident()
    return ident

View File

@@ -0,0 +1,451 @@
"""Tree-like exploration of object referrers.
This module provides a base implementation for tree-like referrers browsing.
The two non-interactive classes ConsoleBrowser and FileBrowser output a tree
to the console or a file. One graphical user interface for referrers browsing
is provided as well. Further types can be subclassed.
All types share a similar initialisation. That is, you provide a root object
and may specify further settings such as the initial depth of the tree or an
output function.
Afterwards you can print the tree which will be arranged based on your previous
settings.
The interactive browser is based on a TreeWidget implemented in IDLE. It is
available only if you have Tcl/Tk installed. If you try to instantiate the
interactive browser without having Tkinter installed, an ImportError will be
raised.
"""
import gc
import inspect
import sys
from pympler import muppy
from pympler import summary
from pympler.util.compat import tkinter
class _Node(object):
"""A node as it is used in the tree structure.
Each node contains the object it represents and a list of children.
Children can be other nodes or arbitrary other objects. Any object
in a tree which is not of the type _Node is considered a leaf.
"""
def __init__(self, o, str_func=None):
"""You have to define the object this node represents. Also you can
define an output function which will be used to represent this node.
If no function is defined, the default str representation is used.
keyword arguments
str_func -- output function
"""
self.o = o
self.children = []
self.str_func = str_func
def __str__(self):
"""Override str(self.o) if str_func is defined."""
if self.str_func is not None:
return self.str_func(self.o)
else:
return str(self.o)
class RefBrowser(object):
    """Base class to other RefBrowser implementations.

    This base class provides means to extract a tree from a given root object
    and holds information on already known objects (to avoid repetition
    if requested).
    """

    def __init__(self, rootobject, maxdepth=3, str_func=summary._repr,
                 repeat=True, stream=None):
        """You have to provide the root object used in the refbrowser.

        keyword arguments
        maxdepth -- maximum depth of the initial tree
        str_func -- function used when calling str(node)
        repeat -- should nodes appear repeatedly in the tree, or should be
                  referred to existing nodes
        stream -- output stream (used in derived classes)
        """
        self.root = rootobject
        self.maxdepth = maxdepth
        self.str_func = str_func
        self.repeat = repeat
        self.stream = stream
        # objects which should be ignored while building the tree
        # e.g. the current frame
        self.ignore = []
        # set of object ids which are already included
        self.already_included = set()
        self.ignore.append(self.already_included)

    def get_tree(self):
        """Get a tree of referrers of the root object."""
        # The current frame references the root through its locals and
        # would otherwise appear as a referrer in the tree.
        self.ignore.append(inspect.currentframe())
        return self._get_tree(self.root, self.maxdepth)

    def _get_tree(self, root, maxdepth):
        """Workhorse of the get_tree implementation.

        This is a recursive method which is why we have a wrapper method.
        root is the current root object of the tree which should be returned.
        Note that root is not of the type _Node.
        maxdepth defines how much further down from the root the tree
        should be built.
        """
        objects = gc.get_referrers(root)
        res = _Node(root, self.str_func)
        self.already_included.add(id(root))
        if maxdepth == 0:
            return res
        # This frame and the referrer list itself reference `root`;
        # ignore both so they do not pollute the tree.
        self.ignore.append(inspect.currentframe())
        self.ignore.append(objects)
        for o in objects:
            # Ignore dict of _Node and RefBrowser objects
            if isinstance(o, dict):
                if any(isinstance(ref, (_Node, RefBrowser))
                       for ref in gc.get_referrers(o)):
                    continue
            _id = id(o)
            if not self.repeat and (_id in self.already_included):
                # Refer to the already expanded node instead of recursing.
                s = self.str_func(o)
                res.children.append("%s (already included, id %s)" %
                                    (s, _id))
                continue
            if (not isinstance(o, _Node)) and (o not in self.ignore):
                res.children.append(self._get_tree(o, maxdepth - 1))
        return res
class StreamBrowser(RefBrowser):
    """RefBrowser implementation which prints the tree to the console.

    If you don't like the looks, you can change it a little bit.
    The class attributes 'hline', 'vline', 'cross', and 'space' can be
    modified to your needs.
    """
    # Characters used to draw the tree structure.
    hline = '-'
    vline = '|'
    cross = '+'
    space = ' '

    def print_tree(self, tree=None):
        """ Print referrers tree to console.

        keyword arguments
        tree -- if not None, the passed tree will be printed. Otherwise it is
                based on the rootobject.
        """
        if tree is None:
            tree = self.get_tree()
        self._print(tree, '', '')

    def _print(self, tree, prefix, carryon):
        """Compute and print a new line of the tree.

        This is a recursive function.

        arguments
        tree -- tree to print
        prefix -- prefix to the current line to print
        carryon -- prefix which is used to carry on the vertical lines
        """
        # The depth is the number of connectors drawn so far in the prefix.
        level = prefix.count(self.cross) + prefix.count(self.vline)
        len_children = 0
        if isinstance(tree, _Node):
            len_children = len(tree.children)

        # add vertex
        prefix += str(tree)
        # and as many spaces as the vertex is long
        carryon += self.space * len(str(tree))
        # A leaf (max depth reached, non-node object, or childless node)
        # terminates the current line.
        if (level == self.maxdepth) or (not isinstance(tree, _Node)) or\
           (len_children == 0):
            self.stream.write(prefix + '\n')
            return
        else:
            # add in between connections
            prefix += self.hline
            carryon += self.space
            # if there is more than one branch, add a cross
            if len(tree.children) > 1:
                prefix += self.cross
                carryon += self.vline
            prefix += self.hline
            carryon += self.space

            if len_children > 0:
                # print the first branch (on the same line)
                self._print(tree.children[0], prefix, carryon)
                for b in range(1, len_children):
                    # the carryon becomes the prefix for all following
                    # children
                    prefix = carryon[:-2] + self.cross + self.hline
                    # remove the vlines for any children of last branch
                    if b == (len_children - 1):
                        carryon = carryon[:-2] + 2 * self.space
                    self._print(tree.children[b], prefix, carryon)
                    # leave a free line before the next branch
                    if b == (len_children - 1):
                        if len(carryon.strip(' ')) == 0:
                            return
                        self.stream.write(carryon[:-2].rstrip() + '\n')
class ConsoleBrowser(StreamBrowser):
    """StreamBrowser variant whose output defaults to stdout."""

    def __init__(self, *args, **kwargs):
        super(ConsoleBrowser, self).__init__(*args, **kwargs)
        # Fall back to stdout when no stream was supplied.
        self.stream = self.stream or sys.stdout
class FileBrowser(StreamBrowser):
    """RefBrowser implementation which prints the tree to a file."""

    def print_tree(self, filename, tree=None):
        """ Print referrers tree to file (in text format).

        keyword arguments
        tree -- if not None, the passed tree will be printed. Otherwise it
                is computed from the root object.
        """
        old_stream = self.stream
        try:
            # Using a context manager guarantees the file is closed even
            # if printing raises; the previous stream is always restored.
            with open(filename, 'w') as fobj:
                self.stream = fobj
                super(FileBrowser, self).print_tree(tree=tree)
        finally:
            self.stream = old_stream
# Code for interactive browser (GUI)
# ==================================
# The interactive browser requires Tkinter which is not always available. To
# avoid an import error when loading the module, we encapsulate most of the
# code in the following try-except-block. The InteractiveBrowser itself
# remains outside this block. If you try to instantiate it without having
# Tkinter installed, the import error will be raised.
try:
if sys.version_info < (3, 5, 2):
from idlelib import TreeWidget as _TreeWidget
else:
from idlelib import tree as _TreeWidget
    class _TreeNode(_TreeWidget.TreeNode):
        """TreeNode used by the InteractiveBrowser.

        Not to be confused with _Node. This one is used in the GUI
        context.
        """

        def reload_referrers(self):
            """Reload all referrers for this _TreeNode."""
            # Recompute a single level of referrers for the wrapped object.
            self.item.node = self.item.reftree._get_tree(self.item.node.o, 1)
            self.item._clear_children()
            self.expand()
            self.update()

        def print_object(self):
            """Print object which this _TreeNode represents to console."""
            print(self.item.node.o)

        def drawtext(self):
            """Override drawtext from _TreeWidget.TreeNode.

            This seems to be a good place to add the popup menu.
            """
            _TreeWidget.TreeNode.drawtext(self)
            # create a menu
            menu = tkinter.Menu(self.canvas, tearoff=0)
            menu.add_command(label="reload referrers",
                             command=self.reload_referrers)
            menu.add_command(label="print", command=self.print_object)
            menu.add_separator()
            menu.add_command(label="expand", command=self.expand)
            menu.add_separator()
            # the popup only disappears when you click on it
            menu.add_command(label="Close Popup Menu")

            def do_popup(event):
                menu.post(event.x_root, event.y_root)

            # Bind the popup menu to the right mouse button.
            self.label.bind("<Button-3>", do_popup)

        # override, i.e. disable the editing of items
        # disable editing of TreeNodes
        def edit(self, event=None):
            pass  # see comment above

        def edit_finish(self, event=None):
            pass  # see comment above

        def edit_cancel(self, event=None):
            pass  # see comment above
    class _ReferrerTreeItem(_TreeWidget.TreeItem, tkinter.Label):
        """Tree item wrapper around _Node object."""

        def __init__(self, parentwindow, node, reftree):  # constr calls
            """You need to provide the parent window, the node this TreeItem
            represents, as well as the tree (_Node) which the node
            belongs to.
            """
            _TreeWidget.TreeItem.__init__(self)
            tkinter.Label.__init__(self, parentwindow)
            self.node = node
            self.parentwindow = parentwindow
            self.reftree = reftree

        def _clear_children(self):
            """Clear children list from any TreeNode instances.

            Normally these objects are not required for memory profiling, as
            they are part of the profiler.
            """
            new_children = []
            for child in self.node.children:
                if not isinstance(child, _TreeNode):
                    new_children.append(child)
            self.node.children = new_children

        def GetText(self):
            # Label shown for this entry in the tree widget.
            return str(self.node)

        def GetIconName(self):
            """Different icon when object cannot be expanded, i.e. has no
            referrers.
            """
            if not self.IsExpandable():
                return "python"

        def IsExpandable(self):
            """An object is expandable when it is a node which has children
            and is a container object.
            """
            if not isinstance(self.node, _Node):
                return False
            else:
                if len(self.node.children) > 0:
                    return True
                else:
                    return muppy._is_containerobject(self.node.o)

        def GetSubList(self):
            """This method is the point where further referrers are computed.

            Thus, the computation is done on-demand and only when needed.
            """
            sublist = []

            children = self.node.children
            if (len(children) == 0) and\
               (muppy._is_containerobject(self.node.o)):
                # Referrers were never computed for this node; compute one
                # level now and drop GUI helper objects from the result.
                self.node = self.reftree._get_tree(self.node.o, 1)
                self._clear_children()
                children = self.node.children

            for child in children:
                item = _ReferrerTreeItem(self.parentwindow, child,
                                         self.reftree)
                sublist.append(item)
            return sublist
except ImportError:
_TreeWidget = None
def gui_default_str_function(o):
    """Default renderer for InteractiveBrowser: summary repr plus id."""
    return '%s(id=%s)' % (summary._repr(o), id(o))
class InteractiveBrowser(RefBrowser):
    """Interactive referrers browser.

    The interactive browser is based on a TreeWidget implemented in IDLE.
    It is available only if you have Tcl/Tk installed. If you try to
    instantiate the interactive browser without having Tkinter installed,
    an ImportError will be raised.
    """

    def __init__(self, rootobject, maxdepth=3,
                 str_func=gui_default_str_function, repeat=True):
        """You have to provide the root object used in the refbrowser.

        keyword arguments
        maxdepth -- maximum depth of the initial tree
        str_func -- function used when calling str(node)
        repeat -- should nodes appear repeatedly in the tree, or should be
                  referred to existing nodes
        """
        if tkinter is None:
            raise ImportError(
                "InteractiveBrowser requires Tkinter to be installed.")
        RefBrowser.__init__(self, rootobject, maxdepth, str_func, repeat)

    def main(self, standalone=False):
        """Create interactive browser window.

        keyword arguments
        standalone -- Set to true, if the browser is not attached to other
                      windows
        """
        window = tkinter.Tk()
        sc = _TreeWidget.ScrolledCanvas(window, bg="white",
                                        highlightthickness=0, takefocus=1)
        sc.frame.pack(expand=1, fill="both")
        item = _ReferrerTreeItem(window, self.get_tree(), self)
        node = _TreeNode(sc.canvas, None, item)
        node.expand()
        if standalone:
            # Only run the Tk event loop when we own the application.
            window.mainloop()
# Demo fixture: three lists that all refer to the same root string; used
# by the sample functions below to produce a non-trivial referrer tree.
superlist = []
root = "root"
for i in range(3):
    tmp = [root]
    superlist.append(tmp)
def foo(o):
    """Render only the type of *o* (demo str_func for the samples)."""
    return '%s' % type(o)
def print_sample():
    """Demo: print the referrer tree of `root` to stdout."""
    ConsoleBrowser(root, str_func=foo).print_tree()
def write_sample():
    """Demo: write the referrer tree of `root` to 'sample.txt'."""
    browser = FileBrowser(root, str_func=foo)
    browser.print_tree('sample.txt')
if __name__ == "__main__":
    # Demo entry point: dump a referrer tree for `root` into sample.txt.
    write_sample()

View File

@@ -0,0 +1,350 @@
"""
This module exposes utilities to illustrate objects and their references as
(directed) graphs. The current implementation requires 'graphviz' to be
installed.
"""
from pympler.asizeof import Asizer, named_refs
from pympler.util.stringutils import safe_repr, trunc
from gc import get_referents
from subprocess import Popen, PIPE
from copy import copy
from sys import platform
__all__ = ['ReferenceGraph']
# Popen might lead to deadlocks when file descriptors are leaked to
# sub-processes on Linux. On Windows, however, close_fds=True leads to
# ValueError if stdin/stdout/stderr is piped:
# http://code.google.com/p/pympler/issues/detail?id=28#c1
popen_flags = {}
if platform not in ['win32']: # pragma: no branch
popen_flags['close_fds'] = True
class _MetaObject(object):
"""
The _MetaObject stores meta-information, like a string representation,
corresponding to each object passed to a ReferenceGraph.
"""
__slots__ = ('size', 'id', 'type', 'str', 'group', 'cycle')
def __init__(self):
self.cycle = False
class _Edge(object):
"""
Describes a reference from one object `src` to another object `dst`.
"""
__slots__ = ('src', 'dst', 'label', 'group')
def __init__(self, src, dst, label):
self.src = src
self.dst = dst
self.label = label
self.group = None
def __repr__(self):
return "<%08x => %08x, '%s', %s>" % (self.src, self.dst, self.label,
self.group)
def __hash__(self):
return (self.src, self.dst, self.label).__hash__()
def __eq__(self, other):
return self.__hash__() == other.__hash__()
class ReferenceGraph(object):
    """
    The ReferenceGraph illustrates the references between a collection of
    objects by rendering a directed graph. That requires that 'graphviz' is
    installed.

    >>> from pympler.refgraph import ReferenceGraph
    >>> a = 42
    >>> b = 'spam'
    >>> c = {a: b}
    >>> gb = ReferenceGraph([a,b,c])
    >>> gb.render('spam.eps')
    True
    """
    def __init__(self, objects, reduce=False):
        """
        Initialize the ReferenceGraph with a collection of `objects`.

        If `reduce` is True the object set is immediately reduced to the
        objects which take part in reference cycles.
        """
        self.objects = list(objects)
        self.count = len(self.objects)
        # Unknown until a cycle reduction has been performed.
        self.num_in_cycles = 'N/A'
        self.edges = None

        if reduce:
            self.num_in_cycles = self._reduce_to_cycles()
            # This graph already is its own reduced graph.
            self._reduced = self  # TODO: weakref?
        else:
            self._reduced = None

        self._get_edges()
        self._annotate_objects()
def _eliminate_leafs(self, graph):
"""
Eliminate leaf objects - that are objects not referencing any other
objects in the list `graph`. Returns the list of objects without the
objects identified as leafs.
"""
result = []
idset = set([id(x) for x in graph])
for n in graph:
refset = set([id(x) for x in get_referents(n)])
if refset.intersection(idset):
result.append(n)
return result
def _reduce_to_cycles(self):
"""
Iteratively eliminate leafs to reduce the set of objects to only those
that build cycles. Return the number of objects involved in reference
cycles. If there are no cycles, `self.objects` will be an empty list
and this method returns 0.
"""
cycles = self.objects[:]
cnt = 0
while cnt != len(cycles):
cnt = len(cycles)
cycles = self._eliminate_leafs(cycles)
self.objects = cycles
return len(self.objects)
    def reduce_to_cycles(self):
        """
        Iteratively eliminate leafs to reduce the set of objects to only
        those that build cycles. Return the reduced graph. If there are no
        cycles, None is returned.
        """
        if not self._reduced:
            # Work on a shallow copy so this graph keeps its full object
            # set; only the copy is reduced.
            reduced = copy(self)
            reduced.objects = self.objects[:]
            reduced.metadata = []
            reduced.edges = []
            self.num_in_cycles = reduced._reduce_to_cycles()
            reduced.num_in_cycles = self.num_in_cycles
            if self.num_in_cycles:
                reduced._get_edges()
                reduced._annotate_objects()
                # Every object that survived the reduction is part of
                # a cycle by construction.
                for meta in reduced.metadata:
                    meta.cycle = True
            else:
                reduced = None
            # Cache the result so repeated calls do not recompute it.
            self._reduced = reduced
        return self._reduced
    def _get_edges(self):
        """
        Compute the edges for the reference graph.

        The function returns a set of tuples (id(a), id(b), ref) if a
        references b with the referent 'ref'.
        """
        idset = set([id(x) for x in self.objects])
        self.edges = set([])
        for n in self.objects:
            refset = set([id(x) for x in get_referents(n)])
            for ref in refset.intersection(idset):
                # Try to label the edge with the key/attribute through
                # which `n` holds the reference.
                label = ''
                members = None
                if isinstance(n, dict):
                    members = n.items()
                if not members:
                    members = named_refs(n)
                for (k, v) in members:
                    if id(v) == ref:
                        label = k
                        break
                self.edges.add(_Edge(id(n), ref, label))
    def _annotate_groups(self):
        """
        Annotate the objects belonging to separate (non-connected) graphs
        with individual indices.
        """
        # Index metadata records by object id for fast neighbour lookup.
        g = {}
        for x in self.metadata:
            g[x.id] = x

        idx = 0
        for x in self.metadata:
            if not hasattr(x, 'group'):
                x.group = idx
                idx += 1
            # Collect all objects directly connected to `x` (in either
            # direction) and pull them into the smaller group index.
            neighbors = set()
            for e in self.edges:
                if e.src == x.id:
                    neighbors.add(e.dst)
                if e.dst == x.id:
                    neighbors.add(e.src)
            for nb in neighbors:
                g[nb].group = min(x.group, getattr(g[nb], 'group', idx))

        # Assign the edges to the respective groups. Both "ends" of the edge
        # should share the same group so just use the first object's group.
        for e in self.edges:
            e.group = g[e.src].group

        self._max_group = idx
def _filter_group(self, group):
"""
Eliminate all objects but those which belong to `group`.
``self.objects``, ``self.metadata`` and ``self.edges`` are modified.
Returns `True` if the group is non-empty. Otherwise returns `False`.
"""
self.metadata = [x for x in self.metadata if x.group == group]
group_set = set([x.id for x in self.metadata])
self.objects = [obj for obj in self.objects if id(obj) in group_set]
self.count = len(self.metadata)
if self.metadata == []:
return False
self.edges = [e for e in self.edges if e.group == group]
del self._max_group
return True
    def split(self):
        """
        Split the graph into sub-graphs. Only connected objects belong to the
        same graph. `split` yields copies of the Graph object. Shallow copies
        are used that only replicate the meta-information, but share the same
        object list ``self.objects``.

        >>> from pympler.refgraph import ReferenceGraph
        >>> a = 42
        >>> b = 'spam'
        >>> c = {a: b}
        >>> t = (1,2,3)
        >>> rg = ReferenceGraph([a,b,c,t])
        >>> for subgraph in rg.split():
        ...   print (subgraph.index)
        0
        1
        """
        self._annotate_groups()
        index = 0

        for group in range(self._max_group):
            subgraph = copy(self)
            # Give each sub-graph its own metadata/edge containers;
            # the object list itself stays shared with `self`.
            subgraph.metadata = self.metadata[:]
            subgraph.edges = self.edges.copy()
            if subgraph._filter_group(group):
                subgraph.total_size = sum([x.size for x in subgraph.metadata])
                # Only non-empty groups get an index and are yielded.
                subgraph.index = index
                index += 1
                yield subgraph
def split_and_sort(self):
"""
Split the graphs into sub graphs and return a list of all graphs sorted
by the number of nodes. The graph with most nodes is returned first.
"""
graphs = list(self.split())
graphs.sort(key=lambda x: -len(x.metadata))
for index, graph in enumerate(graphs):
graph.index = index
return graphs
    def _annotate_objects(self):
        """
        Extract meta-data describing the stored objects.
        """
        self.metadata = []
        sizer = Asizer()
        sizes = sizer.asizesof(*self.objects)
        self.total_size = sizer.total
        for obj, sz in zip(self.objects, sizes):
            md = _MetaObject()
            md.size = sz
            md.id = id(obj)
            try:
                md.type = obj.__class__.__name__
            except (AttributeError, ReferenceError):  # pragma: no cover
                # Fall back for objects without __class__ or dead weakref
                # proxies.
                md.type = type(obj).__name__
            # Clip the representation so labels stay readable.
            md.str = safe_repr(obj, clip=128)
            self.metadata.append(md)
def _get_graphviz_data(self):
"""
Emit a graph representing the connections between the objects described
within the metadata list. The text representation can be transformed to
a graph with graphviz. Returns a string.
"""
s = []
header = '// Process this file with graphviz\n'
s.append(header)
s.append('digraph G {\n')
s.append(' node [shape=box];\n')
for md in self.metadata:
label = trunc(md.str, 48).replace('"', "'")
extra = ''
if md.type == 'instancemethod':
extra = ', color=red'
elif md.type == 'frame':
extra = ', color=orange'
s.append(' "X%s" [ label = "%s\\n%s" %s ];\n' %
(hex(md.id)[1:], label, md.type, extra))
for e in self.edges:
extra = ''
if e.label == '__dict__':
extra = ',weight=100'
s.append(' X%s -> X%s [label="%s"%s];\n' %
(hex(e.src)[1:], hex(e.dst)[1:], e.label, extra))
s.append('}\n')
return "".join(s)
    def render(self, filename, cmd='dot', format='ps', unflatten=False):
        """
        Render the graph to `filename` using graphviz. The graphviz
        invocation command may be overridden by specifying `cmd`. The
        `format` may be any specifier recognized by the graph renderer
        ('-Txxx' command). The graph can be preprocessed by the *unflatten*
        tool if the `unflatten` parameter is True. If there are no objects
        to illustrate, the method does not invoke graphviz and returns
        False. If the renderer returns successfully (return code 0), True
        is returned.

        An `OSError` is raised if the graphviz tool cannot be found.
        """
        if self.objects == []:
            return False

        data = self._get_graphviz_data()

        # Cosmetic defaults handed to the renderer.
        options = ('-Nfontsize=10',
                   '-Efontsize=10',
                   '-Nstyle=filled',
                   '-Nfillcolor=#E5EDB8',
                   '-Ncolor=#CCCCCC')
        cmdline = (cmd, '-T%s' % format, '-o', filename) + options

        if unflatten:
            # Pipe the dot data through `unflatten` before rendering.
            p1 = Popen(('unflatten', '-l7'), stdin=PIPE, stdout=PIPE,
                       **popen_flags)
            p2 = Popen(cmdline, stdin=p1.stdout, **popen_flags)
            p1.communicate(data.encode())
            p2.communicate()
            return p2.returncode == 0
        else:
            p = Popen(cmdline, stdin=PIPE, **popen_flags)
            p.communicate(data.encode())
            return p.returncode == 0
def write_graph(self, filename):
"""
Write raw graph data which can be post-processed using graphviz.
"""
f = open(filename, 'w')
f.write(self._get_graphviz_data())
f.close()

File diff suppressed because one or more lines are too long

View File

@@ -0,0 +1,321 @@
"""A collection of functions to summarize object information.
This module provides several function which will help you to analyze object
information which was gathered. Often it is sufficient to work with aggregated
data instead of handling the entire set of existing objects. For example, a
memory leak can often be identified simply by the number and size of existing objects.
A summary contains information about objects in a table-like manner.
Technically, it is a list of lists. Each of these lists represents a row,
whereas the first column reflects the object type, the second column the number
of objects, and the third column the size of all these objects. This allows a
simple table-like output like the following:
============= ============ =============
types # objects total size
============= ============ =============
<type 'dict'> 2 560
<type 'str'> 3 126
<type 'int'> 4 96
<type 'long'> 2 66
<type 'list'> 1 40
============= ============ =============
Another advantage of summaries is that they influence the system you analyze
only to a minimum. Working with references to existing objects will keep these
objects alive. Most of the time this is not the desired behavior (as it will have
an impact on the observations). Using summaries reduces this effect greatly.
output representation
---------------------
The output representation of types is defined in summary.representations.
Every type defined in this dictionary will be represented as specified. Each
definition has a list of different representations. The later a representation
appears in this list, the higher its verbosity level. From types which are not
defined in summary.representations the default str() representation will be
used.
Per default, summaries will use the verbosity level 1 for any encountered type.
The reason is that several computations are done with summaries and rows have
to remain comparable. Therefore information which reflect an objects state,
e.g. the current line number of a frame, should not be included. You may add
more detailed information at higher verbosity levels than 1.
"""
import re
import sys
import types
from pympler.util import stringutils
from sys import getsizeof
# Maps a type to a list of renderer callables ordered by increasing
# verbosity; populated once by _init_representations() below.
representations = {}


def _init_representations():
    """Populate the type -> [renderers] table used by `_repr`."""
    global representations
    # NOTE(review): types.ClassType / types.InstanceType and the im_func /
    # im_class attributes exist only on Python 2, so this is a Python-2-only
    # branch. The threshold 0x2040000 (2.4.0) looks suspicious -- it disables
    # the branch on any Python >= 2.4; upstream uses 0x3000000. Harmless on
    # Python 3 either way. TODO confirm against upstream.
    if sys.hexversion < 0x2040000:
        classobj = [
            lambda c: "classobj(%s)" % repr(c),
        ]
        representations[types.ClassType] = classobj
        instance = [
            lambda f: "instance(%s)" % repr(f.__class__),
        ]
        representations[types.InstanceType] = instance
        instancemethod = [
            lambda i: "instancemethod (%s)" % (repr(i.im_func)),
            lambda i: "instancemethod (%s, %s)" % (repr(i.im_class),
                                                   repr(i.im_func)),
        ]
        representations[types.MethodType] = instancemethod
    frame = [
        lambda f: "frame (codename: %s)" % (f.f_code.co_name),
        lambda f: "frame (codename: %s, codeline: %s)" %
        (f.f_code.co_name, f.f_code.co_firstlineno),
        lambda f: "frame (codename: %s, filename: %s, codeline: %s)" %
        (f.f_code.co_name, f.f_code.co_filename,
         f.f_code.co_firstlineno)
    ]
    representations[types.FrameType] = frame
    _dict = [
        lambda d: str(type(d)),
        lambda d: "dict, len=%s" % len(d),
    ]
    representations[dict] = _dict
    function = [
        lambda f: "function (%s)" % f.__name__,
        lambda f: "function (%s.%s)" % (f.__module__, f.__name__),
    ]
    representations[types.FunctionType] = function
    _list = [
        lambda l: str(type(l)),
        lambda l: "list, len=%s" % len(l)
    ]
    representations[list] = _list
    module = [lambda m: "module(%s)" % getattr(
        m, '__name__', getattr(m, '__file__', 'nameless, id: %d' % id(m))
    )]
    representations[types.ModuleType] = module
    _set = [
        lambda s: str(type(s)),
        lambda s: "set, len=%s" % len(s)
    ]
    representations[set] = _set


_init_representations()
def summarize(objects):
    """Summarize an objects list.

    Return a list of lists, whereas each row consists of::
      [str(type), number of objects of this type, total size of these
      objects].

    No guarantee regarding the order is given.
    """
    # Aggregate (count, total size) per representation key.
    stats = {}
    for obj in objects:
        key = _repr(obj)
        num, size = stats.get(key, (0, 0))
        stats[key] = (num + 1, size + getsizeof(obj))
    return [[key, num, size] for key, (num, size) in stats.items()]
def get_diff(left, right):
    """Get the difference of two summaries.

    Subtracts the values of the right summary from the values of the left
    summary.
    If similar rows appear on both sides, they are included in the result
    with 0 for number of elements and total size.
    If the number of elements of a row of the diff is 0, but the total size
    is not, it means that objects likely have changed, but not their number,
    thus resulting in a changed size.
    """
    left_by_key = {row[0]: row for row in left}
    right_by_key = {row[0]: row for row in right}
    diff = []
    for key in set(left_by_key) | set(right_by_key):
        lrow = left_by_key.get(key)
        rrow = right_by_key.get(key)
        if lrow and rrow:
            diff.append([key, rrow[1] - lrow[1], rrow[2] - lrow[2]])
        elif rrow:
            # Present only on the right: report as-is.
            diff.append(rrow)
        elif lrow:
            # Present only on the left: report negated values.
            diff.append([key, -lrow[1], -lrow[2]])
    return diff
def format_(rows, limit=15, sort='size', order='descending'):
    """Format the rows as a summary.

    Keyword arguments:
    limit -- the maximum number of elements to be listed
    sort -- sort elements by 'size', 'type', or '#'
    order -- sort 'ascending' or 'descending'
    """
    # Work on copies so the caller's rows are not mutated.
    localrows = [list(row) for row in rows]
    # input validation
    sortby = ['type', '#', 'size']
    if sort not in sortby:
        raise ValueError("invalid sort, should be one of" + str(sortby))
    orders = ['ascending', 'descending']
    if order not in orders:
        raise ValueError("invalid order, should be one of" + str(orders))
    # sort rows
    reverse = (order == 'descending')
    if sort == 'type':
        # The first column already holds the string representation of the
        # type (see `summarize`), so sort by it directly. The previous
        # implementation passed it through `_repr` again, which mapped
        # every row to the constant key "str" and made sorting by type
        # a no-op.
        localrows.sort(key=lambda row: str(row[0]), reverse=reverse)
    else:
        column = sortby.index(sort)
        localrows.sort(key=lambda row: row[column], reverse=reverse)
    # limit rows
    localrows = localrows[0:limit]
    # Pretty-print the size column.
    for row in localrows:
        row[2] = stringutils.pp(row[2])
    # prepend the table header
    localrows.insert(0, ["types", "# objects", "total size"])
    return _format_table(localrows)
def _format_table(rows, header=True):
"""Format a list of lists as a pretty table.
Keyword arguments:
header -- if True the first row is treated as a table header
inspired by http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/267662
"""
border = "="
# vertical delimiter
vdelim = " | "
# padding nr. of spaces are left around the longest element in the
# column
padding = 1
# may be left,center,right
justify = 'right'
justify = {'left': str.ljust,
'center': str.center,
'right': str.rjust}[justify.lower()]
# calculate column widths (longest item in each col
# plus "padding" nr of spaces on both sides)
cols = zip(*rows)
colWidths = [max([len(str(item)) + 2 * padding for item in col])
for col in cols]
borderline = vdelim.join([w * border for w in colWidths])
for row in rows:
yield vdelim.join([justify(str(item), width)
for (item, width) in zip(row, colWidths)])
if header:
yield borderline
header = False
def print_(rows, limit=15, sort='size', order='descending'):
    """Print the rows as a summary.

    Keyword arguments:
    limit -- the maximum number of elements to be listed
    sort -- sort elements by 'size', 'type', or '#'
    order -- sort 'ascending' or 'descending'
    """
    lines = format_(rows, limit=limit, sort=sort, order=order)
    for line in lines:
        print(line)
# regular expressions used by _repr to replace default type representations
# matches e.g. "<class 'dict'>" and captures the bare type name ("dict")
type_repr = re.compile(r"^<(type|class) '(\S+)'>$")
# matches the " at 0x..." address suffix of default object reprs
address = re.compile(r' at 0x[0-9a-f]+')
def _repr(o, verbosity=1):
    """Get meaning object representation.

    This function should be used when the simple str(o) output would result
    in too general data. E.g. "<type 'instance'" is less meaningful than
    "instance: Foo".

    Keyword arguments:
    verbosity -- level of detail: 0 always returns str(type(o)); higher
        values select increasingly verbose entries from `representations`,
        clamped to the most detailed one available for the type
    """
    res = ""
    t = type(o)
    if (verbosity == 0) or (t not in representations):
        res = str(t)
    else:
        # verbosity level 1 maps to the first (least verbose) renderer
        verbosity -= 1
        if len(representations[t]) <= verbosity:
            # Clamp to the most detailed representation available.
            verbosity = len(representations[t]) - 1
        res = representations[t][verbosity](o)
    # Strip memory addresses and the "<type/class '...'>" wrapper so
    # rows from different snapshots remain comparable.
    res = address.sub('', res)
    res = type_repr.sub(r'\2', res)
    return res
def _traverse(summary, function, *args):
"""Traverse all objects of a summary and call function with each as a
parameter.
Using this function, the following objects will be traversed:
- the summary
- each row
- each item of a row
"""
function(summary, *args)
for row in summary:
function(row, *args)
for item in row:
function(item, *args)
def _subtract(summary, o):
    """Remove object o from the summary by subtracting its size.

    Every row matching the representation of `o` is decremented; when no
    row matches, a new row with negative values is appended.
    """
    key = _repr(o)
    size = getsizeof(o)
    matched = False
    for row in summary:
        if row[0] == key:
            row[1] -= 1
            row[2] -= size
            matched = True
    if not matched:
        summary.append([key, -1, -size])
    return summary
def _sweep(summary):
"""Remove all rows in which the total size and the total number of
objects is zero.
"""
return [row for row in summary if ((row[2] != 0) or (row[1] != 0))]

View File

@@ -0,0 +1,9 @@
%for ref in referents:
<div class="referents">
<span class="local_name">{{ref.name}}</span>
<span class="local_size">{{ref.size}}</span>
%if ref.refs:
%include('asized_referents', referents=ref.refs)
%end
</div>
%end

View File

@@ -0,0 +1,6 @@
</div>
</div>
</div>
</div>
</body>
</html>

View File

@@ -0,0 +1,33 @@
%include('header', category='Garbage', title='Garbage')
<h1>Garbage - Cycle {{index}}</h1>
<table class="tdata" width="100%">
<thead>
<tr>
<th>id</th>
<th class="num">size</th>
<th>type</th>
<th>representation</th>
</tr>
</thead>
<tbody>
%for o in objects:
<tr>
<td>{{'0x%08x' % o.id}}</td>
<td class="num">{{o.size}}</td>
<td>{{o.type}}</td>
<td>{{o.str}}</td>
</tr>
%end
</tbody>
</table>
<h2>Reference graph</h2>
<img src="/garbage/graph/{{index}}"/>
<h2>Reduced reference graph (cycles only)</h2>
<img src="/garbage/graph/{{index}}?reduce=1"/>
%include('footer')

View File

@@ -0,0 +1,41 @@
%include('header', category='Garbage', title='Garbage')
<h1>Garbage - Overview</h1>
<p>This page gives an overview of all objects that would have been
deleted if those weren't holding circular references to each other
(e.g. in a doubly linked list).</p>
%if len(graphs):
<p>Click on the reference graph titles below to show the objects
contained in the respective cycle. If you have <a
href="http://www.graphviz.org">graphviz</a> installed, you will
also see a visualisation of the reference cycle.</p>
<p>{{len(graphs)}} reference cycles:</p>
<table class="tdata">
<thead>
<tr>
<th>Reference graph</th>
<th># objects</th>
<th># cycle objects</th>
<th>Total size</th>
</tr>
</thead>
<tbody>
%for graph in graphs:
<tr>
<td><a href="/garbage/{{graph.index}}">Cycle {{graph.index}}</a></td>
<td class="num">{{len(graph.metadata)}}</td>
<td class="num">{{graph.num_in_cycles}}</td>
<td class="num">{{graph.total_size}}</td>
</tr>
%end
</tbody>
</table>
%else:
<p>No reference cycles detected.</p>
%end
%include('footer')

View File

@@ -0,0 +1,36 @@
<html>
<head>
<title>Pympler - {{title}}</title>
<link rel="stylesheet" type="text/css" href="/static/style.css">
<script src="http://code.jquery.com/jquery-1.10.1.min.js" type="text/javascript"></script>
</head>
%navbar = [
% ("overview", "/", ""),
% ("|", "", ""),
% ("process", "/process", ""),
% ("|", "", ""),
% ("tracked objects", "/tracker", ""),
% ("|", "", ""),
% ("garbage", "/garbage", ""),
% ("help", "/help", "right"),]
<body>
<div class="related">
<ul>
%for link, href, cls in navbar:
<li class="{{cls}}">
%if bool(href):
<a href="{{href}}"><span>{{link}}</span></a>
%else:
<span>{{link}}</span>
%end
</li>
%end
</ul>
</div>
<div class="document">
<div class="documentwrapper">
<div class="bodywrapper">
<div class="body">

View File

@@ -0,0 +1,26 @@
%include('header', category='Overview', title='Overview')
%from pympler.util.stringutils import pp
<h1>Python application memory profile</h1>
<h2>Process overview</h2>
<table class="tdata">
<tbody>
<tr>
<th>Virtual size:</th>
<td class="num">{{pp(processinfo.vsz)}}</td>
</tr>
<tr>
<th>Physical memory size:</th>
<td class="num">{{pp(processinfo.rss)}}</td>
</tr>
<tr>
<th>Major pagefaults:</th>
<td class="num">{{processinfo.pagefaults}}</td>
</tr>
</tbody>
</table>
%include('footer')

File diff suppressed because one or more lines are too long

View File

@@ -0,0 +1,7 @@
/* Javascript plotting library for jQuery, version 0.8.3.
Copyright (c) 2007-2014 IOLA and Ole Laursen.
Licensed under the MIT license.
*/
(function($){var options={series:{stack:null}};function init(plot){function findMatchingSeries(s,allseries){var res=null;for(var i=0;i<allseries.length;++i){if(s==allseries[i])break;if(allseries[i].stack==s.stack)res=allseries[i]}return res}function stackData(plot,s,datapoints){if(s.stack==null||s.stack===false)return;var other=findMatchingSeries(s,plot.getData());if(!other)return;var ps=datapoints.pointsize,points=datapoints.points,otherps=other.datapoints.pointsize,otherpoints=other.datapoints.points,newpoints=[],px,py,intery,qx,qy,bottom,withlines=s.lines.show,horizontal=s.bars.horizontal,withbottom=ps>2&&(horizontal?datapoints.format[2].x:datapoints.format[2].y),withsteps=withlines&&s.lines.steps,fromgap=true,keyOffset=horizontal?1:0,accumulateOffset=horizontal?0:1,i=0,j=0,l,m;while(true){if(i>=points.length)break;l=newpoints.length;if(points[i]==null){for(m=0;m<ps;++m)newpoints.push(points[i+m]);i+=ps}else if(j>=otherpoints.length){if(!withlines){for(m=0;m<ps;++m)newpoints.push(points[i+m])}i+=ps}else if(otherpoints[j]==null){for(m=0;m<ps;++m)newpoints.push(null);fromgap=true;j+=otherps}else{px=points[i+keyOffset];py=points[i+accumulateOffset];qx=otherpoints[j+keyOffset];qy=otherpoints[j+accumulateOffset];bottom=0;if(px==qx){for(m=0;m<ps;++m)newpoints.push(points[i+m]);newpoints[l+accumulateOffset]+=qy;bottom=qy;i+=ps;j+=otherps}else 
if(px>qx){if(withlines&&i>0&&points[i-ps]!=null){intery=py+(points[i-ps+accumulateOffset]-py)*(qx-px)/(points[i-ps+keyOffset]-px);newpoints.push(qx);newpoints.push(intery+qy);for(m=2;m<ps;++m)newpoints.push(points[i+m]);bottom=qy}j+=otherps}else{if(fromgap&&withlines){i+=ps;continue}for(m=0;m<ps;++m)newpoints.push(points[i+m]);if(withlines&&j>0&&otherpoints[j-otherps]!=null)bottom=qy+(otherpoints[j-otherps+accumulateOffset]-qy)*(px-qx)/(otherpoints[j-otherps+keyOffset]-qx);newpoints[l+accumulateOffset]+=bottom;i+=ps}fromgap=false;if(l!=newpoints.length&&withbottom)newpoints[l+2]+=bottom}if(withsteps&&l!=newpoints.length&&l>0&&newpoints[l]!=null&&newpoints[l]!=newpoints[l-ps]&&newpoints[l+1]!=newpoints[l-ps+1]){for(m=0;m<ps;++m)newpoints[l+ps+m]=newpoints[l+m];newpoints[l+1]=newpoints[l-ps+1]}}datapoints.points=newpoints}plot.hooks.processDatapoints.push(stackData)}$.plot.plugins.push({init:init,options:options,name:"stack",version:"1.2"})})(jQuery);

File diff suppressed because one or more lines are too long

View File

@@ -0,0 +1,58 @@
{% load i18n %}{% load static %}
<script type="text/javascript" src="{% static 'jquery.sparkline.min.js' %}"></script>
<table>
<colgroup>
<col style="width:20%"/>
<col/>
</colgroup>
<thead>
<tr>
<th>{% trans "Resource" %}</th>
<th>{% trans "Value" %}</th>
</tr>
</thead>
<tbody>
{% for key, value in rows %}
<tr class="{% cycle 'djDebugOdd' 'djDebugEven' %}">
<td>{{ key|escape }}</td>
<td>{{ value|escape }}</td>
</tr>
{% endfor %}
</tbody>
</table>
<table>
<colgroup>
<col style="width:20%"/>
<col/>
<col/>
</colgroup>
<thead>
<tr>
<th>{% trans "Class" %}</th>
<th>{% trans "Number of instances" %} <a class="show_sparkline" href="#">Show sparklines</a></th>
<th>{% trans "Total size" %}</th>
</tr>
</thead>
<tbody>
{% for cls, history, size in classes %}
<tr class="{% cycle 'djDebugOdd' 'djDebugEven' %}">
<td>{{ cls|escape }}</td>
<td id="{{ cls|escape|cut:'.' }}_history" values="{{ history|join:',' }}">
{{ history|safeseq|join:', ' }}
</td>
<td>{{ size|escape }}</td>
</tr>
{% endfor %}
</tbody>
</table>
<script type="text/javascript">
(function ($) {
window.jQuery = $; // for jquery.sparkline
$("#MemoryPanel .show_sparkline").on('click', function() {
{% for cls, _, _ in classes %}
$("#{{ cls|escape|cut:'.' }}_history").sparkline('html', {width: '200px'});
{% endfor %}
});
})(jQuery || djdt.jQuery);
</script>

View File

@@ -0,0 +1,89 @@
%include('header', category='Process', title='Process Information')
%from pympler.util.stringutils import pp
<h1>Process information</h1>
<table class="tdata">
<tbody>
<tr>
<th>Virtual size:</th>
<td class="num">{{pp(info.vsz)}}</td>
</tr>
<tr>
<th>Physical memory size:</th>
<td class="num">{{pp(info.rss)}}</td>
</tr>
<tr>
<th>Major pagefaults:</th>
<td class="num">{{info.pagefaults}}</td>
</tr>
%for key, value in info.os_specific:
<tr>
<th>{{key}}:</th>
<td class="num">{{value}}</td>
</tr>
%end
</tbody>
</table>
<h2>Thread information</h2>
<table class="tdata">
<tbody>
<tr>
<th>ID</th>
<th>Name</th>
<th>Daemon</th>
</tr>
%for tinfo in threads:
<tr>
<td>{{tinfo.ident}}</td>
<td>{{tinfo.name}}</td>
<td>{{tinfo.daemon}}</td>
</tr>
%end
</tbody>
</table>
<h2>Thread stacks</h2>
<div class="stacks">
%for tinfo in threads:
<div class="stacktrace" id="{{tinfo.ident}}">
<a class="show_traceback" href="#">Traceback for thread {{tinfo.name}}</a>
</div>
%end
</div>
<script type="text/javascript">
$(".show_traceback").click(function() {
var tid = $(this).parent().attr("id");
$.get("/traceback/"+tid, function(data) {
$("#"+tid).replaceWith(data);
});
return false;
});
$(".stacks").delegate(".expand_local", "click", function() {
var oid = $(this).attr("id");
$.get("/objects/"+oid, function(data) {
$("#"+oid).replaceWith(data);
});
return false;
});
$(".stacks").delegate(".expand_ref", "click", function() {
var node_id = $(this).attr("id");
var oid = node_id.split("_")[0];
$.get("/objects/"+oid, function(data) {
$("#children_"+node_id).append(data);
});
$(this).removeClass("expand_ref").addClass("toggle_ref");
return false;
});
$(".stacks").delegate(".toggle_ref", "click", function() {
var node_id = $(this).attr("id");
$("#children_"+node_id).toggle();
return false;
});
</script>
%include('footer')

View File

@@ -0,0 +1,13 @@
%from random import randint
%for name, (ref, type_name, obj_repr, size) in referents.items():
%ref = "%s_%s" % (ref, randint(0, 65535))
<div class="referents">
<a class="expand_ref" id="{{ref}}" href="#">
<span class="local_name">{{name}}</span>
<span class="local_type">{{type_name}}</span>
<span class="local_size">{{size}}</span>
<span class="local_value">{{obj_repr}}</span>
</a>
<span id="children_{{ref}}"/>
</div>
%end

View File

@@ -0,0 +1,30 @@
<div class="stacktrace">
    <strong>Stacktrace for thread {{threadid}}</strong>
    %for frame in stack:
    <div class="stackframe">
        <span class="filename">{{frame[1]}}</span>
        <span class="lineno">{{frame[2]}}</span>
        <span class="function">{{frame[3]}}</span>
        %if frame[4]:
        %context = frame[4]
        <div class="context">
        % # The context is centered on the executing line.  Use floor
        % # division: len(context) / 2 yields a float on Python 3, so for
        % # odd-length contexts "idx == highlight" would never be true and
        % # no line would ever be highlighted.
        %highlight = len(context) // 2
        %for idx, line in enumerate(context):
        %hl = (idx == highlight) and "highlighted" or ""
        %if line.strip():
            <div class="{{hl}}" style="padding-left:{{len(line)-len(line.lstrip())}}em" width="100%">
            {{line.strip()}}
            </div>
        %end
        %end
        </div>
        %end
        <div class="local">
            <a class="expand_local" id="{{frame[0]}}" href="#">Show locals</a>
        </div>
    </div>
    %end
    %if not stack:
    Cannot retrieve stacktrace for thread {{threadid}}.
    %end
</div>

View File

@@ -0,0 +1,992 @@
/**
* Sphinx Doc Design
*/
body {
font-family: "Verdana", "Tahoma", Sans-Serif;
font-size: 90%;
background-color: #11303d;
color: #000;
margin: 0;
padding: 0;
}
/* :::: LAYOUT :::: */
div.document {
background-color: #158906;
}
div.documentwrapper {
float: left;
width: 100%;
}
div.bodywrapper {
margin: 0 0 0 0;
}
div.body {
background-color: white;
padding: 0 20px 16px 20px;
}
div.sphinxsidebarwrapper {
padding: 10px 5px 0 10px;
}
div.sphinxsidebar {
display: none;
}
div.sphinxsidebar {
float: right;
width: 230px;
margin-left: -100%;
font-size: 90%;
}
div.clearer {
clear: both;
}
div.footer {
color: #fff;
width: 100%;
padding: 9px 0 9px 0;
text-align: center;
font-size: 75%;
}
div.footer a {
color: #fff;
text-decoration: underline;
}
div.related {
background-color: #5A3D31;
color: #fff;
width: 100%;
height: 30px;
line-height: 30px;
font-size: 90%;
}
div.related h3 {
display: none;
}
div.related ul {
margin: 0;
padding: 0 0 0 10px;
list-style: none;
}
div.related li {
display: inline;
}
div.related li.right {
float: right;
margin-right: 5px;
}
div.related a {
color: white;
}
/* ::: TOC :::: */
div.sphinxsidebar h3 {
color: white;
font-size: 1.4em;
font-weight: normal;
margin: 0;
padding: 0;
}
div.sphinxsidebar h4 {
color: white;
font-size: 1.3em;
font-weight: normal;
margin: 5px 0 0 0;
padding: 0;
}
div.sphinxsidebar p {
color: white;
}
div.sphinxsidebar p.topless {
margin: 5px 10px 10px 10px;
}
div.sphinxsidebar ul {
margin: 10px;
padding: 0;
list-style: none;
color: white;
}
div.sphinxsidebar ul ul,
div.sphinxsidebar ul.want-points {
margin-left: 20px;
list-style: square;
}
div.sphinxsidebar ul ul {
margin-top: 0;
margin-bottom: 0;
}
div.sphinxsidebar a {
color: #A4FF98;
}
div.sphinxsidebar form {
margin-top: 10px;
}
div.sphinxsidebar input {
border: 1px solid #A4FF98;
font-size: 1em;
}
/* :::: MODULE CLOUD :::: */
div.modulecloud {
margin: -5px 10px 5px 10px;
padding: 10px;
line-height: 160%;
border: 1px solid #cbe7e5;
background-color: #f2fbfd;
}
div.modulecloud a {
padding: 0 5px 0 5px;
}
/* :::: SEARCH :::: */
ul.search {
margin: 10px 0 0 20px;
padding: 0;
}
ul.search li {
padding: 5px 0 5px 20px;
background-image: url(file.png);
background-repeat: no-repeat;
background-position: 0 7px;
}
ul.search li a {
font-weight: bold;
}
ul.search li div.context {
color: #888;
margin: 2px 0 0 30px;
text-align: left;
}
ul.keywordmatches li.goodmatch a {
font-weight: bold;
}
/* :::: COMMON FORM STYLES :::: */
div.actions {
padding: 5px 10px 5px 10px;
border-top: 1px solid #cbe7e5;
border-bottom: 1px solid #cbe7e5;
background-color: #e0f6f4;
}
form dl {
color: #333;
}
form dt {
clear: both;
float: left;
min-width: 110px;
margin-right: 10px;
padding-top: 2px;
}
input#homepage {
display: none;
}
div.error {
margin: 5px 20px 0 0;
padding: 5px;
border: 1px solid #d00;
font-weight: bold;
}
/* :::: INLINE COMMENTS :::: */
div.inlinecomments {
position: absolute;
right: 20px;
}
div.inlinecomments a.bubble {
display: block;
float: right;
background-image: url(style/comment.png);
background-repeat: no-repeat;
width: 25px;
height: 25px;
text-align: center;
padding-top: 3px;
font-size: 0.9em;
line-height: 14px;
font-weight: bold;
color: black;
}
div.inlinecomments a.bubble span {
display: none;
}
div.inlinecomments a.emptybubble {
background-image: url(style/nocomment.png);
}
div.inlinecomments a.bubble:hover {
background-image: url(style/hovercomment.png);
text-decoration: none;
color: #3ca0a4;
}
div.inlinecomments div.comments {
float: right;
margin: 25px 5px 0 0;
max-width: 50em;
min-width: 30em;
border: 1px solid #2eabb0;
background-color: #f2fbfd;
z-index: 150;
}
div#comments {
border: 1px solid #2eabb0;
margin-top: 20px;
}
div#comments div.nocomments {
padding: 10px;
font-weight: bold;
}
div.inlinecomments div.comments h3,
div#comments h3 {
margin: 0;
padding: 0;
background-color: #2eabb0;
color: white;
border: none;
padding: 3px;
}
div.inlinecomments div.comments div.actions {
padding: 4px;
margin: 0;
border-top: none;
}
div#comments div.comment {
margin: 10px;
border: 1px solid #2eabb0;
}
div.inlinecomments div.comment h4,
div.commentwindow div.comment h4,
div#comments div.comment h4 {
margin: 10px 0 0 0;
background-color: #2eabb0;
color: white;
border: none;
padding: 1px 4px 1px 4px;
}
div#comments div.comment h4 {
margin: 0;
}
div#comments div.comment h4 a {
color: #d5f4f4;
}
div.inlinecomments div.comment div.text,
div.commentwindow div.comment div.text,
div#comments div.comment div.text {
margin: -5px 0 -5px 0;
padding: 0 10px 0 10px;
}
div.inlinecomments div.comment div.meta,
div.commentwindow div.comment div.meta,
div#comments div.comment div.meta {
text-align: right;
padding: 2px 10px 2px 0;
font-size: 95%;
color: #538893;
border-top: 1px solid #cbe7e5;
background-color: #e0f6f4;
}
div.commentwindow {
position: absolute;
width: 500px;
border: 1px solid #cbe7e5;
background-color: #f2fbfd;
display: none;
z-index: 130;
}
div.commentwindow h3 {
margin: 0;
background-color: #2eabb0;
color: white;
border: none;
padding: 5px;
font-size: 1.5em;
cursor: pointer;
}
div.commentwindow div.actions {
margin: 10px -10px 0 -10px;
padding: 4px 10px 4px 10px;
color: #538893;
}
div.commentwindow div.actions input {
border: 1px solid #2eabb0;
background-color: white;
color: #135355;
cursor: pointer;
}
div.commentwindow div.form {
padding: 0 10px 0 10px;
}
div.commentwindow div.form input,
div.commentwindow div.form textarea {
border: 1px solid #3c9ea2;
background-color: white;
color: black;
}
div.commentwindow div.error {
margin: 10px 5px 10px 5px;
background-color: #fbe5dc;
display: none;
}
div.commentwindow div.form textarea {
width: 99%;
}
div.commentwindow div.preview {
margin: 10px 0 10px 0;
background-color: #70d0d4;
padding: 0 1px 1px 25px;
}
div.commentwindow div.preview h4 {
margin: 0 0 -5px -20px;
padding: 4px 0 0 4px;
color: white;
font-size: 1.3em;
}
div.commentwindow div.preview div.comment {
background-color: #f2fbfd;
}
div.commentwindow div.preview div.comment h4 {
margin: 10px 0 0 0!important;
padding: 1px 4px 1px 4px!important;
font-size: 1.2em;
}
/* :::: SUGGEST CHANGES :::: */
div#suggest-changes-box input, div#suggest-changes-box textarea {
border: 1px solid #ccc;
background-color: white;
color: black;
}
div#suggest-changes-box textarea {
width: 99%;
height: 400px;
}
/* :::: PREVIEW :::: */
div.preview {
background-image: url(style/preview.png);
padding: 0 20px 20px 20px;
margin-bottom: 30px;
}
/* :::: INDEX PAGE :::: */
table.contentstable {
width: 90%;
}
table.contentstable p.biglink {
line-height: 150%;
}
a.biglink {
font-size: 1.3em;
}
span.linkdescr {
font-style: italic;
padding-top: 5px;
font-size: 90%;
}
/* :::: INDEX STYLES :::: */
table.indextable td {
text-align: left;
vertical-align: top;
}
table.indextable dl, table.indextable dd {
margin-top: 0;
margin-bottom: 0;
}
table.indextable tr.pcap {
height: 10px;
}
table.indextable tr.cap {
margin-top: 10px;
background-color: #f2f2f2;
}
img.toggler {
margin-right: 3px;
margin-top: 3px;
cursor: pointer;
}
form.pfform {
margin: 10px 0 20px 0;
}
/* :::: GLOBAL STYLES :::: */
.docwarning {
background-color: #ffe4e4;
padding: 10px;
margin: 0 -20px 0 -20px;
border-bottom: 1px solid #f66;
}
p.subhead {
font-weight: bold;
margin-top: 20px;
}
a {
color: #355F7C;
text-decoration: none;
}
a:hover {
text-decoration: underline;
}
div.body h1,
div.body h2,
div.body h3,
div.body h4,
div.body h5,
div.body h6 {
font-weight: bold;
margin: 16px -20px 16px -20px;
padding: 3px 0 3px 10px;
}
div.body h1 {
margin-top: 0;
font-size: 140%;
background-color: #E5EDB8;
color: #000;
border-bottom: 1px solid #ccc;
}
div.body h2 {
font-size: 120%;
background-color: #E5EDB8;
color: #000;
border-top: 1px solid #ccc;
border-bottom: 1px solid #ccc;
}
div.body h3 {
font-size: 120%;
border-bottom: 1px dashed #ccc;
}
div.body h4 { font-size: 120%; }
div.body h5 { font-size: 110%; }
div.body h6 { font-size: 100%; }
a.headerlink {
color: #c60f0f;
font-size: 0.8em;
padding: 0 4px 0 4px;
text-decoration: none;
visibility: hidden;
}
h1:hover > a.headerlink,
h2:hover > a.headerlink,
h3:hover > a.headerlink,
h4:hover > a.headerlink,
h5:hover > a.headerlink,
h6:hover > a.headerlink,
dt:hover > a.headerlink {
visibility: visible;
}
a.headerlink:hover {
background-color: #c60f0f;
color: white;
}
div.body p, div.body dd, div.body li {
line-height: 100%;
}
div.body p.caption {
text-align: inherit;
}
div.body td {
text-align: left;
}
ul.fakelist {
list-style: none;
margin: 10px 0 10px 20px;
padding: 0;
}
.field-list ul {
padding-left: 1em;
}
.first {
margin-top: 0 !important;
}
/* "Footnotes" heading */
p.rubric {
margin-top: 30px;
font-weight: bold;
}
/* "Topics" */
div.topic {
background-color: #eee;
border: 1px solid #ccc;
padding: 0 7px 0 7px;
margin: 10px 0 10px 0;
}
p.topic-title {
font-size: 1.1em;
font-weight: bold;
margin-top: 10px;
}
/* Admonitions */
div.admonition {
margin-top: 10px;
margin-bottom: 10px;
padding: 7px;
}
div.admonition dt {
font-weight: bold;
}
div.admonition dl {
margin-bottom: 0;
}
div.admonition p {
display: inline;
}
div.seealso {
background-color: #ffc;
border: 1px solid #ff6;
}
div.warning {
background-color: #ffe4e4;
border: 1px solid #f66;
}
div.note {
background-color: #eee;
border: 1px solid #ccc;
}
p.admonition-title {
margin: 0px 10px 5px 0px;
font-weight: bold;
display: inline;
}
p.admonition-title:after {
content: ":";
}
div.body p.centered {
text-align: center;
margin-top: 25px;
}
table.docutils {
border: 0;
}
table.docutils td, table.docutils th {
padding: 1px 8px 1px 0;
border-top: 0;
border-left: 0;
border-right: 0;
border-bottom: 1px solid #aaa;
}
table.field-list td, table.field-list th {
border: 0 !important;
}
table.footnote td, table.footnote th {
border: 0 !important;
}
.field-list ul {
margin: 0;
padding-left: 1em;
}
.field-list p {
margin: 0;
}
dl {
margin-bottom: 1px;
clear: both;
}
dd p {
margin-top: 0px;
}
dd ul, dd table {
margin-bottom: 10px;
}
dd {
margin-top: 3px;
margin-bottom: 10px;
margin-left: 30px;
}
.refcount {
color: #060;
}
dt:target,
.highlight {
background-color: #fbe54e;
}
dl.glossary dt {
font-weight: bold;
font-size: 1.1em;
}
th {
text-align: left;
padding-right: 5px;
}
pre {
padding: 5px;
background-color: #efc;
color: #333;
border: 1px solid #ac9;
border-left: none;
border-right: none;
overflow: auto;
}
td.linenos pre {
padding: 5px 0px;
border: 0;
background-color: transparent;
color: #aaa;
}
table.highlighttable {
margin-left: 0.5em;
}
table.highlighttable td {
padding: 0 0.5em 0 0.5em;
}
tt {
background-color: #ecf0f3;
padding: 0 1px 0 1px;
font-size: 0.95em;
}
tt.descname {
background-color: transparent;
font-weight: bold;
font-size: 1.2em;
}
tt.descclassname {
background-color: transparent;
}
tt.xref, a tt {
background-color: transparent;
font-weight: bold;
}
.footnote:target { background-color: #ffa }
h1 tt, h2 tt, h3 tt, h4 tt, h5 tt, h6 tt {
background-color: transparent;
}
.optional {
font-size: 1.3em;
}
.versionmodified {
font-style: italic;
}
form.comment {
margin: 0;
padding: 10px 30px 10px 30px;
background-color: #eee;
}
form.comment h3 {
background-color: #326591;
color: white;
margin: -10px -30px 10px -30px;
padding: 5px;
font-size: 1.4em;
}
form.comment input,
form.comment textarea {
border: 1px solid #ccc;
padding: 2px;
font-size: 100%;
}
form.comment input[type="text"] {
width: 240px;
}
form.comment textarea {
width: 100%;
height: 200px;
margin-bottom: 10px;
}
.system-message {
background-color: #fda;
padding: 5px;
border: 3px solid red;
}
/* :::: PRINT :::: */
@media print {
div.document,
div.documentwrapper,
div.bodywrapper {
margin: 0;
width : 100%;
}
div.sphinxsidebar,
div.related,
div.footer,
div#comments div.new-comment-box,
#top-link {
display: none;
}
}
/* :::: OWN :::: */
.section ul {
margin: 0.2em;
}
/* Data tables */
table.tdata
{
font-size: 12px;
background: #fff;
border-collapse: collapse;
text-align: left;
}
table.tdata th
{
font-size: 14px;
font-weight: normal;
padding: 8px 8px;
background-color: #E5EDB8;
border-bottom: 1px solid #ccc;
}
table.tdata tr:first-child th:first-child
{
border-top-left-radius: 8px;
}
table.tdata tr:first-child th:last-child
{
border-top-right-radius: 8px;
}
table.tdata td
{
border-bottom: 1px solid #ccc;
padding: 8px 8px;
}
table.tdata tbody tr:hover td
{
color: #060;
}
table th.num,
table td.num
{
text-align: right;
}
div.stacktrace
{
-moz-border-radius: 6px;
-webkit-border-radius: 6px;
border-radius: 6px;
border: 1px solid #999;
padding: 6px;
margin-bottom: 1em;
}
div.stacktrace div.stackframe
{
margin: 8px;
}
div.stackframe .context,
div.stackframe .local
{
-moz-border-radius: 4px;
-webkit-border-radius: 4px;
border-radius: 4px;
padding: 4px;
margin-left: 10px;
margin-top: 4px;
background: #f2f2f2;
}
div.stackframe div.context div.highlighted
{
background: #E5EDB8;
}
div.stackframe span.lineno,
div.stackframe span.filename
{
}
div.stackframe span.function
{
font-weight: bold;
}
div.stackframe span.function,
div.stackframe div.context
{
font-family: monospace;
}
div.local,
div.referents
{
margin-top: 4px;
margin-bottom: 4px;
margin-left: 10px;
}
div.stackframe div.local
{
background: #fafafa;
}
div.referents span.local_name,
div.referents span.local_size
{
font-family: monospace;
padding: 1px;
}
div.referents span.local_value,
div.referents span.local_size,
div.referents span.local_type
{
font-family: monospace;
margin-left: 1em;
}
div.referents span.local_type
{
color: #999;
}
div.referents span.local_size
{
color: #03F;
}
div.local div.referents a
{
color: #000;
text-decoration: none;
}
div.local div.referents a:hover
{
background-color: #EEE;
color: #090;
text-decoration: none;
}

View File

@@ -0,0 +1,127 @@
%include('header', category='Tracker', title='Tracked objects')
%from pympler.util.stringutils import pp, pp_timestamp
%from json import dumps
<h1>Tracked objects</h1>
%if snapshots:
<h2>Memory distribution over time</h2>
<div id="memory_chart_flot" style="width:100%; height: 400px"></div>
<script type="text/javascript" src="/static/jquery.flot.min.js"></script>
<script type="text/javascript" src="/static/jquery.flot.stack.min.js"></script>
<script type="text/javascript" src="/static/jquery.flot.tooltip.min.js"></script>
<script type="text/javascript">
function format_size(value) {
var val = Math.round(value / (1000*1000));
return val.toLocaleString() + ' MB';
};
$(document).ready(function() {
var timeseries = {{!timeseries}};
var options = {
xaxis: {
show: false,
},
yaxis: {
tickFormatter: format_size
},
grid: {
hoverable: true
},
tooltip: true,
tooltipOpts: {
content: "%s | %y"
},
legend: {
position: "nw"
},
series: {
bars: {
show: true,
barWidth: .9,
fillColor: { colors: [ { opacity: 0.9 }, { opacity: 0.9 } ] },
align: "center"
},
stack: true
}
};
$.plot($('#memory_chart_flot'), timeseries, options);
});
</script>
<h2>Snapshots statistics</h2>
%for sn in snapshots:
<h3>{{sn.desc or 'Untitled'}} snapshot at {{pp_timestamp(sn.timestamp)}}</h3>
<table class="tdata">
<thead>
<tr>
<th width="20%">Class</th>
<th width="20%" class="num">Instance #</th>
<th width="20%" class="num">Total</th>
<th width="20%" class="num">Average size</th>
<th width="20%" class="num">Share</th>
</tr>
</thead>
<tbody>
%cnames = list(sn.classes.keys())
%cnames.sort()
%for cn in cnames:
%data = sn.classes[cn]
<tr>
<td><a href="/tracker/class/{{cn}}">{{cn}}</a></td>
<td class="num">{{data['active']}}</td>
<td class="num">{{pp(data['sum'])}}</td>
<td class="num">{{pp(data['avg'])}}</td>
<td class="num">{{'%3.2f%%' % data['pct']}}</td>
</tr>
%end
</tbody>
</table>
%if sn.system_total.available:
<h4>Process memory</h4>
<table class="tdata">
<thead>
<tr>
<th>Type</th>
<th class="num">Size</th>
</tr>
</thead>
<tbody>
<tr>
<td>Virtual memory size</td>
<td class="num">{{pp(sn.system_total.vsz)}}</td>
</tr>
<tr>
<td>Resident set size</td>
<td class="num">{{pp(sn.system_total.rss)}}</td>
</tr>
<tr>
<td>Pagefaults</td>
<td class="num">{{sn.system_total.pagefaults}}</td>
</tr>
%for key, value in sn.system_total.os_specific:
<tr>
<td>{{key}}</td>
<td class="num">{{value}}</td>
</tr>
%end
</tbody>
</table>
%end
%end
%else:
<p>No objects are currently tracked. Consult the Pympler documentation for
instructions of how to use the classtracker module.</p>
%end
%include('footer')

View File

@@ -0,0 +1,79 @@
%include('header', category='Tracker', title=clsname)
%from pympler.util.stringutils import pp, pp_timestamp
<h1>{{clsname}}</h1>
%sizes = [tobj.get_max_size() for tobj in stats.index[clsname]]
<p>{{len(stats.index[clsname])}} instances of {{clsname}} were registered. The
average size is {{pp(sum(sizes)/len(sizes))}}, the minimal size is
{{pp(min(sizes))}}, the maximum size is {{pp(max(sizes))}}.</p>
<h2>Coalesced Referents per Snapshot</h2>
%for snapshot in stats.snapshots:
%if clsname in snapshot.classes:
%merged = snapshot.classes[clsname]['merged']
<h3>Snapshot: {{snapshot.desc}}</h3>
<p>{{pp(merged.size)}} occupied by instances of class {{clsname}}</p>
%if merged.refs:
%include('asized_referents', referents=merged.refs)
%else:
<p>No per-referent sizes recorded.</p>
%end
%end
%end
<h2>Instances</h2>
%for tobj in stats.index[clsname]:
<table class="tdata" width="100%" rules="rows">
<tr>
<th width="140px">Instance</th>
<td>{{tobj.name}} at {{'0x%08x' % tobj.id}}</td>
</tr>
%if tobj.repr:
<tr>
<th>Representation</th>
<td>{{tobj.repr}}&nbsp;</td>
</tr>
%end
<tr>
<th>Lifetime</th>
<td>{{pp_timestamp(tobj.birth)}} - {{pp_timestamp(tobj.death)}}</td>
</tr>
%if getattr(tobj, 'trace'):
<tr>
<th>Instantiation</th>
<td>
% # <div class="stacktrace">
%for frame in tobj.trace:
<div class="stackframe">
<span class="filename">{{frame[0]}}</span>
<span class="lineno">{{frame[1]}}</span>
<span class="function">{{frame[2]}}</span>
<div class="context">{{frame[3][0].strip()}}</div>
</div>
%end
% # </div>
</td>
</tr>
%end
%for (timestamp, size) in tobj.snapshots:
<tr>
<td>{{pp_timestamp(timestamp)}}</td>
%if not size.refs:
<td>{{pp(size.size)}}</td>
%else:
<td>
{{pp(size.size)}}
%include('asized_referents', referents=size.refs)
</td>
%end
</tr>
%end
</table>
%end
%include('footer')

View File

@@ -0,0 +1,267 @@
"""The tracker module allows you to track changes in the memory usage over
time.
Using the SummaryTracker, you can create summaries and compare them
with each other. Stored summaries can be ignored during comparison,
avoiding the observer effect.
The ObjectTracker allows you to monitor object creation: the objects that
exist at one point in time are compared with the objects from an earlier time.
"""
import gc
import inspect
from pympler import muppy, summary
from pympler.util import compat
class SummaryTracker(object):
""" Helper class to track changes between two summaries taken.
Detailed information on single objects will be lost, e.g. object size or
object id. But often summaries are sufficient to monitor the memory usage
over the lifetime of an application.
On initialisation, a first summary is taken. Every time `diff` is called,
a new summary will be created. Thus, a diff between the new and the last
summary can be extracted.
Be aware that filtering out previous summaries is time-intensive. You
should therefore restrict yourself to the number of summaries you really
need.
"""
def __init__(self, ignore_self=True):
"""Constructor.
The number of summaries managed by the tracker has a performance
impact on new summaries, iff you decide to exclude them from further
summaries. Therefore it is suggested to use them economically.
Keyword arguments:
ignore_self -- summaries managed by this object will be ignored.
"""
self.s0 = summary.summarize(muppy.get_objects())
self.summaries = {}
self.ignore_self = ignore_self
def create_summary(self):
"""Return a summary.
See also the notes on ignore_self in the class as well as the
initializer documentation.
"""
if not self.ignore_self:
res = summary.summarize(muppy.get_objects())
else:
# If the user requested the data required to store summaries to be
# ignored in the summaries, we need to identify all objects which
# are related to each summary stored.
# Thus we build a list of all objects used for summary storage as
# well as a dictionary which tells us how often an object is
# referenced by the summaries.
# During this identification process, more objects are referenced,
# namely int objects identifying referenced objects as well as the
# corresponding count.
# For all these objects it will be checked whether they are
# referenced from outside the monitor's scope. If not, they will be
# subtracted from the snapshot summary, otherwise they are
# included (as this indicates that they are relevant to the
# application).
all_of_them = [] # every single object
ref_counter = {} # how often it is referenced; (id(o), o) pairs
def store_info(o):
all_of_them.append(o)
if id(o) in ref_counter:
ref_counter[id(o)] += 1
else:
ref_counter[id(o)] = 1
# store infos on every single object related to the summaries
store_info(self.summaries)
for k, v in self.summaries.items():
store_info(k)
summary._traverse(v, store_info)
# do the summary
res = summary.summarize(muppy.get_objects())
# remove ids stored in the ref_counter
for _id in ref_counter:
# referenced in frame, ref_counter, ref_counter.keys()
if len(gc.get_referrers(_id)) == (3):
summary._subtract(res, _id)
for o in all_of_them:
# referenced in frame, summary, all_of_them
if len(gc.get_referrers(o)) == (ref_counter[id(o)] + 2):
summary._subtract(res, o)
return res
def diff(self, summary1=None, summary2=None):
"""Compute diff between to summaries.
If no summary is provided, the diff from the last to the current
summary is used. If summary1 is provided the diff from summary1
to the current summary is used. If summary1 and summary2 are
provided, the diff between these two is used.
"""
res = None
if summary2 is None:
self.s1 = self.create_summary()
if summary1 is None:
res = summary.get_diff(self.s0, self.s1)
else:
res = summary.get_diff(summary1, self.s1)
self.s0 = self.s1
else:
if summary1 is not None:
res = summary.get_diff(summary1, summary2)
else:
raise ValueError(
"You cannot provide summary2 without summary1.")
return summary._sweep(res)
def print_diff(self, summary1=None, summary2=None):
"""Compute diff between to summaries and print it.
If no summary is provided, the diff from the last to the current
summary is used. If summary1 is provided the diff from summary1
to the current summary is used. If summary1 and summary2 are
provided, the diff between these two is used.
"""
summary.print_(self.diff(summary1=summary1, summary2=summary2))
def format_diff(self, summary1=None, summary2=None):
"""Compute diff between to summaries and return a list of formatted
lines.
If no summary is provided, the diff from the last to the current
summary is used. If summary1 is provided the diff from summary1
to the current summary is used. If summary1 and summary2 are
provided, the diff between these two is used.
"""
return summary.format_(self.diff(summary1=summary1, summary2=summary2))
    def store_summary(self, key):
        """Create a summary of the current object state and store it under
        `key` in ``self.summaries`` for later comparison."""
        self.summaries[key] = self.create_summary()
class ObjectTracker(object):
    """
    Helper class to track changes in the set of existing objects.

    Each time you invoke a diff with this tracker, the objects which existed
    during the last invocation are compared with the objects which exist during
    the current invocation.

    Please note that in order to do so, strong references to all objects will
    be stored. This means that none of these objects can be garbage collected.

    A use case for the ObjectTracker is the monitoring of a state which should
    be stable, but you see new objects being created nevertheless. With the
    ObjectTracker you can identify these new objects.
    """
    # Some precaution needs to be taken when handling frame objects (see
    # warning at http://docs.python.org/lib/inspect-stack.html). All ignore
    # lists used need to be emptied so no frame objects remain referenced.

    def __init__(self):
        """On initialisation, the current state of objects is stored.

        Note that all objects which exist at this point in time will not be
        released until you destroy this ObjectTracker instance.
        """
        # Exclude the current frame so the tracker's baseline does not keep
        # the caller's frame (and everything it references) alive.
        self.o0 = self._get_objects(ignore=(inspect.currentframe(),))

    def _get_objects(self, ignore=()):
        """Get all currently existing objects, minus those in `ignore`.

        XXX - ToDo: This method is a copy&paste from muppy.get_objects, but
        some modifications are applied. Specifically, it allows to ignore
        objects (which includes the current frame).

        keyword arguments
        ignore -- list of objects to ignore
        """
        def remove_ignore(objects, ignore=()):
            # Remove all objects listed in the ignore list. The comparison
            # is by identity (compat.object_in_list uses ``is``) so objects
            # with a custom __eq__ cannot shadow ignore-list entries.
            res = []
            for o in objects:
                if not compat.object_in_list(o, ignore):
                    res.append(o)
            return res

        tmp = gc.get_objects()
        # Exclude the tracker's own bookkeeping from the measurement:
        # this frame, the tracker itself, the ignore tuple, and the helper.
        ignore += (inspect.currentframe(), self, ignore, remove_ignore)
        if hasattr(self, 'o0'):
            ignore += (self.o0,)
        if hasattr(self, 'o1'):
            ignore += (self.o1,)
        # this implies that referenced objects are also ignored
        tmp = remove_ignore(tmp, ignore)
        res = []
        for o in tmp:
            # gc.get_objects returns only container objects, but we also want
            # the objects referenced by them
            refs = muppy.get_referents(o)
            for ref in refs:
                if not gc.is_tracked(ref):
                    # we already got the container objects, now we only add
                    # non-container objects
                    res.append(ref)
        res.extend(tmp)
        res = muppy._remove_duplicates(res)
        # NOTE(review): `ignore` is always a tuple at this point, so the
        # check can never be False; kept as defensive code.
        if ignore is not None:
            # repeat to filter out objects which may have been referenced
            res = remove_ignore(res, ignore)
        # Drop the local reference to the ignore tuple (it contains this
        # frame); see the frame-handling note on the class.
        del ignore
        return res

    def get_diff(self, ignore=()):
        """Get the diff to the last time the state of objects was measured.

        keyword arguments
        ignore -- list of objects to ignore
        """
        # ignore this and the caller frame
        self.o1 = self._get_objects(ignore+(inspect.currentframe(),))
        diff = muppy.get_diff(self.o0, self.o1)
        # The current state becomes the baseline for the next diff.
        self.o0 = self.o1
        # manual cleanup, see comment above
        return diff

    def print_diff(self, ignore=()):
        """Print the diff to the last time the state of objects was measured.

        keyword arguments
        ignore -- list of objects to ignore
        """
        # ignore this and the caller frame
        for line in self.format_diff(ignore+(inspect.currentframe(),)):
            print(line)

    def format_diff(self, ignore=()):
        """Format the diff to the last time the state of objects was measured.

        Returns a list of lines: the added objects' summary followed by the
        removed objects' summary.

        keyword arguments
        ignore -- list of objects to ignore
        """
        # ignore this and the caller frame
        lines = []
        diff = self.get_diff(ignore+(inspect.currentframe(),))
        lines.append("Added objects:")
        for line in summary.format_(summary.summarize(diff['+'])):
            lines.append(line)
        lines.append("Removed objects:")
        for line in summary.format_(summary.summarize(diff['-'])):
            lines.append(line)
        return lines

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,23 @@
"""
Compatibility layer to allow Pympler to be used with different Python versions.
"""
from typing import Any, Iterable
try:
import tkinter
except ImportError:
tkinter = None # type: ignore
# Helper functions
def object_in_list(obj: Any, l: Iterable) -> bool:
    """Return True if `obj` is an element of `l`, compared by identity.

    Required compatibility function to handle WeakSet objects: membership is
    tested with ``is`` rather than ``==``/``in``.
    """
    return any(element is obj for element in l)

View File

@@ -0,0 +1,77 @@
"""
String utility functions.
"""
from typing import Any, Optional, Union
def safe_repr(obj: Any, clip: Optional[int] = None) -> str:
    """
    Convert object to string representation, yielding the same result as
    `repr` but catching all errors and returning 'N/A' instead of raising.
    When `clip` is given, longer results are truncated to `clip` characters
    by replacing the middle with '..'.

    >>> safe_repr(42)
    '42'
    >>> safe_repr('Clipped text', clip=8)
    "'Cli..t'"
    >>> safe_repr([1, 2, 3, 4], clip=8)
    '[1, ..4]'
    """
    try:
        s = repr(obj)
        if not clip or len(s) <= clip:
            return s
        # Keep the head and the last two characters around a '..' marker.
        # NOTE(review): for clip < 5 the result can exceed `clip`; preserved
        # for backward compatibility.
        return s[:clip - 4] + '..' + s[-2:]
    except Exception:
        # repr() of arbitrary objects may raise anything; never propagate.
        # (Was a bare ``except:`` which also swallowed SystemExit and
        # KeyboardInterrupt.)
        return 'N/A'
def trunc(obj: str, max: int, left: bool = False) -> str:
    """
    Convert `obj` to a single-line string (newlines become ``|``) and truncate
    it to at most `max` characters, marking the cut with ``...``. With
    `left=True` the beginning of the string is truncated instead of the end.

    @note: Does not catch exceptions when converting `obj` to string with
    `str`.

    >>> trunc('This is a long text.', 8)
    'This ...'
    >>> trunc('This is a long text.', 8, left=True)
    '...text.'
    """
    # `max` shadows the builtin; the parameter name is part of the public API.
    flat = str(obj).replace('\n', '|')
    if len(flat) <= max:
        return flat
    if left:
        return '...' + flat[len(flat) - max + 3:]
    return flat[:(max - 3)] + '...'
def pp(i: Union[int, float], base: int = 1024) -> str:
    """
    Pretty-print the number `i` as a human-readable size representation
    using binary unit prefixes, e.g. ``pp(2048)`` -> ``'   2.00 KB'``.

    :param i: size in bytes
    :param base: scaling factor between successive units, default 1024
    """
    degree = 0
    pattern = "%4d %s"
    while i > base:
        pattern = "%7.2f %s"
        i = i / float(base)
        degree += 1
    # Bug fix: 'PB' was missing from the scale list, so petabyte-scale
    # values (degree 5) were mislabelled as 'EB'.
    scales = ['B', 'KB', 'MB', 'GB', 'TB', 'PB', 'EB']
    return pattern % (i, scales[degree])
def pp_timestamp(t: Optional[float]) -> str:
    """
    Format the duration `t` (seconds) as a friendly ``HH:MM:SS.ss`` string.
    Returns an empty string when `t` is None.
    """
    if t is None:
        return ''
    hours = int(t / 3600)
    minutes = int(t / 60 % 60)
    seconds = t % 60
    return "%02d:%02d:%05.2f" % (hours, minutes, seconds)

View File

@@ -0,0 +1,346 @@
"""
This module provides a web-based memory profiling interface. The Pympler web
frontend exposes process information, tracker statistics, and garbage graphs.
The web frontend uses `Bottle <http://bottlepy.org>`_, a lightweight Python
web framework. Bottle is packaged with Pympler.
The web server can be invoked almost as easily as setting a breakpoint using
*pdb*::
from pympler.web import start_profiler
start_profiler()
Calling ``start_profiler`` suspends the current thread and executes the Pympler
web server, exposing profiling data and various facilities of the Pympler
library via a graphic interface.
"""
import sys
import os
import threading
from inspect import getouterframes
from json import dumps
from shutil import rmtree
from tempfile import mkdtemp
from threading import Thread
from weakref import WeakValueDictionary
from wsgiref.simple_server import make_server
from pympler import asizeof
from pympler.garbagegraph import GarbageGraph
from pympler.process import get_current_threads, ProcessMemoryInfo
from pympler.util.stringutils import safe_repr
# Prefer the installed version of bottle.py. If bottle.py is not installed
# fallback to the vendored version.
try:
import bottle
except ImportError:
from pympler.util import bottle
class ServerState(threading.local):
    """
    Thread-local state of a running profiler server.

    Being thread-local allows several servers to run in different threads
    without interfering with each other. Lazily computed structures (garbage
    graphs, tracker statistics) are cached here.
    """

    def __init__(self):
        self.server = None
        self.stats = None
        self.garbage_graphs = None
        # Weak references where possible; strong references as fallback
        # (see get_ref).
        self.id2ref = WeakValueDictionary()
        self.id2obj = {}

    def clear_cache(self):
        """Invalidate the cached garbage graphs."""
        self.garbage_graphs = None
server = ServerState()
def get_ref(obj):
    """Register `obj` for later lookup and return ``str(id(obj))``.

    The object is stored in a WeakValueDictionary keyed by its id. Objects
    that cannot be weakly referenced (e.g. dictionaries, frame objects) are
    kept in a regular dictionary instead, which holds a strong reference.
    """
    key = id(obj)
    try:
        server.id2ref[key] = obj
    except TypeError:
        # Not weakly referenceable -- fall back to a strong reference.
        server.id2obj[key] = obj
    return str(key)
def get_obj(ref):
    """Resolve a string reference produced by `get_ref` back to the object.

    :raises KeyError: if the reference is unknown or the weakly referenced
        object has been collected.
    """
    oid = int(ref)
    try:
        # Check the weak dictionary first. The previous implementation used
        # ``id2ref.get(oid) or id2obj[oid]``, which raised a spurious
        # KeyError for falsy objects stored in id2ref (e.g. instances whose
        # __bool__ returns False).
        return server.id2ref[oid]
    except KeyError:
        return server.id2obj[oid]
# Resolve the package directory so templates and static assets ship with
# Pympler can be found regardless of the working directory.
pympler_path = os.path.dirname(os.path.abspath(__file__))
static_files = os.path.join(pympler_path, 'templates')
bottle.TEMPLATE_PATH.append(static_files)
@bottle.route('/')
@bottle.view('index')
def root():
    """Render the overview page with the current process memory info."""
    return {'processinfo': ProcessMemoryInfo()}
@bottle.route('/process')
@bottle.view('process')
def process():
    """Render the process page: overall memory info plus per-thread data."""
    return {'info': ProcessMemoryInfo(), 'threads': get_current_threads()}
@bottle.route('/tracker')
@bottle.view('tracker')
def tracker_index():
    """Get tracker overview.

    Builds one time series per tracked class across all snapshots, plus the
    profiling overhead and either per-segment process memory (when segment
    data is available) or a single "Other" series. The series are
    JSON-encoded for consumption by the template.
    """
    stats = server.stats
    if stats and stats.snapshots:
        stats.annotate()
        timeseries = []
        for cls in stats.tracked_classes:
            series = []
            for snapshot in stats.snapshots:
                # 0 when the class does not appear in this snapshot.
                series.append(snapshot.classes.get(cls, {}).get('sum', 0))
            timeseries.append((cls, series))
        series = [s.overhead for s in stats.snapshots]
        timeseries.append(("Profiling overhead", series))
        if stats.snapshots[0].system_total.data_segment:
            # Assume tracked data resides in the data segment
            series = [s.system_total.data_segment - s.tracked_total - s.overhead
                      for s in stats.snapshots]
            timeseries.append(("Data segment", series))
            series = [s.system_total.code_segment for s in stats.snapshots]
            timeseries.append(("Code segment", series))
            series = [s.system_total.stack_segment for s in stats.snapshots]
            timeseries.append(("Stack segment", series))
            series = [s.system_total.shared_segment for s in stats.snapshots]
            timeseries.append(("Shared memory", series))
        else:
            # No segment information; lump untracked memory into "Other".
            series = [s.total - s.tracked_total - s.overhead
                      for s in stats.snapshots]
            timeseries.append(("Other", series))
        # Reshape into [{label, data: [(index, value), ...]}, ...] and
        # JSON-encode; presumably consumed by client-side chart code.
        timeseries = [dict(label=label, data=list(enumerate(data)))
                      for label, data in timeseries]
        return dict(snapshots=stats.snapshots, timeseries=dumps(timeseries))
    else:
        return dict(snapshots=[])
@bottle.route('/tracker/class/<clsname>')
@bottle.view('tracker_class')
def tracker_class(clsname):
    """Render instance details for the tracked class `clsname`.

    Redirects to the tracker overview when no statistics are available.
    """
    if not server.stats:
        bottle.redirect('/tracker')
    server.stats.annotate()
    return {'stats': server.stats, 'clsname': clsname}
@bottle.route('/refresh')
def refresh():
    """Clear all cached information (e.g. garbage graphs) and redirect to
    the overview page so data is recomputed on the next request."""
    server.clear_cache()
    bottle.redirect('/')
@bottle.route('/traceback/<threadid>')
@bottle.view('stacktrace')
def get_traceback(threadid):
    """Render the current stack of thread `threadid`, outermost frame first.

    Each frame object is replaced by a reference (via `get_ref`) to its
    locals so the template can link into the object browser. An empty stack
    is rendered when the thread no longer exists.
    """
    tid = int(threadid)
    frame = sys._current_frames().get(tid)
    stack = []
    if frame is not None:
        for entry in reversed(getouterframes(frame, 5)):
            stack.append((get_ref(entry[0].f_locals),) + tuple(entry[1:]))
    return dict(stack=stack, threadid=tid)
@bottle.route('/objects/<oid>')
@bottle.view('referents')
def get_obj_referents(oid):
    """Render the referents of the object identified by string reference
    `oid`, with a navigable reference, type name, clipped repr and size
    for each one."""
    obj = get_obj(oid)
    if type(obj) is dict:
        # Dictionaries get descriptive key-based names.
        named_objects = asizeof.named_refs(obj)
    else:
        named_objects = [(repr(type(child)), child)
                         for child in asizeof._getreferents(obj)]
    referents = {}
    for name, child in named_objects:
        referents[name] = (get_ref(child), type(child).__name__,
                           safe_repr(child, clip=48), asizeof.asizeof(child))
    return dict(referents=referents)
@bottle.route('/static/<filename>')
def static_file(filename):
    """Serve static files (CSS etc.) from the bundled templates directory."""
    return bottle.static_file(filename, root=static_files)
def _compute_garbage_graphs():
    """Return the garbage graphs, computing and caching them on first use."""
    graphs = server.garbage_graphs
    if graphs is None:
        graphs = GarbageGraph().split_and_sort()
        server.garbage_graphs = graphs
    return graphs
@bottle.route('/garbage')
@bottle.view('garbage_index')
def garbage_index():
    """Render the overview of all detected garbage reference cycles."""
    return {'graphs': _compute_garbage_graphs()}
@bottle.route('/garbage/<index:int>')
@bottle.view('garbage')
def garbage_cycle(index):
    """Render details of reference cycle `index`, largest objects first."""
    graph = _compute_garbage_graphs()[int(index)]
    graph.reduce_to_cycles()
    objects = graph.metadata
    # Sort in place; the graph is cached, so it stays sorted for later views.
    objects.sort(key=lambda node: node.size, reverse=True)
    return dict(objects=objects, index=index)
def _get_graph(graph, filename):
    """Return the rendered PNG filename for `graph`, rendering at most once.

    The result (filename, or None when rendering failed) is memoized on the
    graph object itself, so failed renders are not retried.
    """
    if not hasattr(graph, 'rendered_file'):
        try:
            graph.render(os.path.join(server.tmpdir, filename), format='png')
            graph.rendered_file = filename
        except OSError:
            graph.rendered_file = None
    return graph.rendered_file
@bottle.route('/garbage/graph/<index:int>')
def garbage_graph(index):
    """Serve the PNG visualisation of garbage graph `index`.

    With a non-empty ``reduce`` query parameter the graph is first reduced
    to its cycles. Returns None when the graph is empty or rendering failed.
    """
    graph = _compute_garbage_graphs()[int(index)]
    reduction = bottle.request.GET.get('reduce', '')
    if reduction:
        graph = graph.reduce_to_cycles()
    if not graph:
        return None
    filename = 'garbage%so%s.png' % (index, reduction)
    rendered = _get_graph(graph, filename)
    if not rendered:
        return None
    return bottle.static_file(rendered, root=server.tmpdir)
@bottle.route('/help')
def show_documentation():
    """Redirect to the online documentation."""
    bottle.redirect('https://pympler.readthedocs.io/en/latest/')
class PymplerServer(bottle.ServerAdapter):
    """Simple WSGI server based on ``wsgiref``; host/port are passed at
    construction (see `start_profiler`)."""
    def run(self, handler):
        # serve_forever blocks until the hosting thread is terminated.
        self.server = make_server(self.host, self.port, handler)
        self.server.serve_forever()
def start_profiler(host='localhost', port=8090, tracker=None, stats=None,
                   debug=False, **kwargs):
    """
    Start the web server to show profiling data. The function suspends the
    Python application (the current thread) until the web server is stopped.

    The only way to stop the server is to signal the running thread, e.g. press
    Ctrl+C in the console. If this isn't feasible for your application use
    `start_in_background` instead.

    During the execution of the web server, profiling data is (lazily) cached
    to improve performance. For example, garbage graphs are rendered when the
    garbage profiling data is requested and are simply retransmitted upon later
    requests.

    The web server can display profiling data from previously taken snapshots
    when `tracker` or `stats` is specified. The former is useful for profiling
    a running application, the latter for off-line analysis. Requires existing
    snapshots taken with
    :py:meth:`~pympler.classtracker.ClassTracker.create_snapshot` or
    :py:meth:`~pympler.classtracker.ClassTracker.start_periodic_snapshots`.

    :param host: the host where the server shall run, default is localhost
    :param port: server listens on the specified port, default is 8090 to allow
        coexistance with common web applications
    :param tracker: `ClassTracker` instance, browse profiling data (on-line
        analysis)
    :param stats: `Stats` instance, analyze `ClassTracker` profiling dumps
        (useful for off-line analysis)
    :param debug: enable bottle's debug mode
    :param kwargs: passed on to the `PymplerServer` adapter
    """
    if tracker and not stats:
        server.stats = tracker.stats
    else:
        server.stats = stats
    # Create the scratch directory for rendered graphs *before* entering the
    # try block: if mkdtemp itself fails there is nothing to clean up, and
    # the finally clause must not mask the original error with an
    # AttributeError on the missing ``tmpdir``.
    server.tmpdir = mkdtemp(prefix='pympler')
    try:
        server.server = PymplerServer(host=host, port=port, **kwargs)
        bottle.debug(debug)
        bottle.run(server=server.server)
    finally:
        rmtree(server.tmpdir)
class ProfilerThread(Thread):
    """Daemon thread that hosts the web frontend.

    All keyword arguments other than `group`, `target` and `name` are stored
    and forwarded to `start_profiler` when the thread runs.
    """

    def __init__(self, group=None, target=None, name='Pympler web frontend',
                 **kwargs):
        super(ProfilerThread, self).__init__(group=group, target=target,
                                             name=name)
        self.kwargs = kwargs
        # Daemonize so the profiler never keeps the application alive.
        self.daemon = True

    def run(self):
        start_profiler(**self.kwargs)
def start_in_background(**kwargs):
    """
    Start the web server in the background. A new daemon thread is created
    which serves the profiling interface without suspending the current
    application.

    For the documentation of the parameters see `start_profiler`.

    Returns the created thread object.
    """
    profiler_thread = ProfilerThread(**kwargs)
    profiler_thread.start()
    return profiler_thread