From ca077e5b1926373831be416e427dd8bcf26f7675 Mon Sep 17 00:00:00 2001 From: erikhuck Date: Mon, 5 May 2025 21:20:49 -0400 Subject: [PATCH 1/7] Adds an analysis of the sub-tracked code in addition to a comparison of the analysis between tracking sessions --- src/gpu_tracker/__init__.py | 3 +- src/gpu_tracker/__main__.py | 161 ++++++++---- src/gpu_tracker/_helper_classes.py | 310 +++++++++++++++++++--- src/gpu_tracker/sub_tracker.py | 408 ++++++++++++++++++++++++++++- src/gpu_tracker/tracker.py | 24 +- 5 files changed, 806 insertions(+), 100 deletions(-) diff --git a/src/gpu_tracker/__init__.py b/src/gpu_tracker/__init__.py index 3755370..ebaf911 100644 --- a/src/gpu_tracker/__init__.py +++ b/src/gpu_tracker/__init__.py @@ -10,5 +10,4 @@ __version__ = _gv(_path.join(_path.dirname(__file__), _path.pardir)) from .tracker import Tracker -from .sub_tracker import SubTracker -from .sub_tracker import sub_track +from .sub_tracker import SubTracker, sub_track, SubTrackingAnalyzer, TrackingComparison diff --git a/src/gpu_tracker/__main__.py b/src/gpu_tracker/__main__.py index d71305f..58b4195 100644 --- a/src/gpu_tracker/__main__.py +++ b/src/gpu_tracker/__main__.py @@ -4,14 +4,18 @@ Usage: gpu-tracker -h | --help gpu-tracker -v | --version - gpu-tracker --execute= [--output=] [--format=] [--st=] [--ru=] [--gru=] [--tu=] [--nec=] [--guuids=] [--disable-logs] [--gb=] [--tf=] + gpu-tracker --execute= [--output=] [--format=] [--tconfig=] [--st=] [--ru=] [--gru=] [--tu=] [--nec=] [--guuids=] [--disable-logs] [--gb=] [--tf=] [--overwrite] + gpu-tracker sub-track combine --stf= -p ... + gpu-tracker sub-track analyze --tf= --stf= [--output=] [--format=] + gpu-tracker sub-track compare [--output=] [--format=] [--cconfig=] [-m =...] [--stat=] Options: -h --help Show this help message and exit. -v --version Show package version and exit. -e --execute= The command to run along with its arguments all within quotes e.g. "ls -l -a". 
- -o --output= File path to store the computational-resource-usage measurements. If not set, prints measurements to the screen. - -f --format= File format of the output. Either 'json' or 'text'. Defaults to 'text'. + -o --output= File path to store the computational-resource-usage measurements in the case of tracking or the analysis report in the case of sub-tracking. If not set, prints to the screen. + -f --format= File format of the output. Either 'json', 'text', or 'pickle'. Defaults to 'text'. + --tconfig= JSON config file containing the key word arguments to the ``Tracker`` class (see API) to be optionally used instead of the corresponding commandline options. If any commandline options are set, they will override the corresponding arguments provided by the config file. --st= The number of seconds to sleep in between usage-collection iterations. --ru= One of 'bytes', 'kilobytes', 'megabytes', 'gigabytes', or 'terabytes'. --gru= One of 'bytes', 'kilobytes', 'megabytes', 'gigabytes', or 'terabytes'. @@ -21,65 +25,134 @@ --disable-logs If set, warnings are suppressed during tracking. Otherwise, the Tracker logs warnings as usual. --gb= The brand of GPU to profile. Valid values are nvidia and amd. Defaults to the brand of GPU detected in the system, checking NVIDIA first. --tf= If specified, stores the individual resource usage measurements at each iteration. Valid file formats are CSV (.csv) and SQLite (.sqlite) where the SQLite file format stores the data in a table called "data" and allows for more efficient querying. + --overwrite Whether to overwrite the tracking file if it already existed before the beginning of this tracking session. Do not set if the data in the existing tracking file is still needed. + sub-track Perform sub-tracking related commands. + combine Combines multiple sub-tracking files into one. This is usually a result of sub-tracking a code block that is called in multiple simultaneous processes. 
+ --stf= The path to the sub-tracking file used to specify the timestamps of specific code-blocks. If not generated by the gpu-tracker API, must be either a CSV or SQLite file (where the SQLite file contains a table called "data") where the headers are precisely process_id, code_block_name, position, and timestamp. The process_id is the ID of the process where the code block is called. code_block_name is the name of the code block. position is whether it is the start or the stopping point of the code block where 0 represents start and 1 represents stop. And timestamp is the timestamp where the code block starts or where it stops.
+ -p Paths to the sub-tracking files to combine. Must all be the same file format and the same file format as the resulting sub-tracking file (either .csv or .sqlite). If only one path is provided, it is interpreted as a path to a directory and all the files in this directory are combined.
+ analyze Generate the sub-tracking analysis report using the tracking file and sub-tracking file for resource usage of specific code blocks.
+ compare Compares multiple tracking sessions to determine differences in computational resource usage by loading sub-tracking results given their file paths. Sub-tracking results files must be in pickle format e.g. running the ``sub-track analyze`` command and specifying a file path for ``--output`` and 'pickle' for the ``--format`` option. If code block results are not included in the sub-tracking files (i.e. no code blocks were sub-tracked), then only overall results are compared.
+ --cconfig= JSON config file containing the ``file_path_map`` argument for the ``TrackingComparison`` class and ``statistic`` argument for its ``compare`` method (see API) that can be used instead of the corresponding ``-m =`` and ``--stat=`` commandline options respectively. If additional ``-m =`` options are added on the commandline in addition to a config file, they will be added to the ``file_path_map`` in the config file.
If a ``--stat`` option is provided on the commandline, it will override the ``statistic`` in the config file. + -m = Mapping of tracking session names to the path of the file containing the sub-tracking results of said tracking session. Must be in pickle format. + --stat= The summary statistic of the measurements to compare. One of 'min', 'max', 'mean', or 'std'. Defaults to 'mean'. """ import docopt as doc import subprocess as subp import json import logging as log import sys -from . import Tracker -from . import __version__ +import os +import pickle as pkl +from . import Tracker, SubTrackingAnalyzer, TrackingComparison, __version__ def main(): args = doc.docopt(__doc__, version=__version__) - command = args['--execute'].split() output = args['--output'] output_format = args['--format'] if args['--format'] is not None else 'text' - option_map = { - '--st': 'sleep_time', - '--ru': 'ram_unit', - '--gru': 'gpu_ram_unit', - '--tu': 'time_unit', - '--nec': 'n_expected_cores', - '--guuids': 'gpu_uuids', - '--disable-logs': 'disable_logs', - '--gb': 'gpu_brand', - '--tf': 'tracking_file' - } - kwargs = { - option_map[option]: value for option, value in args.items() if value is not None and option not in { - '--execute', '--output', '--format', '--help', '--version'}} - if 'sleep_time' in kwargs.keys(): - kwargs['sleep_time'] = float(kwargs['sleep_time']) - if 'n_expected_cores' in kwargs.keys(): - kwargs['n_expected_cores'] = int(kwargs['n_expected_cores']) - if 'gpu_uuids' in kwargs.keys(): - kwargs['gpu_uuids'] = set(kwargs['gpu_uuids'].split(',')) - if len(command) == 0: - log.error('Empty command provided.') - sys.exit(1) - try: - process = subp.Popen(command) - except FileNotFoundError: - log.error(f'Command not found: "{command[0]}"') - sys.exit(1) - except Exception as e: - log.error(f'The following error occurred when starting the command "{command[0]}":') - print(e) - sys.exit(1) - with Tracker(process_id=process.pid, **kwargs) as tracker: - process.wait() - 
print(f'Resource tracking complete. Process completed with status code: {process.returncode}') + if args['sub-track']: + if args['analyze']: + tracking_file = args['--tf'] + sub_tracking_file = args['--stf'] + results = SubTrackingAnalyzer(tracking_file, sub_tracking_file).sub_tracking_results() + _process_output(output_format, results, output) + elif args['combine']: + directory = None + files = None + files = args['-p'] + if len(files) == 1: + [directory] = files + files = [os.path.join(directory, file) for file in os.listdir(directory)] + SubTrackingAnalyzer(None, args['--stf']).combine_sub_tracking_files(files) + elif args['compare']: + if args['--cconfig'] is not None: + with open(args['--cconfig'], 'r') as file: + config = json.load(file) + file_path_map = config['file_path_map'] if 'file_path_map' in config else None + statistic = config['statistic'] if 'statistic' in config else None + else: + file_path_map = dict[str, str]() + statistic = None + if not file_path_map and args['-m'] is None: + raise ValueError( + f'A mapping of tracking session name to file path must be provided either through the -m option or a config file.' 
+ ) + else: + file_path_map.update({name: file for [name, file] in [option.split('=') for option in args['-m']]}) + if args['--stat'] is not None: + statistic = args['--stat'] + elif statistic is None: + statistic = 'mean' + comparison = TrackingComparison(file_path_map) + results = comparison.compare(statistic) + _process_output(output_format, results, output) + else: + command = args['--execute'].split() + if args['--tconfig'] is not None: + with open(args['--tconfig'], 'r') as file: + kwargs = json.load(file) + else: + kwargs = dict() + option_map = { + '--st': 'sleep_time', + '--ru': 'ram_unit', + '--gru': 'gpu_ram_unit', + '--tu': 'time_unit', + '--nec': 'n_expected_cores', + '--guuids': 'gpu_uuids', + '--disable-logs': 'disable_logs', + '--gb': 'gpu_brand', + '--tf': 'tracking_file', + '--overwrite': 'overwrite' + } + kwargs.update({ + option_map[option]: value for option, value in args.items() if value is not None and option not in { + '--execute', '--output', '--format', '--help', '--version', 'sub-track', 'analyze', '--stf', 'combine', '-p', 'compare', + '-m', '--tconfig', '--cconfig' + } + }) + if 'sleep_time' in kwargs.keys(): + kwargs['sleep_time'] = float(kwargs['sleep_time']) + if 'n_expected_cores' in kwargs.keys(): + kwargs['n_expected_cores'] = int(kwargs['n_expected_cores']) + if 'gpu_uuids' in kwargs.keys(): + # noinspection PyUnresolvedReferences + kwargs['gpu_uuids'] = set(kwargs['gpu_uuids'].split(',')) + if len(command) == 0: + log.error('Empty command provided.') + sys.exit(1) + try: + process = subp.Popen(command) + except FileNotFoundError: + log.error(f'Command not found: "{command[0]}"') + sys.exit(1) + except Exception as e: + log.error(f'The following error occurred when starting the command "{command[0]}":') + print(e) + sys.exit(1) + with Tracker(process_id=process.pid, **kwargs) as tracker: + process.wait() + print(f'Resource tracking complete. 
Process completed with status code: {process.returncode}') + _process_output(output_format, tracker, output) + + +def _process_output(output_format: str, output_obj, output: str | None): if output_format == 'json': - output_str = json.dumps(tracker.to_json(), indent=1) + output_str = json.dumps(output_obj.to_json(), indent=1) elif output_format == 'text': - output_str = str(tracker) + output_str = str(output_obj) + elif output_format == 'pickle': + output_str = pkl.dumps(output_obj) else: log.error(f'"{output_format}" is not a valid format. Valid values are "json" or "text".') sys.exit(1) if output is None: print(output_str) else: - with open(output, 'w') as file: + mode = 'wb' if output_format == 'pickle' else 'w' + with open(output, mode) as file: file.write(output_str) + + +if __name__ == '__main__': + main() diff --git a/src/gpu_tracker/_helper_classes.py b/src/gpu_tracker/_helper_classes.py index 3739dec..9dc5941 100644 --- a/src/gpu_tracker/_helper_classes.py +++ b/src/gpu_tracker/_helper_classes.py @@ -1,14 +1,17 @@ from __future__ import annotations import abc +import os.path import subprocess as subp import pandas as pd import io -import os import csv import dataclasses as dclass import sqlalchemy as sqlalc import sqlalchemy.orm as sqlorm import enum +import tqdm + +_SUMMARY_STATS = ['min', 'max', 'mean', 'std'] class _GPUQuerier(abc.ABC): @@ -137,77 +140,239 @@ class _TimepointUsage: timestamp: float = 0.0 +@dclass.dataclass +class _StaticData: + ram_unit: str + gpu_ram_unit: str + time_unit: str + ram_system_capacity: float + gpu_ram_system_capacity: float + system_core_count: int + n_expected_cores: int + system_gpu_count: int + n_expected_gpus: int + + @dclass.dataclass class _SubTrackerLog: class CodeBlockPosition(enum.Enum): - START = 'START' - STOP = 'STOP' + START = 0 + STOP = 1 process_id: int code_block_name: str position: CodeBlockPosition timestamp: float -class _Writer(abc.ABC): +class _DataProxy(abc.ABC): + _files_w_data = set[str]() + 
_files_w_static_data = set[str]() + @staticmethod - def create(file: str | None) -> _Writer | None: + def create(file: str | None, overwrite: bool = False) -> _DataProxy | None: if file is not None: if file.endswith('.csv'): - return _CSVWriter(file) + return _CSVDataProxy(file, overwrite) elif file.endswith('.sqlite'): - return _SQLiteWriter(file) + return _SQLiteDataProxy(file, overwrite) else: raise ValueError( f'Invalid file name: "{file}". Valid file extensions are ".csv" and ".sqlite".') else: return None - def __init__(self, file: str): - self._file = file + def __init__(self, file_name: str, overwrite: bool): + self._file_name = file_name + self._overwrite = overwrite + self._extension = '.csv' if self._file_name.endswith('.csv') else '.sqlite' - def write_row(self, values: object): - values = dclass.asdict(values) - if not os.path.isfile(self._file): - self._create_file(values) - self._write_row(values) + def _check_overwrite(self): + if os.path.isfile(self._file_name): + if self._overwrite: + os.remove(self._file_name) + else: + raise FileExistsError(f'File {self._file_name} already exists. 
Set overwrite to True to overwrite the existing file.') + + def write_data(self, data: _TimepointUsage | _SubTrackerLog): + data = dclass.asdict(data) + if self._file_name not in _DataProxy._files_w_data: + if self._file_name not in _DataProxy._files_w_static_data: + self._check_overwrite() + self._create_table(data) + _DataProxy._files_w_data.add(self._file_name) + if not os.path.isfile(self._file_name): + raise FileNotFoundError(f'The file {self._file_name} was removed in the middle of writing data to it.') + self._write_data(data) @abc.abstractmethod - def _write_row(self, values: dict): + def _write_data(self, data: dict): pass # pragma: nocover @abc.abstractmethod - def _create_file(self, values: dict): + def _create_table(self, data: dict): pass # pragma: nocover + def write_static_data(self, data: _StaticData): + self._check_overwrite() + self._write_static_data(data) + _DataProxy._files_w_static_data.add(self._file_name) -class _CSVWriter(_Writer): - def _write_row(self, values: dict): - with open(self._file, 'a', newline='') as f: - writer = csv.DictWriter(f, fieldnames=values.keys()) - writer.writerow(values) + @abc.abstractmethod + def _write_static_data(self, data: _StaticData): + pass # pragma: nocover + + def read_static_data(self) -> pd.Series: + return self._read_static_data().squeeze() - def _create_file(self, values: dict): - with open(self._file, 'w', newline='') as f: - writer = csv.DictWriter(f, fieldnames=values.keys()) - writer.writeheader() + @abc.abstractmethod + def _read_static_data(self) -> pd.DataFrame: + pass # pragma: nocover + def combine_files(self, files: list[str]): + if os.path.exists(self._file_name): + raise ValueError(f'Cannot create sub-tracking file {self._file_name}. File already exists.') + for file in files: + if not file.endswith(self._extension): + raise ValueError(f'File {file} does not end with the same extension as {self._file_name}. 
Must end in "{self._extension}".') + self._combine_files(files) + + @abc.abstractmethod + def _combine_files(self, files: list[str]): + pass # pragma: nocover + + def load_timestamp_pairs(self, code_block_name: str) -> list[tuple[float, float]]: + timestamps = self._load_timestamps(code_block_name) + indexes_to_drop = list[int]() + for process_id in timestamps.process_id.unique(): + process_timestamps = timestamps.loc[timestamps.process_id == process_id] + if process_timestamps.position.iloc[-1] == _SubTrackerLog.CodeBlockPosition.START.value: + indexes_to_drop.append(process_timestamps.index[-1]) + timestamps = timestamps.drop(indexes_to_drop) + timestamp_pairs = list[tuple[float, float]]() + for i in range(0, len(timestamps), 2): + timestamp1, timestamp2 = timestamps.iloc[i], timestamps.iloc[i + 1] + start_time, stop_time = float(timestamp1.timestamp), float(timestamp2.timestamp) + pid1, pid2 = int(timestamp1.process_id), int(timestamp2.process_id) + error_prefix = f'Sub-tracking file is invalid. 
Detected timestamp pair ({start_time}, {stop_time})' + if pid1 != pid2: + raise ValueError(f'{error_prefix} with differing process IDs: {pid1} and {pid2}.') + if start_time > stop_time: + raise ValueError(f'{error_prefix} of process ID {pid1} with a start time greater than the stop time.') + timestamp_pairs.append((start_time, stop_time)) + return timestamp_pairs + + @abc.abstractmethod + def _load_timestamps(self, code_block_name: str) -> pd.DataFrame: + pass # pragma: nocover + + @abc.abstractmethod + def load_timepoints(self, timestamp_pairs: list[tuple[float, float]] | None) -> pd.DataFrame: + pass # pragma: nocover -class _SQLiteWriter(_Writer): + @abc.abstractmethod + def load_code_block_names(self) -> list[str]: + pass # pragma: nocover + + def overall_timepoint_results(self) -> pd.DataFrame: + fields = list(_TimepointUsage.__dataclass_fields__.keys()) + fields.remove('timestamp') + return self._overall_timepoint_results(fields) + + @abc.abstractmethod + def _overall_timepoint_results(self, fields: list[str]) -> pd.DataFrame: + pass # pragma: nocover + + +class _CSVDataProxy(_DataProxy): + def __init__(self, file_name: str, overwrite: bool): + super().__init__(file_name, overwrite) + self._timestamps = None + self._timepoints = None + + @property + def timestamps(self): + if self._timestamps is None: + self._timestamps = pd.read_csv(self._file_name) + return self._timestamps + + @property + def timepoints(self): + if self._timepoints is None: + self._timepoints = pd.read_csv(self._file_name, skiprows=2) + return self._timepoints + + def _write_static_data(self, data: _StaticData): + if self._file_name in _DataProxy._files_w_data: + raise RuntimeError('The static data for a CSV file must be created before the dynamic data.') + static_data = dclass.asdict(data) + self._create_table(static_data) + self._write_data(static_data) + + def _write_data(self, data: dict): + self._with_writer(data, lambda writer: writer.writerow(data)) + + def _create_table(self, data: 
dict): + self._with_writer(data, lambda writer: writer.writeheader()) + + def _with_writer(self, data: dict, func): + with open(self._file_name, 'a', newline='') as f: + writer = csv.DictWriter(f, fieldnames=data.keys()) + func(writer) + + def _read_static_data(self) -> pd.DataFrame: + return pd.read_csv(self._file_name, header=0, nrows=1) + + def _combine_files(self, files: list[str]): + data = pd.DataFrame() + for file in files: + data = pd.concat([data, pd.read_csv(file)], axis='rows') + data.to_csv(self._file_name, index=False) + + def _load_timestamps(self, code_block_name: str) -> pd.DataFrame: + timestamps = self.timestamps.loc[ + self.timestamps.code_block_name == code_block_name, ['process_id', 'position', 'timestamp'] + ] + return timestamps.sort_values(by=['process_id', 'timestamp', 'position']) + + def load_timepoints(self, timestamp_pairs: list[tuple[float, float]]) -> pd.DataFrame: + selected = None + for start_time, stop_time in timestamp_pairs: + between = (self.timepoints.timestamp >= start_time) & (self.timepoints.timestamp <= stop_time) + if selected is None: + selected = between + else: + selected |= between + return self.timepoints[selected] + + def load_code_block_names(self) -> list[str]: + return sorted(self.timestamps.code_block_name.unique()) + + def _overall_timepoint_results(self, fields: list[str]) -> pd.DataFrame: + return self.timepoints[fields].describe().loc[_SUMMARY_STATS].T + + +class _SQLiteDataProxy(_DataProxy): _DATA_TABLE = 'data' _STATIC_DATA_TABLE = 'static_data' - def _write_row(self, values: dict): - engine = sqlalc.create_engine(f'sqlite:///{self._file}', poolclass=sqlalc.pool.NullPool) + def _write_data(self, data: dict): + self.__write_data(data, _SQLiteDataProxy._DATA_TABLE) + + def __write_data(self, data: dict, table: str): + engine = self._create_engine() metadata = sqlalc.MetaData() - tracking_table = sqlalc.Table(_SQLiteWriter._DATA_TABLE, metadata, autoload_with=engine) + tracking_table = sqlalc.Table(table, 
metadata, autoload_with=engine) Session = sqlorm.sessionmaker(bind=engine) with Session() as session: - insert_stmt = sqlalc.insert(tracking_table).values(**values) + insert_stmt = sqlalc.insert(tracking_table).values(**data) session.execute(insert_stmt) session.commit() - def _create_file(self, values: dict): - engine = sqlalc.create_engine(f'sqlite:///{self._file}', poolclass=sqlalc.pool.NullPool) + def _create_table(self, data: dict): + self.__create_table(data, _SQLiteDataProxy._DATA_TABLE) + + def __create_table(self, data: dict, table: str): + engine = self._create_engine() metadata = sqlalc.MetaData() type_mapping = { str: sqlalc.String, @@ -215,9 +380,86 @@ def _create_file(self, values: dict): float: sqlalc.Float, } columns = list[sqlalc.Column]() - schema = {name: type(value) for name, value in values.items()} + schema = {name: type(value) for name, value in data.items()} for column_name, data_type in schema.items(): sqlalchemy_type = type_mapping[data_type] columns.append(sqlalc.Column(column_name, sqlalchemy_type)) - sqlalc.Table(_SQLiteWriter._DATA_TABLE, metadata, *columns) + sqlalc.Table(table, metadata, *columns) metadata.create_all(engine) + + def _create_engine(self) -> sqlalc.Engine: + return sqlalc.create_engine(f'sqlite:///{self._file_name}', poolclass=sqlalc.pool.NullPool) + + def _read_sql(self, sql: str) -> pd.DataFrame: + engine = self._create_engine() + return pd.read_sql(sqlalc.text(sql), engine) + + def _write_static_data(self, data: _StaticData): + static_data = dclass.asdict(data) + self.__create_table(static_data, _SQLiteDataProxy._STATIC_DATA_TABLE) + self.__write_data(static_data, _SQLiteDataProxy._STATIC_DATA_TABLE) + + def _read_static_data(self) -> pd.DataFrame: + engine = self._create_engine() + return pd.read_sql_table(_SQLiteDataProxy._STATIC_DATA_TABLE, engine) + + def _combine_files(self, files: list[str]): + engine = self._create_engine() + with engine.connect() as con: + table_created = False + for in_file in 
tqdm.tqdm(files): + con.execute(sqlalc.text(f"ATTACH DATABASE '{in_file}' AS input_db")) + if not table_created: + con.execute( + sqlalc.text( + f'CREATE TABLE {_SQLiteDataProxy._DATA_TABLE} AS SELECT * FROM input_db.{_SQLiteDataProxy._DATA_TABLE}' + ) + ) + table_created = True + else: + con.execute( + sqlalc.text( + f'INSERT INTO {_SQLiteDataProxy._DATA_TABLE} SELECT * FROM input_db.{_SQLiteDataProxy._DATA_TABLE}' + ) + ) + con.commit() + con.execute(sqlalc.text('DETACH DATABASE input_db')) + con.commit() + + def _load_timestamps(self, code_block_name: str) -> pd.DataFrame: + sql = f""" + SELECT process_id,position,timestamp FROM {_SQLiteDataProxy._DATA_TABLE} + WHERE code_block_name='{code_block_name}' + ORDER BY process_id,timestamp,position; + """ + return self._read_sql(sql) + + def load_timepoints(self, timestamp_pairs: list[tuple[float, float]]) -> pd.DataFrame: + conditions = [f'timestamp BETWEEN {start_time} AND {stop_time}' for (start_time, stop_time) in timestamp_pairs] + where_clause = ' OR\n'.join(conditions) + sql = f'SELECT * FROM {_SQLiteDataProxy._DATA_TABLE} WHERE {where_clause}' + return self._read_sql(sql) + + def _overall_timepoint_results(self, fields: list[str]) -> pd.DataFrame: + sql = 'SELECT\n' + std_func = 'sqrt((sum({0} * {0}) - (sum({0}) * sum({0})) / count({0})) / count({0})) AS "STDDEV({0})"' + sql_funcs = 'MIN', 'MAX', 'AVG', 'STDDEV' + field_aggregates = list[str]() + for func in sql_funcs: + for field in fields: + aggregate = f'{func}({field})' if func != 'STDDEV' else std_func.format(field) + field_aggregates.append(aggregate) + sql += ',\n'.join(field_aggregates) + sql += f'\nFROM {_SQLiteDataProxy._DATA_TABLE}' + results = self._read_sql(sql).squeeze() + reshaped_results = pd.DataFrame() + n_fields = len(fields) + for i, sql_func, index in zip(range(0, len(results), n_fields), sql_funcs, _SUMMARY_STATS): + next_row = results.iloc[i: i + n_fields] + next_row.index = [col.replace(sql_func, '').replace('(', '').replace(')', '') 
for col in next_row.index] + reshaped_results.loc[:, index] = next_row + return reshaped_results + + def load_code_block_names(self) -> list[str]: + sql = f'SELECT DISTINCT code_block_name FROM {_SQLiteDataProxy._DATA_TABLE}' + return sorted(self._read_sql(sql).code_block_name) diff --git a/src/gpu_tracker/sub_tracker.py b/src/gpu_tracker/sub_tracker.py index 484e2b4..9140dee 100644 --- a/src/gpu_tracker/sub_tracker.py +++ b/src/gpu_tracker/sub_tracker.py @@ -1,9 +1,15 @@ """The ``sub_tracker`` module contains the ``SubTracker`` class which can alternatively be imported directly from the ``gpu_tracker`` package.""" +from __future__ import annotations import inspect import os import time import functools -from ._helper_classes import _Writer, _SubTrackerLog +import pandas as pd +import dataclasses as dclass +import pickle as pkl +import logging as log +import typing as typ +from ._helper_classes import _DataProxy, _SubTrackerLog, _SUMMARY_STATS class SubTracker: @@ -16,31 +22,34 @@ class SubTracker: :ivar str code_block_name: The name of the code block being sub-tracked. :ivar str sub_tracking_file: The path to the file where the sub-tracking info is logged. """ + def __init__( - self, code_block_name: str | None = None, code_block_attribute: str | None = None, sub_tracking_file: str | None = None): + self, code_block_name: str | None = None, code_block_attribute: str | None = None, sub_tracking_file: str | None = None, + overwrite: bool = False): """ :param code_block_name: The name of the code block within a ``Tracker`` context that is being sub-tracked. Defaults to the file path where the ``SubTracker`` context is started followed by a colon followed by the ``code_block_attribute``. :param code_block_attribute: Only used if ``code_block_name`` is ``None``. Defaults to the line number where the ``SubTracker`` context is started. - :param sub_tracking_file: The path to the file to log the time stamps of the code block being sub-tracked. 
To avoid file lock errors when a sub-tracking file is created in multiple different processes (i.e. multiple processes attempting to access the same file at the same time), the sub-tracking file of each process must have a unique name. For example, the ID of the process where the SubTracker context is created. Defaults to this process ID as the file name and in CSV format.
+        :param sub_tracking_file: The path to the file to log the time stamps of the code block being sub-tracked. To avoid file lock errors when a sub-tracking file is created in multiple different processes (i.e. multiple processes attempting to access the same file at the same time), the sub-tracking file of each process must have a unique name. For example, the ID of the process where the SubTracker context is created. Defaults to this process ID as the file name and in CSV format. These files can be combined into one using the ``SubTrackingAnalyzer.combine_sub_tracking_files`` method.
+        :param overwrite: Whether to overwrite the ``sub_tracking_file`` if it already existed before the beginning of this tracking session.
""" if code_block_name is not None: self.code_block_name = code_block_name else: stack = inspect.stack() caller_frame = stack[1] - file_path = os.path.abspath(caller_frame.filename) + file_path = os.path.relpath(caller_frame.filename) code_block_attribute = caller_frame.lineno if code_block_attribute is None else code_block_attribute self.code_block_name = f'{file_path}:{code_block_attribute}' self.process_id = os.getpid() if sub_tracking_file is None: sub_tracking_file = f'{self.process_id}.csv' self.sub_tracking_file = sub_tracking_file - self._sub_tracking_file = _Writer.create(self.sub_tracking_file) + self._data_proxy = _DataProxy.create(self.sub_tracking_file, overwrite) def _log(self, code_block_position: _SubTrackerLog.CodeBlockPosition): sub_tracker_log = _SubTrackerLog( process_id=self.process_id, code_block_name=self.code_block_name, position=code_block_position.value, timestamp=time.time()) - self._sub_tracking_file.write_row(sub_tracker_log) + self._data_proxy.write_data(sub_tracker_log) def __enter__(self): self._log(_SubTrackerLog.CodeBlockPosition.START) @@ -50,16 +59,20 @@ def __exit__(self, *_): self._log(_SubTrackerLog.CodeBlockPosition.STOP) -def sub_track(code_block_name: str | None = None, code_block_attribute: str | None = None, sub_tracking_file: str | None = None): +def sub_track( + code_block_name: str | None = None, code_block_attribute: str | None = None, sub_tracking_file: str | None = None, + overwrite: bool = False): """ Decorator for sub tracking calls to a specified function. Creates a ``SubTracker`` context that wraps the function call. :param code_block_name: The ``code_block_name`` argument passed to the ``SubTracker``. Defaults to the file path where the decorated function is defined followed by a colon followed by the ``code_block_attribute``. :param code_block_attribute: The ``code_block_attribute`` argument passed to the ``SubTracker``. Defaults to the name of the decorated function. 
- :param sub_tracking_file: the ``sub_tracking_file`` argument passed to the ``SubTracker``. Same default as the ``SubTracker`` constructor. + :param sub_tracking_file: the ``sub_tracking_file`` argument passed to the ``SubTracker``. Same default as the ``SubTracker`` constructor. If using the decorated function in multiprocessing, if you'd like to name it based on the ID of a child process for uniqueness, you may need to set the start method to "spawn" like so ``multiprocessing.set_start_method('spawn')``. + :param overwrite: The ``overwrite`` argument passed to the ``SubTracker``. """ + def decorator(func): - nonlocal code_block_name, code_block_attribute, sub_tracking_file + nonlocal code_block_name, code_block_attribute, sub_tracking_file, overwrite if code_block_name is None: stack = inspect.stack() caller_frame = stack[1] @@ -69,11 +82,382 @@ def decorator(func): @functools.wraps(func) def wrapper(*args, **kwargs): - nonlocal sub_tracking_file + nonlocal sub_tracking_file, overwrite with SubTracker( - code_block_name=code_block_name, code_block_attribute=code_block_attribute, sub_tracking_file=sub_tracking_file - ): + code_block_name=code_block_name, code_block_attribute=code_block_attribute, sub_tracking_file=sub_tracking_file, + overwrite=overwrite): return_value = func(*args, **kwargs) return return_value return wrapper return decorator + + +class SubTrackingAnalyzer: + """ + Analyzes the per-code block tracking data using a tracking file and sub tracking file in order to produce summary statistics of resource usage for each individual code block. + """ + + def __init__(self, tracking_file: str | None, sub_tracking_file: str): + """ + :param tracking_file: Path to the file containing the resource usage at each timepoint collected by a ``Tracker`` object. + :param sub_tracking_file: Path to the file containing the start/stop timestamps of each call to a code block collected by a ``SubTracker`` object. 
+ """ + self._tracking_proxy = _DataProxy.create(tracking_file) + self._sub_tracking_proxy = _DataProxy.create(sub_tracking_file) + + def read_static_data(self) -> pd.Series: + """ + Reads the static data from the tracking file, including the resource units of measurement and system capacities. + + :return: The static data. + """ + return self._tracking_proxy.read_static_data() + + def load_code_block_names(self) -> list[str]: + """ + Loads the list of the names of the code blocks that were sub-tracked. + + :return: The code block names. + """ + return self._sub_tracking_proxy.load_code_block_names() + + def combine_sub_tracking_files(self, files: list[str]): + """ + Combines multiple sub-tracking files, perhaps that came from multiple processes running simultaneously, into a single sub-tracking file. + + :param files: The list of sub-tracking files to combine. All must end in the same file extension i.e. either ".csv" or ".sqlite". + """ + self._sub_tracking_proxy.combine_files(files) + + def load_timestamp_pairs(self, code_block_name: str) -> list[tuple[float, float]]: + """ + Loads the pairs of start and stop timestamps for each call to a code block that was sub-tracked. + + :param code_block_name: The name of the code block to get timestamp pairs for. + :return: List of timestamp pairs. + """ + return self._sub_tracking_proxy.load_timestamp_pairs(code_block_name) + + def load_timepoints(self, timestamp_pairs: list[tuple[float, float]]) -> pd.DataFrame: + """ + Loads the resource usage measurements at each timepoint tracked within the timestamp pairs of a given code block. + + :param timestamp_pairs: The list of start and stop timestamp pairs of the code block. + :return: The timepoint measurements. 
+ """ + return self._tracking_proxy.load_timepoints(timestamp_pairs) + + def overall_timepoint_results(self) -> pd.DataFrame: + """ + Computes summary statistics for resource measurements across all tracked timepoints as compared to an individual sub-tracked code block. + + :return: Summary statistics across all timepoints. + """ + return self._tracking_proxy.overall_timepoint_results() + + def sub_tracking_results(self) -> SubTrackingResults: + """ + Generates a detailed report including summary statistics for the overall resource usage across all timepoints as well as that of each code block that was sub-tracked. + + :return: A data object containing the overall summary statistics, summary statistics for each code block, the static data, etc. + """ + overall_results = self.overall_timepoint_results() + code_block_names = self.load_code_block_names() + static_data = self.read_static_data() + code_block_results = list[CodeBlockResults]() + for code_block_name in code_block_names: + time_stamp_pairs = self.load_timestamp_pairs(code_block_name) + time_stamp_diffs = pd.Series([stop_time - start_time for (start_time, stop_time) in time_stamp_pairs]) + compute_time_results = time_stamp_diffs.describe()[_SUMMARY_STATS] + compute_time_results['total'] = time_stamp_diffs.sum().item() + timepoints = self.load_timepoints(time_stamp_pairs) + num_non_empty_calls = sum( + [ + any( + (timepoints.timestamp >= start_time) & (timepoints.timestamp <= stop_time) + ) for start_time, stop_time in time_stamp_pairs + ] + ) + timepoints = timepoints.drop(columns='timestamp') + code_block_results.append( + CodeBlockResults( + name=code_block_name, num_timepoints=len(timepoints), num_calls=len(time_stamp_pairs), + num_non_empty_calls=num_non_empty_calls, compute_time=compute_time_results, + resource_usage=timepoints.describe().loc[_SUMMARY_STATS].T + ) + ) + return SubTrackingResults(overall_results, static_data, code_block_results) + + +class TrackingComparison: + """ + Compares multiple 
tracking sessions to determine differences in computational resource usage by loading sub-tracking results given their file paths. + Sub-tracking results files must be in pickle format e.g. calling the ``SubTrackingAnalyzer.compare`` method and storing the returned ``SubTrackingResults`` in a pickle file. + If code block results are not included in the sub-tracking files (i.e. no code blocks were sub-tracked), then only overall results are compared. + Code blocks are compared by their name. If their name only differentiates by line number (i.e. their name is of the form ), then it's assumed that the same order of the code blocks is used even if the line numbers are different. + This is useful to determine how resource usage changes based on differences in implementation, input data, etc. + + :ivar dict[str, SubTrackingResults] results_map: Mapping of the name of each tracking session to the ``SubTrackingResults`` of the corresponding tracking sessions. Can be used for a user-defined custom comparison. + """ + def __init__(self, file_path_map: dict[str, str]): + """ + :param file_path_map: Mapping of the name of each tracking session to the path of the pickle file containing the ``SubTrackingResults`` of the corresponding tracking sessions. Used to construct the ``results_map`` attribute. + :raises ValueError: Raised if the code block results of each tracking session don't match. 
+ """ + for name in file_path_map.keys(): + self._name1 = name + break + self.results_map = dict[str, SubTrackingResults]() + for name, file in file_path_map.items(): + with open(file, 'rb') as file: + self.results_map[name] = pkl.load(file) + results1 = self.results_map[self._name1] + code_block_results1 = TrackingComparison._sort_code_block_results(results1) + results1.code_block_results = code_block_results1 + for name2, result in self.results_map.items(): + if name2 == self._name1: + continue + results = self.results_map[name2] + code_block_results2 = TrackingComparison._sort_code_block_results(results) + if len(code_block_results1) != len(code_block_results2): + raise ValueError( + f'All sub-tracking results must have the same number of code blocks. First has {len(code_block_results1)}' + f' code blocks but tracking session "{name2}" has {len(code_block_results2)} code blocks.' + ) + for code_block_results1_, code_block_results2_ in zip(code_block_results1, code_block_results2): + code_block_name1, code_block_name2 = code_block_results1_.name, code_block_results2_.name + if code_block_name1 != code_block_name2: + line_num1 = TrackingComparison._get_line_num(code_block_name1) + line_num2 = TrackingComparison._get_line_num(code_block_name2) + if line_num1 is not None and line_num2 is not None: + if code_block_name1.split(':')[:-1] == code_block_name2.split(':')[:-1]: + log.warning( + f'Code block name "{code_block_name1}" of tracking session "{self._name1}" matched with code' + f' block name "{code_block_name2}" of tracking session "{name2}" but they differ by ' + f'line number. If these code blocks were not meant to match, their comparison will not' + f' be valid and their names must be disambiguated.' 
+ ) + match = True + else: + match = False + else: + match = False + else: + match = True + if not match: + raise ValueError( + f'Code block name "{code_block_name1}" of tracking session "{self._name1}" does not match code' + f' block name "{code_block_name2}" of tracking session "{name2}"' + ) + results.code_block_results = code_block_results2 + + def compare(self, statistic: str = 'mean') -> ComparisonResults: + """ + Performs the comparison between tracking sessions, comparing both the code block results and the overall results. + :param statistic: The summary statistic of the measurements to compare. One of 'min', 'max', 'mean', or 'std'. + :return: The results of the comparison including the overall resource usage, the resource usage of the code blocks, and the compute time of the code blocks for each tracking session. + """ + if statistic not in _SUMMARY_STATS: + raise ValueError( + f"Invalid summary statistic '{statistic}'. Valid values are {' '.join(_SUMMARY_STATS)}." + ) + results1 = self.results_map[self._name1] + overall_resource_usages = dict[str, pd.Series]() + code_block_resource_usages = dict[str, dict[str, pd.Series]]() + for measurement in results1.overall.index: + overall_comparison = {name: results.overall[statistic][measurement].item() for name, results in self.results_map.items()} + overall_resource_usages[measurement] = pd.Series(overall_comparison).sort_values(ascending=True) + if results1.code_block_results: + code_block_resource_usages[measurement] = TrackingComparison._get_code_block_comparisons( + self.results_map, lambda code_block_result: code_block_result.resource_usage[statistic][measurement].item() + ) + code_block_compute_times = TrackingComparison._get_code_block_comparisons( + self.results_map, lambda code_block_result: code_block_result.compute_time[statistic].item() + ) if results1.code_block_results else dict() + return ComparisonResults( + overall_resource_usage=overall_resource_usages, 
code_block_resource_usage=code_block_resource_usages, + code_block_compute_time=code_block_compute_times + ) + + @staticmethod + def _sort_code_block_results(results: SubTrackingResults) -> list[CodeBlockResults]: + max_line_num_len = 0 + for code_block_results in results.code_block_results: + line_num = TrackingComparison._get_line_num(code_block_results.name) + if line_num is not None: + max_line_num_len = max(max_line_num_len, len(line_num)) + return sorted( + results.code_block_results, key=lambda r: TrackingComparison._sort_code_block_name(r.name, max_line_num_len) + ) + + @staticmethod + def _sort_code_block_name(name: str, max_line_num_len: int) -> str: + line_num = TrackingComparison._get_line_num(name) + if line_num is not None: + line_num = line_num.zfill(max_line_num_len) + name = ':'.join(name.split(':')[:-1] + [line_num]) + return name + + @staticmethod + def _get_line_num(code_block_name: str) -> str | None: + if ':' in code_block_name: + line_num = code_block_name.split(':')[-1] + try: + int(line_num) + return line_num + except ValueError: + return None + return None + + @staticmethod + def _get_code_block_comparisons(name_to_results: dict[str, SubTrackingResults], get_statistic: typ.Callable) -> dict[str, pd.Series]: + code_block_comparisons = dict[str, pd.Series]() + for matching_code_block_results in zip( + *[ + [ + (name, code_block_results) for code_block_results in results.code_block_results + ] for name, results in name_to_results.items() + ] + ): + code_block_name = f'{" -> ".join({code_block_results.name for _, code_block_results in matching_code_block_results})}' + code_block_comparison = { + name: get_statistic(code_block_results) for name, code_block_results in matching_code_block_results + } + code_block_comparison = pd.Series(code_block_comparison).sort_values(ascending=True) + code_block_comparisons[code_block_name] = code_block_comparison + return code_block_comparisons + + +@dclass.dataclass +class CodeBlockResults: + """ + Results 
of a particular code block that was sub-tracked. + + :param name: The name of the code block. + :param num_timepoints: The number of timepoints tracked across all calls to the code block. + :param num_calls: The number times the code block was called / executed. + :param num_non_empty_calls: The number code block calls with at least one timepoint tracked within the start / stop time. + :param compute_time: Compute time measurements for the code block including the total time spent running this code block, the average time between the start / stop time, etc. + :param resource_usage: Summary statistics for the resource usage during the times the code block was called i.e. in between all its start / stop times + """ + name: str + num_timepoints: int + num_calls: int + num_non_empty_calls: int + compute_time: pd.Series + resource_usage: pd.DataFrame + + +@dclass.dataclass +class SubTrackingResults: + """ + Comprehensive results for a tracking session including resource usage measurements for individual code blocks. + + :param overall: The overall summary statistics across all timepoints tracked. + :param static_data: The static data measured during a tracking session. + :param code_block_results: Results for individual code blocks including summary statistics for the timepoints within each code block. + """ + overall: pd.DataFrame + static_data: pd.Series + code_block_results: list[CodeBlockResults] + + def to_json(self) -> dict: + """ + Converts the sub-tracking results into JSON format. + + :return: The JSON version of the sub-tracking results. 
+ """ + results = dclass.asdict(self) + results['overall'] = _dataframe_to_json(results['overall']) + results['static_data'] = results['static_data'].to_dict() + for code_block_result in results['code_block_results']: + code_block_result['compute_time'] = code_block_result['compute_time'].to_dict() + code_block_result['resource_usage'] = _dataframe_to_json(code_block_result['resource_usage']) + return results + + def __str__(self) -> str: + """ + Converts the sub-tracking results to text format. + + :return: The string representation of the sub-tracking results. + """ + results = dclass.asdict(self) + return _dict_to_str('', results, 0) + + +def _dataframe_to_json(df: pd.DataFrame) -> dict: + result = dict() + for index, row in df.iterrows(): + result[index] = row.to_dict() + return result + + +def _dict_to_str(string: str, results: dict, indent: int, no_title_keys: set[str] | None = None) -> str: + indent_str = '\t' * indent + results = { + ( + f'{indent_str}{key.replace("_", " ").title() if no_title_keys is None or key not in no_title_keys else key}' + ): value for key, value in results.items() + } + max_key_len = max(len(key) for key in results.keys()) + for key, value in results.items(): + if type(value) is pd.Series: + value = value.to_frame().T + value = value.rename({value.index[0]: ''}) + if type(value) is dict: + string += f'{key}:\n' + string = _dict_to_str(string, value, indent + 1, no_title_keys) + elif type(value) is pd.DataFrame: + string += f'{key}:\n' + with pd.option_context('display.max_rows', None, 'display.max_columns', None, 'display.width', 5000): + df_str = str(value) + df_str = '\n'.join(indent_str + '\t' + line for line in df_str.splitlines()) + string += df_str + '\n' + elif type(value) is list: + string += f'{key}:\n' + for value in value: + string = _dict_to_str(string, value, indent + 1, no_title_keys) + '\n' + else: + value = f'{value:.4f}' if type(value) is float else value + string += f'{key}:{" " * (max_key_len - len(key))} 
{value}\n' + return string + + +@dclass.dataclass +class ComparisonResults: + """ + Contains the comparison of the measurements of multiple tracking sessions provided by the ``TrackingComparison`` class's ``compare`` method. + + :param overall_resource_usage: For each measurement, compares the resource usage across tracking sessions. + :param code_block_resource_usage: For each measurement and for each code block, compares the resource usage of the code block across tracking sessions. + :param code_block_compute_time: For each code block, compares the compute time of the code block across tracking sessions. + """ + overall_resource_usage: dict[str, pd.Series] + code_block_resource_usage: dict[str, dict[str, pd.Series]] + code_block_compute_time: dict[str, pd.Series] + + def to_json(self) -> dict: + """ + Converts the tracking comparison results into JSON format. + + :return: The JSON version of the comparison results. + """ + results = dclass.asdict(self) + results['overall_resource_usage'] = ComparisonResults._comparisons_to_dict(results['overall_resource_usage']) + for measurement, comparisons in results['code_block_resource_usage'].items(): + results['code_block_resource_usage'][measurement] = ComparisonResults._comparisons_to_dict(comparisons) + results['code_block_compute_time'] = ComparisonResults._comparisons_to_dict(results['code_block_compute_time']) + return results + + @staticmethod + def _comparisons_to_dict(comparisons: dict[str, pd.Series]) -> dict: + return {name: comparison.to_dict() for name, comparison in comparisons.items()} + + def __str__(self) -> str: + """ + Converts the tracking comparison results to text format. + + :return: The string representation of the comparison results. 
+ """ + results = dclass.asdict(self) + return _dict_to_str('', results, 0, no_title_keys=set(name for name in self.code_block_compute_time.keys())) diff --git a/src/gpu_tracker/tracker.py b/src/gpu_tracker/tracker.py index 3df55ac..066c120 100644 --- a/src/gpu_tracker/tracker.py +++ b/src/gpu_tracker/tracker.py @@ -13,7 +13,7 @@ import pickle as pkl import uuid import pandas as pd -from ._helper_classes import _NvidiaQuerier, _AMDQuerier, _Writer, _TimepointUsage +from ._helper_classes import _NvidiaQuerier, _AMDQuerier, _DataProxy, _TimepointUsage, _StaticData class _TrackingProcess(mproc.Process): @@ -42,7 +42,8 @@ class _TrackingProcess(mproc.Process): def __init__( self, stop_event: mproc.Event, sleep_time: float, ram_unit: str, gpu_ram_unit: str, time_unit: str, n_expected_cores: int | None, gpu_uuids: set[str] | None, disable_logs: bool, main_process_id: int, - resource_usage_file: str, extraneous_process_ids: set[int], gpu_brand: str | None, tracking_file: str | None): + resource_usage_file: str, extraneous_process_ids: set[int], gpu_brand: str | None, tracking_file: str | None, + overwrite: bool): super().__init__() self._stop_event = stop_event if sleep_time < _TrackingProcess._CPU_PERCENT_INTERVAL: @@ -64,7 +65,6 @@ def __init__( self._is_linux = platform.system().lower() == 'linux' cannot_connect_warning = ('The {} command is installed but cannot connect to a GPU. 
' 'The GPU RAM and GPU utilization values will remain 0.0.') - self.tracking_file = _Writer.create(tracking_file) if gpu_brand is None: nvidia_available = _NvidiaQuerier.is_available() nvidia_installed = nvidia_available is not None @@ -119,6 +119,13 @@ def __init__( self._resource_usage = ResourceUsage( max_ram=max_ram, max_gpu_ram=max_gpu_ram, cpu_utilization=cpu_utilization, gpu_utilization=gpu_utilization, compute_time=compute_time) + self.data_proxy = _DataProxy.create(tracking_file, overwrite) + if self.data_proxy is not None: + static_data = _StaticData( + ram_unit, gpu_ram_unit, time_unit, max_ram.system_capacity, max_gpu_ram.system_capacity, system_core_count, + cpu_utilization.n_expected_cores, gpu_utilization.system_gpu_count, gpu_utilization.n_expected_gpus + ) + self.data_proxy.write_static_data(static_data) self._resource_usage_file = resource_usage_file self._extraneous_process_ids = extraneous_process_ids @@ -229,8 +236,8 @@ def run(self): timepoint_usage.timestamp = time.time() self._resource_usage.compute_time.time = (timepoint_usage.timestamp - start_time) * self._time_coefficient self._tracking_iteration += 1 - if self.tracking_file: - self.tracking_file.write_row(timepoint_usage) + if self.data_proxy: + self.data_proxy.write_data(timepoint_usage) time.sleep(self._sleep_time - _TrackingProcess._CPU_PERCENT_INTERVAL) except psutil.NoSuchProcess as error: self._log_warning(f'Failed to track a process (PID: {error.pid}) that does not exist. ' @@ -319,7 +326,7 @@ def _log_warning(self, warning: str): class Tracker: """ Runs a sub-process that tracks computational resources of the calling process. Including the compute time, maximum CPU utilization, mean CPU utilization, maximum RAM, and maximum GPU RAM used within a context manager or explicit calls to ``start()`` and ``stop()`` methods. - Calculated quantities are scaled depending on the units chosen for them (e.g. megabytes vs. gigabytes, hours vs. days, etc.). 
+ Calculated quantities are scaled, depending on the units chosen for them (e.g. megabytes vs. gigabytes, hours vs. days, etc.). :ivar ResourceUsage resource_usage: Data class containing the computational resource usage data collected by the tracking process. """ @@ -335,7 +342,7 @@ def __init__( self, sleep_time: float = 1.0, ram_unit: str = 'gigabytes', gpu_ram_unit: str = 'gigabytes', time_unit: str = 'hours', n_expected_cores: int = None, gpu_uuids: set[str] = None, disable_logs: bool = False, process_id: int = None, resource_usage_file: str | None = None, n_join_attempts: int = 5, join_timeout: float = 10.0, - gpu_brand: str | None = None, tracking_file: str | None = None): + gpu_brand: str | None = None, tracking_file: str | None = None, overwrite: bool = False): """ :param sleep_time: The number of seconds to sleep in between usage-collection iterations. :param ram_unit: One of 'bytes', 'kilobytes', 'megabytes', 'gigabytes', or 'terabytes'. @@ -350,6 +357,7 @@ def __init__( :param join_timeout: The amount of time the tracker waits for its underlying sub-process to join. :param gpu_brand: The brand of GPU to profile. Valid values are "nvidia" and "amd". Defaults to the brand of GPU detected in the system, checking Nvidia first. :param tracking_file: If specified, stores the individual resource usage measurements at each iteration. Valid file formats are CSV (.csv) and SQLite (.sqlite) where the SQLite file format stores the data in a table called "data" and allows for more efficient querying. + :param overwrite: Whether to overwrite the ``tracking_file`` if it already existed before the beginning of this tracking session. :raises ValueError: Raised if invalid arguments are provided. 
""" current_process_id = os.getpid() @@ -364,7 +372,7 @@ def __init__( self._tracking_process = _TrackingProcess( self._stop_event, sleep_time, ram_unit, gpu_ram_unit, time_unit, n_expected_cores, gpu_uuids, disable_logs, process_id if process_id is not None else current_process_id, self._resource_usage_file, extraneous_ids, gpu_brand, - tracking_file) + tracking_file, overwrite) self.resource_usage = None self.n_join_attempts = n_join_attempts self.join_timeout = join_timeout From bc1f42f525ef3d9b31fc503bdd385c9b4545821e Mon Sep 17 00:00:00 2001 From: erikhuck Date: Mon, 5 May 2025 21:22:31 -0400 Subject: [PATCH 2/7] Updates the documentation accordingly --- README.rst | 2 +- docs/conf.py | 4 +- docs/notebook/combined-file.csv | 25 + docs/notebook/config.json | 6 + docs/notebook/correct_nbconvert.py | 1 + docs/notebook/my-results.pkl | Bin 0 -> 5283 bytes docs/notebook/results.pkl | Bin 0 -> 5283 bytes docs/notebook/results2.pkl | Bin 0 -> 5283 bytes docs/notebook/sub-tracking.csv | 13 + docs/notebook/sub-tracking1.csv | 13 + docs/notebook/sub-tracking2.csv | 13 + docs/notebook/tracking.csv | 28 + docs/notebook/tutorial.ipynb | 2486 +++++++++++++++++++++++++--- docs/tutorial.rst | 2061 ++++++++++++++++++++--- 14 files changed, 4271 insertions(+), 381 deletions(-) create mode 100644 docs/notebook/combined-file.csv create mode 100644 docs/notebook/config.json create mode 100644 docs/notebook/my-results.pkl create mode 100644 docs/notebook/results.pkl create mode 100644 docs/notebook/results2.pkl create mode 100644 docs/notebook/sub-tracking.csv create mode 100644 docs/notebook/sub-tracking1.csv create mode 100644 docs/notebook/sub-tracking2.csv create mode 100644 docs/notebook/tracking.csv diff --git a/README.rst b/README.rst index 9e32de0..eaf3ca1 100644 --- a/README.rst +++ b/README.rst @@ -5,7 +5,7 @@ Description ----------- The ``gpu_tracker`` package provides a ``Tracker`` class and a commandline-interface that tracks (profiles) the usage of compute time, CPU 
utilization, maximum RAM, GPU utilization, and maximum GPU RAM. The compute time is a measurement of the real time taken by the task as opposed to the CPU-utilization time. -The GPU tracking is for Nvidia GPUs and uses the ``nvidia-smi`` command. If the Nvidia drivers have not been installed, then the max GPU RAM is not tracked and measurements are reported as 0. +The GPU tracking is for both Nvidia and AMD GPUs and respectively uses the nvidia-smi and amd-smi command-line tools to pull GPU usage information. Also, if neither the appropriate Nvidia nor AMD driver is installed, then the GPU-related metrics are not tracked and are reported as 0. Computational resources are tracked throughout the duration of a context manager or the duration of explicit calls to the ``start()`` and ``stop()`` methods of the ``Tracker`` class. The ``gpu-tracker`` command-line interface alternatively tracks the computational-resource-usage of an arbitrary shell command. diff --git a/docs/conf.py b/docs/conf.py index 966388d..f9917a8 100644 --- a/docs/conf.py +++ b/docs/conf.py @@ -14,8 +14,8 @@ # https://www.sphinx-doc.org/en/master/usage/configuration.html#project-information project = 'gpu_tracker' -copyright = '2024, Erik Huckvale, Hunter N. B. Moseley' -author = 'Erik Huckvale, Hunter N. B. Moseley' +copyright = '2024, Erik D. Huckvale, Hunter N. B. Moseley' +author = 'Erik D. Huckvale, Hunter N. B. 
Moseley' version = __version__ release = __version__ diff --git a/docs/notebook/combined-file.csv b/docs/notebook/combined-file.csv new file mode 100644 index 0000000..458c334 --- /dev/null +++ b/docs/notebook/combined-file.csv @@ -0,0 +1,25 @@ +process_id,code_block_name,position,timestamp +265016,my-code-block,0,1746129778.99568 +265016,my-code-block,1,1746129781.7855892 +265016,my-function,0,1746129781.7872624 +265016,my-function,1,1746129784.3252733 +265016,my-code-block,0,1746129784.3266726 +265016,my-code-block,1,1746129786.9071054 +265016,my-function,0,1746129786.9082048 +265016,my-function,1,1746129789.4858837 +265016,my-code-block,0,1746129789.486994 +265016,my-code-block,1,1746129792.0702057 +265016,my-function,0,1746129792.0713246 +265016,my-function,1,1746129794.6151624 +265016,my-code-block,0,1746129778.99568 +265016,my-code-block,1,1746129781.7855892 +265016,my-function,0,1746129781.7872624 +265016,my-function,1,1746129784.3252733 +265016,my-code-block,0,1746129784.3266726 +265016,my-code-block,1,1746129786.9071054 +265016,my-function,0,1746129786.9082048 +265016,my-function,1,1746129789.4858837 +265016,my-code-block,0,1746129789.486994 +265016,my-code-block,1,1746129792.0702057 +265016,my-function,0,1746129792.0713246 +265016,my-function,1,1746129794.6151624 diff --git a/docs/notebook/config.json b/docs/notebook/config.json new file mode 100644 index 0000000..6675ff2 --- /dev/null +++ b/docs/notebook/config.json @@ -0,0 +1,6 @@ +{ + "sleep_time": 0.5c, + "ram_unit": "megabytes", + "gpu_ram_unit": "megabytes", + "time_unit": "seconds" +} diff --git a/docs/notebook/correct_nbconvert.py b/docs/notebook/correct_nbconvert.py index d1765eb..23daf6a 100644 --- a/docs/notebook/correct_nbconvert.py +++ b/docs/notebook/correct_nbconvert.py @@ -5,6 +5,7 @@ contents: str = contents.replace('python3\n\n !', 'none\n\n !') contents: str = contents.replace('parsed-literal::', 'code:: none') contents: str = contents.replace('!', '$ ') +contents: str = 
contents.replace('os._exit(0)', '') contents = '.. _tutorial-label:\n\n' + contents with open('tutorial.rst', 'w') as file: diff --git a/docs/notebook/my-results.pkl b/docs/notebook/my-results.pkl new file mode 100644 index 0000000000000000000000000000000000000000..d2825f84c3ee66067d9d771f956bd1d4d77c5f7b GIT binary patch literal 5283 zcmds5e^3?xkD6tx+jj3%$+wa@GTaSY~ z%+SC3W^Q-id*AQ(ec!(C_w)ViZLscoX@W&>>cpm?!pb4e)69n)lH!g$Yidrl;;x|+ zg4nd2mlU5YX*;!<8?`pASiL8(mJe}0pQerva-x@$9G*akcQl4LKd-52OE{VPf&Of2 zSXqBlNK>aqiwmO6heXaNIsBZ+HSr-yQ!~qa0Z;RjMjsbc^THaePjE{PQ$!vzX*e)vmV5{MbA6*u>P>5?qt89q(1Vl*=DIS>$+H^rpy$7AwMRsaVOS@K{ zlGEz6Z`Q!B+PrdWkf%!H8s}@B9magJnzT@>s;YY8KiKq((;{o`j$Nn%=WFSV{t`o_^!auRf2_kGG5X`X6=CHiMj`x7S^y;6MnrpKKuQ7xx^$ zdUzf={4Q-@Fo)=E!2_gOsl4v`>$l0TdVA`2EWd56zoFXD@2V$@U7vMNUrLak z^upw@-&2Ub-8lsWU(#0JjC^9L`Q@75rT5*X|MB(}9v{Q{8b+#`>Q)5I5K+QxkbHuN zhPcy$VV^`J+&PY3+qDMF_DMsN*2}l>lEckOnCj`Q$gg7`7jX>L;XA4=@=RgE>O~;X zU;<2}$pw?m)WXhGb==Y+>GY$RbYjrSPP%-uUl29CaxI!__46F8ePAhHNfh#+2G^6-cNqAVFBnVx{(Er`6==%!1plFa+V zbGjnMOIe46*u2ST&pvOh$S>hV-G5fu;Xd$sa6)t1lB$p@{u~o`gxhtbD#l} zjFV-oaBZ5J!Z!N2CjD_D7SX^uiS;T$OhHcOSs&--eUjFJ#aPXdWQ-0EtD8`BqSH{~ zLjsRSpHgi+L<%irKql61EZROkrj%msE^bJaBx!e&1VbFltAO#C8IId*XU0clkt^VFC^X!&MV4;~@*Wv;j&caxQg&3rgr)9{l0ttZ zJ@+H;pTkWJgP1#eNox|CICrZ|O-Dc5AfVuRK#rOsc{m>zq5xJwYMKS^iEQT^DS8Z}Y=6s{aAJr`AEH9?V`_8LY zg11lJU6w-cWcOSf>Fca~^UGsLC>3x8yQdLak`tfvFkj$8%=yAQwE7)_UO22DCLDzU zIHn(-E#~mxeOL`Ah8|WGo>dke5T=C(Q(Qtuu0g8#wz6RS@H8_>a=}pA`|<(*+mGM8 z%J4`3G4G+iL8j1g>F6zGblMrAz47#8X|J=W<^JZY6G-EVw$C1G|InmW5!!k|r>8wk z#qk**C4b&!&f_~z*y8EO(%y=#zDr;4yU3i+t~m1V*(dMXe!|5qcWCPh+Rm1co43`$ z-|zYRUD}VYAECXkyo{~hMaI)6tR(whgYQo%B_{3FX>nH($vEiBICyy{_x`vd?Q%Hy2rjReLVRICaZT<6~@1K-U?t9~rMb_(Mr@doc zpXQjJ4Z}HZ0(`Z`VnB4(CD1b|Cse^wcnX%ma;SzHSOHH%E!06h{1BdjmCyjIfCU$D z;EuMevkCwZFj1SM6IF35Za5iE%L+9EVX{#XJ(NXiaLSnpr=7|0NBo@`8`Qr5!Au0 zZ-i~#xo379+JD9Tq`$H4RK?u4{>Iea+|l{d{d-2u&C&GjWs6fQyC;xKHLvu4SY{{n z)1R%f=h?}hAKUoVf}Xw1v#SruJ13oCBG}Zh?f8x2l^2+ETi)`%=X!nA^+(g6rh{9b 
z`ANU+MKNd7(E+AsMEj}4z_4y+e`Vd}^#`xStRHXQ5!Q>n8`eg@#_C1-7Rz_(w|4F- z^OJrFZG|j3aQQc7tx4I&cxEAqeA1WpK3Cd%ypSxIccZSd_AF!k6;Nw_(qA}_?bmf= w@9BMCmo(c9tlU1${G`_w4engjnoml^i?0qmfA_T|-X6!g{{IiQ@rv8=U&`lCa{vGU literal 0 HcmV?d00001 diff --git a/docs/notebook/results.pkl b/docs/notebook/results.pkl new file mode 100644 index 0000000000000000000000000000000000000000..c2c69deee351e08abf9a28a6ee4ac15b2b9edba4 GIT binary patch literal 5283 zcmds5e{dA_6~Dk;$Uz#CkQ3A~b73Mxg_D#r{3z4?LWmtW8zB&qpvmTYcbmLT?)I*? zyCk8+8Zj|xw}q6&fYfQ6&a`!gp=G8Pu~;G$qQeiV{Ad9g99yJ_PU~Q6>lE$#?%pkz z3wH^FfB7A9yWj8kz4v{)@4e6aEKk|DZ7;AHLz~(ZQF$dw{VhV&Bdfmny{_f2R()&O zhA1^v3bGnhWPPjtK&RfJmuM5h>xC!{26ZhjLZtweJ^pZ1@H9qgNYFLsGOEx=jC)JN z&WDlGdE=r0Jm1t1*gs4QDgs7}**-L|Af6I5wIUcCp71Q8@qEGf%D!MkASEY#P zZwU&#kLfUBm!yUw?WQv!bXla)C~Zfpd{ppCftwdWko~Yxlakb#Q5pfYKfjn9{h$1$u9fxj$aZSgco6RIK;{hwA*Of zt+soot@G>|u414+vQYhq>sc0PzMx;@a?59RyMMNiGhcq{-_i7sIR5a<0x_Tbs_nfW ztvhiSSp&5td)zMa$ALb2tn`>=|MSyrS7(&I%Uurd3*6ZA4!7?}W9G|G9J2OPdtmOQ z((k&q>XpK-quj;f%+76PSGc$2-Ok@OUFODryz}fMKN!3H^yillbKgUrnf$H)yt`>O zao>BQZpPZHWAs0@{_$gMc!+U*l#&bccXb@vP){!G4V;<0aW3iYVdE1nH;;{(q;JE0 zFa3E{F)9AnoS_4s{g-=w2aa=V$y_#0)sjujze6=-(X55%FZJ0;TPqud#JJ2aA~h3F zf41#uH~DnYqiqj{j*ij))Nkib&(|?iwJe`1VueUDR)ZWA{Vc`3Hca~rmf_xU?6*s= z!)l*4yl4YLn;?69RK`-D$V);izg)sKRELK(NBo|_ggt;jphE#nWyJ+|Tcw4QLT%h* z!^&x6QaN$xWG_2@d`OgZ{H1NWW)BIJO=KmY3+yTE5`&xywHv*KQkaX!!n~0uy70us zhPA8^6(t_Q2@xb9$bJD4KvHCLCEFhk`9w(wnA3@JyQ~PI*q))t@X}D8lBjI0=2`}< zrJR@%YvEpYC#xY|Rm7mUfhuBH;v+&7T~Tz+&31uC1FbYF+*%~Z@I>Mjhzc}-@pC2m zlCYj{d6Td>-Or9UA1tL8GZTc;JL6`OhC?Zh-EWDcH)R}}_$VnY&SDl5CDR)&X5!Yn zNNM$fVPa`bvfP7mGwlSHdzzicHi5m5rF^^(@*zQ?Mh(%rumq_h@ImSmg0kL&%~+cxE0`UA-Y}u%CzhcsL`4BZpSjw+i8or91licT zv1tc|q*jW(yQD5%kvMM_38pxDL0{DVp5Y6q|hJPsQvi+r}$7KAm-+?IGe;K z>T6e+>BM^ngjI|O5yf@bpk*5DNk*h+ke#`n235APMax3Q#v`-BNeMOvr`Z@(s>&?+0biS$ zdeF?Wn>T=%La;*z3I8HJG7Ncbn8k=fffz?a8fG#{#C5P~xn;OxWN9+PgmUd`q4wEK zbGFIuv>c>tJj=dF7+IjKdtI87En)1iUc3%%3Y$rYr9vp8wBKB3pdL}Nw3xCEipgQI 
zP})YV2uHjkfWQ)13T2u@2`e->dj2uZqTaF+7T&wF9Rx3fu1_)<<;MO+Hz`F zzRIY;KV26dB&;Q;kMjs$;8Ie2;Z@fC_QPM`H6xfX2=Bw|M(`X-!Gpul3`d3ss{&({ zg8{;_@MDR~$jEibvffq>;T8JadG6E?@7uYf zWyspccb;~n(od$n9fzwo9-u8+H=7J9@15H`;~!)6 zKec``?d>aG{*z*7J-OoD9LlX-FoO2dY29>MW-;0M^@+bG;J=d&6!s$nF@O@puYWDH2Gx-@zg z<%9}&3|7J_sD#z92C85!R6`Ba!a8^yo`5>22Ob)Lf-ljs!72bi#6oRJEL7E}`rvr1 zEX%b?2$PMf1tLvEVz`zzJkpBpu|N7KLF->>pV3#?E2->kOXlmB-Yu{E68TlwQRxHu-ouV2JI z?Jq9cf3@g$1B+8vjky!oi(ixVxZ&+~tWgdG3? literal 0 HcmV?d00001 diff --git a/docs/notebook/results2.pkl b/docs/notebook/results2.pkl new file mode 100644 index 0000000000000000000000000000000000000000..d2825f84c3ee66067d9d771f956bd1d4d77c5f7b GIT binary patch literal 5283 zcmds5e^3?xkD6tx+jj3%$+wa@GTaSY~ z%+SC3W^Q-id*AQ(ec!(C_w)ViZLscoX@W&>>cpm?!pb4e)69n)lH!g$Yidrl;;x|+ zg4nd2mlU5YX*;!<8?`pASiL8(mJe}0pQerva-x@$9G*akcQl4LKd-52OE{VPf&Of2 zSXqBlNK>aqiwmO6heXaNIsBZ+HSr-yQ!~qa0Z;RjMjsbc^THaePjE{PQ$!vzX*e)vmV5{MbA6*u>P>5?qt89q(1Vl*=DIS>$+H^rpy$7AwMRsaVOS@K{ zlGEz6Z`Q!B+PrdWkf%!H8s}@B9magJnzT@>s;YY8KiKq((;{o`j$Nn%=WFSV{t`o_^!auRf2_kGG5X`X6=CHiMj`x7S^y;6MnrpKKuQ7xx^$ zdUzf={4Q-@Fo)=E!2_gOsl4v`>$l0TdVA`2EWd56zoFXD@2V$@U7vMNUrLak z^upw@-&2Ub-8lsWU(#0JjC^9L`Q@75rT5*X|MB(}9v{Q{8b+#`>Q)5I5K+QxkbHuN zhPcy$VV^`J+&PY3+qDMF_DMsN*2}l>lEckOnCj`Q$gg7`7jX>L;XA4=@=RgE>O~;X zU;<2}$pw?m)WXhGb==Y+>GY$RbYjrSPP%-uUl29CaxI!__46F8ePAhHNfh#+2G^6-cNqAVFBnVx{(Er`6==%!1plFa+V zbGjnMOIe46*u2ST&pvOh$S>hV-G5fu;Xd$sa6)t1lB$p@{u~o`gxhtbD#l} zjFV-oaBZ5J!Z!N2CjD_D7SX^uiS;T$OhHcOSs&--eUjFJ#aPXdWQ-0EtD8`BqSH{~ zLjsRSpHgi+L<%irKql61EZROkrj%msE^bJaBx!e&1VbFltAO#C8IId*XU0clkt^VFC^X!&MV4;~@*Wv;j&caxQg&3rgr)9{l0ttZ zJ@+H;pTkWJgP1#eNox|CICrZ|O-Dc5AfVuRK#rOsc{m>zq5xJwYMKS^iEQT^DS8Z}Y=6s{aAJr`AEH9?V`_8LY zg11lJU6w-cWcOSf>Fca~^UGsLC>3x8yQdLak`tfvFkj$8%=yAQwE7)_UO22DCLDzU zIHn(-E#~mxeOL`Ah8|WGo>dke5T=C(Q(Qtuu0g8#wz6RS@H8_>a=}pA`|<(*+mGM8 z%J4`3G4G+iL8j1g>F6zGblMrAz47#8X|J=W<^JZY6G-EVw$C1G|InmW5!!k|r>8wk z#qk**C4b&!&f_~z*y8EO(%y=#zDr;4yU3i+t~m1V*(dMXe!|5qcWCPh+Rm1co43`$ 
z-|zYRUD}VYAECXkyo{~hMaI)6tR(whgYQo%B_{3FX>nH($vEiBICyy{_x`vd?Q%Hy2rjReLVRICaZT<6~@1K-U?t9~rMb_(Mr@doc zpXQjJ4Z}HZ0(`Z`VnB4(CD1b|Cse^wcnX%ma;SzHSOHH%E!06h{1BdjmCyjIfCU$D z;EuMevkCwZFj1SM6IF35Za5iE%L+9EVX{#XJ(NXiaLSnpr=7|0NBo@`8`Qr5!Au0 zZ-i~#xo379+JD9Tq`$H4RK?u4{>Iea+|l{d{d-2u&C&GjWs6fQyC;xKHLvu4SY{{n z)1R%f=h?}hAKUoVf}Xw1v#SruJ13oCBG}Zh?f8x2l^2+ETi)`%=X!nA^+(g6rh{9b z`ANU+MKNd7(E+AsMEj}4z_4y+e`Vd}^#`xStRHXQ5!Q>n8`eg@#_C1-7Rz_(w|4F- z^OJrFZG|j3aQQc7tx4I&cxEAqeA1WpK3Cd%ypSxIccZSd_AF!k6;Nw_(qA}_?bmf= w@9BMCmo(c9tlU1${G`_w4engjnoml^i?0qmfA_T|-X6!g{{IiQ@rv8=U&`lCa{vGU literal 0 HcmV?d00001 diff --git a/docs/notebook/sub-tracking.csv b/docs/notebook/sub-tracking.csv new file mode 100644 index 0000000..b2037f1 --- /dev/null +++ b/docs/notebook/sub-tracking.csv @@ -0,0 +1,13 @@ +process_id,code_block_name,position,timestamp +265016,my-code-block,0,1746129778.99568 +265016,my-code-block,1,1746129781.7855892 +265016,my-function,0,1746129781.7872624 +265016,my-function,1,1746129784.3252733 +265016,my-code-block,0,1746129784.3266726 +265016,my-code-block,1,1746129786.9071054 +265016,my-function,0,1746129786.9082048 +265016,my-function,1,1746129789.4858837 +265016,my-code-block,0,1746129789.486994 +265016,my-code-block,1,1746129792.0702055 +265016,my-function,0,1746129792.0713246 +265016,my-function,1,1746129794.6151624 diff --git a/docs/notebook/sub-tracking1.csv b/docs/notebook/sub-tracking1.csv new file mode 100644 index 0000000..b2037f1 --- /dev/null +++ b/docs/notebook/sub-tracking1.csv @@ -0,0 +1,13 @@ +process_id,code_block_name,position,timestamp +265016,my-code-block,0,1746129778.99568 +265016,my-code-block,1,1746129781.7855892 +265016,my-function,0,1746129781.7872624 +265016,my-function,1,1746129784.3252733 +265016,my-code-block,0,1746129784.3266726 +265016,my-code-block,1,1746129786.9071054 +265016,my-function,0,1746129786.9082048 +265016,my-function,1,1746129789.4858837 +265016,my-code-block,0,1746129789.486994 +265016,my-code-block,1,1746129792.0702055 
+265016,my-function,0,1746129792.0713246 +265016,my-function,1,1746129794.6151624 diff --git a/docs/notebook/sub-tracking2.csv b/docs/notebook/sub-tracking2.csv new file mode 100644 index 0000000..b2037f1 --- /dev/null +++ b/docs/notebook/sub-tracking2.csv @@ -0,0 +1,13 @@ +process_id,code_block_name,position,timestamp +265016,my-code-block,0,1746129778.99568 +265016,my-code-block,1,1746129781.7855892 +265016,my-function,0,1746129781.7872624 +265016,my-function,1,1746129784.3252733 +265016,my-code-block,0,1746129784.3266726 +265016,my-code-block,1,1746129786.9071054 +265016,my-function,0,1746129786.9082048 +265016,my-function,1,1746129789.4858837 +265016,my-code-block,0,1746129789.486994 +265016,my-code-block,1,1746129792.0702055 +265016,my-function,0,1746129792.0713246 +265016,my-function,1,1746129794.6151624 diff --git a/docs/notebook/tracking.csv b/docs/notebook/tracking.csv new file mode 100644 index 0000000..b83e7c5 --- /dev/null +++ b/docs/notebook/tracking.csv @@ -0,0 +1,28 @@ +ram_unit,gpu_ram_unit,time_unit,ram_system_capacity,gpu_ram_system_capacity,system_core_count,n_expected_cores,system_gpu_count,n_expected_gpus +gigabytes,gigabytes,hours,67.254165504,16.376,12,12,1,1 +main_ram,descendants_ram,combined_ram,system_ram,main_gpu_ram,descendants_gpu_ram,combined_gpu_ram,system_gpu_ram,gpu_sum_utilization_percent,gpu_hardware_utilization_percent,main_n_threads,descendants_n_threads,combined_n_threads,cpu_system_sum_utilization_percent,cpu_system_hardware_utilization_percent,cpu_main_sum_utilization_percent,cpu_main_hardware_utilization_percent,cpu_descendants_sum_utilization_percent,cpu_descendants_hardware_utilization_percent,cpu_combined_sum_utilization_percent,cpu_combined_hardware_utilization_percent,timestamp +0.341860352,0.0,0.341860352,4.859711488,0.0,0.0,0.0,0.215,0.0,0.0,12,0,12,11.900000000000002,0.9916666666666668,100.3,8.358333333333333,0,0.0,100.3,8.358333333333333,1746129779.2048252 
+0.731631616,0.0,0.731631616,5.008326656,0.0,0.0,0.0,0.215,0.0,0.0,12,0,12,127.5,10.625,103.1,8.591666666666667,0,0.0,103.1,8.591666666666667,1746129779.8564913 +0.661712896,0.0,0.661712896,5.115432960000001,0.0,0.0,0.0,0.218,0.0,0.0,14,0,14,116.89999999999999,9.741666666666665,79.6,6.633333333333333,0,0.0,79.6,6.633333333333333,1746129780.5009012 +0.851456,0.0,0.851456,5.207662592,0.314,0.0,0.314,0.535,0.0,0.0,15,0,15,123.1,10.258333333333333,99.2,8.266666666666667,0,0.0,99.2,8.266666666666667,1746129781.1513443 +0.912490496,0.0,0.912490496,5.279162368000001,0.314,0.0,0.314,0.535,0.0,0.0,15,0,15,124.39999999999999,10.366666666666665,101.5,8.458333333333334,0,0.0,101.5,8.458333333333334,1746129781.8066156 +0.8645918720000001,0.0,0.8645918720000001,5.239013376,0.506,0.0,0.506,0.727,0.0,0.0,15,0,15,124.3,10.358333333333333,97.1,8.091666666666667,0,0.0,97.1,8.091666666666667,1746129782.4623022 +0.9303490560000001,0.0,0.9303490560000001,5.31324928,0.506,0.0,0.506,0.727,0.0,0.0,15,0,15,133.4,11.116666666666667,100.4,8.366666666666667,0,0.0,100.4,8.366666666666667,1746129783.1200194 +0.8646123520000001,0.0,0.8646123520000001,5.274169344000001,0.506,0.0,0.506,0.727,0.0,0.0,15,0,15,125.9,10.491666666666667,100.0,8.333333333333334,0,0.0,100.0,8.333333333333334,1746129783.7685237 +0.936558592,0.0,0.936558592,5.288230912,0.506,0.0,0.506,0.727,0.0,0.0,15,0,15,122.9,10.241666666666667,98.3,8.191666666666666,0,0.0,98.3,8.191666666666666,1746129784.421019 +0.864616448,0.0,0.864616448,5.5536435200000005,0.506,0.0,0.506,0.727,0.0,0.0,15,0,15,123.39999999999999,10.283333333333333,102.7,8.558333333333334,0,0.0,102.7,8.558333333333334,1746129785.064131 +0.9364152320000001,0.0,0.9364152320000001,5.527478272000001,0.506,0.0,0.506,0.727,0.0,0.0,15,0,15,122.4,10.200000000000001,98.2,8.183333333333334,0,0.0,98.2,8.183333333333334,1746129785.716852 
+0.86462464,0.0,0.86462464,5.200162816000001,0.506,0.0,0.506,0.727,0.0,0.0,15,0,15,127.4,10.616666666666667,95.2,7.933333333333334,0,0.0,95.2,7.933333333333334,1746129786.3666322 +0.9391349760000001,0.0,0.9391349760000001,5.265408000000001,0.506,0.0,0.506,0.727,0.0,0.0,15,0,15,121.8,10.15,99.4,8.283333333333333,0,0.0,99.4,8.283333333333333,1746129787.0165348 +0.8646492160000001,0.0,0.8646492160000001,5.20341504,0.506,0.0,0.506,0.727,0.0,0.0,15,0,15,130.8,10.9,103.2,8.6,0,0.0,103.2,8.6,1746129787.6673303 +0.9443737600000001,0.0,0.9443737600000001,5.268512768,0.506,0.0,0.506,0.727,3.0,3.0,15,0,15,121.9,10.158333333333333,94.7,7.891666666666667,0,0.0,94.7,7.891666666666667,1746129788.3078256 +0.864997376,0.0,0.864997376,5.209337856,0.506,0.0,0.506,0.727,0.0,0.0,15,0,15,123.0,10.25,95.9,7.991666666666667,0,0.0,95.9,7.991666666666667,1746129788.9610796 +0.8579727360000001,0.0,0.8579727360000001,5.234085888,0.506,0.0,0.506,0.727,0.0,0.0,15,0,15,114.39999999999999,9.533333333333333,87.5,7.291666666666667,0,0.0,87.5,7.291666666666667,1746129789.588663 +0.8650014720000001,0.0,0.8650014720000001,5.219196928000001,0.506,0.0,0.506,0.727,0.0,0.0,15,0,15,120.0,10.0,100.7,8.391666666666667,0,0.0,100.7,8.391666666666667,1746129790.1870134 +0.927977472,0.0,0.927977472,5.271433216,0.506,0.0,0.506,0.727,0.0,0.0,15,0,15,130.8,10.9,97.9,8.158333333333333,0,0.0,97.9,8.158333333333333,1746129790.7797492 +0.8650096640000001,0.0,0.8650096640000001,5.296881664000001,0.506,0.0,0.506,0.727,0.0,0.0,15,0,15,123.0,10.25,96.3,8.025,0,0.0,96.3,8.025,1746129791.4257803 +0.9221242880000001,0.0,0.9221242880000001,5.315219456,0.506,0.0,0.506,0.727,0.0,0.0,15,0,15,122.99999999999999,10.249999999999998,100.6,8.383333333333333,0,0.0,100.6,8.383333333333333,1746129792.0727892 +0.8650137600000001,0.0,0.8650137600000001,5.314588672,0.506,0.0,0.506,0.727,0.0,0.0,15,0,15,120.29999999999998,10.024999999999999,99.9,8.325000000000001,0,0.0,99.9,8.325000000000001,1746129792.721361 
+0.9266216960000001,0.0,0.9266216960000001,5.3080064,0.506,0.0,0.506,0.727,0.0,0.0,15,0,15,120.29999999999998,10.024999999999999,94.9,7.908333333333334,0,0.0,94.9,7.908333333333334,1746129793.3718987 +0.865017856,0.0,0.865017856,5.268705280000001,0.506,0.0,0.506,0.727,0.0,0.0,15,0,15,125.7,10.475,98.5,8.208333333333334,0,0.0,98.5,8.208333333333334,1746129794.023684 +0.932110336,0.0,0.932110336,5.2950917120000005,0.506,0.0,0.506,0.727,0.0,0.0,15,0,15,121.8,10.15,78.0,6.5,0,0.0,78.0,6.5,1746129794.6729133 diff --git a/docs/notebook/tutorial.ipynb b/docs/notebook/tutorial.ipynb index 0f31db1..4d6acae 100644 --- a/docs/notebook/tutorial.ipynb +++ b/docs/notebook/tutorial.ipynb @@ -16,17 +16,33 @@ "## API" ] }, + { + "cell_type": "markdown", + "id": "c5757429-9f80-44f2-9318-91182372ada1", + "metadata": {}, + "source": [ + "### Tracking" + ] + }, + { + "cell_type": "markdown", + "id": "402ba04f-4773-43fc-940f-8cfe705ec487", + "metadata": {}, + "source": [ + "#### Basics" + ] + }, { "cell_type": "markdown", "id": "2bb9e84a-8523-4e5f-bc01-1d6b234c19a6", "metadata": {}, "source": [ - "The `gpu_tracker` package provides the `Tracker` class which uses a subprocess to measure computational resource usage, namely the compute time, maximum CPU utilization, mean CPU utilization, maximum RAM used, maximum GPU utilization, mean GPU utilization, and maximum GPU RAM used. The `start()` method starts this process which tracks usage in the background. After calling `start()`, one can write the code for which resource usage is measured, followed by calling the `stop()` method. The compute time will be the time from the call to `start()` to the call to `stop()` and the RAM, GPU RAM, CPU utilization, and GPU utilization quantities will be the respective computational resources used by the code that's in between `start()` and `stop()`." 
+ "The `gpu_tracker` package provides the `Tracker` class which uses a subprocess to measure computational resource usage, namely the compute time, maximum CPU utilization, mean CPU utilization, maximum RAM used, maximum GPU utilization, mean GPU utilization, and maximum GPU RAM used. It supports both NVIDIA and AMD GPUs. The `start()` method starts this process which tracks usage in the background. The `Tracker` class can be used as a context manager. Upon entering the context, one can write the code for which resource usage is measured. The compute time will be the time from entering the context to exiting the context and the RAM, GPU RAM, CPU utilization, and GPU utilization quantities will be the respective computational resources used by the code that's within the context." ] }, { "cell_type": "code", - "execution_count": 1, + "execution_count": 14, "id": "2ac7e291-08e4-4f88-a38c-02023dd813c1", "metadata": {}, "outputs": [], @@ -37,15 +53,13 @@ }, { "cell_type": "code", - "execution_count": 2, + "execution_count": 15, "id": "5ad59e55-2bde-4453-9727-2302aa8c231a", "metadata": {}, "outputs": [], "source": [ - "tracker = gput.Tracker(n_expected_cores=1, sleep_time=0.1)\n", - "tracker.start()\n", - "example_function()\n", - "tracker.stop()" + "with gput.Tracker(n_expected_cores=1, sleep_time=0.1) as tracker:\n", + " example_function()" ] }, { @@ -58,7 +72,7 @@ }, { "cell_type": "code", - "execution_count": 3, + "execution_count": 16, "id": "710e11da-7a70-49fe-be6d-c1d25ce9d53f", "metadata": {}, "outputs": [ @@ -69,19 +83,19 @@ "Max RAM:\n", " Unit: gigabytes\n", " System capacity: 67.254\n", - " System: 2.001\n", + " System: 4.307\n", " Main:\n", - " Total RSS: 0.94\n", - " Private RSS: 0.786\n", - " Shared RSS: 0.165\n", + " Total RSS: 0.924\n", + " Private RSS: 0.755\n", + " Shared RSS: 0.171\n", " Descendants:\n", " Total RSS: 0.0\n", " Private RSS: 0.0\n", " Shared RSS: 0.0\n", " Combined:\n", - " Total RSS: 0.94\n", - " Private RSS: 0.786\n", - " Shared RSS: 
0.165\n", + " Total RSS: 0.924\n", + " Private RSS: 0.755\n", + " Shared RSS: 0.171\n", "Max GPU RAM:\n", " Unit: gigabytes\n", " System capacity: 16.376\n", @@ -93,25 +107,25 @@ " System core count: 12\n", " Number of expected cores: 1\n", " System:\n", - " Max sum percent: 162.3\n", - " Max hardware percent: 13.525\n", - " Mean sum percent: 144.283\n", - " Mean hardware percent: 12.024\n", + " Max sum percent: 222.6\n", + " Max hardware percent: 18.55\n", + " Mean sum percent: 149.285\n", + " Mean hardware percent: 12.44\n", " Main:\n", - " Max sum percent: 101.4\n", - " Max hardware percent: 101.4\n", - " Mean sum percent: 96.7\n", - " Mean hardware percent: 96.7\n", + " Max sum percent: 103.3\n", + " Max hardware percent: 103.3\n", + " Mean sum percent: 94.285\n", + " Mean hardware percent: 94.285\n", " Descendants:\n", " Max sum percent: 0.0\n", " Max hardware percent: 0.0\n", " Mean sum percent: 0.0\n", " Mean hardware percent: 0.0\n", " Combined:\n", - " Max sum percent: 101.4\n", - " Max hardware percent: 101.4\n", - " Mean sum percent: 96.7\n", - " Mean hardware percent: 96.7\n", + " Max sum percent: 103.3\n", + " Max hardware percent: 103.3\n", + " Mean sum percent: 94.285\n", + " Mean hardware percent: 94.285\n", " Main number of threads: 15\n", " Descendants number of threads: 0\n", " Combined number of threads: 15\n", @@ -121,8 +135,8 @@ " GPU percentages:\n", " Max sum percent: 5.0\n", " Max hardware percent: 5.0\n", - " Mean sum percent: 0.417\n", - " Mean hardware percent: 0.417\n", + " Mean sum percent: 0.385\n", + " Mean hardware percent: 0.385\n", "Compute time:\n", " Unit: hours\n", " Time: 0.001\n" @@ -162,18 +176,28 @@ "id": "dbf7ad83-21d0-4cd2-adbb-278fa80d2b13", "metadata": {}, "source": [ - "The `Tracker` can alternatively be used as a context manager rather than explicitly calling `start()` and `stop()`." 
+ "The `Tracker` can alternatively be used by explicitly calling its `start()` and `stop()` methods which behave the same as entering and exiting the context manager respectively." ] }, { "cell_type": "code", - "execution_count": 4, + "execution_count": 18, "id": "08f688f4-f1b6-41dc-91f9-76f9c7e0fdb3", "metadata": {}, "outputs": [], "source": [ - "with gput.Tracker() as tracker:\n", - " example_function()" + "tracker = gput.Tracker()\n", + "tracker.start()\n", + "example_function()\n", + "tracker.stop()" + ] + }, + { + "cell_type": "markdown", + "id": "ad1d5fc4-0554-4717-9674-a435ee4e4126", + "metadata": {}, + "source": [ + "#### Arguments and Attributes" ] }, { @@ -181,7 +205,7 @@ "id": "7c7afe26-b2e0-4a4a-ba88-327ed6d4bb63", "metadata": {}, "source": [ - "The units of the computational resources can be modified as desired. For example, to measure the RAM in megabytes, the GPU RAM in megabytes, and the compute time in seconds:" + "The units of the computational resources can be modified as desired. The following example measures the RAM in megabytes, the GPU RAM in megabytes, and the compute time in seconds." 
] }, { @@ -196,24 +220,24 @@ "text": [ "Max RAM:\n", " Unit: megabytes\n", - " System capacity: 67254.17\n", - " System: 2336.362\n", + " System capacity: 67254.166\n", + " System: 1984.791\n", " Main:\n", - " Total RSS: 919.99\n", - " Private RSS: 699.384\n", - " Shared RSS: 230.269\n", + " Total RSS: 873.853\n", + " Private RSS: 638.353\n", + " Shared RSS: 235.68\n", " Descendants:\n", " Total RSS: 0.0\n", " Private RSS: 0.0\n", " Shared RSS: 0.0\n", " Combined:\n", - " Total RSS: 919.99\n", - " Private RSS: 699.384\n", - " Shared RSS: 230.269\n", + " Total RSS: 873.853\n", + " Private RSS: 638.353\n", + " Shared RSS: 235.68\n", "Max GPU RAM:\n", " Unit: megabytes\n", " System capacity: 16376.0\n", - " System: 727.0\n", + " System: 728.0\n", " Main: 506.0\n", " Descendants: 0.0\n", " Combined: 506.0\n", @@ -221,25 +245,25 @@ " System core count: 12\n", " Number of expected cores: 12\n", " System:\n", - " Max sum percent: 166.5\n", - " Max hardware percent: 13.875\n", - " Mean sum percent: 144.55\n", - " Mean hardware percent: 12.046\n", + " Max sum percent: 161.6\n", + " Max hardware percent: 13.467\n", + " Mean sum percent: 145.517\n", + " Mean hardware percent: 12.126\n", " Main:\n", - " Max sum percent: 104.8\n", - " Max hardware percent: 8.733\n", - " Mean sum percent: 97.458\n", - " Mean hardware percent: 8.122\n", + " Max sum percent: 101.5\n", + " Max hardware percent: 8.458\n", + " Mean sum percent: 98.683\n", + " Mean hardware percent: 8.224\n", " Descendants:\n", " Max sum percent: 0.0\n", " Max hardware percent: 0.0\n", " Mean sum percent: 0.0\n", " Mean hardware percent: 0.0\n", " Combined:\n", - " Max sum percent: 104.8\n", - " Max hardware percent: 8.733\n", - " Mean sum percent: 97.458\n", - " Mean hardware percent: 8.122\n", + " Max sum percent: 101.5\n", + " Max hardware percent: 8.458\n", + " Mean sum percent: 98.683\n", + " Mean hardware percent: 8.224\n", " Main number of threads: 15\n", " Descendants number of threads: 0\n", " Combined number 
of threads: 15\n", @@ -247,13 +271,13 @@ " System GPU count: 1\n", " Number of expected GPUs: 1\n", " GPU percentages:\n", - " Max sum percent: 0.0\n", - " Max hardware percent: 0.0\n", - " Mean sum percent: 0.0\n", - " Mean hardware percent: 0.0\n", + " Max sum percent: 3.0\n", + " Max hardware percent: 3.0\n", + " Mean sum percent: 0.25\n", + " Mean hardware percent: 0.25\n", "Compute time:\n", " Unit: seconds\n", - " Time: 2.685\n" + " Time: 2.729\n" ] } ], @@ -284,12 +308,12 @@ "{\n", " \"max_ram\": {\n", " \"unit\": \"megabytes\",\n", - " \"system_capacity\": 67254.1696,\n", - " \"system\": 2336.3624959999997,\n", + " \"system_capacity\": 67254.165504,\n", + " \"system\": 1984.790528,\n", " \"main\": {\n", - " \"total_rss\": 919.9902719999999,\n", - " \"private_rss\": 699.3838079999999,\n", - " \"shared_rss\": 230.268928\n", + " \"total_rss\": 873.8529279999999,\n", + " \"private_rss\": 638.353408,\n", + " \"shared_rss\": 235.679744\n", " },\n", " \"descendants\": {\n", " \"total_rss\": 0.0,\n", @@ -297,15 +321,15 @@ " \"shared_rss\": 0.0\n", " },\n", " \"combined\": {\n", - " \"total_rss\": 919.9902719999999,\n", - " \"private_rss\": 699.3838079999999,\n", - " \"shared_rss\": 230.268928\n", + " \"total_rss\": 873.8529279999999,\n", + " \"private_rss\": 638.353408,\n", + " \"shared_rss\": 235.679744\n", " }\n", " },\n", " \"max_gpu_ram\": {\n", " \"unit\": \"megabytes\",\n", " \"system_capacity\": 16376.0,\n", - " \"system\": 727.0,\n", + " \"system\": 728.0,\n", " \"main\": 506.0,\n", " \"descendants\": 0.0,\n", " \"combined\": 506.0\n", @@ -314,16 +338,16 @@ " \"system_core_count\": 12,\n", " \"n_expected_cores\": 12,\n", " \"system\": {\n", - " \"max_sum_percent\": 166.5,\n", - " \"max_hardware_percent\": 13.875,\n", - " \"mean_sum_percent\": 144.55,\n", - " \"mean_hardware_percent\": 12.045833333333333\n", + " \"max_sum_percent\": 161.60000000000002,\n", + " \"max_hardware_percent\": 13.466666666666669,\n", + " \"mean_sum_percent\": 145.51666666666668,\n", 
+ " \"mean_hardware_percent\": 12.12638888888889\n", " },\n", " \"main\": {\n", - " \"max_sum_percent\": 104.8,\n", - " \"max_hardware_percent\": 8.733333333333333,\n", - " \"mean_sum_percent\": 97.45833333333333,\n", - " \"mean_hardware_percent\": 8.121527777777779\n", + " \"max_sum_percent\": 101.5,\n", + " \"max_hardware_percent\": 8.458333333333334,\n", + " \"mean_sum_percent\": 98.68333333333334,\n", + " \"mean_hardware_percent\": 8.22361111111111\n", " },\n", " \"descendants\": {\n", " \"max_sum_percent\": 0.0,\n", @@ -332,10 +356,10 @@ " \"mean_hardware_percent\": 0.0\n", " },\n", " \"combined\": {\n", - " \"max_sum_percent\": 104.8,\n", - " \"max_hardware_percent\": 8.733333333333333,\n", - " \"mean_sum_percent\": 97.45833333333333,\n", - " \"mean_hardware_percent\": 8.121527777777779\n", + " \"max_sum_percent\": 101.5,\n", + " \"max_hardware_percent\": 8.458333333333334,\n", + " \"mean_sum_percent\": 98.68333333333334,\n", + " \"mean_hardware_percent\": 8.22361111111111\n", " },\n", " \"main_n_threads\": 15,\n", " \"descendants_n_threads\": 0,\n", @@ -345,15 +369,15 @@ " \"system_gpu_count\": 1,\n", " \"n_expected_gpus\": 1,\n", " \"gpu_percentages\": {\n", - " \"max_sum_percent\": 0.0,\n", - " \"max_hardware_percent\": 0.0,\n", - " \"mean_sum_percent\": 0.0,\n", - " \"mean_hardware_percent\": 0.0\n", + " \"max_sum_percent\": 3.0,\n", + " \"max_hardware_percent\": 3.0,\n", + " \"mean_sum_percent\": 0.25,\n", + " \"mean_hardware_percent\": 0.25\n", " }\n", " },\n", " \"compute_time\": {\n", " \"unit\": \"seconds\",\n", - " \"time\": 2.684972047805786\n", + " \"time\": 2.728560209274292\n", " }\n", "}\n" ] @@ -381,7 +405,7 @@ { "data": { "text/plain": [ - "MaxRAM(unit='megabytes', system_capacity=67254.1696, system=2336.3624959999997, main=RSSValues(total_rss=919.9902719999999, private_rss=699.3838079999999, shared_rss=230.268928), descendants=RSSValues(total_rss=0.0, private_rss=0.0, shared_rss=0.0), combined=RSSValues(total_rss=919.9902719999999, 
private_rss=699.3838079999999, shared_rss=230.268928))" + "MaxRAM(unit='megabytes', system_capacity=67254.165504, system=1984.790528, main=RSSValues(total_rss=873.8529279999999, private_rss=638.353408, shared_rss=235.679744), descendants=RSSValues(total_rss=0.0, private_rss=0.0, shared_rss=0.0), combined=RSSValues(total_rss=873.8529279999999, private_rss=638.353408, shared_rss=235.679744))" ] }, "execution_count": 7, @@ -423,7 +447,7 @@ { "data": { "text/plain": [ - "RSSValues(total_rss=919.9902719999999, private_rss=699.3838079999999, shared_rss=230.268928)" + "RSSValues(total_rss=873.8529279999999, private_rss=638.353408, shared_rss=235.679744)" ] }, "execution_count": 9, @@ -444,7 +468,7 @@ { "data": { "text/plain": [ - "919.9902719999999" + "873.8529279999999" ] }, "execution_count": 10, @@ -465,7 +489,7 @@ { "data": { "text/plain": [ - "MaxGPURAM(unit='megabytes', system_capacity=16376.0, system=727.0, main=506.0, descendants=0.0, combined=506.0)" + "MaxGPURAM(unit='megabytes', system_capacity=16376.0, system=728.0, main=506.0, descendants=0.0, combined=506.0)" ] }, "execution_count": 11, @@ -486,7 +510,7 @@ { "data": { "text/plain": [ - "ComputeTime(unit='seconds', time=2.684972047805786)" + "ComputeTime(unit='seconds', time=2.728560209274292)" ] }, "execution_count": 12, @@ -498,6 +522,99 @@ "tracker.resource_usage.compute_time" ] }, + { + "cell_type": "markdown", + "id": "57c1c857-175d-4497-bee8-28c615f31ac4", + "metadata": {}, + "source": [ + "Below is an example of using a child process. Notice the descendants fields are now non-zero." 
+ ] + }, + { + "cell_type": "code", + "execution_count": 13, + "id": "f429ced6-573b-4f0f-ad64-658e9c05242d", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Max RAM:\n", + " Unit: gigabytes\n", + " System capacity: 67.254\n", + " System: 2.388\n", + " Main:\n", + " Total RSS: 0.849\n", + " Private RSS: 0.528\n", + " Shared RSS: 0.325\n", + " Descendants:\n", + " Total RSS: 0.845\n", + " Private RSS: 0.734\n", + " Shared RSS: 0.112\n", + " Combined:\n", + " Total RSS: 1.371\n", + " Private RSS: 1.05\n", + " Shared RSS: 0.325\n", + "Max GPU RAM:\n", + " Unit: gigabytes\n", + " System capacity: 16.376\n", + " System: 1.236\n", + " Main: 0.506\n", + " Descendants: 0.506\n", + " Combined: 1.012\n", + "CPU utilization:\n", + " System core count: 12\n", + " Number of expected cores: 2\n", + " System:\n", + " Max sum percent: 338.0\n", + " Max hardware percent: 28.167\n", + " Mean sum percent: 183.644\n", + " Mean hardware percent: 15.304\n", + " Main:\n", + " Max sum percent: 101.0\n", + " Max hardware percent: 50.5\n", + " Mean sum percent: 60.178\n", + " Mean hardware percent: 30.089\n", + " Descendants:\n", + " Max sum percent: 354.1\n", + " Max hardware percent: 177.05\n", + " Mean sum percent: 109.033\n", + " Mean hardware percent: 54.517\n", + " Combined:\n", + " Max sum percent: 452.2\n", + " Max hardware percent: 226.1\n", + " Mean sum percent: 169.211\n", + " Mean hardware percent: 84.606\n", + " Main number of threads: 15\n", + " Descendants number of threads: 13\n", + " Combined number of threads: 28\n", + "GPU utilization:\n", + " System GPU count: 1\n", + " Number of expected GPUs: 1\n", + " GPU percentages:\n", + " Max sum percent: 5.0\n", + " Max hardware percent: 5.0\n", + " Mean sum percent: 0.556\n", + " Mean hardware percent: 0.556\n", + "Compute time:\n", + " Unit: hours\n", + " Time: 0.001\n" + ] + } + ], + "source": [ + "import multiprocessing as mp\n", + "ctx = mp.get_context(method='spawn')\n", + 
"child_process = ctx.Process(target=example_function)\n", + "with gput.Tracker(n_expected_cores=2, sleep_time=0.4) as tracker:\n", + " child_process.start()\n", + " example_function()\n", + " child_process.join()\n", + "child_process.close()\n", + "print(tracker)" + ] + }, { "cell_type": "markdown", "id": "c2cacaf2-316f-463b-bccd-3a9cdcd89a66", @@ -508,7 +625,7 @@ }, { "cell_type": "code", - "execution_count": 13, + "execution_count": 14, "id": "8de6c83f-63b1-4fd3-ae22-a87bd760aad2", "metadata": {}, "outputs": [ @@ -542,105 +659,1489 @@ }, { "cell_type": "code", - "execution_count": 14, + "execution_count": 15, "id": "e568483c-18bc-4162-b74e-03a4d6ff8b45", "metadata": {}, "outputs": [], "source": [ - "tracker = gput.Tracker(resource_usage_file='path/to/my-file.pkl')" + "tracker = gput.Tracker(resource_usage_file='path/to/my-file.pkl')" + ] + }, + { + "cell_type": "markdown", + "id": "7abc5c16-0d24-4c73-8e06-8bfa9b101298", + "metadata": {}, + "source": [ + "While the `Tracker` class automatically detects which brand of GPU is installed (either NVIDIA or AMD), one can explicitly choose the GPU brand with the `gpu_brand` parameter" + ] + }, + { + "cell_type": "code", + "execution_count": 17, + "id": "da1a8850-d143-451e-8faf-352f6b5b37c6", + "metadata": {}, + "outputs": [], + "source": [ + "tracker = gput.Tracker(gpu_brand='nvidia')" + ] + }, + { + "cell_type": "markdown", + "id": "ad908deb-2314-4b1d-bc1d-4ab14ebbcb32", + "metadata": {}, + "source": [ + "While the `Tracker` by default stores aggregates of the computational resource usage across the timepoints, one can store the individual measured values at every timepoint in a file, either CSV or SQLite format, using the `tracking_file` parameter. **NOTE** for the CSV format, the static data (e.g. RAM system capacity, number of cores in the OS, etc.) is stored on the the first two rows with the headers on the first row followed by the static data on the second row. 
The headers of the timepoint data are on the third row followed by the timepoint data on the remaining rows. The SQLite file, however, stores the timepoint data and static data in different tables: \"data\" and \"static_data\" respectively." + ] + }, + { + "cell_type": "code", + "execution_count": 3, + "id": "fc8b9ce0-3b78-4527-9905-2b1a6564c9b0", + "metadata": {}, + "outputs": [], + "source": [ + "tracker = gput.Tracker(tracking_file='my-file.csv')\n", + "tracker = gput.Tracker(tracking_file='my-file.sqlite')" + ] + }, + { + "cell_type": "markdown", + "id": "5f8f5b0d-2200-4b39-8abf-2dddc03f952f", + "metadata": {}, + "source": [ + "### Sub-tracking" + ] + }, + { + "cell_type": "markdown", + "id": "17200926-f0e1-42c4-a8f2-3e6d3734de9a", + "metadata": {}, + "source": [ + "#### Logging Code Block Timestamps" + ] + }, + { + "cell_type": "markdown", + "id": "841d90bc-7f79-4d1f-a92c-d339a6403851", + "metadata": {}, + "source": [ + "While the `Tracker` object by itself can track a block of code, there are some cases where one might want to track one code block and a smaller code block within it or track multiple code blocks at a time without creating several tracking processes simultaneously, especially when tracking a code block that is called within multi-processing or a code block that is called several times. Similarly, one might want to track the resource usage of a particular function whenever it is called. Whether a function or some other specified code block, the `SubTracker` class can determine the computational resources used during the start times and stop times of a given code block. This includes the mean resources used during the times the code block is called, the mean time taken to complete the code block each time it is called, the number of times it is called, etc. 
Sub-tracking uses the tracking file specified by the `tracking_file` parameter of the `Tracker` object alongside a sub-tracking file which contains the start and stop times of each code block one desires to sub-track. The sub-tracking file can be created in Python using the `SubTracker` class, a context manager around the desired code block. Setting the `overwrite` parameter (default `False`) of the `Tracker` and `SubTracker` to `True` overwrites the `tracking_file` or `sub_tracking_file` respectively if a file of that path already exists. Keep this parameter at `False` to avoid loss of data if it is still needed." + ] + }, + { + "cell_type": "code", + "execution_count": 2, + "id": "b3b5e400-1e00-4a6d-b11b-6618efaf2b4a", + "metadata": {}, + "outputs": [], + "source": [ + "tracker = gput.Tracker(sleep_time=0.5, tracking_file='tracking.csv', overwrite=False)\n", + "tracker.start()\n", + "# Perform other computation here\n", + "for _ in range(5):\n", + " with gput.SubTracker(code_block_name='my-code-block', sub_tracking_file='sub-tracking.csv', overwrite=False):\n", + " example_function()\n", + "# Perform other computation here" + ] + }, + { + "cell_type": "markdown", + "id": "20a8fac6-26a9-425d-91f1-b40216f59e0a", + "metadata": {}, + "source": [ + "In the above example, a tracking session is initiated within the context of the `Tracker` object whose tracking file is 'tracking.csv'. Then we have a for loop wherein a function is called 5 times. Other computation might be performed before or after this for loop, but if the computational resource usage of the contents of the for loop is of interest in particular, that code block can be sub-tracked by wrapping it within the context of the `SubTracker` object whose sub-tracking file is 'sub-tracking.csv'. Alternatively, SQLite (.sqlite) files can be used to speed up querying in the case of very long tracking sessions. 
The name of the code block is 'my-code-block', given to distinguish it from other code blocks being sub-tracked." + ] + }, + { + "cell_type": "markdown", + "id": "883a4070-caf3-469f-ad73-75a104764f94", + "metadata": {}, + "source": [ + "If one wants to sub-track all calls to a particular function, the `sub_track` function decorator can be used instead of wrapping the function call with a `SubTracker` context every time it is called:" + ] + }, + { + "cell_type": "code", + "execution_count": 3, + "id": "c96501c1-75ed-4af9-b93c-9ff9aef607c8", + "metadata": {}, + "outputs": [], + "source": [ + "@gput.sub_track(code_block_name='my-function', sub_tracking_file='sub-tracking.csv', overwrite=False)\n", + "def my_function(*args, **kwargs):\n", + " example_function()\n", + "\n", + "for _ in range(3):\n", + " my_function()\n", + "tracker.stop()" + ] + }, + { + "cell_type": "markdown", + "id": "0760a8e1-dd2c-4dde-9513-f135c3587e91", + "metadata": {}, + "source": [ + "When sub-tracking a code block using the `SubTracker` context, the default `code_block_name` is the relative path of the Python file followed by a colon followed by the line number where the `SubTracker` context is initialized. When sub-tracking a function, the default `code_block_name` is the relative path of the Python file followed by a colon followed by the name of the function." + ] + }, + { + "cell_type": "markdown", + "id": "24bfedad-8388-4305-890c-2b8690940527", + "metadata": {}, + "source": [ + "#### Analysis" + ] + }, + { + "cell_type": "markdown", + "id": "92490af9-d033-4ca6-a9d2-32aa6e9901e6", + "metadata": {}, + "source": [ + "Once a tracking file and at least one sub-tracking file have been created, the results can be analyzed using the `SubTrackingAnalyzer` class, instantiated by passing in the path to the tracking file and the path to the sub-tracking file." 
+ ] + }, + { + "cell_type": "code", + "execution_count": 2, + "id": "9d06fd55-0910-438f-8192-f458e6235628", + "metadata": {}, + "outputs": [], + "source": [ + "analyzer = gput.SubTrackingAnalyzer(tracking_file='tracking.csv', sub_tracking_file='sub-tracking.csv')" + ] + }, + { + "cell_type": "markdown", + "id": "254db857-6d60-4dad-9982-5a19310c7acb", + "metadata": {}, + "source": [ + "When sub-tracking a code block within a function that's part of multi-processing (i.e. called within one of multiple sub-processes), the sub-tracking file must be unique to that process, which is why the default `sub_tracking_file` is the process ID followed by \".csv\". One way or another, a different sub-tracking file must be created per worker to prevent multiple processes from logging to the same file. The `SubTrackingAnalyzer` has a `combine_sub_tracking_files` method that can combine these multiple sub-tracking files into a single sub-tracking file whose path is specified by the `sub_tracking_file` parameter above. Once a sub-tracking file is created from a single process or combined from multiple, the results can be obtained via the `sub_tracking_results` method." + ] + }, + { + "cell_type": "code", + "execution_count": 3, + "id": "c2690ab9-a602-4b32-9920-6929e7bf9de4", + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "gpu_tracker.sub_tracker.SubTrackingResults" + ] + }, + "execution_count": 3, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "results = analyzer.sub_tracking_results()\n", + "type(results)" + ] + }, + { + "cell_type": "markdown", + "id": "7b5dca7b-65f4-4efe-bb01-e88d7ec4f5b9", + "metadata": {}, + "source": [ + "The `sub_tracking_results` method returns a `SubTrackingResults` object which contains summary statistics of the overall resource usage (all time points in the tracking file) and the per code block resource usage (the timepoints within calls to a code block i.e. 
the start/stop times) as `DataFrame` or `Series` objects from the `pandas` package." + ] + }, + { + "cell_type": "code", + "execution_count": 6, + "id": "89893780-3de4-42cc-b947-c2b1c951f382", + "metadata": {}, + "outputs": [ + { + "data": { + "text/html": [ + "
\n", + "\n", + "\n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + "
minmaxmeanstd
main_ram0.3412170.9205600.8619210.100084
descendants_ram0.0000000.0000000.0000000.000000
combined_ram0.3412170.9205600.8619210.100084
system_ram4.6026185.7015175.2819260.220270
main_gpu_ram0.0000000.5060000.4483640.151267
descendants_gpu_ram0.0000000.0000000.0000000.000000
combined_gpu_ram0.0000000.5060000.4483640.151267
system_gpu_ram0.2150000.7270000.6689090.152657
gpu_sum_utilization_percent0.0000000.0000000.0000000.000000
gpu_hardware_utilization_percent0.0000000.0000000.0000000.000000
main_n_threads12.00000015.00000014.7575760.791766
descendants_n_threads0.0000000.0000000.0000000.000000
combined_n_threads12.00000015.00000014.7575760.791766
cpu_system_sum_utilization_percent15.400000138.400000121.91818219.484617
cpu_system_hardware_utilization_percent1.28333311.53333310.1598481.623718
cpu_main_sum_utilization_percent91.400000103.30000099.0606062.571228
cpu_main_hardware_utilization_percent7.6166678.6083338.2550510.214269
cpu_descendants_sum_utilization_percent0.0000000.0000000.0000000.000000
cpu_descendants_hardware_utilization_percent0.0000000.0000000.0000000.000000
cpu_combined_sum_utilization_percent91.400000103.30000099.0606062.571228
cpu_combined_hardware_utilization_percent7.6166678.6083338.2550510.214269
\n", + "
" + ], + "text/plain": [ + " min max \\\n", + "main_ram 0.341217 0.920560 \n", + "descendants_ram 0.000000 0.000000 \n", + "combined_ram 0.341217 0.920560 \n", + "system_ram 4.602618 5.701517 \n", + "main_gpu_ram 0.000000 0.506000 \n", + "descendants_gpu_ram 0.000000 0.000000 \n", + "combined_gpu_ram 0.000000 0.506000 \n", + "system_gpu_ram 0.215000 0.727000 \n", + "gpu_sum_utilization_percent 0.000000 0.000000 \n", + "gpu_hardware_utilization_percent 0.000000 0.000000 \n", + "main_n_threads 12.000000 15.000000 \n", + "descendants_n_threads 0.000000 0.000000 \n", + "combined_n_threads 12.000000 15.000000 \n", + "cpu_system_sum_utilization_percent 15.400000 138.400000 \n", + "cpu_system_hardware_utilization_percent 1.283333 11.533333 \n", + "cpu_main_sum_utilization_percent 91.400000 103.300000 \n", + "cpu_main_hardware_utilization_percent 7.616667 8.608333 \n", + "cpu_descendants_sum_utilization_percent 0.000000 0.000000 \n", + "cpu_descendants_hardware_utilization_percent 0.000000 0.000000 \n", + "cpu_combined_sum_utilization_percent 91.400000 103.300000 \n", + "cpu_combined_hardware_utilization_percent 7.616667 8.608333 \n", + "\n", + " mean std \n", + "main_ram 0.861921 0.100084 \n", + "descendants_ram 0.000000 0.000000 \n", + "combined_ram 0.861921 0.100084 \n", + "system_ram 5.281926 0.220270 \n", + "main_gpu_ram 0.448364 0.151267 \n", + "descendants_gpu_ram 0.000000 0.000000 \n", + "combined_gpu_ram 0.448364 0.151267 \n", + "system_gpu_ram 0.668909 0.152657 \n", + "gpu_sum_utilization_percent 0.000000 0.000000 \n", + "gpu_hardware_utilization_percent 0.000000 0.000000 \n", + "main_n_threads 14.757576 0.791766 \n", + "descendants_n_threads 0.000000 0.000000 \n", + "combined_n_threads 14.757576 0.791766 \n", + "cpu_system_sum_utilization_percent 121.918182 19.484617 \n", + "cpu_system_hardware_utilization_percent 10.159848 1.623718 \n", + "cpu_main_sum_utilization_percent 99.060606 2.571228 \n", + "cpu_main_hardware_utilization_percent 8.255051 0.214269 \n", + 
"cpu_descendants_sum_utilization_percent 0.000000 0.000000 \n", + "cpu_descendants_hardware_utilization_percent 0.000000 0.000000 \n", + "cpu_combined_sum_utilization_percent 99.060606 2.571228 \n", + "cpu_combined_hardware_utilization_percent 8.255051 0.214269 " + ] + }, + "execution_count": 6, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "results.overall" + ] + }, + { + "cell_type": "markdown", + "id": "4f840359-3ab5-45f2-a5cf-d1c442d7fe44", + "metadata": {}, + "source": [ + "The `SubTrackingResults` class additionally contains the static data i.e. the information that remains constant throughout the tracking session." + ] + }, + { + "cell_type": "code", + "execution_count": 7, + "id": "ba266d13-7317-4eff-8ca9-e547baf890f3", + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "ram_unit gigabytes\n", + "gpu_ram_unit gigabytes\n", + "time_unit hours\n", + "ram_system_capacity 67.254166\n", + "gpu_ram_system_capacity 16.376\n", + "system_core_count 12\n", + "n_expected_cores 12\n", + "system_gpu_count 1\n", + "n_expected_gpus 1\n", + "Name: 0, dtype: object" + ] + }, + "execution_count": 7, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "results.static_data" + ] + }, + { + "cell_type": "markdown", + "id": "1af42d3a-da9e-4be1-a5ea-808887ecab3d", + "metadata": {}, + "source": [ + "The `code_block_results` attribute of the `SubTrackingResults` class is a list of `CodeBlockResults` objects, containing the resource usage and compute time summary statistics. In this case, there are two `CodeBlockResults` objects in the list since there were two code blocks sub-tracked in this tracking session." 
+ ] + }, + { + "cell_type": "code", + "execution_count": 8, + "id": "7aa9edb7-bf15-4cbf-a55c-5eb4eb950240", + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "gpu_tracker.sub_tracker.CodeBlockResults" + ] + }, + "execution_count": 8, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "[my_code_block_results, my_function_results] = results.code_block_results\n", + "type(my_code_block_results)" + ] + }, + { + "cell_type": "markdown", + "id": "d9226949-f9de-4f88-ac1d-a5bd90a835e2", + "metadata": {}, + "source": [ + "The `compute_time` attribute of the `CodeBlockResults` class contains summary statistics for the time spent on the code block, where `total` is the total amount of time spent within the code block during the tracking session, `mean` is the average time taken on each call to the code block, etc. The `resource_usage` attribute provides summary statistics for the computational resources used during calls to the code block i.e. within the start/stop times." + ] + }, + { + "cell_type": "code", + "execution_count": 9, + "id": "9bb17d2a-7149-4d04-a543-8f3793bae678", + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "min 2.630907\n", + "max 2.869182\n", + "mean 2.685580\n", + "std 0.102789\n", + "total 13.427902\n", + "dtype: float64" + ] + }, + "execution_count": 9, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "my_code_block_results.compute_time" + ] + }, + { + "cell_type": "code", + "execution_count": 10, + "id": "6b843a24-9e9f-4566-b447-2067410ba64e", + "metadata": {}, + "outputs": [ + { + "data": { + "text/html": [ + "
\n", + "\n", + "\n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + "
minmaxmeanstd
main_ram0.3412170.9122780.8469990.122948
descendants_ram0.0000000.0000000.0000000.000000
combined_ram0.3412170.9122780.8469990.122948
system_ram4.6026185.2613575.1706650.147118
main_gpu_ram0.0000000.5060000.4154290.182971
descendants_gpu_ram0.0000000.0000000.0000000.000000
combined_gpu_ram0.0000000.5060000.4154290.182971
system_gpu_ram0.2150000.7270000.6357140.184676
gpu_sum_utilization_percent0.0000000.0000000.0000000.000000
gpu_hardware_utilization_percent0.0000000.0000000.0000000.000000
main_n_threads12.00000015.00000014.6190480.973457
descendants_n_threads0.0000000.0000000.0000000.000000
combined_n_threads12.00000015.00000014.6190480.973457
cpu_system_sum_utilization_percent15.400000138.400000120.14285724.347907
cpu_system_hardware_utilization_percent1.28333311.53333310.0119052.028992
cpu_main_sum_utilization_percent91.400000103.30000098.6523812.733243
cpu_main_hardware_utilization_percent7.6166678.6083338.2210320.227770
cpu_descendants_sum_utilization_percent0.0000000.0000000.0000000.000000
cpu_descendants_hardware_utilization_percent0.0000000.0000000.0000000.000000
cpu_combined_sum_utilization_percent91.400000103.30000098.6523812.733243
cpu_combined_hardware_utilization_percent7.6166678.6083338.2210320.227770
\n", + "
" + ], + "text/plain": [ + " min max \\\n", + "main_ram 0.341217 0.912278 \n", + "descendants_ram 0.000000 0.000000 \n", + "combined_ram 0.341217 0.912278 \n", + "system_ram 4.602618 5.261357 \n", + "main_gpu_ram 0.000000 0.506000 \n", + "descendants_gpu_ram 0.000000 0.000000 \n", + "combined_gpu_ram 0.000000 0.506000 \n", + "system_gpu_ram 0.215000 0.727000 \n", + "gpu_sum_utilization_percent 0.000000 0.000000 \n", + "gpu_hardware_utilization_percent 0.000000 0.000000 \n", + "main_n_threads 12.000000 15.000000 \n", + "descendants_n_threads 0.000000 0.000000 \n", + "combined_n_threads 12.000000 15.000000 \n", + "cpu_system_sum_utilization_percent 15.400000 138.400000 \n", + "cpu_system_hardware_utilization_percent 1.283333 11.533333 \n", + "cpu_main_sum_utilization_percent 91.400000 103.300000 \n", + "cpu_main_hardware_utilization_percent 7.616667 8.608333 \n", + "cpu_descendants_sum_utilization_percent 0.000000 0.000000 \n", + "cpu_descendants_hardware_utilization_percent 0.000000 0.000000 \n", + "cpu_combined_sum_utilization_percent 91.400000 103.300000 \n", + "cpu_combined_hardware_utilization_percent 7.616667 8.608333 \n", + "\n", + " mean std \n", + "main_ram 0.846999 0.122948 \n", + "descendants_ram 0.000000 0.000000 \n", + "combined_ram 0.846999 0.122948 \n", + "system_ram 5.170665 0.147118 \n", + "main_gpu_ram 0.415429 0.182971 \n", + "descendants_gpu_ram 0.000000 0.000000 \n", + "combined_gpu_ram 0.415429 0.182971 \n", + "system_gpu_ram 0.635714 0.184676 \n", + "gpu_sum_utilization_percent 0.000000 0.000000 \n", + "gpu_hardware_utilization_percent 0.000000 0.000000 \n", + "main_n_threads 14.619048 0.973457 \n", + "descendants_n_threads 0.000000 0.000000 \n", + "combined_n_threads 14.619048 0.973457 \n", + "cpu_system_sum_utilization_percent 120.142857 24.347907 \n", + "cpu_system_hardware_utilization_percent 10.011905 2.028992 \n", + "cpu_main_sum_utilization_percent 98.652381 2.733243 \n", + "cpu_main_hardware_utilization_percent 8.221032 0.227770 \n", + 
"cpu_descendants_sum_utilization_percent 0.000000 0.000000 \n", + "cpu_descendants_hardware_utilization_percent 0.000000 0.000000 \n", + "cpu_combined_sum_utilization_percent 98.652381 2.733243 \n", + "cpu_combined_hardware_utilization_percent 8.221032 0.227770 " + ] + }, + "execution_count": 10, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "my_code_block_results.resource_usage" + ] + }, + { + "cell_type": "markdown", + "id": "1b7149fe-ebac-4575-a203-9a6410371f4c", + "metadata": {}, + "source": [ + "Additionally, the `CodeBlockResults` class also has attributes for the name of the code block, the number of times it was called during the tracking session, the number of calls that included at least one timepoint, and the total number of timepoints measured within all calls to the code block." + ] + }, + { + "cell_type": "code", + "execution_count": 11, + "id": "c6626a74-24a3-4676-a322-41f4514f192c", + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "('my-code-block', 5, 5, 21)" + ] + }, + "execution_count": 11, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "my_code_block_results.name, my_code_block_results.num_calls, my_code_block_results.num_non_empty_calls, my_code_block_results.num_timepoints" + ] + }, + { + "cell_type": "markdown", + "id": "b1eb73ed-7b5d-4250-9758-c2d8f5b92223", + "metadata": {}, + "source": [ + "The analysis results can also be printed in their entirety. Alternatively, the `to_json` method can provide this comprehensive information in JSON format." 
+ ] + }, + { + "cell_type": "code", + "execution_count": 4, + "id": "1cfa4d02-9812-4ad5-b6d9-dc5f1f80cc03", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Overall:\n", + "\t min max mean std\n", + "\tmain_ram 0.341860 0.944374 0.856037 0.125014\n", + "\tdescendants_ram 0.000000 0.000000 0.000000 0.000000\n", + "\tcombined_ram 0.341860 0.944374 0.856037 0.125014\n", + "\tsystem_ram 4.859711 5.553644 5.253445 0.134081\n", + "\tmain_gpu_ram 0.000000 0.506000 0.429920 0.170432\n", + "\tdescendants_gpu_ram 0.000000 0.000000 0.000000 0.000000\n", + "\tcombined_gpu_ram 0.000000 0.506000 0.429920 0.170432\n", + "\tsystem_gpu_ram 0.215000 0.727000 0.650320 0.172010\n", + "\tgpu_sum_utilization_percent 0.000000 3.000000 0.120000 0.600000\n", + "\tgpu_hardware_utilization_percent 0.000000 3.000000 0.120000 0.600000\n", + "\tmain_n_threads 12.000000 15.000000 14.720000 0.842615\n", + "\tdescendants_n_threads 0.000000 0.000000 0.000000 0.000000\n", + "\tcombined_n_threads 12.000000 15.000000 14.720000 0.842615\n", + "\tcpu_system_sum_utilization_percent 11.900000 133.400000 119.212000 22.741909\n", + "\tcpu_system_hardware_utilization_percent 0.991667 11.116667 9.934333 1.895159\n", + "\tcpu_main_sum_utilization_percent 78.000000 103.200000 96.924000 6.390767\n", + "\tcpu_main_hardware_utilization_percent 6.500000 8.600000 8.077000 0.532564\n", + "\tcpu_descendants_sum_utilization_percent 0.000000 0.000000 0.000000 0.000000\n", + "\tcpu_descendants_hardware_utilization_percent 0.000000 0.000000 0.000000 0.000000\n", + "\tcpu_combined_sum_utilization_percent 78.000000 103.200000 96.924000 6.390767\n", + "\tcpu_combined_hardware_utilization_percent 6.500000 8.600000 8.077000 0.532564\n", + "Static Data:\n", + "\t ram_unit gpu_ram_unit time_unit ram_system_capacity gpu_ram_system_capacity system_core_count n_expected_cores system_gpu_count n_expected_gpus\n", + "\t gigabytes gigabytes hours 67.254166 16.376 12 12 1 1\n", + "Code 
Block Results:\n", + "\tName: my-code-block\n", + "\tNum Timepoints: 12\n", + "\tNum Calls: 3\n", + "\tNum Non Empty Calls: 3\n", + "\tCompute Time:\n", + "\t\t min max mean std total\n", + "\t\t 2.580433 2.789909 2.651185 0.120147 7.953554\n", + "\tResource Usage:\n", + "\t\t min max mean std\n", + "\t\tmain_ram 0.341860 0.936559 0.808736 0.167663\n", + "\t\tdescendants_ram 0.000000 0.000000 0.000000 0.000000\n", + "\t\tcombined_ram 0.341860 0.936559 0.808736 0.167663\n", + "\t\tsystem_ram 4.859711 5.553644 5.231854 0.191567\n", + "\t\tmain_gpu_ram 0.000000 0.506000 0.363500 0.225892\n", + "\t\tdescendants_gpu_ram 0.000000 0.000000 0.000000 0.000000\n", + "\t\tcombined_gpu_ram 0.000000 0.506000 0.363500 0.225892\n", + "\t\tsystem_gpu_ram 0.215000 0.727000 0.583250 0.228088\n", + "\t\tgpu_sum_utilization_percent 0.000000 0.000000 0.000000 0.000000\n", + "\t\tgpu_hardware_utilization_percent 0.000000 0.000000 0.000000 0.000000\n", + "\t\tmain_n_threads 12.000000 15.000000 14.416667 1.164500\n", + "\t\tdescendants_n_threads 0.000000 0.000000 0.000000 0.000000\n", + "\t\tcombined_n_threads 12.000000 15.000000 14.416667 1.164500\n", + "\t\tcpu_system_sum_utilization_percent 11.900000 130.800000 113.641667 32.352363\n", + "\t\tcpu_system_hardware_utilization_percent 0.991667 10.900000 9.470139 2.696030\n", + "\t\tcpu_main_sum_utilization_percent 79.600000 103.100000 96.583333 6.726587\n", + "\t\tcpu_main_hardware_utilization_percent 6.633333 8.591667 8.048611 0.560549\n", + "\t\tcpu_descendants_sum_utilization_percent 0.000000 0.000000 0.000000 0.000000\n", + "\t\tcpu_descendants_hardware_utilization_percent 0.000000 0.000000 0.000000 0.000000\n", + "\t\tcpu_combined_sum_utilization_percent 79.600000 103.100000 96.583333 6.726587\n", + "\t\tcpu_combined_hardware_utilization_percent 6.633333 8.591667 8.048611 0.560549\n", + "\n", + "\tName: my-function\n", + "\tNum Timepoints: 12\n", + "\tNum Calls: 3\n", + "\tNum Non Empty Calls: 3\n", + "\tCompute Time:\n", + "\t\t min 
max mean std total\n", + "\t\t 2.538011 2.577679 2.553176 0.021419 7.659528\n", + "\tResource Usage:\n", + "\t\t min max mean std\n", + "\t\tmain_ram 0.864592 0.944374 0.896998 0.034505\n", + "\t\tdescendants_ram 0.000000 0.000000 0.000000 0.000000\n", + "\t\tcombined_ram 0.864592 0.944374 0.896998 0.034505\n", + "\t\tsystem_ram 5.203415 5.315219 5.271566 0.038751\n", + "\t\tmain_gpu_ram 0.314000 0.506000 0.490000 0.055426\n", + "\t\tdescendants_gpu_ram 0.000000 0.000000 0.000000 0.000000\n", + "\t\tcombined_gpu_ram 0.314000 0.506000 0.490000 0.055426\n", + "\t\tsystem_gpu_ram 0.535000 0.727000 0.711000 0.055426\n", + "\t\tgpu_sum_utilization_percent 0.000000 3.000000 0.250000 0.866025\n", + "\t\tgpu_hardware_utilization_percent 0.000000 3.000000 0.250000 0.866025\n", + "\t\tmain_n_threads 15.000000 15.000000 15.000000 0.000000\n", + "\t\tdescendants_n_threads 0.000000 0.000000 0.000000 0.000000\n", + "\t\tcombined_n_threads 15.000000 15.000000 15.000000 0.000000\n", + "\t\tcpu_system_sum_utilization_percent 120.300000 133.400000 124.566667 4.001439\n", + "\t\tcpu_system_hardware_utilization_percent 10.025000 11.116667 10.380556 0.333453\n", + "\t\tcpu_main_sum_utilization_percent 94.700000 103.200000 98.841667 2.677332\n", + "\t\tcpu_main_hardware_utilization_percent 7.891667 8.600000 8.236806 0.223111\n", + "\t\tcpu_descendants_sum_utilization_percent 0.000000 0.000000 0.000000 0.000000\n", + "\t\tcpu_descendants_hardware_utilization_percent 0.000000 0.000000 0.000000 0.000000\n", + "\t\tcpu_combined_sum_utilization_percent 94.700000 103.200000 98.841667 2.677332\n", + "\t\tcpu_combined_hardware_utilization_percent 7.891667 8.600000 8.236806 0.223111\n", + "\n", + "\n" + ] + } + ], + "source": [ + "print(results)" + ] + }, + { + "cell_type": "markdown", + "id": "91d9aa35-21b8-408e-a602-a542b68802cf", + "metadata": {}, + "source": [ + "#### Comparison" + ] + }, + { + "cell_type": "markdown", + "id": "8c94f65e-e48d-4c7e-bdb2-2b6081651674", + "metadata": {}, + 
"source": [ + "The `TrackingComparison` class allows for comparing the resource usage of multiple tracking sessions, both the overall usage of the sessions and any code blocks that were sub-tracked. This is helpful if one wants to see how changes to the process might impact its computational efficiency, such as changes to implementation, input data, etc. To do this, the `TrackingComparison` takes a mapping of the given name of a tracking session to the file path where a `SubTrackingResults` object is stored in pickle format. Say we had two tracking sessions and we wanted to compare them. First, we store the `results` of the first tracking session in a pickle file. If we'd like to re-use the same names for the `tracking_file` and `sub_tracking_file` in the second tracking session, we can safely set the `overwrite` argument to `True` since their data has been saved in 'results.pkl'." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "f32eeb3f-7dc1-4e8c-99b0-617e057a8b50", + "metadata": {}, + "outputs": [], + "source": [ + "import pickle as pkl\n", + "import os\n", + "\n", + "with open('results.pkl', 'wb') as file:\n", + " pkl.dump(results, file)\n", + "os._exit(0)" + ] + }, + { + "cell_type": "markdown", + "id": "e6fe42cc-e069-456d-b084-e02162e3adca", + "metadata": {}, + "source": [ + "Once we have the results of the first tracking session saved, we can start a new tracking session in another run of the program that we are profiling. Say we made some code changes and we want to compare the two implementations; we can populate a new `tracking_file` and `sub_tracking_file` with data from the new tracking session."
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "183ea8c3-79ad-4d73-bef8-51dcf74909a3", + "metadata": {}, + "outputs": [], + "source": [ + "import gpu_tracker as gput\n", + "from example_module import example_function\n", + "import pickle as pkl\n", + "\n", + "@gput.sub_track(code_block_name='my-function', sub_tracking_file='sub-tracking.csv', overwrite=True)\n", + "def my_function(*args, **kwargs):\n", + " example_function()\n", + "\n", + "with gput.Tracker(sleep_time=0.5, tracking_file='tracking.csv', overwrite=True):\n", + " for _ in range(3):\n", + " with gput.SubTracker(code_block_name='my-code-block', sub_tracking_file='sub-tracking.csv', overwrite=True):\n", + " example_function()\n", + " my_function()\n", + "results2 = gput.SubTrackingAnalyzer(tracking_file='tracking.csv', sub_tracking_file='sub-tracking.csv').sub_tracking_results()\n", + "with open('results2.pkl', 'wb') as file:\n", + " pkl.dump(results2, file)" + ] + }, + { + "cell_type": "markdown", + "id": "f4df28d0-f020-449d-bdcc-f229ac88c986", + "metadata": {}, + "source": [ + "The first tracking session stored its results in 'results.pkl' while the second tracking session stored its results in 'results2.pkl'. Say we decided to call the first session 'A' and the second session 'B'. The `TrackingComparison` object would be initialized like so:" + ] + }, + { + "cell_type": "code", + "execution_count": 5, + "id": "39663fc5-c4b3-4099-b418-be5d57afa38a", + "metadata": {}, + "outputs": [], + "source": [ + "comparison = gput.TrackingComparison(file_path_map={'A': 'results.pkl', 'B': 'results2.pkl'})" + ] + }, + { + "cell_type": "markdown", + "id": "62c8d695-622d-475b-b264-1e4f549fce6f", + "metadata": {}, + "source": [ + "Once the `TrackingComparison` is created, its `compare` method generates the `ComparisonResults` object comparing the computational resource usage measured in one tracking session to that of the other tracking sessions. 
The `statistic` parameter determines which summary statistic of the measurements to compare, defaulting to 'mean'. In this example, we will compare the maximum measurements by setting `statistic` to 'max'." + ] + }, + { + "cell_type": "code", + "execution_count": 6, + "id": "6034e4c2-0837-4dd6-b305-5f0f97abdc55", + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "gpu_tracker.sub_tracker.ComparisonResults" + ] + }, + "execution_count": 6, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "results = comparison.compare(statistic='max')\n", + "type(results)" + ] + }, + { + "cell_type": "markdown", + "id": "cc24e155-5852-40ce-8dd5-7df088785153", + "metadata": {}, + "source": [ + "The `overall_resource_usage` attribute of the `ComparisonResults` class is a dictionary mapping each measurement to a `Series` comparing that measurement across all timepoints in one tracking session to another." + ] + }, + { + "cell_type": "code", + "execution_count": 11, + "id": "feb75a78-c84b-41e5-a266-2668ced824f3", + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "dict_keys(['main_ram', 'descendants_ram', 'combined_ram', 'system_ram', 'main_gpu_ram', 'descendants_gpu_ram', 'combined_gpu_ram', 'system_gpu_ram', 'gpu_sum_utilization_percent', 'gpu_hardware_utilization_percent', 'main_n_threads', 'descendants_n_threads', 'combined_n_threads', 'cpu_system_sum_utilization_percent', 'cpu_system_hardware_utilization_percent', 'cpu_main_sum_utilization_percent', 'cpu_main_hardware_utilization_percent', 'cpu_descendants_sum_utilization_percent', 'cpu_descendants_hardware_utilization_percent', 'cpu_combined_sum_utilization_percent', 'cpu_combined_hardware_utilization_percent'])" + ] + }, + "execution_count": 11, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "results.overall_resource_usage.keys()" + ] + }, + { + "cell_type": "markdown", + "id": "ae8426f1-6cad-4180-80a8-f63048226609", + "metadata": {}, + 
"source": [ + "For example, we can compare the overall maximum 'main_ram' of tracking session 'A' to tracking session 'B'." + ] + }, + { + "cell_type": "code", + "execution_count": 10, + "id": "0a76fa26-a041-4e44-b5cd-0065066818ba", + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "A 0.920560\n", + "B 0.944374\n", + "dtype: float64" + ] + }, + "execution_count": 10, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "results.overall_resource_usage['main_ram']" + ] + }, + { + "cell_type": "markdown", + "id": "4f0daecd-7560-4d8d-8677-da47119fba96", + "metadata": {}, + "source": [ + "The `code_block_resource_usage` attribute is a dictionary that compares the same resource usage but for each code block rather than overall." + ] + }, + { + "cell_type": "code", + "execution_count": 17, + "id": "e4d6ee1f-cfcb-47bb-9503-dddf757ed7cc", + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "dict_keys(['main_ram', 'descendants_ram', 'combined_ram', 'system_ram', 'main_gpu_ram', 'descendants_gpu_ram', 'combined_gpu_ram', 'system_gpu_ram', 'gpu_sum_utilization_percent', 'gpu_hardware_utilization_percent', 'main_n_threads', 'descendants_n_threads', 'combined_n_threads', 'cpu_system_sum_utilization_percent', 'cpu_system_hardware_utilization_percent', 'cpu_main_sum_utilization_percent', 'cpu_main_hardware_utilization_percent', 'cpu_descendants_sum_utilization_percent', 'cpu_descendants_hardware_utilization_percent', 'cpu_combined_sum_utilization_percent', 'cpu_combined_hardware_utilization_percent'])" + ] + }, + "execution_count": 17, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "results.code_block_resource_usage.keys()" + ] + }, + { + "cell_type": "markdown", + "id": "f9bce9d8-4c21-4257-9481-36d8aed6c15b", + "metadata": {}, + "source": [ + "Each measurement is a dictionary mapping each code block name to the resources used across tracking sessions in that code block." 
+ ] + }, + { + "cell_type": "code", + "execution_count": 18, + "id": "57e7a315-af09-4ded-adfb-90a49ae2ef52", + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "dict_keys(['my-code-block', 'my-function'])" + ] + }, + "execution_count": 18, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "results.code_block_resource_usage['main_ram'].keys()" + ] + }, + { + "cell_type": "markdown", + "id": "4a3488dd-47ec-4be8-a30b-986dc4059a7c", + "metadata": {}, + "source": [ + "For example, the maximum 'main_ram' used by 'my-code-block' in tracking session 'A' can be compared to that of tracking session 'B'." + ] + }, + { + "cell_type": "code", + "execution_count": 19, + "id": "742c8755-ea9c-43b8-b10e-73a97d19b761", + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "A 0.912278\n", + "B 0.936559\n", + "dtype: float64" + ] + }, + "execution_count": 19, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "results.code_block_resource_usage['main_ram']['my-code-block']" + ] + }, + { + "cell_type": "markdown", + "id": "bba31658-87b8-4708-a225-fdb44ca602c9", + "metadata": {}, + "source": [ + "Finally the `code_block_compute_time` attribute is a dictionary that compares the compute time summary statistics for each code block and for each tracking session." + ] + }, + { + "cell_type": "code", + "execution_count": 23, + "id": "4247891a-e0a9-46f1-8ca1-4ca28d13679b", + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "dict_keys(['my-code-block', 'my-function'])" + ] + }, + "execution_count": 23, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "results.code_block_compute_time.keys()" + ] + }, + { + "cell_type": "markdown", + "id": "b8aba2e7-c49a-444d-b90d-c1488ad92071", + "metadata": {}, + "source": [ + "For example, we can compare the maximum compute time of 'my-code-block' in tracking session 'A' to that of tracking session 'B'." 
+ ] + }, + { + "cell_type": "code", + "execution_count": 25, + "id": "355f6538-969d-412c-9e88-752f68f3c60d", + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "B 2.789909\n", + "A 2.869182\n", + "dtype: float64" + ] + }, + "execution_count": 25, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "results.code_block_compute_time['my-code-block']" ] }, { "cell_type": "markdown", - "id": "57c1c857-175d-4497-bee8-28c615f31ac4", + "id": "308d0ff0-116c-48fb-b399-c7f000c69735", "metadata": {}, "source": [ - "Below is an example of using a child process. Notice the descendants fields are now non-zero." + "The comparison results can also be printed in their entirety. Alternatively, the `to_json` method can provide this comprehensive information in JSON format." ] }, { "cell_type": "code", - "execution_count": 15, - "id": "f429ced6-573b-4f0f-ad64-658e9c05242d", + "execution_count": 7, + "id": "a05307bd-ca33-42d9-a9b4-ed4643d303e5", "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ - "Max RAM:\n", - " Unit: gigabytes\n", - " System capacity: 67.254\n", - " System: 3.033\n", - " Main:\n", - " Total RSS: 0.865\n", - " Private RSS: 0.55\n", - " Shared RSS: 0.32\n", - " Descendants:\n", - " Total RSS: 0.854\n", - " Private RSS: 0.737\n", - " Shared RSS: 0.118\n", - " Combined:\n", - " Total RSS: 1.437\n", - " Private RSS: 1.125\n", - " Shared RSS: 0.32\n", - "Max GPU RAM:\n", - " Unit: gigabytes\n", - " System capacity: 16.376\n", - " System: 1.235\n", - " Main: 0.506\n", - " Descendants: 0.506\n", - " Combined: 1.012\n", - "CPU utilization:\n", - " System core count: 12\n", - " Number of expected cores: 2\n", - " System:\n", - " Max sum percent: 456.5\n", - " Max hardware percent: 38.042\n", - " Mean sum percent: 216.675\n", - " Mean hardware percent: 18.056\n", - " Main:\n", - " Max sum percent: 102.6\n", - " Max hardware percent: 51.3\n", - " Mean sum percent: 66.65\n", - " Mean hardware percent: 
33.325\n", - " Descendants:\n", - " Max sum percent: 175.8\n", - " Max hardware percent: 87.9\n", - " Mean sum percent: 105.392\n", - " Mean hardware percent: 52.696\n", - " Combined:\n", - " Max sum percent: 278.4\n", - " Max hardware percent: 139.2\n", - " Mean sum percent: 172.042\n", - " Mean hardware percent: 86.021\n", - " Main number of threads: 15\n", - " Descendants number of threads: 13\n", - " Combined number of threads: 28\n", - "GPU utilization:\n", - " System GPU count: 1\n", - " Number of expected GPUs: 1\n", - " GPU percentages:\n", - " Max sum percent: 8.0\n", - " Max hardware percent: 8.0\n", - " Mean sum percent: 1.333\n", - " Mean hardware percent: 1.333\n", - "Compute time:\n", - " Unit: hours\n", - " Time: 0.001\n" + "Overall Resource Usage:\n", + "\tMain Ram:\n", + "\t\t A B\n", + "\t\t 0.92056 0.944374\n", + "\tDescendants Ram:\n", + "\t\t A B\n", + "\t\t 0.0 0.0\n", + "\tCombined Ram:\n", + "\t\t A B\n", + "\t\t 0.92056 0.944374\n", + "\tSystem Ram:\n", + "\t\t B A\n", + "\t\t 5.553644 5.701517\n", + "\tMain Gpu Ram:\n", + "\t\t A B\n", + "\t\t 0.506 0.506\n", + "\tDescendants Gpu Ram:\n", + "\t\t A B\n", + "\t\t 0.0 0.0\n", + "\tCombined Gpu Ram:\n", + "\t\t A B\n", + "\t\t 0.506 0.506\n", + "\tSystem Gpu Ram:\n", + "\t\t A B\n", + "\t\t 0.727 0.727\n", + "\tGpu Sum Utilization Percent:\n", + "\t\t A B\n", + "\t\t 0.0 3.0\n", + "\tGpu Hardware Utilization Percent:\n", + "\t\t A B\n", + "\t\t 0.0 3.0\n", + "\tMain N Threads:\n", + "\t\t A B\n", + "\t\t 15.0 15.0\n", + "\tDescendants N Threads:\n", + "\t\t A B\n", + "\t\t 0.0 0.0\n", + "\tCombined N Threads:\n", + "\t\t A B\n", + "\t\t 15.0 15.0\n", + "\tCpu System Sum Utilization Percent:\n", + "\t\t B A\n", + "\t\t 133.4 138.4\n", + "\tCpu System Hardware Utilization Percent:\n", + "\t\t B A\n", + "\t\t 11.116667 11.533333\n", + "\tCpu Main Sum Utilization Percent:\n", + "\t\t B A\n", + "\t\t 103.2 103.3\n", + "\tCpu Main Hardware Utilization Percent:\n", + "\t\t B A\n", + "\t\t 8.6 
8.608333\n", + "\tCpu Descendants Sum Utilization Percent:\n", + "\t\t A B\n", + "\t\t 0.0 0.0\n", + "\tCpu Descendants Hardware Utilization Percent:\n", + "\t\t A B\n", + "\t\t 0.0 0.0\n", + "\tCpu Combined Sum Utilization Percent:\n", + "\t\t B A\n", + "\t\t 103.2 103.3\n", + "\tCpu Combined Hardware Utilization Percent:\n", + "\t\t B A\n", + "\t\t 8.6 8.608333\n", + "Code Block Resource Usage:\n", + "\tMain Ram:\n", + "\t\tmy-code-block:\n", + "\t\t\t A B\n", + "\t\t\t 0.912278 0.936559\n", + "\t\tmy-function:\n", + "\t\t\t A B\n", + "\t\t\t 0.92056 0.944374\n", + "\tDescendants Ram:\n", + "\t\tmy-code-block:\n", + "\t\t\t A B\n", + "\t\t\t 0.0 0.0\n", + "\t\tmy-function:\n", + "\t\t\t A B\n", + "\t\t\t 0.0 0.0\n", + "\tCombined Ram:\n", + "\t\tmy-code-block:\n", + "\t\t\t A B\n", + "\t\t\t 0.912278 0.936559\n", + "\t\tmy-function:\n", + "\t\t\t A B\n", + "\t\t\t 0.92056 0.944374\n", + "\tSystem Ram:\n", + "\t\tmy-code-block:\n", + "\t\t\t A B\n", + "\t\t\t 5.261357 5.553644\n", + "\t\tmy-function:\n", + "\t\t\t B A\n", + "\t\t\t 5.315219 5.701517\n", + "\tMain Gpu Ram:\n", + "\t\tmy-code-block:\n", + "\t\t\t A B\n", + "\t\t\t 0.506 0.506\n", + "\t\tmy-function:\n", + "\t\t\t A B\n", + "\t\t\t 0.506 0.506\n", + "\tDescendants Gpu Ram:\n", + "\t\tmy-code-block:\n", + "\t\t\t A B\n", + "\t\t\t 0.0 0.0\n", + "\t\tmy-function:\n", + "\t\t\t A B\n", + "\t\t\t 0.0 0.0\n", + "\tCombined Gpu Ram:\n", + "\t\tmy-code-block:\n", + "\t\t\t A B\n", + "\t\t\t 0.506 0.506\n", + "\t\tmy-function:\n", + "\t\t\t A B\n", + "\t\t\t 0.506 0.506\n", + "\tSystem Gpu Ram:\n", + "\t\tmy-code-block:\n", + "\t\t\t A B\n", + "\t\t\t 0.727 0.727\n", + "\t\tmy-function:\n", + "\t\t\t A B\n", + "\t\t\t 0.727 0.727\n", + "\tGpu Sum Utilization Percent:\n", + "\t\tmy-code-block:\n", + "\t\t\t A B\n", + "\t\t\t 0.0 0.0\n", + "\t\tmy-function:\n", + "\t\t\t A B\n", + "\t\t\t 0.0 3.0\n", + "\tGpu Hardware Utilization Percent:\n", + "\t\tmy-code-block:\n", + "\t\t\t A B\n", + "\t\t\t 0.0 0.0\n", + 
"\t\tmy-function:\n", + "\t\t\t A B\n", + "\t\t\t 0.0 3.0\n", + "\tMain N Threads:\n", + "\t\tmy-code-block:\n", + "\t\t\t A B\n", + "\t\t\t 15.0 15.0\n", + "\t\tmy-function:\n", + "\t\t\t A B\n", + "\t\t\t 15.0 15.0\n", + "\tDescendants N Threads:\n", + "\t\tmy-code-block:\n", + "\t\t\t A B\n", + "\t\t\t 0.0 0.0\n", + "\t\tmy-function:\n", + "\t\t\t A B\n", + "\t\t\t 0.0 0.0\n", + "\tCombined N Threads:\n", + "\t\tmy-code-block:\n", + "\t\t\t A B\n", + "\t\t\t 15.0 15.0\n", + "\t\tmy-function:\n", + "\t\t\t A B\n", + "\t\t\t 15.0 15.0\n", + "\tCpu System Sum Utilization Percent:\n", + "\t\tmy-code-block:\n", + "\t\t\t B A\n", + "\t\t\t 130.8 138.4\n", + "\t\tmy-function:\n", + "\t\t\t A B\n", + "\t\t\t 131.1 133.4\n", + "\tCpu System Hardware Utilization Percent:\n", + "\t\tmy-code-block:\n", + "\t\t\t B A\n", + "\t\t\t 10.9 11.533333\n", + "\t\tmy-function:\n", + "\t\t\t A B\n", + "\t\t\t 10.925 11.116667\n", + "\tCpu Main Sum Utilization Percent:\n", + "\t\tmy-code-block:\n", + "\t\t\t B A\n", + "\t\t\t 103.1 103.3\n", + "\t\tmy-function:\n", + "\t\t\t A B\n", + "\t\t\t 102.1 103.2\n", + "\tCpu Main Hardware Utilization Percent:\n", + "\t\tmy-code-block:\n", + "\t\t\t B A\n", + "\t\t\t 8.591667 8.608333\n", + "\t\tmy-function:\n", + "\t\t\t A B\n", + "\t\t\t 8.508333 8.6\n", + "\tCpu Descendants Sum Utilization Percent:\n", + "\t\tmy-code-block:\n", + "\t\t\t A B\n", + "\t\t\t 0.0 0.0\n", + "\t\tmy-function:\n", + "\t\t\t A B\n", + "\t\t\t 0.0 0.0\n", + "\tCpu Descendants Hardware Utilization Percent:\n", + "\t\tmy-code-block:\n", + "\t\t\t A B\n", + "\t\t\t 0.0 0.0\n", + "\t\tmy-function:\n", + "\t\t\t A B\n", + "\t\t\t 0.0 0.0\n", + "\tCpu Combined Sum Utilization Percent:\n", + "\t\tmy-code-block:\n", + "\t\t\t B A\n", + "\t\t\t 103.1 103.3\n", + "\t\tmy-function:\n", + "\t\t\t A B\n", + "\t\t\t 102.1 103.2\n", + "\tCpu Combined Hardware Utilization Percent:\n", + "\t\tmy-code-block:\n", + "\t\t\t B A\n", + "\t\t\t 8.591667 8.608333\n", + 
"\t\tmy-function:\n", + "\t\t\t A B\n", + "\t\t\t 8.508333 8.6\n", + "Code Block Compute Time:\n", + "\tmy-code-block:\n", + "\t\t B A\n", + "\t\t 2.789909 2.869182\n", + "\tmy-function:\n", + "\t\t A B\n", + "\t\t 2.570437 2.577679\n", + "\n" ] } ], "source": [ - "import multiprocessing as mp\n", - "ctx = mp.get_context(method='spawn')\n", - "child_process = ctx.Process(target=example_function)\n", - "with gput.Tracker(n_expected_cores=2, sleep_time=0.2) as tracker:\n", - " child_process.start()\n", - " example_function()\n", - " child_process.join()\n", - "child_process.close()\n", - "print(tracker)" + "print(results)" ] }, { @@ -651,6 +2152,22 @@ "## CLI" ] }, + { + "cell_type": "markdown", + "id": "5c7393bd-e3d3-4247-af95-4cdf66b43226", + "metadata": {}, + "source": [ + "### Tracking" + ] + }, + { + "cell_type": "markdown", + "id": "99599c6a-ae1e-4e5f-9513-09ee33cb0a4d", + "metadata": {}, + "source": [ + "#### Basics" + ] + }, { "cell_type": "markdown", "id": "54a3a61b-81cf-4a65-85af-7ba15a111970", @@ -661,7 +2178,7 @@ }, { "cell_type": "code", - "execution_count": 16, + "execution_count": 26, "id": "f7deae10-a16b-4a38-acc6-c354911200ab", "metadata": {}, "outputs": [ @@ -674,21 +2191,37 @@ "Usage:\n", " gpu-tracker -h | --help\n", " gpu-tracker -v | --version\n", - " gpu-tracker --execute= [--output=] [--format=] [--st=] [--ru=] [--gru=] [--tu=] [--nec=] [--guuids=] [--disable-logs]\n", + " gpu-tracker --execute= [--output=] [--format=] [--tconfig=] [--st=] [--ru=] [--gru=] [--tu=] [--nec=] [--guuids=] [--disable-logs] [--gb=] [--tf=] [--overwrite]\n", + " gpu-tracker sub-track combine --stf= [-p ]...\n", + " gpu-tracker sub-track analyze --tf= --stf= [--output=] [--format=]\n", + " gpu-tracker sub-track compare [--output=] [--format=] [--cconfig=] [-m =...] 
[--stat=]\n", "\n", "Options:\n", " -h --help Show this help message and exit.\n", " -v --version Show package version and exit.\n", " -e --execute= The command to run along with its arguments all within quotes e.g. \"ls -l -a\".\n", - " -o --output= File path to store the computational-resource-usage measurements. If not set, prints measurements to the screen.\n", - " -f --format= File format of the output. Either 'json' or 'text'. Defaults to 'text'.\n", + " -o --output= File path to store the computational-resource-usage measurements in the case of tracking or the analysis report in the case of sub-tracking. If not set, prints to the screen.\n", + " -f --format= File format of the output. Either 'json', 'text', or 'pickle'. Defaults to 'text'.\n", + " --tconfig= JSON config file containing the key word arguments to the ``Tracker`` class (see API) to be optionally used instead of the corresponding commandline options. If any commandline options are set, they will override the corresponding arguments provided by the config file.\n", " --st= The number of seconds to sleep in between usage-collection iterations.\n", " --ru= One of 'bytes', 'kilobytes', 'megabytes', 'gigabytes', or 'terabytes'.\n", " --gru= One of 'bytes', 'kilobytes', 'megabytes', 'gigabytes', or 'terabytes'.\n", " --tu= One of 'seconds', 'minutes', 'hours', or 'days'.\n", " --nec= The number of cores expected to be used. Defaults to the number of cores in the entire operating system.\n", " --guuids= Comma separated list of the UUIDs of the GPUs for which to track utilization e.g. gpu-uuid1,gpu-uuid2,etc. Defaults to all the GPUs in the system.\n", - " --disable-logs If set, warnings are suppressed during tracking. Otherwise, the Tracker logs warnings as usual.\n" + " --disable-logs If set, warnings are suppressed during tracking. Otherwise, the Tracker logs warnings as usual.\n", + " --gb= The brand of GPU to profile. Valid values are nvidia and amd. 
Defaults to the brand of GPU detected in the system, checking NVIDIA first.\n", + " --tf= If specified, stores the individual resource usage measurements at each iteration. Valid file formats are CSV (.csv) and SQLite (.sqlite) where the SQLite file format stores the data in a table called \"data\" and allows for more efficient querying.\n", + " --overwrite Whether to overwrite the tracking file if it already existed before the beginning of this tracking session. Do not set if the data in the existing tracking file is still needed.\n", + " sub-track Perform sub-tracking related commands.\n", + " combine Combines multiple sub-tracking files into one. This is usually a result of sub-tracking a code block that is called in multiple simultaneous processes.\n", + " --stf= The path to the sub-tracking file used to specify the timestamps of specific code-blocks. If not generated by the gpu-tracker API, must be either a CSV or SQLite file (where the SQLite file contains a table called \"data\") where the headers are precisely process_id, code_block_name, position, and timestamp. The process_id is the ID of the process where the code block is called. code_block_name is the name of the code block. position is whether it is the start or the stopping point of the code block where 0 represents start and 1 represents stop. And timestamp is the timestamp where the code block starts or where it stops.\n", + " -p Paths to the sub-tracking files to combine. Must all be the same file format and the same file format as the resulting sub-tracking file (either .csv or .sqlite). 
If only one path is provided, it is interpreted as a path to a directory and all the files in this directory are combined.\n",
+ " analyze Generate the sub-tracking analysis report using the tracking file and sub-tracking file for resource usage of specific code blocks.\n",
+ " compare Compares multiple tracking sessions to determine differences in computational resource usage by loading sub-tracking results given their file paths. Sub-tracking results files must be in pickle format e.g. running the ``sub-track analyze`` command and specifying a file path for ``--output`` and 'pickle' for the ``--format`` option. If code block results are not included in the sub-tracking files (i.e. no code blocks were sub-tracked), then only overall results are compared.\n",
+ " --cconfig= JSON config file containing the ``file_path_map`` argument for the ``TrackingComparison`` class and ``statistic`` argument for its ``compare`` method (see API) that can be used instead of the corresponding ``-m =`` and ``--stat=`` commandline options respectively. If additional ``-m =`` options are added on the commandline in addition to a config file, they will be added to the ``file_path_map`` in the config file. If a ``--stat`` option is provided on the commandline, it will override the ``statistic`` in the config file.\n",
+ " -m = Mapping of tracking session names to the path of the file containing the sub-tracking results of said tracking session. Must be in pickle format.\n",
+ " --stat= The summary statistic of the measurements to compare. One of 'min', 'max', 'mean', or 'std'. 
Defaults to 'mean'.\n" ] } ], @@ -706,7 +2239,7 @@ }, { "cell_type": "code", - "execution_count": 17, + "execution_count": 30, "id": "ea7c710f-a238-460d-836c-a979e1c72f4f", "metadata": {}, "outputs": [ @@ -718,19 +2251,19 @@ "Max RAM:\n", " Unit: gigabytes\n", " System capacity: 67.254\n", - " System: 2.896\n", + " System: 5.61\n", " Main:\n", " Total RSS: 0.003\n", " Private RSS: 0.0\n", " Shared RSS: 0.003\n", " Descendants:\n", - " Total RSS: 0.877\n", - " Private RSS: 0.759\n", - " Shared RSS: 0.118\n", - " Combined:\n", - " Total RSS: 0.878\n", - " Private RSS: 0.759\n", + " Total RSS: 0.879\n", + " Private RSS: 0.76\n", " Shared RSS: 0.119\n", + " Combined:\n", + " Total RSS: 0.881\n", + " Private RSS: 0.761\n", + " Shared RSS: 0.12\n", "Max GPU RAM:\n", " Unit: gigabytes\n", " System capacity: 16.376\n", @@ -742,25 +2275,25 @@ " System core count: 12\n", " Number of expected cores: 12\n", " System:\n", - " Max sum percent: 324.1\n", - " Max hardware percent: 27.008\n", - " Mean sum percent: 164.91\n", - " Mean hardware percent: 13.743\n", + " Max sum percent: 324.8\n", + " Max hardware percent: 27.067\n", + " Mean sum percent: 152.109\n", + " Mean hardware percent: 12.676\n", " Main:\n", " Max sum percent: 0.0\n", " Max hardware percent: 0.0\n", " Mean sum percent: 0.0\n", " Mean hardware percent: 0.0\n", " Descendants:\n", - " Max sum percent: 361.4\n", - " Max hardware percent: 30.117\n", - " Mean sum percent: 123.42\n", - " Mean hardware percent: 10.285\n", + " Max sum percent: 201.8\n", + " Max hardware percent: 16.817\n", + " Mean sum percent: 102.245\n", + " Mean hardware percent: 8.52\n", " Combined:\n", - " Max sum percent: 361.4\n", - " Max hardware percent: 30.117\n", - " Mean sum percent: 123.42\n", - " Mean hardware percent: 10.285\n", + " Max sum percent: 201.8\n", + " Max hardware percent: 16.817\n", + " Mean sum percent: 102.245\n", + " Mean hardware percent: 8.52\n", " Main number of threads: 1\n", " Descendants number of threads: 12\n", " 
Combined number of threads: 13\n", @@ -787,7 +2320,15 @@ "id": "c45091e7-0e85-4a8d-8836-c2dce1bd547f", "metadata": {}, "source": [ - "*Notice that the RAM and GPU RAM usage primarily takes place in the descendant processes since the bash command itself calls the commands relevant to resource usage.*" + "*Notice that the RAM and GPU RAM usage primarily takes place in the descendant processes since, in this example, the bash command itself calls the commands relevant to resource usage.*" + ] + }, + { + "cell_type": "markdown", + "id": "0bc17756-7919-4a70-b650-4959dc71d10a", + "metadata": {}, + "source": [ + "#### Options" ] }, { @@ -1189,6 +2730,565 @@ "source": [ "!cat out.json" ] + }, + { + "cell_type": "markdown", + "id": "770fcd73-4952-4167-a4ac-767b8c69a08b", + "metadata": {}, + "source": [ + "Alternative to typing out the tracking configuration via commandline options, one can specify a config JSON file via the `--tconfig` option." + ] + }, + { + "cell_type": "code", + "execution_count": 13, + "id": "329d5c89-c482-4ddf-9e01-f818ac66cb0f", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "{\n", + " \"sleep_time\": 0.5,\n", + " \"ram_unit\": \"megabytes\",\n", + " \"gpu_ram_unit\": \"megabytes\",\n", + " \"time_unit\": \"seconds\"\n", + "}\n" + ] + } + ], + "source": [ + "!cat config.json" + ] + }, + { + "cell_type": "code", + "execution_count": 12, + "id": "c7a137f2-c306-4002-9c07-7fb87392156b", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Resource tracking complete. 
Process completed with status code: 0\n", + "Max RAM:\n", + " Unit: megabytes\n", + " System capacity: 67254.166\n", + " System: 4511.437\n", + " Main:\n", + " Total RSS: 2.957\n", + " Private RSS: 0.319\n", + " Shared RSS: 2.638\n", + " Descendants:\n", + " Total RSS: 894.923\n", + " Private RSS: 781.222\n", + " Shared RSS: 113.701\n", + " Combined:\n", + " Total RSS: 896.135\n", + " Private RSS: 781.541\n", + " Shared RSS: 114.594\n", + "Max GPU RAM:\n", + " Unit: megabytes\n", + " System capacity: 16376.0\n", + " System: 727.0\n", + " Main: 0.0\n", + " Descendants: 314.0\n", + " Combined: 314.0\n", + "CPU utilization:\n", + " System core count: 12\n", + " Number of expected cores: 12\n", + " System:\n", + " Max sum percent: 259.3\n", + " Max hardware percent: 21.608\n", + " Mean sum percent: 160.9\n", + " Mean hardware percent: 13.408\n", + " Main:\n", + " Max sum percent: 0.0\n", + " Max hardware percent: 0.0\n", + " Mean sum percent: 0.0\n", + " Mean hardware percent: 0.0\n", + " Descendants:\n", + " Max sum percent: 102.8\n", + " Max hardware percent: 8.567\n", + " Mean sum percent: 96.529\n", + " Mean hardware percent: 8.044\n", + " Combined:\n", + " Max sum percent: 102.8\n", + " Max hardware percent: 8.567\n", + " Mean sum percent: 96.529\n", + " Mean hardware percent: 8.044\n", + " Main number of threads: 1\n", + " Descendants number of threads: 12\n", + " Combined number of threads: 13\n", + "GPU utilization:\n", + " System GPU count: 1\n", + " Number of expected GPUs: 1\n", + " GPU percentages:\n", + " Max sum percent: 0.0\n", + " Max hardware percent: 0.0\n", + " Mean sum percent: 0.0\n", + " Mean hardware percent: 0.0\n", + "Compute time:\n", + " Unit: seconds\n", + " Time: 3.913\n" + ] + } + ], + "source": [ + "!gpu-tracker -e 'bash example-script.sh' --tconfig=config.json" + ] + }, + { + "cell_type": "markdown", + "id": "b6f91506-3111-4b9c-81fc-a2c24c54b2ab", + "metadata": {}, + "source": [ + "### Sub-tracking" + ] + }, + { + "cell_type": 
"markdown", + "id": "dd81b0c2-7cb5-47b2-bee6-d7e813cf13c8", + "metadata": {}, + "source": [ + "#### Basics" + ] + }, + { + "cell_type": "markdown", + "id": "877098b6-c19b-4a0d-892c-4adce108be8c", + "metadata": {}, + "source": [ + "The `sub-track` subcommand introduces functionality related to sub-tracking i.e. analyzing computational resource usage for individual code blocks rather than the entire process. This requires a tracking file and a sub-tracking file. The tracking file can be created by specifying the `--tf` option when profiling a process using `--execute`. The sub-tracking file can be created using the gpu-tracker API i.e. the `SubTracker` class. If the process being profiled is not a python script, the sub-tracking file can be generated in any programming language as long as it follows the following format:\n", + "\n", + "It is either a CSV or SQLite file where the headers are `process_id,code_block_name,position,timestamp`. The `process_id` column is the ID (integer) of the process where the code block was called. The `code_block_name` is the given name (string) of the code block to distinguish it from other code blocks being sub-tracked. The `position` is an integer of either the value 0 or 1 where 0 indicates the start of the code block and 1 indicates the stopping point of the code block. Finally `timestamp` (float) is the timestamp when the code block either starts (where `position` is 0) or when it stops (where `position` is 1). Both a start timestamp and stop timestamp must be logged for every call to the code block of interest. If using an SQLite file for more efficient querying of longer tracking sessions, the name of the table must be 'data'.\n", + "\n", + "If sub-tracking a code block that is called in multiple processes, the sub-tracking files of that code block must be unique to each process. 
For convenience, the `sub-track combine` subcommand allows for combining these into a single sub-tracking file that can be used for downstream analysis. This example combines 'sub-tracking1.csv' and 'sub-tracking2.csv' into a single sub-tracking file of the name 'combined-file.csv'. Alternatively, if the `-p` option is only used once, rather than being interpreted as a list of files, it is instead interpreted as the path to a directory containing the sub-tracking files to combine."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 38,
+ "id": "d43af259-b884-4da0-b7b7-64200395a438",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "!gpu-tracker sub-track combine --stf=combined-file.csv -p sub-tracking1.csv -p sub-tracking2.csv"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "af5f9011-e204-4fe2-8345-b768870814f4",
+ "metadata": {},
+ "source": [
+ "#### Analysis"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "f7938e80-5d6d-4739-b346-aea1680831e9",
+ "metadata": {},
+ "source": [
+ "Once a tracking and sub-tracking file is available, the `sub-track analyze` subcommand can generate the sub-tracking results. These can be stored in JSON, text, or pickle format where the pickle format is the same as the `SubTrackingResults` object from the API. If the `--output` option is specified, the content can be stored in the given file path. By default, the content prints to the screen and it is in text format."
+ ] + }, + { + "cell_type": "code", + "execution_count": 39, + "id": "687f9c0c-bc6d-448c-afdd-17df7a18a6e6", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Overall:\n", + "\t min max mean std\n", + "\tmain_ram 0.341860 0.944374 0.856037 0.125014\n", + "\tdescendants_ram 0.000000 0.000000 0.000000 0.000000\n", + "\tcombined_ram 0.341860 0.944374 0.856037 0.125014\n", + "\tsystem_ram 4.859711 5.553644 5.253445 0.134081\n", + "\tmain_gpu_ram 0.000000 0.506000 0.429920 0.170432\n", + "\tdescendants_gpu_ram 0.000000 0.000000 0.000000 0.000000\n", + "\tcombined_gpu_ram 0.000000 0.506000 0.429920 0.170432\n", + "\tsystem_gpu_ram 0.215000 0.727000 0.650320 0.172010\n", + "\tgpu_sum_utilization_percent 0.000000 3.000000 0.120000 0.600000\n", + "\tgpu_hardware_utilization_percent 0.000000 3.000000 0.120000 0.600000\n", + "\tmain_n_threads 12.000000 15.000000 14.720000 0.842615\n", + "\tdescendants_n_threads 0.000000 0.000000 0.000000 0.000000\n", + "\tcombined_n_threads 12.000000 15.000000 14.720000 0.842615\n", + "\tcpu_system_sum_utilization_percent 11.900000 133.400000 119.212000 22.741909\n", + "\tcpu_system_hardware_utilization_percent 0.991667 11.116667 9.934333 1.895159\n", + "\tcpu_main_sum_utilization_percent 78.000000 103.200000 96.924000 6.390767\n", + "\tcpu_main_hardware_utilization_percent 6.500000 8.600000 8.077000 0.532564\n", + "\tcpu_descendants_sum_utilization_percent 0.000000 0.000000 0.000000 0.000000\n", + "\tcpu_descendants_hardware_utilization_percent 0.000000 0.000000 0.000000 0.000000\n", + "\tcpu_combined_sum_utilization_percent 78.000000 103.200000 96.924000 6.390767\n", + "\tcpu_combined_hardware_utilization_percent 6.500000 8.600000 8.077000 0.532564\n", + "Static Data:\n", + "\t ram_unit gpu_ram_unit time_unit ram_system_capacity gpu_ram_system_capacity system_core_count n_expected_cores system_gpu_count n_expected_gpus\n", + "\t gigabytes gigabytes hours 67.254166 16.376 12 12 1 1\n", + "Code 
Block Results:\n", + "\tName: my-code-block\n", + "\tNum Timepoints: 12\n", + "\tNum Calls: 3\n", + "\tNum Non Empty Calls: 3\n", + "\tCompute Time:\n", + "\t\t min max mean std total\n", + "\t\t 2.580433 2.789909 2.651185 0.120147 7.953554\n", + "\tResource Usage:\n", + "\t\t min max mean std\n", + "\t\tmain_ram 0.341860 0.936559 0.808736 0.167663\n", + "\t\tdescendants_ram 0.000000 0.000000 0.000000 0.000000\n", + "\t\tcombined_ram 0.341860 0.936559 0.808736 0.167663\n", + "\t\tsystem_ram 4.859711 5.553644 5.231854 0.191567\n", + "\t\tmain_gpu_ram 0.000000 0.506000 0.363500 0.225892\n", + "\t\tdescendants_gpu_ram 0.000000 0.000000 0.000000 0.000000\n", + "\t\tcombined_gpu_ram 0.000000 0.506000 0.363500 0.225892\n", + "\t\tsystem_gpu_ram 0.215000 0.727000 0.583250 0.228088\n", + "\t\tgpu_sum_utilization_percent 0.000000 0.000000 0.000000 0.000000\n", + "\t\tgpu_hardware_utilization_percent 0.000000 0.000000 0.000000 0.000000\n", + "\t\tmain_n_threads 12.000000 15.000000 14.416667 1.164500\n", + "\t\tdescendants_n_threads 0.000000 0.000000 0.000000 0.000000\n", + "\t\tcombined_n_threads 12.000000 15.000000 14.416667 1.164500\n", + "\t\tcpu_system_sum_utilization_percent 11.900000 130.800000 113.641667 32.352363\n", + "\t\tcpu_system_hardware_utilization_percent 0.991667 10.900000 9.470139 2.696030\n", + "\t\tcpu_main_sum_utilization_percent 79.600000 103.100000 96.583333 6.726587\n", + "\t\tcpu_main_hardware_utilization_percent 6.633333 8.591667 8.048611 0.560549\n", + "\t\tcpu_descendants_sum_utilization_percent 0.000000 0.000000 0.000000 0.000000\n", + "\t\tcpu_descendants_hardware_utilization_percent 0.000000 0.000000 0.000000 0.000000\n", + "\t\tcpu_combined_sum_utilization_percent 79.600000 103.100000 96.583333 6.726587\n", + "\t\tcpu_combined_hardware_utilization_percent 6.633333 8.591667 8.048611 0.560549\n", + "\n", + "\tName: my-function\n", + "\tNum Timepoints: 12\n", + "\tNum Calls: 3\n", + "\tNum Non Empty Calls: 3\n", + "\tCompute Time:\n", + "\t\t min 
max mean std total\n", + "\t\t 2.538011 2.577679 2.553176 0.021419 7.659528\n", + "\tResource Usage:\n", + "\t\t min max mean std\n", + "\t\tmain_ram 0.864592 0.944374 0.896998 0.034505\n", + "\t\tdescendants_ram 0.000000 0.000000 0.000000 0.000000\n", + "\t\tcombined_ram 0.864592 0.944374 0.896998 0.034505\n", + "\t\tsystem_ram 5.203415 5.315219 5.271566 0.038751\n", + "\t\tmain_gpu_ram 0.314000 0.506000 0.490000 0.055426\n", + "\t\tdescendants_gpu_ram 0.000000 0.000000 0.000000 0.000000\n", + "\t\tcombined_gpu_ram 0.314000 0.506000 0.490000 0.055426\n", + "\t\tsystem_gpu_ram 0.535000 0.727000 0.711000 0.055426\n", + "\t\tgpu_sum_utilization_percent 0.000000 3.000000 0.250000 0.866025\n", + "\t\tgpu_hardware_utilization_percent 0.000000 3.000000 0.250000 0.866025\n", + "\t\tmain_n_threads 15.000000 15.000000 15.000000 0.000000\n", + "\t\tdescendants_n_threads 0.000000 0.000000 0.000000 0.000000\n", + "\t\tcombined_n_threads 15.000000 15.000000 15.000000 0.000000\n", + "\t\tcpu_system_sum_utilization_percent 120.300000 133.400000 124.566667 4.001439\n", + "\t\tcpu_system_hardware_utilization_percent 10.025000 11.116667 10.380556 0.333453\n", + "\t\tcpu_main_sum_utilization_percent 94.700000 103.200000 98.841667 2.677332\n", + "\t\tcpu_main_hardware_utilization_percent 7.891667 8.600000 8.236806 0.223111\n", + "\t\tcpu_descendants_sum_utilization_percent 0.000000 0.000000 0.000000 0.000000\n", + "\t\tcpu_descendants_hardware_utilization_percent 0.000000 0.000000 0.000000 0.000000\n", + "\t\tcpu_combined_sum_utilization_percent 94.700000 103.200000 98.841667 2.677332\n", + "\t\tcpu_combined_hardware_utilization_percent 7.891667 8.600000 8.236806 0.223111\n", + "\n", + "\n" + ] + } + ], + "source": [ + "!gpu-tracker sub-track analyze --tf=tracking.csv --stf=sub-tracking.csv" + ] + }, + { + "cell_type": "markdown", + "id": "ad33959f-9a1b-4513-bba9-2d3edd202094", + "metadata": {}, + "source": [ + "The overall resource usage of the tracking session is provided as well as 
its static data. This is followed by the compute time and resource usage of each code block." + ] + }, + { + "cell_type": "markdown", + "id": "674be168-fa7e-4e9e-9f80-aee90b200084", + "metadata": {}, + "source": [ + "#### Comparison" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "364d930d-cfb1-4cda-941a-7e88704f15c9", + "metadata": {}, + "outputs": [], + "source": [ + "Storing the results of the sub-tracking analysis in a pickle file allows for one tracking session to be compared to another." + ] + }, + { + "cell_type": "code", + "execution_count": 3, + "id": "3d4da609-5d5c-42ac-a031-225067571682", + "metadata": {}, + "outputs": [], + "source": [ + "!gpu-tracker sub-track analyze --tf=tracking.csv --stf=sub-tracking.csv --format=pickle --output=my-results.pkl" + ] + }, + { + "cell_type": "markdown", + "id": "3befd685-c4e6-4736-bce4-260271342ba0", + "metadata": {}, + "source": [ + "The `sub-track compare` subcommand compares the computational resource usage of multiple tracking sessions. This is useful when you want to determine how a change can impact the computational efficiency of your process, whether it be different input data, an alternative implementation, etc. The `-m` option creates a mapping from the given name of a tracking session to the file path where its sub-tracking results are stored in pickle format. Say you wanted to call one tracking session 'A' and then the second tracking session 'B' where the results of tracking session 'A' are stored in 'results.pkl' and that of session 'B' are in 'results2.pkl'." 
+ ] + }, + { + "cell_type": "code", + "execution_count": 4, + "id": "0afbfad8-4bce-4f36-bdb0-475e8afe633b", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Overall Resource Usage:\n", + "\tMain Ram:\n", + "\t\t B A\n", + "\t\t 0.856037 0.861921\n", + "\tDescendants Ram:\n", + "\t\t A B\n", + "\t\t 0.0 0.0\n", + "\tCombined Ram:\n", + "\t\t B A\n", + "\t\t 0.856037 0.861921\n", + "\tSystem Ram:\n", + "\t\t B A\n", + "\t\t 5.253445 5.281926\n", + "\tMain Gpu Ram:\n", + "\t\t B A\n", + "\t\t 0.42992 0.448364\n", + "\tDescendants Gpu Ram:\n", + "\t\t A B\n", + "\t\t 0.0 0.0\n", + "\tCombined Gpu Ram:\n", + "\t\t B A\n", + "\t\t 0.42992 0.448364\n", + "\tSystem Gpu Ram:\n", + "\t\t B A\n", + "\t\t 0.65032 0.668909\n", + "\tGpu Sum Utilization Percent:\n", + "\t\t A B\n", + "\t\t 0.0 0.12\n", + "\tGpu Hardware Utilization Percent:\n", + "\t\t A B\n", + "\t\t 0.0 0.12\n", + "\tMain N Threads:\n", + "\t\t B A\n", + "\t\t 14.72 14.757576\n", + "\tDescendants N Threads:\n", + "\t\t A B\n", + "\t\t 0.0 0.0\n", + "\tCombined N Threads:\n", + "\t\t B A\n", + "\t\t 14.72 14.757576\n", + "\tCpu System Sum Utilization Percent:\n", + "\t\t B A\n", + "\t\t 119.212 121.918182\n", + "\tCpu System Hardware Utilization Percent:\n", + "\t\t B A\n", + "\t\t 9.934333 10.159848\n", + "\tCpu Main Sum Utilization Percent:\n", + "\t\t B A\n", + "\t\t 96.924 99.060606\n", + "\tCpu Main Hardware Utilization Percent:\n", + "\t\t B A\n", + "\t\t 8.077 8.255051\n", + "\tCpu Descendants Sum Utilization Percent:\n", + "\t\t A B\n", + "\t\t 0.0 0.0\n", + "\tCpu Descendants Hardware Utilization Percent:\n", + "\t\t A B\n", + "\t\t 0.0 0.0\n", + "\tCpu Combined Sum Utilization Percent:\n", + "\t\t B A\n", + "\t\t 96.924 99.060606\n", + "\tCpu Combined Hardware Utilization Percent:\n", + "\t\t B A\n", + "\t\t 8.077 8.255051\n", + "Code Block Resource Usage:\n", + "\tMain Ram:\n", + "my-code-block:\n", + "\t\t\t B A\n", + "\t\t\t 0.808736 0.846999\n", + 
"my-function:\n", + "\t\t\t A B\n", + "\t\t\t 0.888034 0.896998\n", + "\tDescendants Ram:\n", + "my-code-block:\n", + "\t\t\t A B\n", + "\t\t\t 0.0 0.0\n", + "my-function:\n", + "\t\t\t A B\n", + "\t\t\t 0.0 0.0\n", + "\tCombined Ram:\n", + "my-code-block:\n", + "\t\t\t B A\n", + "\t\t\t 0.808736 0.846999\n", + "my-function:\n", + "\t\t\t A B\n", + "\t\t\t 0.888034 0.896998\n", + "\tSystem Ram:\n", + "my-code-block:\n", + "\t\t\t A B\n", + "\t\t\t 5.170665 5.231854\n", + "my-function:\n", + "\t\t\t B A\n", + "\t\t\t 5.271566 5.476632\n", + "\tMain Gpu Ram:\n", + "my-code-block:\n", + "\t\t\t B A\n", + "\t\t\t 0.3635 0.415429\n", + "my-function:\n", + "\t\t\t B A\n", + "\t\t\t 0.49 0.506\n", + "\tDescendants Gpu Ram:\n", + "my-code-block:\n", + "\t\t\t A B\n", + "\t\t\t 0.0 0.0\n", + "my-function:\n", + "\t\t\t A B\n", + "\t\t\t 0.0 0.0\n", + "\tCombined Gpu Ram:\n", + "my-code-block:\n", + "\t\t\t B A\n", + "\t\t\t 0.3635 0.415429\n", + "my-function:\n", + "\t\t\t B A\n", + "\t\t\t 0.49 0.506\n", + "\tSystem Gpu Ram:\n", + "my-code-block:\n", + "\t\t\t B A\n", + "\t\t\t 0.58325 0.635714\n", + "my-function:\n", + "\t\t\t B A\n", + "\t\t\t 0.711 0.727\n", + "\tGpu Sum Utilization Percent:\n", + "my-code-block:\n", + "\t\t\t A B\n", + "\t\t\t 0.0 0.0\n", + "my-function:\n", + "\t\t\t A B\n", + "\t\t\t 0.0 0.25\n", + "\tGpu Hardware Utilization Percent:\n", + "my-code-block:\n", + "\t\t\t A B\n", + "\t\t\t 0.0 0.0\n", + "my-function:\n", + "\t\t\t A B\n", + "\t\t\t 0.0 0.25\n", + "\tMain N Threads:\n", + "my-code-block:\n", + "\t\t\t B A\n", + "\t\t\t 14.416667 14.619048\n", + "my-function:\n", + "\t\t\t A B\n", + "\t\t\t 15.0 15.0\n", + "\tDescendants N Threads:\n", + "my-code-block:\n", + "\t\t\t A B\n", + "\t\t\t 0.0 0.0\n", + "my-function:\n", + "\t\t\t A B\n", + "\t\t\t 0.0 0.0\n", + "\tCombined N Threads:\n", + "my-code-block:\n", + "\t\t\t B A\n", + "\t\t\t 14.416667 14.619048\n", + "my-function:\n", + "\t\t\t A B\n", + "\t\t\t 15.0 15.0\n", + "\tCpu System Sum 
Utilization Percent:\n", + "my-code-block:\n", + "\t\t\t B A\n", + "\t\t\t 113.641667 120.142857\n", + "my-function:\n", + "\t\t\t B A\n", + "\t\t\t 124.566667 125.025\n", + "\tCpu System Hardware Utilization Percent:\n", + "my-code-block:\n", + "\t\t\t B A\n", + "\t\t\t 9.470139 10.011905\n", + "my-function:\n", + "\t\t\t B A\n", + "\t\t\t 10.380556 10.41875\n", + "\tCpu Main Sum Utilization Percent:\n", + "my-code-block:\n", + "\t\t\t B A\n", + "\t\t\t 96.583333 98.652381\n", + "my-function:\n", + "\t\t\t B A\n", + "\t\t\t 98.841667 99.775\n", + "\tCpu Main Hardware Utilization Percent:\n", + "my-code-block:\n", + "\t\t\t B A\n", + "\t\t\t 8.048611 8.221032\n", + "my-function:\n", + "\t\t\t B A\n", + "\t\t\t 8.236806 8.314583\n", + "\tCpu Descendants Sum Utilization Percent:\n", + "my-code-block:\n", + "\t\t\t A B\n", + "\t\t\t 0.0 0.0\n", + "my-function:\n", + "\t\t\t A B\n", + "\t\t\t 0.0 0.0\n", + "\tCpu Descendants Hardware Utilization Percent:\n", + "my-code-block:\n", + "\t\t\t A B\n", + "\t\t\t 0.0 0.0\n", + "my-function:\n", + "\t\t\t A B\n", + "\t\t\t 0.0 0.0\n", + "\tCpu Combined Sum Utilization Percent:\n", + "my-code-block:\n", + "\t\t\t B A\n", + "\t\t\t 96.583333 98.652381\n", + "my-function:\n", + "\t\t\t B A\n", + "\t\t\t 98.841667 99.775\n", + "\tCpu Combined Hardware Utilization Percent:\n", + "my-code-block:\n", + "\t\t\t B A\n", + "\t\t\t 8.048611 8.221032\n", + "my-function:\n", + "\t\t\t B A\n", + "\t\t\t 8.236806 8.314583\n", + "Code Block Compute Time:\n", + "my-code-block:\n", + "\t\t B A\n", + "\t\t 2.651185 2.68558\n", + "my-function:\n", + "\t\t B A\n", + "\t\t 2.553176 2.559218\n", + "\n" + ] + } + ], + "source": [ + "!gpu-tracker sub-track compare -m A=results.pkl -m B=results2.pkl" + ] + }, + { + "cell_type": "markdown", + "id": "4604b48c-3fdb-4442-beb8-91e2886656f9", + "metadata": {}, + "source": [ + "Both the overall usage is compared and per code block. The default format is text and the default output is printing to the console. 
The `--format` and `--output` options can be configured similarly to those in the `sub-track analyze` subcommand. By default, the 'mean' of measurements is compared. Alternatively, the `--stat` option can be set to 'min', 'max', or 'std' to compare a different summary statistic." + ] } ], "metadata": { diff --git a/docs/tutorial.rst b/docs/tutorial.rst index bb80bd7..fcfad94 100644 --- a/docs/tutorial.rst +++ b/docs/tutorial.rst @@ -6,17 +6,24 @@ Tutorial API --- +Tracking +~~~~~~~~ + +Basics +^^^^^^ + The ``gpu_tracker`` package provides the ``Tracker`` class which uses a subprocess to measure computational resource usage, namely the compute time, maximum CPU utilization, mean CPU utilization, maximum RAM used, maximum GPU utilization, mean GPU utilization, and maximum GPU RAM used. -The ``start()`` method starts this process which tracks usage in the -background. After calling ``start()``, one can write the code for which -resource usage is measured, followed by calling the ``stop()`` method. -The compute time will be the time from the call to ``start()`` to the -call to ``stop()`` and the RAM, GPU RAM, CPU utilization, and GPU -utilization quantities will be the respective computational resources -used by the code that’s in between ``start()`` and ``stop()``. +It supports both NVIDIA and AMD GPUs. The ``start()`` method starts this +process which tracks usage in the background. The ``Tracker`` class can +be used as a context manager. Upon entering the context, one can write +the code for which resource usage is measured. The compute time will be +the time from entering the context to exiting the context and the RAM, +GPU RAM, CPU utilization, and GPU utilization quantities will be the +respective computational resources used by the code that’s within the +context. .. code:: python3 @@ -25,10 +32,8 @@ used by the code that’s in between ``start()`` and ``stop()``. .. 
code:: python3 - tracker = gput.Tracker(n_expected_cores=1, sleep_time=0.1) - tracker.start() - example_function() - tracker.stop() + with gput.Tracker(n_expected_cores=1, sleep_time=0.1) as tracker: + example_function() The ``Tracker`` class implements the ``__str__`` method so it can be printed as a string with the values and units of each computational @@ -44,19 +49,19 @@ resource formatted. Max RAM: Unit: gigabytes System capacity: 67.254 - System: 2.001 + System: 4.307 Main: - Total RSS: 0.94 - Private RSS: 0.786 - Shared RSS: 0.165 + Total RSS: 0.924 + Private RSS: 0.755 + Shared RSS: 0.171 Descendants: Total RSS: 0.0 Private RSS: 0.0 Shared RSS: 0.0 Combined: - Total RSS: 0.94 - Private RSS: 0.786 - Shared RSS: 0.165 + Total RSS: 0.924 + Private RSS: 0.755 + Shared RSS: 0.171 Max GPU RAM: Unit: gigabytes System capacity: 16.376 @@ -68,25 +73,25 @@ resource formatted. System core count: 12 Number of expected cores: 1 System: - Max sum percent: 162.3 - Max hardware percent: 13.525 - Mean sum percent: 144.283 - Mean hardware percent: 12.024 + Max sum percent: 222.6 + Max hardware percent: 18.55 + Mean sum percent: 149.285 + Mean hardware percent: 12.44 Main: - Max sum percent: 101.4 - Max hardware percent: 101.4 - Mean sum percent: 96.7 - Mean hardware percent: 96.7 + Max sum percent: 103.3 + Max hardware percent: 103.3 + Mean sum percent: 94.285 + Mean hardware percent: 94.285 Descendants: Max sum percent: 0.0 Max hardware percent: 0.0 Mean sum percent: 0.0 Mean hardware percent: 0.0 Combined: - Max sum percent: 101.4 - Max hardware percent: 101.4 - Mean sum percent: 96.7 - Mean hardware percent: 96.7 + Max sum percent: 103.3 + Max hardware percent: 103.3 + Mean sum percent: 94.285 + Mean hardware percent: 94.285 Main number of threads: 15 Descendants number of threads: 0 Combined number of threads: 15 @@ -96,8 +101,8 @@ resource formatted. 
GPU percentages: Max sum percent: 5.0 Max hardware percent: 5.0 - Mean sum percent: 0.417 - Mean hardware percent: 0.417 + Mean sum percent: 0.385 + Mean hardware percent: 0.385 Compute time: Unit: hours Time: 0.001 @@ -167,17 +172,23 @@ into account shared memory but rather adds up the total RSS of all processes, which can lead to an overestimation. For Linux distributions, however, pieces of shared memory are only counted once.* -The ``Tracker`` can alternatively be used as a context manager rather -than explicitly calling ``start()`` and ``stop()``. +The ``Tracker`` can alternatively be used by explicitly calling its +``start()`` and ``stop()`` methods which behave the same as entering and +exiting the context manager respectively. .. code:: python3 - with gput.Tracker() as tracker: - example_function() + tracker = gput.Tracker() + tracker.start() + example_function() + tracker.stop() + +Arguments and Attributes +^^^^^^^^^^^^^^^^^^^^^^^^ -The units of the computational resources can be modified as desired. For -example, to measure the RAM in megabytes, the GPU RAM in megabytes, and -the compute time in seconds: +The units of the computational resources can be modified as desired. The +following example measures the RAM in megabytes, the GPU RAM in +megabytes, and the compute time in seconds. .. 
code:: python3 @@ -190,24 +201,24 @@ the compute time in seconds: Max RAM: Unit: megabytes - System capacity: 67254.17 - System: 2336.362 + System capacity: 67254.166 + System: 1984.791 Main: - Total RSS: 919.99 - Private RSS: 699.384 - Shared RSS: 230.269 + Total RSS: 873.853 + Private RSS: 638.353 + Shared RSS: 235.68 Descendants: Total RSS: 0.0 Private RSS: 0.0 Shared RSS: 0.0 Combined: - Total RSS: 919.99 - Private RSS: 699.384 - Shared RSS: 230.269 + Total RSS: 873.853 + Private RSS: 638.353 + Shared RSS: 235.68 Max GPU RAM: Unit: megabytes System capacity: 16376.0 - System: 727.0 + System: 728.0 Main: 506.0 Descendants: 0.0 Combined: 506.0 @@ -215,25 +226,25 @@ the compute time in seconds: System core count: 12 Number of expected cores: 12 System: - Max sum percent: 166.5 - Max hardware percent: 13.875 - Mean sum percent: 144.55 - Mean hardware percent: 12.046 + Max sum percent: 161.6 + Max hardware percent: 13.467 + Mean sum percent: 145.517 + Mean hardware percent: 12.126 Main: - Max sum percent: 104.8 - Max hardware percent: 8.733 - Mean sum percent: 97.458 - Mean hardware percent: 8.122 + Max sum percent: 101.5 + Max hardware percent: 8.458 + Mean sum percent: 98.683 + Mean hardware percent: 8.224 Descendants: Max sum percent: 0.0 Max hardware percent: 0.0 Mean sum percent: 0.0 Mean hardware percent: 0.0 Combined: - Max sum percent: 104.8 - Max hardware percent: 8.733 - Mean sum percent: 97.458 - Mean hardware percent: 8.122 + Max sum percent: 101.5 + Max hardware percent: 8.458 + Mean sum percent: 98.683 + Mean hardware percent: 8.224 Main number of threads: 15 Descendants number of threads: 0 Combined number of threads: 15 @@ -241,13 +252,13 @@ the compute time in seconds: System GPU count: 1 Number of expected GPUs: 1 GPU percentages: - Max sum percent: 0.0 - Max hardware percent: 0.0 - Mean sum percent: 0.0 - Mean hardware percent: 0.0 + Max sum percent: 3.0 + Max hardware percent: 3.0 + Mean sum percent: 0.25 + Mean hardware percent: 0.25 Compute 
time: Unit: seconds - Time: 2.685 + Time: 2.729 The same information as the text format can be provided as a dictionary @@ -264,12 +275,12 @@ via the ``to_json()`` method of the ``Tracker``. { "max_ram": { "unit": "megabytes", - "system_capacity": 67254.1696, - "system": 2336.3624959999997, + "system_capacity": 67254.165504, + "system": 1984.790528, "main": { - "total_rss": 919.9902719999999, - "private_rss": 699.3838079999999, - "shared_rss": 230.268928 + "total_rss": 873.8529279999999, + "private_rss": 638.353408, + "shared_rss": 235.679744 }, "descendants": { "total_rss": 0.0, @@ -277,15 +288,15 @@ via the ``to_json()`` method of the ``Tracker``. "shared_rss": 0.0 }, "combined": { - "total_rss": 919.9902719999999, - "private_rss": 699.3838079999999, - "shared_rss": 230.268928 + "total_rss": 873.8529279999999, + "private_rss": 638.353408, + "shared_rss": 235.679744 } }, "max_gpu_ram": { "unit": "megabytes", "system_capacity": 16376.0, - "system": 727.0, + "system": 728.0, "main": 506.0, "descendants": 0.0, "combined": 506.0 @@ -294,16 +305,16 @@ via the ``to_json()`` method of the ``Tracker``. "system_core_count": 12, "n_expected_cores": 12, "system": { - "max_sum_percent": 166.5, - "max_hardware_percent": 13.875, - "mean_sum_percent": 144.55, - "mean_hardware_percent": 12.045833333333333 + "max_sum_percent": 161.60000000000002, + "max_hardware_percent": 13.466666666666669, + "mean_sum_percent": 145.51666666666668, + "mean_hardware_percent": 12.12638888888889 }, "main": { - "max_sum_percent": 104.8, - "max_hardware_percent": 8.733333333333333, - "mean_sum_percent": 97.45833333333333, - "mean_hardware_percent": 8.121527777777779 + "max_sum_percent": 101.5, + "max_hardware_percent": 8.458333333333334, + "mean_sum_percent": 98.68333333333334, + "mean_hardware_percent": 8.22361111111111 }, "descendants": { "max_sum_percent": 0.0, @@ -312,10 +323,10 @@ via the ``to_json()`` method of the ``Tracker``. 
"mean_hardware_percent": 0.0 }, "combined": { - "max_sum_percent": 104.8, - "max_hardware_percent": 8.733333333333333, - "mean_sum_percent": 97.45833333333333, - "mean_hardware_percent": 8.121527777777779 + "max_sum_percent": 101.5, + "max_hardware_percent": 8.458333333333334, + "mean_sum_percent": 98.68333333333334, + "mean_hardware_percent": 8.22361111111111 }, "main_n_threads": 15, "descendants_n_threads": 0, @@ -325,15 +336,15 @@ via the ``to_json()`` method of the ``Tracker``. "system_gpu_count": 1, "n_expected_gpus": 1, "gpu_percentages": { - "max_sum_percent": 0.0, - "max_hardware_percent": 0.0, - "mean_sum_percent": 0.0, - "mean_hardware_percent": 0.0 + "max_sum_percent": 3.0, + "max_hardware_percent": 3.0, + "mean_sum_percent": 0.25, + "mean_hardware_percent": 0.25 } }, "compute_time": { "unit": "seconds", - "time": 2.684972047805786 + "time": 2.728560209274292 } } @@ -351,7 +362,7 @@ information for each individual computational resource. .. code:: none - MaxRAM(unit='megabytes', system_capacity=67254.1696, system=2336.3624959999997, main=RSSValues(total_rss=919.9902719999999, private_rss=699.3838079999999, shared_rss=230.268928), descendants=RSSValues(total_rss=0.0, private_rss=0.0, shared_rss=0.0), combined=RSSValues(total_rss=919.9902719999999, private_rss=699.3838079999999, shared_rss=230.268928)) + MaxRAM(unit='megabytes', system_capacity=67254.165504, system=1984.790528, main=RSSValues(total_rss=873.8529279999999, private_rss=638.353408, shared_rss=235.679744), descendants=RSSValues(total_rss=0.0, private_rss=0.0, shared_rss=0.0), combined=RSSValues(total_rss=873.8529279999999, private_rss=638.353408, shared_rss=235.679744)) @@ -377,7 +388,7 @@ information for each individual computational resource. .. 
code:: none - RSSValues(total_rss=919.9902719999999, private_rss=699.3838079999999, shared_rss=230.268928) + RSSValues(total_rss=873.8529279999999, private_rss=638.353408, shared_rss=235.679744) @@ -390,7 +401,7 @@ information for each individual computational resource. .. code:: none - 919.9902719999999 + 873.8529279999999 @@ -403,7 +414,7 @@ information for each individual computational resource. .. code:: none - MaxGPURAM(unit='megabytes', system_capacity=16376.0, system=727.0, main=506.0, descendants=0.0, combined=506.0) + MaxGPURAM(unit='megabytes', system_capacity=16376.0, system=728.0, main=506.0, descendants=0.0, combined=506.0) @@ -416,42 +427,9 @@ information for each individual computational resource. .. code:: none - ComputeTime(unit='seconds', time=2.684972047805786) - - - -Sometimes the code can fail. In order to collect the resource usage up -to the point of failure, use a try/except block like so: - -.. code:: python3 - - try: - with gput.Tracker() as tracker: - example_function() - raise RuntimeError('AN ERROR') - except Exception as error: - print(f'The following error occured while tracking: {error}') - finally: - print(tracker.resource_usage.max_gpu_ram.main) - - -.. code:: none - - The following error occured while tracking: AN ERROR - 0.506 + ComputeTime(unit='seconds', time=2.728560209274292) -If you do not catch the error in your code or if tracking otherwise is -interrupted (e.g. you are debugging your code and you stop partway), the -``resource_usage`` attribute will not be set and that information will -not be able to be obtained in memory. In such a case, the -``resource_usage`` attribute will be stored in a hidden pickle file in -the working directory with a randomly generated name. Its file path can -be optionally overriden with the ``resource_usage_file`` parameter. - -.. code:: python3 - - tracker = gput.Tracker(resource_usage_file='path/to/my-file.pkl') Below is an example of using a child process. 
Notice the descendants fields are now non-zero. @@ -461,7 +439,7 @@ fields are now non-zero. import multiprocessing as mp ctx = mp.get_context(method='spawn') child_process = ctx.Process(target=example_function) - with gput.Tracker(n_expected_cores=2, sleep_time=0.2) as tracker: + with gput.Tracker(n_expected_cores=2, sleep_time=0.4) as tracker: child_process.start() example_function() child_process.join() @@ -474,23 +452,23 @@ fields are now non-zero. Max RAM: Unit: gigabytes System capacity: 67.254 - System: 3.033 + System: 2.388 Main: - Total RSS: 0.865 - Private RSS: 0.55 - Shared RSS: 0.32 + Total RSS: 0.849 + Private RSS: 0.528 + Shared RSS: 0.325 Descendants: - Total RSS: 0.854 - Private RSS: 0.737 - Shared RSS: 0.118 + Total RSS: 0.845 + Private RSS: 0.734 + Shared RSS: 0.112 Combined: - Total RSS: 1.437 - Private RSS: 1.125 - Shared RSS: 0.32 + Total RSS: 1.371 + Private RSS: 1.05 + Shared RSS: 0.325 Max GPU RAM: Unit: gigabytes System capacity: 16.376 - System: 1.235 + System: 1.236 Main: 0.506 Descendants: 0.506 Combined: 1.012 @@ -498,25 +476,25 @@ fields are now non-zero. 
System core count: 12 Number of expected cores: 2 System: - Max sum percent: 456.5 - Max hardware percent: 38.042 - Mean sum percent: 216.675 - Mean hardware percent: 18.056 + Max sum percent: 338.0 + Max hardware percent: 28.167 + Mean sum percent: 183.644 + Mean hardware percent: 15.304 Main: - Max sum percent: 102.6 - Max hardware percent: 51.3 - Mean sum percent: 66.65 - Mean hardware percent: 33.325 + Max sum percent: 101.0 + Max hardware percent: 50.5 + Mean sum percent: 60.178 + Mean hardware percent: 30.089 Descendants: - Max sum percent: 175.8 - Max hardware percent: 87.9 - Mean sum percent: 105.392 - Mean hardware percent: 52.696 + Max sum percent: 354.1 + Max hardware percent: 177.05 + Mean sum percent: 109.033 + Mean hardware percent: 54.517 Combined: - Max sum percent: 278.4 - Max hardware percent: 139.2 - Mean sum percent: 172.042 - Mean hardware percent: 86.021 + Max sum percent: 452.2 + Max hardware percent: 226.1 + Mean sum percent: 169.211 + Mean hardware percent: 84.606 Main number of threads: 15 Descendants number of threads: 13 Combined number of threads: 28 @@ -524,18 +502,1206 @@ fields are now non-zero. System GPU count: 1 Number of expected GPUs: 1 GPU percentages: - Max sum percent: 8.0 - Max hardware percent: 8.0 - Mean sum percent: 1.333 - Mean hardware percent: 1.333 + Max sum percent: 5.0 + Max hardware percent: 5.0 + Mean sum percent: 0.556 + Mean hardware percent: 0.556 Compute time: Unit: hours Time: 0.001 +Sometimes the code can fail. In order to collect the resource usage up +to the point of failure, use a try/except block like so: + +.. code:: python3 + + try: + with gput.Tracker() as tracker: + example_function() + raise RuntimeError('AN ERROR') + except Exception as error: + print(f'The following error occured while tracking: {error}') + finally: + print(tracker.resource_usage.max_gpu_ram.main) + + +.. 
code:: none
+
+    The following error occured while tracking: AN ERROR
+    0.506
+
+
+If you do not catch the error in your code or if tracking otherwise is
+interrupted (e.g. you are debugging your code and you stop partway), the
+``resource_usage`` attribute will not be set and that information will
+not be able to be obtained in memory. In such a case, the
+``resource_usage`` attribute will be stored in a hidden pickle file in
+the working directory with a randomly generated name. Its file path can
+be optionally overridden with the ``resource_usage_file`` parameter.
+
+.. code:: python3
+
+    tracker = gput.Tracker(resource_usage_file='path/to/my-file.pkl')
+
+While the ``Tracker`` class automatically detects which brand of GPU is
+installed (either NVIDIA or AMD), one can explicitly choose the GPU
+brand with the ``gpu_brand`` parameter
+
+.. code:: python3
+
+    tracker = gput.Tracker(gpu_brand='nvidia')
+
+While the ``Tracker`` by default stores aggregates of the computational
+resource usage across the timepoints, one can store the individual
+measured values at every timepoint in a file, either CSV or SQLite
+format, using the ``tracking_file`` parameter. **NOTE** for the CSV
+format, the static data (e.g. RAM system capacity, number of cores in
+the OS, etc.) is stored on the first two rows with the headers on
+the first row followed by the static data on the second row. The headers
+of the timepoint data are on the third row followed by the timepoint data
+on the remaining rows. The SQLite file, however, stores the static data
+and timepoint data in different tables: "data" and "static_data"
+respectively.
+
+.. 
code:: python3
+
+    tracker = gput.Tracker(tracking_file='my-file.csv')
+    tracker = gput.Tracker(tracking_file='my-file.sqlite')
+
+Sub-tracking
+~~~~~~~~~~~~
+
+Logging Code Block Timestamps
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+While the ``Tracker`` object by itself can track a block of code, there
+are some cases where one might want to track one code block and a
+smaller code block within it or track multiple code blocks at a time
+without creating several tracking processes simultaneously, especially
+when tracking a code block that is called within multi-processing or a
+code block that is called several times. Similarly, one might want to
+track the resource usage of a particular function whenever it is called.
+Whether a function or some other specified code block, the
+``SubTracker`` class can determine the computational resources used
+during the start times and stop times of a given code block. This
+includes the mean resources used during the times the code block is
+called, the mean time taken to complete the code block each time it is
+called, the number of times it is called, etc. Sub-tracking uses the
+tracking file specified by the ``tracking_file`` parameter of the
+``Tracker`` object alongside a sub-tracking file which contains the start
+and stop times of each code block one desires to sub-track. The
+sub-tracking file can be created in Python using the ``SubTracker``
+class, a context manager around the desired code block. Setting the
+``overwrite`` parameter (default ``False``) of the ``Tracker`` and
+``SubTracker`` to ``True`` overwrites the ``tracking_file`` or
+``sub_tracking_file`` respectively if a file of that path already
+exists. Keep this parameter at ``False`` to avoid loss of data if it is
+still needed.
+
+.. 
code:: python3 + + tracker = gput.Tracker(sleep_time=0.5, tracking_file='tracking.csv', overwrite=False) + tracker.start() + # Perform other computation here + for _ in range(5): + with gput.SubTracker(code_block_name='my-code-block', sub_tracking_file='sub-tracking.csv', overwrite=False): + example_function() + # Perform other computation here + +In the above example, a tracking session is initiated within the context +of the ``Tracker`` object whose tracking file is ‘tracking.csv’. Then we +have a for loop wherein a function is called 5 times. Other computation +might be performed before or after this for loop, but if the +computational resource usage of the contents of the for loop is of +interest in particular, that code block can be sub-tracked by wrapping +it within the context of the ``SubTracker`` object whose sub-tracking +file is ‘sub-tracking.csv’. Alternatively, SQLite (.sqlite) files can be +used to speed up querying in the case of very long tracking sessions. +The name of the code block is ‘my-code-block’, given to distinguish it +from other code blocks being sub-tracked. + +If one wants to sub-track all calls to a particular function, the +``sub_track`` function decorator can be used instead of wrapping the +function call with a ``SubTracker`` context every time it is called: + +.. code:: python3 + + @gput.sub_track(code_block_name='my-function', sub_tracking_file='sub-tracking.csv', overwrite=False) + def my_function(*args, **kwargs): + example_function() + + for _ in range(3): + my_function() + tracker.stop() + +When sub-tracking a code block using the ``SubTracker`` context, the +default ``code_block_name`` is the relative path of the Python file +followed by a colon followed by the line number where the ``SubTracker`` +context is initialized. When sub-tracking a function, the default +``code_block_name`` is the relative path of the Python file followed by +a colon followed by the name of the function. 
+ +Analysis +^^^^^^^^ + +Once a tracking file and at least one sub-tracking file have been +created, the results can be analyzed using the ``SubTrackingAnalyzer`` +class, instantiated by passing in the path to the tracking file and the +path to the sub-tracking file. + +.. code:: python3 + + analyzer = gput.SubTrackingAnalyzer(tracking_file='tracking.csv', sub_tracking_file='sub-tracking.csv') + +When sub-tracking a code block within a function that’s part of +multi-processing (i.e. called within one of multiple sub-processes), the +sub-tracking file must be unique to that process, which is why the +default ``sub_tracking_file`` is the process ID followed by “.csv”. One +way or another, a different sub-tracking file must be created per worker +to prevent multiple processes from logging to the same file. The +``SubTrackingAnalyzer`` has a ``combine_sub_tracking_files`` method that +can combine these multiple sub-tracking files into a single sub-tracking +file whose path is specified by the ``sub_tracking_file`` parameter +above. Once a sub-tracking file is created from a single process or +combined from multiple, the results can be obtained via the +``sub_tracking_results`` method. + +.. code:: python3 + + results = analyzer.sub_tracking_results() + type(results) + + + + +.. code:: none + + gpu_tracker.sub_tracker.SubTrackingResults + + + +The ``sub_tracking_results`` method returns a ``SubTrackingResults`` +object which contains summary statistics of the overall resource usage +(all time points in the tracking file) and the per code block resource +usage (the timepoints within calls to a code block i.e. the start/stop +times) as ``DataFrame`` or ``Series`` objects from the ``pandas`` +package. + +.. code:: python3 + + results.overall + + + + +.. raw:: html + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
minmaxmeanstd
main_ram0.3412170.9205600.8619210.100084
descendants_ram0.0000000.0000000.0000000.000000
combined_ram0.3412170.9205600.8619210.100084
system_ram4.6026185.7015175.2819260.220270
main_gpu_ram0.0000000.5060000.4483640.151267
descendants_gpu_ram0.0000000.0000000.0000000.000000
combined_gpu_ram0.0000000.5060000.4483640.151267
system_gpu_ram0.2150000.7270000.6689090.152657
gpu_sum_utilization_percent0.0000000.0000000.0000000.000000
gpu_hardware_utilization_percent0.0000000.0000000.0000000.000000
main_n_threads12.00000015.00000014.7575760.791766
descendants_n_threads0.0000000.0000000.0000000.000000
combined_n_threads12.00000015.00000014.7575760.791766
cpu_system_sum_utilization_percent15.400000138.400000121.91818219.484617
cpu_system_hardware_utilization_percent1.28333311.53333310.1598481.623718
cpu_main_sum_utilization_percent91.400000103.30000099.0606062.571228
cpu_main_hardware_utilization_percent7.6166678.6083338.2550510.214269
cpu_descendants_sum_utilization_percent0.0000000.0000000.0000000.000000
cpu_descendants_hardware_utilization_percent0.0000000.0000000.0000000.000000
cpu_combined_sum_utilization_percent91.400000103.30000099.0606062.571228
cpu_combined_hardware_utilization_percent7.6166678.6083338.2550510.214269
+
+ + + +The ``SubTrackingResults`` class additionally contains the static data +i.e. the information that remains constant throughout the tracking +session. + +.. code:: python3 + + results.static_data + + + + +.. code:: none + + ram_unit gigabytes + gpu_ram_unit gigabytes + time_unit hours + ram_system_capacity 67.254166 + gpu_ram_system_capacity 16.376 + system_core_count 12 + n_expected_cores 12 + system_gpu_count 1 + n_expected_gpus 1 + Name: 0, dtype: object + + + +The ``code_block_results`` attribute of the ``SubTrackingResults`` class +is a list of ``CodeBlockResults`` objects, containing the resource usage +and compute time summary statistics. In this case, there are two +``CodeBlockResults`` objects in the list since there were two code +blocks sub-tracked in this tracking session. + +.. code:: python3 + + [my_code_block_results, my_function_results] = results.code_block_results + type(my_code_block_results) + + + + +.. code:: none + + gpu_tracker.sub_tracker.CodeBlockResults + + + +The ``compute_time`` attribute of the ``CodeBlockResults`` class +contains summary statistics for the time spent on the code block, where +``total`` is the total amount of time spent within the code block during +the tracking session, ``mean`` is the average time taken on each call to +the code block, etc. The ``resource_usage`` attribute provides summary +statistics for the computational resources used during calls to the code +block i.e. within the start/stop times. + +.. code:: python3 + + my_code_block_results.compute_time + + + + +.. code:: none + + min 2.630907 + max 2.869182 + mean 2.685580 + std 0.102789 + total 13.427902 + dtype: float64 + + + +.. code:: python3 + + my_code_block_results.resource_usage + + + + +.. raw:: html + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
minmaxmeanstd
main_ram0.3412170.9122780.8469990.122948
descendants_ram0.0000000.0000000.0000000.000000
combined_ram0.3412170.9122780.8469990.122948
system_ram4.6026185.2613575.1706650.147118
main_gpu_ram0.0000000.5060000.4154290.182971
descendants_gpu_ram0.0000000.0000000.0000000.000000
combined_gpu_ram0.0000000.5060000.4154290.182971
system_gpu_ram0.2150000.7270000.6357140.184676
gpu_sum_utilization_percent0.0000000.0000000.0000000.000000
gpu_hardware_utilization_percent0.0000000.0000000.0000000.000000
main_n_threads12.00000015.00000014.6190480.973457
descendants_n_threads0.0000000.0000000.0000000.000000
combined_n_threads12.00000015.00000014.6190480.973457
cpu_system_sum_utilization_percent15.400000138.400000120.14285724.347907
cpu_system_hardware_utilization_percent1.28333311.53333310.0119052.028992
cpu_main_sum_utilization_percent91.400000103.30000098.6523812.733243
cpu_main_hardware_utilization_percent7.6166678.6083338.2210320.227770
cpu_descendants_sum_utilization_percent0.0000000.0000000.0000000.000000
cpu_descendants_hardware_utilization_percent0.0000000.0000000.0000000.000000
cpu_combined_sum_utilization_percent91.400000103.30000098.6523812.733243
cpu_combined_hardware_utilization_percent7.6166678.6083338.2210320.227770
+
+ + + +Additionally, the ``CodeBlockResults`` class also has attributes for the +name of the code block, the number of times it was called during the +tracking session, the number of calls that included at least one +timepoint, and the total number of timepoints measured within all calls +to the code block. + +.. code:: python3 + + my_code_block_results.name, my_code_block_results.num_calls, my_code_block_results.num_non_empty_calls, my_code_block_results.num_timepoints + + + + +.. code:: none + + ('my-code-block', 5, 5, 21) + + + +The analysis results can also be printed in their entirety. +Alternatively, the ``to_json`` method can provide this comprehensive +information in JSON format. + +.. code:: python3 + + print(results) + + +.. code:: none + + Overall: + min max mean std + main_ram 0.341860 0.944374 0.856037 0.125014 + descendants_ram 0.000000 0.000000 0.000000 0.000000 + combined_ram 0.341860 0.944374 0.856037 0.125014 + system_ram 4.859711 5.553644 5.253445 0.134081 + main_gpu_ram 0.000000 0.506000 0.429920 0.170432 + descendants_gpu_ram 0.000000 0.000000 0.000000 0.000000 + combined_gpu_ram 0.000000 0.506000 0.429920 0.170432 + system_gpu_ram 0.215000 0.727000 0.650320 0.172010 + gpu_sum_utilization_percent 0.000000 3.000000 0.120000 0.600000 + gpu_hardware_utilization_percent 0.000000 3.000000 0.120000 0.600000 + main_n_threads 12.000000 15.000000 14.720000 0.842615 + descendants_n_threads 0.000000 0.000000 0.000000 0.000000 + combined_n_threads 12.000000 15.000000 14.720000 0.842615 + cpu_system_sum_utilization_percent 11.900000 133.400000 119.212000 22.741909 + cpu_system_hardware_utilization_percent 0.991667 11.116667 9.934333 1.895159 + cpu_main_sum_utilization_percent 78.000000 103.200000 96.924000 6.390767 + cpu_main_hardware_utilization_percent 6.500000 8.600000 8.077000 0.532564 + cpu_descendants_sum_utilization_percent 0.000000 0.000000 0.000000 0.000000 + cpu_descendants_hardware_utilization_percent 0.000000 0.000000 0.000000 0.000000 + 
cpu_combined_sum_utilization_percent 78.000000 103.200000 96.924000 6.390767 + cpu_combined_hardware_utilization_percent 6.500000 8.600000 8.077000 0.532564 + Static Data: + ram_unit gpu_ram_unit time_unit ram_system_capacity gpu_ram_system_capacity system_core_count n_expected_cores system_gpu_count n_expected_gpus + gigabytes gigabytes hours 67.254166 16.376 12 12 1 1 + Code Block Results: + Name: my-code-block + Num Timepoints: 12 + Num Calls: 3 + Num Non Empty Calls: 3 + Compute Time: + min max mean std total + 2.580433 2.789909 2.651185 0.120147 7.953554 + Resource Usage: + min max mean std + main_ram 0.341860 0.936559 0.808736 0.167663 + descendants_ram 0.000000 0.000000 0.000000 0.000000 + combined_ram 0.341860 0.936559 0.808736 0.167663 + system_ram 4.859711 5.553644 5.231854 0.191567 + main_gpu_ram 0.000000 0.506000 0.363500 0.225892 + descendants_gpu_ram 0.000000 0.000000 0.000000 0.000000 + combined_gpu_ram 0.000000 0.506000 0.363500 0.225892 + system_gpu_ram 0.215000 0.727000 0.583250 0.228088 + gpu_sum_utilization_percent 0.000000 0.000000 0.000000 0.000000 + gpu_hardware_utilization_percent 0.000000 0.000000 0.000000 0.000000 + main_n_threads 12.000000 15.000000 14.416667 1.164500 + descendants_n_threads 0.000000 0.000000 0.000000 0.000000 + combined_n_threads 12.000000 15.000000 14.416667 1.164500 + cpu_system_sum_utilization_percent 11.900000 130.800000 113.641667 32.352363 + cpu_system_hardware_utilization_percent 0.991667 10.900000 9.470139 2.696030 + cpu_main_sum_utilization_percent 79.600000 103.100000 96.583333 6.726587 + cpu_main_hardware_utilization_percent 6.633333 8.591667 8.048611 0.560549 + cpu_descendants_sum_utilization_percent 0.000000 0.000000 0.000000 0.000000 + cpu_descendants_hardware_utilization_percent 0.000000 0.000000 0.000000 0.000000 + cpu_combined_sum_utilization_percent 79.600000 103.100000 96.583333 6.726587 + cpu_combined_hardware_utilization_percent 6.633333 8.591667 8.048611 0.560549 + + Name: my-function + Num 
Timepoints: 12 + Num Calls: 3 + Num Non Empty Calls: 3 + Compute Time: + min max mean std total + 2.538011 2.577679 2.553176 0.021419 7.659528 + Resource Usage: + min max mean std + main_ram 0.864592 0.944374 0.896998 0.034505 + descendants_ram 0.000000 0.000000 0.000000 0.000000 + combined_ram 0.864592 0.944374 0.896998 0.034505 + system_ram 5.203415 5.315219 5.271566 0.038751 + main_gpu_ram 0.314000 0.506000 0.490000 0.055426 + descendants_gpu_ram 0.000000 0.000000 0.000000 0.000000 + combined_gpu_ram 0.314000 0.506000 0.490000 0.055426 + system_gpu_ram 0.535000 0.727000 0.711000 0.055426 + gpu_sum_utilization_percent 0.000000 3.000000 0.250000 0.866025 + gpu_hardware_utilization_percent 0.000000 3.000000 0.250000 0.866025 + main_n_threads 15.000000 15.000000 15.000000 0.000000 + descendants_n_threads 0.000000 0.000000 0.000000 0.000000 + combined_n_threads 15.000000 15.000000 15.000000 0.000000 + cpu_system_sum_utilization_percent 120.300000 133.400000 124.566667 4.001439 + cpu_system_hardware_utilization_percent 10.025000 11.116667 10.380556 0.333453 + cpu_main_sum_utilization_percent 94.700000 103.200000 98.841667 2.677332 + cpu_main_hardware_utilization_percent 7.891667 8.600000 8.236806 0.223111 + cpu_descendants_sum_utilization_percent 0.000000 0.000000 0.000000 0.000000 + cpu_descendants_hardware_utilization_percent 0.000000 0.000000 0.000000 0.000000 + cpu_combined_sum_utilization_percent 94.700000 103.200000 98.841667 2.677332 + cpu_combined_hardware_utilization_percent 7.891667 8.600000 8.236806 0.223111 + + + + +Comparison +^^^^^^^^^^ + +The ``TrackingComparison`` class allows for comparing the resource usage +of multiple tracking sessions, both the overall usage of the sessions +and any code blocks that were sub-tracked. This is helpful if one wants +to see how changes to the process might impact the computational +efficiency of it, such as changes to implementation, input data, etc. 
To +do this, the ``TrackingComparison`` takes a mapping of the given name of +a tracking session to the file path where a ``SubTrackingResults`` +object is stored in pickle format. Say we had two tracking sessions and +we wanted to compare them. First, we store the ``results`` of the first +tracking session in a pickle file. If we’d like to re-use the same names +for the ``tracking_file`` and ``sub_tracking_file`` in the second +tracking session, we can safely set the ``overwrite`` argument to +``True`` since their data has been saved in ‘results.pkl’. + +.. code:: python3 + + import pickle as pkl + import os + + with open('results.pkl', 'wb') as file: + pkl.dump(results, file) + + +Once we have the results of the first tracking session saved, we can +start a new tracking session in another run of the program that we are +profiling. Say we made some code changes and we want to compare the two +implementations, we can populate a new ``tracking_file`` and +``sub_tracking_file`` with data from the new tracking session. + +.. code:: python3 + + import gpu_tracker as gput + from example_module import example_function + import pickle as pkl + + @gput.sub_track(code_block_name='my-function', sub_tracking_file='sub-tracking.csv', overwrite=True) + def my_function(*args, **kwargs): + example_function() + + with gput.Tracker(sleep_time=0.5, tracking_file='tracking.csv', overwrite=True): + for _ in range(3): + with gput.SubTracker(code_block_name='my-code-block', sub_tracking_file='sub-tracking.csv', overwrite=True): + example_function() + my_function() + results2 = gput.SubTrackingAnalyzer(tracking_file='tracking.csv', sub_tracking_file='sub-tracking.csv').sub_tracking_results() + with open('results2.pkl', 'wb') as file: + pkl.dump(results2, file) + +The first tracking session stored its results in ‘results.pkl’ while the +second tracking session stored its results in ‘results2.pkl’. Say we +decided to call the first session ‘A’ and the second session ‘B’. 
The +``TrackingComparison`` object would be initialized like so: + +.. code:: python3 + + comparison = gput.TrackingComparison(file_path_map={'A': 'results.pkl', 'B': 'results2.pkl'}) + +Once the ``TrackingComparison`` is created, its compare method generates +the ``ComparisonResults`` object detailing the computational resource +usage measured in one tracking session to that of the other tracking +sessions. The ``statistic`` parameter determines which summary statistic +of the measurements to compare, defaulting to ‘mean’. In this example, +we will compare the maximum measurements by setting ``statistic`` to +‘max’. + +.. code:: python3 + + results = comparison.compare(statistic='max') + type(results) + + + + +.. code:: none + + gpu_tracker.sub_tracker.ComparisonResults + + + +The ``overall_resource_usage`` attribute of the ``ComparisonResults`` +class is a dictionary mapping each measurement to a ``Series`` comparing +that measurement across all timepoints in one tracking session to +another. + +.. code:: python3 + + results.overall_resource_usage.keys() + + + + +.. code:: none + + dict_keys(['main_ram', 'descendants_ram', 'combined_ram', 'system_ram', 'main_gpu_ram', 'descendants_gpu_ram', 'combined_gpu_ram', 'system_gpu_ram', 'gpu_sum_utilization_percent', 'gpu_hardware_utilization_percent', 'main_n_threads', 'descendants_n_threads', 'combined_n_threads', 'cpu_system_sum_utilization_percent', 'cpu_system_hardware_utilization_percent', 'cpu_main_sum_utilization_percent', 'cpu_main_hardware_utilization_percent', 'cpu_descendants_sum_utilization_percent', 'cpu_descendants_hardware_utilization_percent', 'cpu_combined_sum_utilization_percent', 'cpu_combined_hardware_utilization_percent']) + + + +For example, we can compare the overall maximum ‘main_ram’ of tracking +session ‘A’ to tracking session ‘B’. + +.. code:: python3 + + results.overall_resource_usage['main_ram'] + + + + +.. 
code:: none + + A 0.920560 + B 0.944374 + dtype: float64 + + + +The ``code_block_resource_usage`` attribute is a dictionary that +compares the same resource usage but for each code block rather than +overall. + +.. code:: python3 + + results.code_block_resource_usage.keys() + + + + +.. code:: none + + dict_keys(['main_ram', 'descendants_ram', 'combined_ram', 'system_ram', 'main_gpu_ram', 'descendants_gpu_ram', 'combined_gpu_ram', 'system_gpu_ram', 'gpu_sum_utilization_percent', 'gpu_hardware_utilization_percent', 'main_n_threads', 'descendants_n_threads', 'combined_n_threads', 'cpu_system_sum_utilization_percent', 'cpu_system_hardware_utilization_percent', 'cpu_main_sum_utilization_percent', 'cpu_main_hardware_utilization_percent', 'cpu_descendants_sum_utilization_percent', 'cpu_descendants_hardware_utilization_percent', 'cpu_combined_sum_utilization_percent', 'cpu_combined_hardware_utilization_percent']) + + + +Each measurement is a dictionary mapping each code block name to the +resources used across tracking sessions in that code block. + +.. code:: python3 + + results.code_block_resource_usage['main_ram'].keys() + + + + +.. code:: none + + dict_keys(['my-code-block', 'my-function']) + + + +For example, the maximum ‘main_ram’ used by ‘my-code-block’ in tracking +session ‘A’ can be compared to that of tracking session ‘B’. + +.. code:: python3 + + results.code_block_resource_usage['main_ram']['my-code-block'] + + + + +.. code:: none + + A 0.912278 + B 0.936559 + dtype: float64 + + + +Finally the ``code_block_compute_time`` attribute is a dictionary that +compares the compute time summary statistics for each code block and for +each tracking session. + +.. code:: python3 + + results.code_block_compute_time.keys() + + + + +.. code:: none + + dict_keys(['my-code-block', 'my-function']) + + + +For example, we can compare the maximum compute time of ‘my-code-block’ +in tracking session ‘A’ to that of tracking session ‘B’. + +.. 
code:: python3 + + results.code_block_compute_time['my-code-block'] + + + + +.. code:: none + + B 2.789909 + A 2.869182 + dtype: float64 + + + +The comparison results can also be printed in their entirety. +Alternatively, the ``to_json`` method can provide this comprehensive +information in JSON format. + +.. code:: python3 + + print(results) + + +.. code:: none + + Overall Resource Usage: + Main Ram: + A B + 0.92056 0.944374 + Descendants Ram: + A B + 0.0 0.0 + Combined Ram: + A B + 0.92056 0.944374 + System Ram: + B A + 5.553644 5.701517 + Main Gpu Ram: + A B + 0.506 0.506 + Descendants Gpu Ram: + A B + 0.0 0.0 + Combined Gpu Ram: + A B + 0.506 0.506 + System Gpu Ram: + A B + 0.727 0.727 + Gpu Sum Utilization Percent: + A B + 0.0 3.0 + Gpu Hardware Utilization Percent: + A B + 0.0 3.0 + Main N Threads: + A B + 15.0 15.0 + Descendants N Threads: + A B + 0.0 0.0 + Combined N Threads: + A B + 15.0 15.0 + Cpu System Sum Utilization Percent: + B A + 133.4 138.4 + Cpu System Hardware Utilization Percent: + B A + 11.116667 11.533333 + Cpu Main Sum Utilization Percent: + B A + 103.2 103.3 + Cpu Main Hardware Utilization Percent: + B A + 8.6 8.608333 + Cpu Descendants Sum Utilization Percent: + A B + 0.0 0.0 + Cpu Descendants Hardware Utilization Percent: + A B + 0.0 0.0 + Cpu Combined Sum Utilization Percent: + B A + 103.2 103.3 + Cpu Combined Hardware Utilization Percent: + B A + 8.6 8.608333 + Code Block Resource Usage: + Main Ram: + my-code-block: + A B + 0.912278 0.936559 + my-function: + A B + 0.92056 0.944374 + Descendants Ram: + my-code-block: + A B + 0.0 0.0 + my-function: + A B + 0.0 0.0 + Combined Ram: + my-code-block: + A B + 0.912278 0.936559 + my-function: + A B + 0.92056 0.944374 + System Ram: + my-code-block: + A B + 5.261357 5.553644 + my-function: + B A + 5.315219 5.701517 + Main Gpu Ram: + my-code-block: + A B + 0.506 0.506 + my-function: + A B + 0.506 0.506 + Descendants Gpu Ram: + my-code-block: + A B + 0.0 0.0 + my-function: + A B + 0.0 0.0 + 
Combined Gpu Ram: + my-code-block: + A B + 0.506 0.506 + my-function: + A B + 0.506 0.506 + System Gpu Ram: + my-code-block: + A B + 0.727 0.727 + my-function: + A B + 0.727 0.727 + Gpu Sum Utilization Percent: + my-code-block: + A B + 0.0 0.0 + my-function: + A B + 0.0 3.0 + Gpu Hardware Utilization Percent: + my-code-block: + A B + 0.0 0.0 + my-function: + A B + 0.0 3.0 + Main N Threads: + my-code-block: + A B + 15.0 15.0 + my-function: + A B + 15.0 15.0 + Descendants N Threads: + my-code-block: + A B + 0.0 0.0 + my-function: + A B + 0.0 0.0 + Combined N Threads: + my-code-block: + A B + 15.0 15.0 + my-function: + A B + 15.0 15.0 + Cpu System Sum Utilization Percent: + my-code-block: + B A + 130.8 138.4 + my-function: + A B + 131.1 133.4 + Cpu System Hardware Utilization Percent: + my-code-block: + B A + 10.9 11.533333 + my-function: + A B + 10.925 11.116667 + Cpu Main Sum Utilization Percent: + my-code-block: + B A + 103.1 103.3 + my-function: + A B + 102.1 103.2 + Cpu Main Hardware Utilization Percent: + my-code-block: + B A + 8.591667 8.608333 + my-function: + A B + 8.508333 8.6 + Cpu Descendants Sum Utilization Percent: + my-code-block: + A B + 0.0 0.0 + my-function: + A B + 0.0 0.0 + Cpu Descendants Hardware Utilization Percent: + my-code-block: + A B + 0.0 0.0 + my-function: + A B + 0.0 0.0 + Cpu Combined Sum Utilization Percent: + my-code-block: + B A + 103.1 103.3 + my-function: + A B + 102.1 103.2 + Cpu Combined Hardware Utilization Percent: + my-code-block: + B A + 8.591667 8.608333 + my-function: + A B + 8.508333 8.6 + Code Block Compute Time: + my-code-block: + B A + 2.789909 2.869182 + my-function: + A B + 2.570437 2.577679 + + + CLI --- +Tracking +~~~~~~~~ + +Basics +^^^^^^ + The ``gpu-tracker`` package also comes with a commandline interface that can track the computational-resource-usage of any shell command, not just Python code. Entering ``gpu-tracker -h`` in a shell will show the @@ -553,14 +1719,18 @@ help message. 
Usage: gpu-tracker -h | --help gpu-tracker -v | --version - gpu-tracker --execute= [--output=] [--format=] [--st=] [--ru=] [--gru=] [--tu=] [--nec=] [--guuids=] [--disable-logs] + gpu-tracker --execute= [--output=] [--format=] [--tconfig=] [--st=] [--ru=] [--gru=] [--tu=] [--nec=] [--guuids=] [--disable-logs] [--gb=] [--tf=] [--overwrite] + gpu-tracker sub-track combine --stf= [-p ]... + gpu-tracker sub-track analyze --tf= --stf= [--output=] [--format=] + gpu-tracker sub-track compare [--output=] [--format=] [--cconfig=] [-m =...] [--stat=] Options: -h --help Show this help message and exit. -v --version Show package version and exit. -e --execute= The command to run along with its arguments all within quotes e.g. "ls -l -a". - -o --output= File path to store the computational-resource-usage measurements. If not set, prints measurements to the screen. - -f --format= File format of the output. Either 'json' or 'text'. Defaults to 'text'. + -o --output= File path to store the computational-resource-usage measurements in the case of tracking or the analysis report in the case of sub-tracking. If not set, prints to the screen. + -f --format= File format of the output. Either 'json', 'text', or 'pickle'. Defaults to 'text'. + --tconfig= JSON config file containing the key word arguments to the ``Tracker`` class (see API) to be optionally used instead of the corresponding commandline options. If any commandline options are set, they will override the corresponding arguments provided by the config file. --st= The number of seconds to sleep in between usage-collection iterations. --ru= One of 'bytes', 'kilobytes', 'megabytes', 'gigabytes', or 'terabytes'. --gru= One of 'bytes', 'kilobytes', 'megabytes', 'gigabytes', or 'terabytes'. @@ -568,6 +1738,18 @@ help message. --nec= The number of cores expected to be used. Defaults to the number of cores in the entire operating system. --guuids= Comma separated list of the UUIDs of the GPUs for which to track utilization e.g. 
gpu-uuid1,gpu-uuid2,etc. Defaults to all the GPUs in the system. --disable-logs If set, warnings are suppressed during tracking. Otherwise, the Tracker logs warnings as usual. + --gb= The brand of GPU to profile. Valid values are nvidia and amd. Defaults to the brand of GPU detected in the system, checking NVIDIA first. + --tf= If specified, stores the individual resource usage measurements at each iteration. Valid file formats are CSV (.csv) and SQLite (.sqlite) where the SQLite file format stores the data in a table called "data" and allows for more efficient querying. + --overwrite Whether to overwrite the tracking file if it already existed before the beginning of this tracking session. Do not set if the data in the existing tracking file is still needed. + sub-track Perform sub-tracking related commands. + combine Combines multiple sub-tracking files into one. This is usually a result of sub-tracking a code block that is called in multiple simultaneous processes. + --stf= The path to the sub-tracking file used to specify the timestamps of specific code-blocks. If not generated by the gpu-tracker API, must be either a CSV or SQLite file (where the SQLite file contains a table called "data") where the headers are precisely process_id, code_block_name, position, and timestamp. The process_id is the ID of the process where the code block is called. code_block_name is the name of the code block. position is whether it is the start or the stopping point of the code block where 0 represents start and 1 represents stop. And timestamp is the timestamp where the code block starts or where it stops. + -p Paths to the sub-tracking files to combine. Must all be the same file format and the same file format as the resulting sub-tracking file (either .csv or .sqlite). If only one path is provided, it is interpreted as a path to a directory and all the files in this directory are combined. 
+ analyze Generate the sub-tracking analysis report using the tracking file and sub-tracking file for resource usage of specific code blocks. + compare Compares multiple tracking sessions to determine differences in computational resource usage by loading sub-tracking results given their file paths. Sub-tracking results files must be in pickle format e.g. running the ``sub-track analyze`` command and specifying a file path for ``--output`` and 'pickle' for the ``--format`` option. If code block results are not included in the sub-tracking files (i.e. no code blocks were sub-tracked), then only overall results are compared. + --cconfig= JSON config file containing the ``file_path_map`` argument for the ``TrackingComparison`` class and ``statistic`` argument for its ``compare`` method (see API) that can be used instead of the corresponding ``-m =`` and ``--stat=`` commandline options respectively. If additional ``-m =`` options are added on the commandline in addition to a config file, they will be added to the ``file_path_map`` in the config file. If a ``--stat`` option is provided on the commandline, it will override the ``statistic`` in the config file. + -m = Mapping of tracking session names to the path of the file containing the sub-tracking results of said tracking session. Must be in pickle format. + --stat= The summary statistic of the measurements to compare. One of 'min', 'max', 'mean', or 'std'. Defaults to 'mean'. The ``-e`` or ``--execute`` is a required option where the desired shell @@ -587,19 +1769,19 @@ completes, its status code is reported. 
Max RAM: Unit: gigabytes System capacity: 67.254 - System: 2.896 + System: 5.61 Main: Total RSS: 0.003 Private RSS: 0.0 Shared RSS: 0.003 Descendants: - Total RSS: 0.877 - Private RSS: 0.759 - Shared RSS: 0.118 - Combined: - Total RSS: 0.878 - Private RSS: 0.759 + Total RSS: 0.879 + Private RSS: 0.76 Shared RSS: 0.119 + Combined: + Total RSS: 0.881 + Private RSS: 0.761 + Shared RSS: 0.12 Max GPU RAM: Unit: gigabytes System capacity: 16.376 @@ -611,25 +1793,25 @@ completes, its status code is reported. System core count: 12 Number of expected cores: 12 System: - Max sum percent: 324.1 - Max hardware percent: 27.008 - Mean sum percent: 164.91 - Mean hardware percent: 13.743 + Max sum percent: 324.8 + Max hardware percent: 27.067 + Mean sum percent: 152.109 + Mean hardware percent: 12.676 Main: Max sum percent: 0.0 Max hardware percent: 0.0 Mean sum percent: 0.0 Mean hardware percent: 0.0 Descendants: - Max sum percent: 361.4 - Max hardware percent: 30.117 - Mean sum percent: 123.42 - Mean hardware percent: 10.285 + Max sum percent: 201.8 + Max hardware percent: 16.817 + Mean sum percent: 102.245 + Mean hardware percent: 8.52 Combined: - Max sum percent: 361.4 - Max hardware percent: 30.117 - Mean sum percent: 123.42 - Mean hardware percent: 10.285 + Max sum percent: 201.8 + Max hardware percent: 16.817 + Mean sum percent: 102.245 + Mean hardware percent: 8.52 Main number of threads: 1 Descendants number of threads: 12 Combined number of threads: 13 @@ -647,8 +1829,11 @@ completes, its status code is reported. *Notice that the RAM and GPU RAM usage primarily takes place in the -descendant processes since the bash command itself calls the commands -relevant to resource usage.* +descendant processes since, in this example, the bash command itself +calls the commands relevant to resource usage.* + +Options +^^^^^^^ The units of the computational resources can be modified. 
For example, –tu stands for time-unit, –gru stands for gpu-ram-unit, and –ru stands @@ -986,3 +2171,509 @@ By default, the format of the output is “text”. The ``-f`` or "time": 0.0010636144214206272 } } + +Alternative to typing out the tracking configuration via commandline +options, one can specify a config JSON file via the ``--tconfig`` +option. + +.. code:: none + + $ cat config.json + + +.. code:: none + + { + "sleep_time": 0.5, + "ram_unit": "megabytes", + "gpu_ram_unit": "megabytes", + "time_unit": "seconds" + } + + +.. code:: none + + $ gpu-tracker -e 'bash example-script.sh' --tconfig=config.json + + +.. code:: none + + Resource tracking complete. Process completed with status code: 0 + Max RAM: + Unit: megabytes + System capacity: 67254.166 + System: 4511.437 + Main: + Total RSS: 2.957 + Private RSS: 0.319 + Shared RSS: 2.638 + Descendants: + Total RSS: 894.923 + Private RSS: 781.222 + Shared RSS: 113.701 + Combined: + Total RSS: 896.135 + Private RSS: 781.541 + Shared RSS: 114.594 + Max GPU RAM: + Unit: megabytes + System capacity: 16376.0 + System: 727.0 + Main: 0.0 + Descendants: 314.0 + Combined: 314.0 + CPU utilization: + System core count: 12 + Number of expected cores: 12 + System: + Max sum percent: 259.3 + Max hardware percent: 21.608 + Mean sum percent: 160.9 + Mean hardware percent: 13.408 + Main: + Max sum percent: 0.0 + Max hardware percent: 0.0 + Mean sum percent: 0.0 + Mean hardware percent: 0.0 + Descendants: + Max sum percent: 102.8 + Max hardware percent: 8.567 + Mean sum percent: 96.529 + Mean hardware percent: 8.044 + Combined: + Max sum percent: 102.8 + Max hardware percent: 8.567 + Mean sum percent: 96.529 + Mean hardware percent: 8.044 + Main number of threads: 1 + Descendants number of threads: 12 + Combined number of threads: 13 + GPU utilization: + System GPU count: 1 + Number of expected GPUs: 1 + GPU percentages: + Max sum percent: 0.0 + Max hardware percent: 0.0 + Mean sum percent: 0.0 + Mean hardware percent: 0.0 + Compute 
time: + Unit: seconds + Time: 3.913 + + +Sub-tracking +~~~~~~~~~~~~ + +Basics +^^^^^^ + +The ``sub-track`` subcommand introduces functionality related to +sub-tracking i.e. analyzing computational resource usage for individual +code blocks rather than the entire process. This requires a tracking +file and a sub-tracking file. The tracking file can be created by +specifying the ``--tf`` option when profiling a process using +``--execute``. The sub-tracking file can be created using the +gpu-tracker API i.e. the ``SubTracker`` class. If the process being +profiled is not a python script, the sub-tracking file can be generated +in any programming language as long as it follows the following format: + +It is either a CSV or SQLite file where the headers are +``process_id,code_block_name,position,timestamp``. The ``process_id`` +column is the ID (integer) of the process where the code block was +called. The ``code_block_name`` is the given name (string) of the code +block to distinguish it from other code blocks being sub-tracked. The +``position`` is an integer of either the value 0 or 1 where 0 indicates +the start of the code block and 1 indicates the stopping point of the +code block. Finally ``timestamp`` (float) is the timestamp when the code +block either starts (where ``position`` is 0) or when it stops (where +``position`` is 1). Both a start timestamp and stop timestamp must be +logged for every call to the code block of interest. If using an SQLite +file for more efficient querying of longer tracking sessions, the name +of the table must be ‘data’. + +If sub-tracking a code block that is called in multiple processes, the +sub-tracking files of that code block must be unique to each process. +For convenience, the ``sub-track combine`` subcommand allows for +combining these into a single sub-tracking file that can be used for +downstream analysis. 
This example combines ‘sub-tracking1.csv’ and +‘sub-tracking2.csv’ into a single sub-tracking file of the name +‘combined-file.csv’. Alternatively, if the ``-p`` option is only used +once, rather than being interpreted as a list of files, it is instead +interpreted as the path to a directory containing the sub-tracking +files to combine. + +.. code:: none + + $ gpu-tracker sub-track combine --stf=combined-file.csv -p sub-tracking1.csv -p sub-tracking2.csv + +Analysis +^^^^^^^^ + +Once a tracking and sub-tracking file is available, the +``sub-track analyze`` subcommand can generate the sub-tracking results. +These can be stored in JSON, text, or pickle format where the pickle +format is the same as the ``SubTrackingResults`` object from the API. If +the ``--output`` option is specified, the content can be stored in the +given file path. By default, the content prints to the screen and it is +in text format. + +.. code:: none + + $ gpu-tracker sub-track analyze --tf=tracking.csv --stf=sub-tracking.csv + + .. 
code:: none + + Overall: + min max mean std + main_ram 0.341860 0.944374 0.856037 0.125014 + descendants_ram 0.000000 0.000000 0.000000 0.000000 + combined_ram 0.341860 0.944374 0.856037 0.125014 + system_ram 4.859711 5.553644 5.253445 0.134081 + main_gpu_ram 0.000000 0.506000 0.429920 0.170432 + descendants_gpu_ram 0.000000 0.000000 0.000000 0.000000 + combined_gpu_ram 0.000000 0.506000 0.429920 0.170432 + system_gpu_ram 0.215000 0.727000 0.650320 0.172010 + gpu_sum_utilization_percent 0.000000 3.000000 0.120000 0.600000 + gpu_hardware_utilization_percent 0.000000 3.000000 0.120000 0.600000 + main_n_threads 12.000000 15.000000 14.720000 0.842615 + descendants_n_threads 0.000000 0.000000 0.000000 0.000000 + combined_n_threads 12.000000 15.000000 14.720000 0.842615 + cpu_system_sum_utilization_percent 11.900000 133.400000 119.212000 22.741909 + cpu_system_hardware_utilization_percent 0.991667 11.116667 9.934333 1.895159 + cpu_main_sum_utilization_percent 78.000000 103.200000 96.924000 6.390767 + cpu_main_hardware_utilization_percent 6.500000 8.600000 8.077000 0.532564 + cpu_descendants_sum_utilization_percent 0.000000 0.000000 0.000000 0.000000 + cpu_descendants_hardware_utilization_percent 0.000000 0.000000 0.000000 0.000000 + cpu_combined_sum_utilization_percent 78.000000 103.200000 96.924000 6.390767 + cpu_combined_hardware_utilization_percent 6.500000 8.600000 8.077000 0.532564 + Static Data: + ram_unit gpu_ram_unit time_unit ram_system_capacity gpu_ram_system_capacity system_core_count n_expected_cores system_gpu_count n_expected_gpus + gigabytes gigabytes hours 67.254166 16.376 12 12 1 1 + Code Block Results: + Name: my-code-block + Num Timepoints: 12 + Num Calls: 3 + Num Non Empty Calls: 3 + Compute Time: + min max mean std total + 2.580433 2.789909 2.651185 0.120147 7.953554 + Resource Usage: + min max mean std + main_ram 0.341860 0.936559 0.808736 0.167663 + descendants_ram 0.000000 0.000000 0.000000 0.000000 + combined_ram 0.341860 0.936559 0.808736 
0.167663 + system_ram 4.859711 5.553644 5.231854 0.191567 + main_gpu_ram 0.000000 0.506000 0.363500 0.225892 + descendants_gpu_ram 0.000000 0.000000 0.000000 0.000000 + combined_gpu_ram 0.000000 0.506000 0.363500 0.225892 + system_gpu_ram 0.215000 0.727000 0.583250 0.228088 + gpu_sum_utilization_percent 0.000000 0.000000 0.000000 0.000000 + gpu_hardware_utilization_percent 0.000000 0.000000 0.000000 0.000000 + main_n_threads 12.000000 15.000000 14.416667 1.164500 + descendants_n_threads 0.000000 0.000000 0.000000 0.000000 + combined_n_threads 12.000000 15.000000 14.416667 1.164500 + cpu_system_sum_utilization_percent 11.900000 130.800000 113.641667 32.352363 + cpu_system_hardware_utilization_percent 0.991667 10.900000 9.470139 2.696030 + cpu_main_sum_utilization_percent 79.600000 103.100000 96.583333 6.726587 + cpu_main_hardware_utilization_percent 6.633333 8.591667 8.048611 0.560549 + cpu_descendants_sum_utilization_percent 0.000000 0.000000 0.000000 0.000000 + cpu_descendants_hardware_utilization_percent 0.000000 0.000000 0.000000 0.000000 + cpu_combined_sum_utilization_percent 79.600000 103.100000 96.583333 6.726587 + cpu_combined_hardware_utilization_percent 6.633333 8.591667 8.048611 0.560549 + + Name: my-function + Num Timepoints: 12 + Num Calls: 3 + Num Non Empty Calls: 3 + Compute Time: + min max mean std total + 2.538011 2.577679 2.553176 0.021419 7.659528 + Resource Usage: + min max mean std + main_ram 0.864592 0.944374 0.896998 0.034505 + descendants_ram 0.000000 0.000000 0.000000 0.000000 + combined_ram 0.864592 0.944374 0.896998 0.034505 + system_ram 5.203415 5.315219 5.271566 0.038751 + main_gpu_ram 0.314000 0.506000 0.490000 0.055426 + descendants_gpu_ram 0.000000 0.000000 0.000000 0.000000 + combined_gpu_ram 0.314000 0.506000 0.490000 0.055426 + system_gpu_ram 0.535000 0.727000 0.711000 0.055426 + gpu_sum_utilization_percent 0.000000 3.000000 0.250000 0.866025 + gpu_hardware_utilization_percent 0.000000 3.000000 0.250000 0.866025 + main_n_threads 
 15.000000 15.000000 15.000000 0.000000 + descendants_n_threads 0.000000 0.000000 0.000000 0.000000 + combined_n_threads 15.000000 15.000000 15.000000 0.000000 + cpu_system_sum_utilization_percent 120.300000 133.400000 124.566667 4.001439 + cpu_system_hardware_utilization_percent 10.025000 11.116667 10.380556 0.333453 + cpu_main_sum_utilization_percent 94.700000 103.200000 98.841667 2.677332 + cpu_main_hardware_utilization_percent 7.891667 8.600000 8.236806 0.223111 + cpu_descendants_sum_utilization_percent 0.000000 0.000000 0.000000 0.000000 + cpu_descendants_hardware_utilization_percent 0.000000 0.000000 0.000000 0.000000 + cpu_combined_sum_utilization_percent 94.700000 103.200000 98.841667 2.677332 + cpu_combined_hardware_utilization_percent 7.891667 8.600000 8.236806 0.223111 + + + + +The overall resource usage of the tracking session is provided as well +as its static data. This is followed by the compute time and resource +usage of each code block. + +Comparison +^^^^^^^^^^ + +Storing the results of the sub-tracking analysis in a pickle file +allows for one tracking session to be compared to another. + +.. code:: none + + $ gpu-tracker sub-track analyze --tf=tracking.csv --stf=sub-tracking.csv --format=pickle --output=my-results.pkl + +The ``sub-track compare`` subcommand compares the computational resource +usage of multiple tracking sessions. This is useful when you want to +determine how a change can impact the computational efficiency of your +process, whether it be different input data, an alternative +implementation, etc. The ``-m`` option creates a mapping from the given +name of a tracking session to the file path where its sub-tracking +results are stored in pickle format. Say you wanted to call one tracking +session ‘A’ and then the second tracking session ‘B’ where the results +of tracking session ‘A’ are stored in ‘results.pkl’ and that of session +‘B’ are in ‘results2.pkl’. + +.. 
code:: none + + $ gpu-tracker sub-track compare -m A=results.pkl -m B=results2.pkl + + +.. code:: none + + Overall Resource Usage: + Main Ram: + B A + 0.856037 0.861921 + Descendants Ram: + A B + 0.0 0.0 + Combined Ram: + B A + 0.856037 0.861921 + System Ram: + B A + 5.253445 5.281926 + Main Gpu Ram: + B A + 0.42992 0.448364 + Descendants Gpu Ram: + A B + 0.0 0.0 + Combined Gpu Ram: + B A + 0.42992 0.448364 + System Gpu Ram: + B A + 0.65032 0.668909 + Gpu Sum Utilization Percent: + A B + 0.0 0.12 + Gpu Hardware Utilization Percent: + A B + 0.0 0.12 + Main N Threads: + B A + 14.72 14.757576 + Descendants N Threads: + A B + 0.0 0.0 + Combined N Threads: + B A + 14.72 14.757576 + Cpu System Sum Utilization Percent: + B A + 119.212 121.918182 + Cpu System Hardware Utilization Percent: + B A + 9.934333 10.159848 + Cpu Main Sum Utilization Percent: + B A + 96.924 99.060606 + Cpu Main Hardware Utilization Percent: + B A + 8.077 8.255051 + Cpu Descendants Sum Utilization Percent: + A B + 0.0 0.0 + Cpu Descendants Hardware Utilization Percent: + A B + 0.0 0.0 + Cpu Combined Sum Utilization Percent: + B A + 96.924 99.060606 + Cpu Combined Hardware Utilization Percent: + B A + 8.077 8.255051 + Code Block Resource Usage: + Main Ram: + my-code-block: + B A + 0.808736 0.846999 + my-function: + A B + 0.888034 0.896998 + Descendants Ram: + my-code-block: + A B + 0.0 0.0 + my-function: + A B + 0.0 0.0 + Combined Ram: + my-code-block: + B A + 0.808736 0.846999 + my-function: + A B + 0.888034 0.896998 + System Ram: + my-code-block: + A B + 5.170665 5.231854 + my-function: + B A + 5.271566 5.476632 + Main Gpu Ram: + my-code-block: + B A + 0.3635 0.415429 + my-function: + B A + 0.49 0.506 + Descendants Gpu Ram: + my-code-block: + A B + 0.0 0.0 + my-function: + A B + 0.0 0.0 + Combined Gpu Ram: + my-code-block: + B A + 0.3635 0.415429 + my-function: + B A + 0.49 0.506 + System Gpu Ram: + my-code-block: + B A + 0.58325 0.635714 + my-function: + B A + 0.711 0.727 + Gpu Sum Utilization 
Percent: + my-code-block: + A B + 0.0 0.0 + my-function: + A B + 0.0 0.25 + Gpu Hardware Utilization Percent: + my-code-block: + A B + 0.0 0.0 + my-function: + A B + 0.0 0.25 + Main N Threads: + my-code-block: + B A + 14.416667 14.619048 + my-function: + A B + 15.0 15.0 + Descendants N Threads: + my-code-block: + A B + 0.0 0.0 + my-function: + A B + 0.0 0.0 + Combined N Threads: + my-code-block: + B A + 14.416667 14.619048 + my-function: + A B + 15.0 15.0 + Cpu System Sum Utilization Percent: + my-code-block: + B A + 113.641667 120.142857 + my-function: + B A + 124.566667 125.025 + Cpu System Hardware Utilization Percent: + my-code-block: + B A + 9.470139 10.011905 + my-function: + B A + 10.380556 10.41875 + Cpu Main Sum Utilization Percent: + my-code-block: + B A + 96.583333 98.652381 + my-function: + B A + 98.841667 99.775 + Cpu Main Hardware Utilization Percent: + my-code-block: + B A + 8.048611 8.221032 + my-function: + B A + 8.236806 8.314583 + Cpu Descendants Sum Utilization Percent: + my-code-block: + A B + 0.0 0.0 + my-function: + A B + 0.0 0.0 + Cpu Descendants Hardware Utilization Percent: + my-code-block: + A B + 0.0 0.0 + my-function: + A B + 0.0 0.0 + Cpu Combined Sum Utilization Percent: + my-code-block: + B A + 96.583333 98.652381 + my-function: + B A + 98.841667 99.775 + Cpu Combined Hardware Utilization Percent: + my-code-block: + B A + 8.048611 8.221032 + my-function: + B A + 8.236806 8.314583 + Code Block Compute Time: + my-code-block: + B A + 2.651185 2.68558 + my-function: + B A + 2.553176 2.559218 + + + +Both the overall usage is compared and per code block. The default +format is text and the default output is printing to the console. The +``--format`` and ``--output`` options can be configured similarly to +those in the ``sub-track analyze`` subcommand. By default, the ‘mean’ of +measurements is compared. Alternatively, the ``--stat`` option can be +set to ‘min’, ‘max’, or ‘std’ to compare a different summary statistic. 
From 3445bf58944ee3f02e42501e3530498a9302d6f0 Mon Sep 17 00:00:00 2001 From: erikhuck Date: Wed, 7 May 2025 20:03:15 -0400 Subject: [PATCH 3/7] Completes tests for the CLI --- docs/notebook/config.json | 2 +- src/gpu_tracker/__main__.py | 17 +- tests/data/cconfig.json | 7 + .../files-to-combine/1723811.sub.tmp.sqlite | Bin 0 -> 8192 bytes .../files-to-combine/1723814.sub.tmp.sqlite | Bin 0 -> 8192 bytes .../files-to-combine/1723815.sub.tmp.sqlite | Bin 0 -> 8192 bytes .../files-to-combine/main.sub.tmp.sqlite | Bin 0 -> 8192 bytes tests/data/sub-tracking-results/results-A.pkl | Bin 0 -> 11883 bytes tests/data/sub-tracking-results/results-B.pkl | Bin 0 -> 11883 bytes .../data/sub-tracking-results/sub.tmp.sqlite | Bin 0 -> 8192 bytes tests/data/sub-tracking-results/tmp.sqlite | Bin 0 -> 12288 bytes tests/data/tconfig.json | 7 + tests/test_cli.py | 166 ++++++++++++++---- tests/test_tracker.py | 2 +- tests/utils.py | 4 +- 15 files changed, 158 insertions(+), 47 deletions(-) create mode 100644 tests/data/cconfig.json create mode 100644 tests/data/sub-tracking-results/files-to-combine/1723811.sub.tmp.sqlite create mode 100644 tests/data/sub-tracking-results/files-to-combine/1723814.sub.tmp.sqlite create mode 100644 tests/data/sub-tracking-results/files-to-combine/1723815.sub.tmp.sqlite create mode 100644 tests/data/sub-tracking-results/files-to-combine/main.sub.tmp.sqlite create mode 100644 tests/data/sub-tracking-results/results-A.pkl create mode 100644 tests/data/sub-tracking-results/results-B.pkl create mode 100644 tests/data/sub-tracking-results/sub.tmp.sqlite create mode 100644 tests/data/sub-tracking-results/tmp.sqlite create mode 100644 tests/data/tconfig.json diff --git a/docs/notebook/config.json b/docs/notebook/config.json index 6675ff2..6a866e5 100644 --- a/docs/notebook/config.json +++ b/docs/notebook/config.json @@ -1,5 +1,5 @@ { - "sleep_time": 0.5c, + "sleep_time": 0.5, "ram_unit": "megabytes", "gpu_ram_unit": "megabytes", "time_unit": "seconds" diff --git 
a/src/gpu_tracker/__main__.py b/src/gpu_tracker/__main__.py index 58b4195..c4b6db4 100644 --- a/src/gpu_tracker/__main__.py +++ b/src/gpu_tracker/__main__.py @@ -57,26 +57,25 @@ def main(): results = SubTrackingAnalyzer(tracking_file, sub_tracking_file).sub_tracking_results() _process_output(output_format, results, output) elif args['combine']: - directory = None - files = None files = args['-p'] if len(files) == 1: [directory] = files files = [os.path.join(directory, file) for file in os.listdir(directory)] SubTrackingAnalyzer(None, args['--stf']).combine_sub_tracking_files(files) - elif args['compare']: + else: if args['--cconfig'] is not None: with open(args['--cconfig'], 'r') as file: config = json.load(file) - file_path_map = config['file_path_map'] if 'file_path_map' in config else None + file_path_map = config['file_path_map'] if 'file_path_map' in config else dict[str, str]() statistic = config['statistic'] if 'statistic' in config else None else: file_path_map = dict[str, str]() statistic = None - if not file_path_map and args['-m'] is None: - raise ValueError( - f'A mapping of tracking session name to file path must be provided either through the -m option or a config file.' + if not file_path_map and not args['-m']: + log.error( + 'A mapping of tracking session name to file path must be provided either through the -m option or a config file.' 
) + sys.exit(1) else: file_path_map.update({name: file for [name, file] in [option.split('=') for option in args['-m']]}) if args['--stat'] is not None: @@ -154,5 +153,5 @@ def _process_output(output_format: str, output_obj, output: str | None): file.write(output_str) -if __name__ == '__main__': - main() +if __name__ == '__main__': # pragma: nocover + main() # pragma: nocover diff --git a/tests/data/cconfig.json b/tests/data/cconfig.json new file mode 100644 index 0000000..dc26167 --- /dev/null +++ b/tests/data/cconfig.json @@ -0,0 +1,7 @@ +{ + "file_path_map": { + "A": "file1.pkl", + "B": "file2.pkl" + }, + "statistic": "max" +} \ No newline at end of file diff --git a/tests/data/sub-tracking-results/files-to-combine/1723811.sub.tmp.sqlite b/tests/data/sub-tracking-results/files-to-combine/1723811.sub.tmp.sqlite new file mode 100644 index 0000000000000000000000000000000000000000..9738eb51bb4b6b013b8d3c973b5c8d597ab1b558 GIT binary patch literal 8192 zcmeI#yGq1B6b9g#>?I~3V~QYHO{aovV`C#2mw17w7=vyhEZHPNbaP=d?G{@LzJq3c z1K+{M!g@O&z&EgPBCMFo%F6#Rhs-(iS6`R2QzsS?zsg35FZd=|B9xLH&Iuu^Y(@6G zsJt}igZx(WZ>5sX_3NTcQ8p&>pdbJN2tWV=5P$##AOHafKmY>&Phdi*x@?+s5(s}V zj6z@dRV!WBwmh4A)~;joYQxubEgxk;R22O<Gi4T-SmujQWFN7TokxKZ*Fc z<#rFOuctgKVi9NQ&$x(_s1SaV^F8Op^7QrU-@MK42zzHU_ROBxy&O;wfB*y_009U< z00Izz00bZa0SNq|fYEL>n5M2Al|5^wTs}JLi_wy+h40n0*Sj^Ewbb-tH8mQwR2jT& S8A>fx4$KdO)>037XZsU8K2m7_ literal 0 HcmV?d00001 diff --git a/tests/data/sub-tracking-results/files-to-combine/1723814.sub.tmp.sqlite b/tests/data/sub-tracking-results/files-to-combine/1723814.sub.tmp.sqlite new file mode 100644 index 0000000000000000000000000000000000000000..09bd6267fd469b5f09387a8c255d081b2342e559 GIT binary patch literal 8192 zcmeI#Jxatt6bJB`>_<#MW>YMp)szZiV=Wk$tP0V^7#BeyM9eR;QR)l6Nm_(bvduXmM3t?`o)?vu z`h1Yza{jGUvT^ykC{vW(5P47#fB*y_009U<00Izz00bZaf&V8kCRAN&x9K<(elUn5 zU-)G!9mg_Vi@WBIZSiu$S9L8PWno+tUJ~*BL)Y51oOP~iVHU++Fv!9yZ|J8n@0(6% z&-{AIvmy~mHvAbENg5ZzPjlY2d#02wuoGIrI-8X79YL(O^x;`_M QN@^0c-VItw-RHyP6DH46VE_OC 
literal 0 HcmV?d00001 diff --git a/tests/data/sub-tracking-results/files-to-combine/1723815.sub.tmp.sqlite b/tests/data/sub-tracking-results/files-to-combine/1723815.sub.tmp.sqlite new file mode 100644 index 0000000000000000000000000000000000000000..f6e5691b15b07365a881053130e04d2e5ae70cae GIT binary patch literal 8192 zcmeI#Jxjwt7zgl6+S;Zd=M6=0@g`9a7Y79wV~wv>u?DFilr)VP+ccp$aPS2>xQSoD zN!#idaPbp3xCr9X$uHn{@FG;qB+mXH?&0p9`%k~^cD4(yaQK1W^en-r$q-Q#GS4|7 zM3b$`?w2AD$^J#|(fzm5$kfrxpiC+BjL3z800bZa0SG_<0uX=z1Rwwb2>d^R7E#i} zxt!9ng;l9LRZCb=tNEg#n+7-ag@VDO2_MgDLDRRLFf6-OzPf1|%SLgMXEoceI^{~; zw`=8wrh5P$##AOHafKmY;|fB*y_0D(UXuuLjJwe-lweM#@B`*)*{$1E*JneS0|_F|3= t#8Tav#Y>ior8?{8GfT!&owc!ZmWZX=dvB*qjiuV9_BB&tsoUW7{S%GJUg7`% literal 0 HcmV?d00001 diff --git a/tests/data/sub-tracking-results/files-to-combine/main.sub.tmp.sqlite b/tests/data/sub-tracking-results/files-to-combine/main.sub.tmp.sqlite new file mode 100644 index 0000000000000000000000000000000000000000..c061862fb8650aa48635edcab34087dbc7302342 GIT binary patch literal 8192 zcmWFz^vNtqRY=P(%1ta$FlG>7U}R))P*7lCV2}i2CLo3ZMj(R)#sShGJ|Q5x#lZiT{}%rN{$>2#{001>{8s$({49Jg`Ofie=9|G+%a_FG&ZomC$oq}= zHtzx6r9cg%U^E0qLtr!nMnhmU1V%$(Gz3ONU^E0qLtr!nMnizi5RjB$WtQh;m-6*5 z$t}<;sI)S+aJRR!nuvbzLyRO;0Ur7pJ*EOqkizGjGT_FaQl6=^81vA{1hkibn&KDC;)@<^4b6Z literal 0 HcmV?d00001 diff --git a/tests/data/sub-tracking-results/results-A.pkl b/tests/data/sub-tracking-results/results-A.pkl new file mode 100644 index 0000000000000000000000000000000000000000..1999d1b60b403dc193ee96f797ed43f467b8b274 GIT binary patch literal 11883 zcmeHM4OA4@6<%;5Ac%l~8WmCe0krE68vnB+*eF>;L`1NN%j`1j?zn&M%m^ewFsTa1 zF^zFNJtn7#CsuoMtcf+%+G_l3>dz5Ut+lC*rWhNKe>MKZh)LDn_hx7Fc35B)V{=Y< zd(M0F?z{KCH*@cM-@R{ZwQEat8eGJl7PnyJJ+yft>DvPLp!mPy$m zCTWEn#A1~F{$>Uwbke8&~+}vmHL&23n&r7EI5-H6Z zFNSkw+D>~w$eJvMmiviKR*7jqEIVTB@GWbu>UZd+V9C_9K54weuAz1<aXPOOkQ?*7`ccUXJ1HoZ~7>b zG^Oh&8{AX$BxyF1o9ZM7`BbJD}1w{EpcY6H9WtwETVTK$7$i^p3a8B&l-G+fR=8MTSiB+A!46<$-N@w*2mt*fUL}4M&}rUK8HE 
zJ2~6hy2cZCRz{PgMb(@4nfFAIq=&F!ZmVa0tIDpCYo%-AhkV}bXR#$8r4G-!Gn!o3 z@cNQtn?EZdNv`6jn+{#{J50AUF^#K6PA9o0{B6Z<%jFo7RC+4>M+M2HB&oLHgYyTg z2a}{ME8X-@>s*pFw`pl@#@T){$!oDL1}$kZUIH8yd-;b8WlSZ*=}a^SK}J`jlc_M~ zIDrNSFPj&l(THJ67sV*rGKfN?mglb)4r6@!id`Hwrvx=!DT!JKL!%DImr3||*E&%w zlPe%JX7fQiTL2X(W(%QhsW#E4Ho`&MoJI(LAVeu+I5PtghLh)HO0?PKFxi|;ncVHl zRdGDy@Qz_Y6p>-!9;C338tYTwBgJ@?_y~Q_6DK%~0&lb1UZ8oK%V~5o9#{%5i79B* zN_)yGXb%&ZB?hm=Y2>XQhAsnyOC;Aq(BplpA&3LbXnOg@T3y9Zf~oDrq##bbpjMR) z_OZ0B3QtPDN~;=&BaI|y+lmJvcHg&XRcpE*TbnA86S5|7HQ5CjrrRw2fdJC0*KHU_L8o-aQr(Y!Uv8Vv~oWCnId6|X~rkX91ZhKclTn4a7sJ8d^x z8Ap{49(9bJae$}7#8Ha`7Y};{@5MEe7y%mtqJA4M<>Gz09=?wl?e+T>1ChMV0S>NW z?}2o9z0*v)X|s*5BGuuPs2!d-MIN#qFuMTTidayr)5ui188ccrN<=unL1MGEknB!K z!drO-Rjxpn1US{c``iD48rWiRN|t$(R7gYRTpHlb zsu5$eVFYg>b&ieqz8F#@i7}9=8j&lzU6B29k|A1*L?lQsVGkR+iak)z2}eBRaPw8) z(Gk|T1=#LLSYlVha^(Eo8pW=aJ3;Q%FlDc@B_PKxK$Q_yrBI1syo;yp7&eI$E1;Vq zMd|g~Y2YxmE6zt#(574vVyMQ3265E6ZdB6g!>_;Bdz&8U&G)usQBv@})8-o4sL()u z!k&lqBTT=5UPcQ%>_vFO-oai$w0K1OH#}jV2y5vR>~+M3{DHlJs#ZNA7aSo5tPi`; z45tzY7ao!|LNSOnCQz=Z#WxV?by@3S8OjBLq4BgKliHyl^u@7$1EIaXXZ7lmguPF{ zx@v2zea2C}k~}V)nO>UwX#(|ok9hjq;y6mVI#4pnKYSgZ{HK12RBv&xgvOP71SPGk z-Ib9#sn|db`EX~}f=jW;R^Mw;Qc$J+)SJ$nnYjDnfE}+*el(NoC|QrXRkUY$$JMQO zyf#N3SGO>X?Irf}Zs97HKu?le>yCQ?(LI#>Te-+{7T z(DSh6jdf5?&84tBq`9dBp>%and2io3{MPUSs1@gh1{l2(7&>5pT;)BuG8Ib#;_s)R4wOpu!0XdEe*yTP1sA~-Y z1e0K5@4%>h-h07)p2Xe-njw_Ehnf)fzT9XK=zL#r9S{lwg_HzV@2hXlV^P*T+`p?e-*v-$OZKM+AQtK zo6pZ2^!e=>l&7vw*(@Q4V#ke{w(hAMDtb}l{lekr+tuIZe*HQ+XZDTAy{U(s`btsk z4gG~#lQfml?;v`bOB(WcbLxfwIDP7Wi_-;{oSPHYT-GPV9ei`qps(~HPiB6dbnUwS z#E0jHz1(GgyXSO!@@X$#J~(=G4z*$XYnQe!%c1_0U3zS6Q4STh;+^-GJa?pB{lOw# zYkX>aR{E{+KO9}bRdpMzHnp76_vX>?e?Qh#R7EwQvd=*mUBRgtX6T1eeJ`R7AFZP3bj@xWv%4wbwVS*@?*fdxkfFg^cDI0V}J9@ z&>!be>Q)&KsMbfscJlRFKD0lsz_PlPb+s?wa!!(>$Sbg_Zk2IThJ04W)pAO|xdy}X zmH9F}6RoVJxdx}hGg9TWm}r%8+?IN;tJN*o7sLC>elk20t)INy2~aER4={vD-f{K3 ZNOcfY&REW7$4iFQra2f|l8S;B(XXzcSq(#EmRZ**| z|IF|I|NNQ$-S7Wim4>gKrq{_$Y+LN|P=cE_PiNeE-eXd)B{6ZF$21-f*tp`c4DYcE 
zytGD2UnI?!(!>{>GZ{B+w@YFR7tL8{UT=1~8GVtPb}*6{J%kqMVe++4Bb;&+yCpH1 z6t{5#H_E8e9OzqNzL=5mEsZgJYfk;y>dqZahqy4?+>%&_=wWwY&dkQQoha*O2Pw$eQm5EUlFK z6;cYx_uKM2Xl!Sa6@;6rEf7_m2+S(NZBqehzFKbW6&3>%CPtY&Hi$0{f;NJ;+srsX zj5-`PI6jP_xVN^jwz9Uxab$*|vk`6p3zd3*Q%E8H3yOIw4_raL{ zpT>40SugKCySU%#e3I4Ruh6iGhY4$5b6UI9WMybr@ZEp)7BqSR>3s{_1`i=w@>F~_ ziDV7IZE*_8>X!lS@C=f*9-H4gkYv>-g{Rf(h8YZf>N#9?uX<(l(u?FAwqGzLyxMRqDWS=fo)jkP#(!@@Ele*Xz{{STpzP1dC9dNq!YR4=d{{BybsBGCFaZb zT(1~N7FHW`%1*N8f!g^&86<1!s{QxUruuDAime1v>MC|(`4p1%n*qmcVTGwA>vP=B znMu~iA3>YjU153WFw_|K)N?rKQFQjw`{&6y%-xa3Um6-uKJC$; zs<3F1)qpw7uXWF_)z~f4S9)P$*9-UkW^BP7@H7x{$1y=lUk=BKy8%~;-j#W07Blc1+(OJcZ#q45nbSR{FS59e?$lkdT3%#H)=Yyos&n4Ji; zrrFj$vr!J(#!-;|K#F2vcrybT1}E?eC(i72m~0$lQEp@S5?){&-Z5DbXJoQ+OH$d# zP4F4;vEsc(e2gS~#d{o-N3hv#b7;Zl(w7^hy+Qis-sSGlHF_sq~K+KeELQEuA9M8CI49tGy zIORgk=B=^TI4BUHGO#n6d>smew8EekOtf#owB={mX}j6VI7;+zsb}nr10oe396Q6~ z6kx5$YjKSvM!~{>tluU``SQAa2fmLO=Z*Us1JQ!b0RgTNZ;5Sqqti^gXtPZyA~gT!L3q1l|!gtzi4YC?f746v&``I^zi zwqn{;BH)Q5uOqBZ4}^w-GCA9}Jj^*Zn%t)k8V8mSp4QvPg4`}nleGnA!5qj<&8b1k zkA>X_pM%*%eQ&|O2PW7}xU0n7R^08t-5+qb3wL{P_Yrj3?}MYix4ut5eIL144W|J$ zBz7H0iji`q$OE`wh=`m?+dX(}KOT#M3YCIF626fe68UHajy$x7$4haEtnOVA>@v8+ z%T0+)nk|R>`4YQaAuE8C@$6k7Mjb3M*d;CAA{EL|rIrSGv+Bfz92k+8khaE_*S;7s zNfP6sQl)SxyPQz{@{%!5jK(Y|FkuZFW5kyDj)N_pakzw%Cwz=GE)OhstSqq=;Eqzi zTVvU^%1w}KH9}phY$52euj7uw-8AUL2*D}Pb{RH_9V=j%VngZWfc`oiIuOwe0iAZ@ zW;}voV8n+#;?c=%iMxAD-emwjH~wrkl7f$&I(=kgLj%PLdkOrX#L*(Aqb4_viuR>N$d1R&I=J^^2=(ji{>f%}#Bm zB;dLFUYlBis`RY4oT(nP?@FiLtFxZX!}Un9qou0#Z11?1ddEvgDC1fR!*W4eX~wZE z7q)UW`>ue#%Y1F{X7>a%dls~T-H}@5wzL1GjeXHi(WY;Uw)Vf#L9y$7OhbrLDo;Ra znLEA!+DDZQF!q`fa$ok|l#tg}3pFw!m$L$PsSg3lYC`~z$zx(4z^Jm{d%=CO z#C``nV<`IwlMwc?LUb5(zAv~A2!(-CN&>4(dvADzF0s@t@HxSp|Ni?0Ck^U(ZpFxos_Yea3}?2Sm^*jp9sK?3Z1w3cc#w2+Z(JUFoU@(U zRGEkVoXm9VJtYs#kD%JF`1o4A_Un9j5Q8c%YiSUwPa)DBDZ*MBtO>22dhgYD(Wn>$ z-6oTQld&e0o;duR=uA?ci@Kq&WS~q#w{Ad-PeTIT>vCb<1%r%_@T-7=lamxWU?SR6 z)Us;swoVGZR<}p$`E*}=D1A2>ZOCvIJgoRvqvxlw&u_wKIHQqI)|w8?{!b3f8i&u( 
zg2SbEl?=)l=*8(61*c;aoQ`3CbU3ZMvi3IKXG$x&-og9Tu+-zX4f?GwrY@gdhWV%1 zq41;P^Q@^q4}jCBe^{LUXD`E%Q3tLWjve@N$-Wui7&=wnxcJ6fw+-8VKkkojpR1~O zpRP}T?9hFg>rakAS6g)4;nwG&dwsI4NxD3=GBf7LPmUg~*S;628cekKe8yWxU&zYp zwz|6X8P(A$u4-wpZfo__dsTm$Qb+H#D72hLxs{`b6|7H3xU4NGcnNh*g|?vw!X(wy z4J}DpemXPpwt~||6VMU-Q=xql!aqpP5hkLyKd3&jvwyf3r#qnXGHAIe=(C?wJs%A1 zi2inK9+!RRP^0HZ#_2{pzX_+Ca9aJXt_i38&(s6f(NSKUR$)5i$6iiX^#*u`jZRB} zC707O|GC0+uP``q&>VedVcxSk`hNOy`tr}d-nhth-JqokK*--o#fkv*{z9#udavrw z`0C7UPedNlQgu9lQG!+f^9kVqm-iQHVOqUcgd1;Pv7^GOmIiBItEb+3_2J+01pcR6e3(A$G*P0ZLeMKv`UEPhEi4iD7 zAHGSD50Q797Crc24-xecg%A25ln)v~(YMiCL_rVTeFz>p&}+~+?7;Rvzu(>XabDJW z#O_yp(#cSNz@iPhCUva>j`}^TYXgr+$?A=h4MB2dy@GO9JTQaFuEsn*oHzUz1g-5P(mg16JHnf~PH zEp$s5{e&gDmE&>a5~?-%)I5eaQ?1D-KR@DPx-}3xkBjJ5B=QPxqFc2i_i!QI+SMat zOts=)^5O*rCPVSsyRkcQHR7Ka-WTNSCu^@p>O@LkJR-*1b5006}RE zZ5)XNHK9Czjf6B6PyVjh{ueyTNmHp&^k`!{$|k627k-Aa2ukywJ&rO7N^|EfAu~Zu zC_z73m!`DTH`>kf$dsm1qo~7i6&VRCdaNvn3Lg3{cUUx+8D38kv>H@Mqe*oC`$SJ)*M zqbbqGwm#Z)#4&9Z$pdK3SfevBsjU`C0mU*hC5dfD8jZ+k3{9+9YwtPtvfjP8LTCC% z|G2-|%kTWoIrn_d`JUf7yOb0c*cgkprp{@n7;T1Zj*K8=pw-G`G7bC8VxLKqvxT6^ zfc1o+9H^*~rGIZAjBS;N9F>J%VE;%aDIh5zDIh5zDIh5zDIh5zDIh8E-zd-$D_2BE zMG-q=7^=F~Vx|}hJ+pGw=Vq7YYD=?M7vySDjW$)SvQsvP!AaS*YYWz8moCw&Rc4FJ zWO0}&2jlW@FxA*{snEc;+)28uy&}z`_li(fGm_W{&a6HzS6x_sjr{&2Diql)awI z33b}mC4?e6A%S=~9-j`jjj@>w=r{<9a=)`8Xh27XJuL>e!^UXKv)AY7vT=^^**T>i z;ipuh0r-qCQT3F`#(4ZQ_K_VX->hRZ+EnLevt5v@4uhq!-eO|dQ)*&6_~o(SAU*0m zb}7Es)M@B{b0VTw#(pG|6p$2<6p$2<6p$2<6p$2<6p$2<6p$2<6p$47|5qS`P{+qd zD-;BwQdwGe#LE%N@SWjv z!hRYyKlI&D2ssm?Q}?MisIIFTRkM|Ql<|sA#fsqW;F8&&&b9|lu&+oaDIh5zDe%8g zfR2e)C>08&QprAql2EY5mK6$eJUF)I&##`I^q~L2vC&<}4w1cwM@nv=8x9KtPbHoK zz-ML1pdz>i8N?mUKqii}Kay9yb>@ROdVV0}&7mjv94z{RKcVzB+eHi0te`21yIKRh99=E{s7_*$1K+BU}IU zJ{=hddCUF6K;*>%lJ}t3ceah4{GkA8v~fcxU0%f66Hpn0;#Sbk#U_q1a^1&m^kadL zUmiGkXmt0_NZy0KH1n5Ni@y*cO(rf~(uNUgA5?mfW0A!?(m%$?pvv>~ygO=$dsTp$eD&)-XlvQM6~zeo#(Pm!^xS~Rl+Sr)v)j)cN>Xfglzv?%d>QNAmp7DMLip`Z<4$Ry}QU9yWJ^3nuhVRU~vOB 
zD8q}c0Szy#D8|UWAIj*kK*+y-*kH64zew^Pbhj@j7F;n2kkx6Nkg1sn*$B_`NG-aI z*b7e-W2Ej>O>HRqd(?NW6YRCl-tG$4k?%rA{wMwUpuBR=cz(wqsA}`o&VQ%Fb2-f| z2d)_>(GiImu%R8ntdQuytSX2@m+spgz5=2hzL&1?hD@a)`~zBWapy}bLi!+hXkzgv zTT(%PspuEC@7xs9jJvq~OOE3A4~xyF5Z1a8%Kc-^?#jFGRZqhv$DSKZK0ADZykS)J zmvtn8?!NVO!{$;U+nI<`sUMa<&EEq@xir>db^}!U$Cz!@rh8S>up#N!g{yy8;(?^| zzv@a|x(mKNy8dEc$b2EYF@$4*0>hyGYrMtq%mMBu7R8u7@5vHI>CcvPU)uuBHOrb( zHn#l+H0~$<_3VaAP;fQZb#L<@1#F@ivUs)W-CUk!)^bXSVnl8G{fZk#g+CSLm7I_h zzMUp-X*91bKdBN9f@5UR8$6ZZ zpG(Q#nb)~8M+M1EHIKJxwNRLmTGf7p6cU&=ZWSwAxD2l6ucv2RoYtZkv*X_x^3I-) zz2Gkz@10{Q5WC>?y571nDA2{d_R7bxLiVO|ZvR&GaQ71ZIscR8Q_hQG#6Hw}>#jFw z8a8<~{A*8I&k6Ec;#2X*Ta!Rv*F3s^v`ff#KgO*uD+cZ6KA`t=ai-^?F=l&0C%m%) zvdJ5JyOtb*l#07OQLpZV6i?w>;VKFWzqM33c;qL3Yzi@2A!{q%RiICzJ?MkXcBF?z z9AkEB?G~?mI(AI)>hAJ0yCG)p=iBp6WkLS?dBtT{_6ykzW>{e*mV5^|*Kqs4{JY$j ztSCn8gBM2*dWmV+WUsDv&XFJen7nZAx7(U8^nh;LAL2I`+lA~cAE5Qsz}gZ{Yx@|- O&KQ4ajM?A+XyiX&8PqQT literal 0 HcmV?d00001 diff --git a/tests/data/tconfig.json b/tests/data/tconfig.json new file mode 100644 index 0000000..607462e --- /dev/null +++ b/tests/data/tconfig.json @@ -0,0 +1,7 @@ +{ + "sleep_time": 0.5, + "ram_unit": "megabytes", + "gpu_ram_unit": "megabytes", + "time_unit": "seconds", + "n_expected_cores": 2 +} diff --git a/tests/test_cli.py b/tests/test_cli.py index 2ec42ca..dd23414 100644 --- a/tests/test_cli.py +++ b/tests/test_cli.py @@ -4,7 +4,7 @@ import utils -@pt.fixture(name='format_', params=['text', 'json', None]) +@pt.fixture(name='format_', params=['text', 'json', 'pickle', None]) def get_format(request) -> str | None: yield request.param @@ -14,60 +14,82 @@ def get_output(request) -> str | None: yield request.param -test_data = [ - (['-e', 'my-command', '--ru=kilobytes'], ['my-command'], {'disable_logs': False, 'ram_unit': 'kilobytes'}), - (['--execute', 'my-command arg1 ', '--disable-logs'], ['my-command', 'arg1'], {'disable_logs': True}), +execute_test_data = [ + (['-e', 'my-command', '--ru=kilobytes'], ['my-command'], {'disable_logs': 
False, 'overwrite': False, 'ram_unit': 'kilobytes'}), + ( + ['--execute', 'my-command arg1 ', '--disable-logs', '--overwrite'], ['my-command', 'arg1'], + {'disable_logs': True, 'overwrite': True} + ), ( ['--execute=my-command arg1 arg2', '--st=0.4', '--gb=nvidia', '--tf=track-file.sqlite'], ['my-command', 'arg1', 'arg2'], - {'disable_logs': False, 'sleep_time': 0.4, 'gpu_brand': 'nvidia', 'tracking_file': 'track-file.sqlite'} + {'disable_logs': False, 'overwrite': False, 'sleep_time': 0.4, 'gpu_brand': 'nvidia', 'tracking_file': 'track-file.sqlite'} ), ( ['-e', 'my-command', '--gru=megabytes', '--tu=days', '--gb=amd', '--tf=track-file.csv'], ['my-command'], - {'disable_logs': False, 'gpu_ram_unit': 'megabytes', 'time_unit': 'days', 'gpu_brand': 'amd', 'tracking_file': 'track-file.csv'}), + {'disable_logs': False, 'overwrite': False, 'gpu_ram_unit': 'megabytes', 'time_unit': 'days', 'gpu_brand': 'amd', 'tracking_file': 'track-file.csv'}), ( - ['-e', 'my-command', '--nec=3', '--guuids=gpu-id1,gpu-id2,gpu-id3', '--gb=amd'], ['my-command'], - {'disable_logs': False, 'n_expected_cores': 3, 'gpu_uuids': {'gpu-id1', 'gpu-id2', 'gpu-id3'}, 'gpu_brand': 'amd'}), - (['-e', 'my-command', '--guuids=gpu-id1'], ['my-command'], {'disable_logs': False, 'gpu_uuids': {'gpu-id1'}})] + ['-e', 'my-command', '--nec=3', '--guuids=gpu-id1,gpu-id2,gpu-id3', '--gb=amd', '--tconfig=tests/data/tconfig.json'], + ['my-command'], + { + 'disable_logs': False, 'overwrite': False, 'n_expected_cores': 3, 'gpu_uuids': {'gpu-id1', 'gpu-id2', 'gpu-id3'}, + 'gpu_brand': 'amd', 'sleep_time': 0.5, 'ram_unit': 'megabytes', 'gpu_ram_unit': 'megabytes', 'time_unit': 'seconds' + } + ), + (['-e', 'my-command', '--guuids=gpu-id1'], ['my-command'], {'disable_logs': False, 'overwrite': False, 'gpu_uuids': {'gpu-id1'}})] + + +class PickleableMock: + text = 'text' + json_str = '{\n "key": "json"\n}' + + @staticmethod + def pickle(): + return 
b'\x80\x04\x955\x00\x00\x00\x00\x00\x00\x00\x8c\x08test_cli\x94\x8c\x0ePickleableMock\x94\x93\x94)\x81\x94}\x94\x8c\x03key\x94\x8c\x06pickle\x94sb.' + + def __str__(self): + return PickleableMock.text + @staticmethod + def to_json(): + return {'key': 'json'} -@pt.mark.parametrize('argv,command,kwargs', test_data) -def test_main(mocker, argv: list[str], command: list[str], kwargs: dict, format_: str | None, output: str | None): + def __getstate__(self): + return {'key': 'pickle'} + + def __enter__(self): + return self + + def __exit__(self, exc_type, exc_val, exc_tb): + return self + + +@pt.mark.parametrize('argv,command,kwargs', execute_test_data) +def test_execute(mocker, argv: list[str], command: list[str], kwargs: dict, format_: str | None, output: str | None): + _mock_cli(mocker, format_, output, argv) argv = ['gpu-tracker'] + argv argv += ['-f', format_] if format_ else [] argv += ['-o', output] if output else [] mocker.patch('sys.argv', argv) process_mock = mocker.MagicMock(returncode=0, pid=666) subprocess_mock = mocker.patch('gpu_tracker.__main__.subp', Popen=mocker.MagicMock(return_value=process_mock)) - tracker_str = 'tracker-str' - tracker_json = {'tracker': 'json'} - tracker_mock = mocker.MagicMock( - __str__=mocker.MagicMock(return_value=tracker_str), to_json=mocker.MagicMock(return_value=tracker_json), __enter__=lambda self: self) - TrackerMock = mocker.patch('gpu_tracker.__main__.Tracker', return_value=tracker_mock) - print_mock = mocker.patch('builtins.print') - cli.main() - TrackerMock.assert_called_with(process_id=process_mock.pid, **kwargs) + tracker_mock = PickleableMock() + TrackerMock_ = mocker.patch('gpu_tracker.__main__.Tracker', return_value=tracker_mock) + print_args = [('Resource tracking complete. 
Process completed with status code: 0',)] + _test_process_output(mocker, format_, output, print_args) + TrackerMock_.assert_called_with(process_id=process_mock.pid, **kwargs) subprocess_mock.Popen.assert_called_once_with(command) process_mock.wait.assert_called_once_with() - if format_ == 'text' or format_ is None: - tracker_mock.__str__.assert_called_once_with() - output_str = tracker_str - else: - tracker_mock.to_json.assert_called_once_with() - output_str = '{\n "tracker": "json"\n}' - print_args = [('Resource tracking complete. Process completed with status code: 0',)] - if output is None: - print_args.append((output_str,)) - else: - with open(output, 'r') as file: - assert output_str == file.read() - os.remove(output) - utils.assert_args_list(print_mock, print_args) error_data = [ (['-e '], 'Empty command provided.'), (['-e', 'my-command'], 'Command not found: "my-command"'), (['-e', 'my-command'], f'The following error occurred when starting the command "my-command":'), - (['-e', 'my-command', '-f', 'invalid-format'], '"invalid-format" is not a valid format. Valid values are "json" or "text".')] + (['-e', 'my-command', '-f', 'invalid-format'], '"invalid-format" is not a valid format. Valid values are "json" or "text".'), + ( + ['sub-track', 'compare'], + 'A mapping of tracking session name to file path must be provided either through the -m option or a config file.' 
+ ) +] @pt.mark.parametrize('argv,error_message', error_data) @@ -87,3 +109,79 @@ def test_errors(mocker, argv: list[str], error_message: str): cli.main() assert str(error.value) == '1' log_mock.error.assert_called_once_with(error_message) + + +def _mock_cli(mocker, format_: str | None, output: str | None, argv: list[str]): + argv = ['gpu-tracker'] + argv + argv += ['-f', format_] if format_ else [] + argv += ['-o', output] if output else [] + mocker.patch('sys.argv', argv) + + +def _test_process_output(mocker, format_: str, output: str | None, print_args: list[tuple[str | bytes]] | None = None): + print_args = list[tuple[str | bytes]]() if print_args is None else print_args + print_mock = mocker.patch('builtins.print') + cli.main() + if format_ == 'text' or format_ is None: + output_str = PickleableMock.text + elif format_ == 'json': + output_str = PickleableMock.json_str + else: + output_str = PickleableMock.pickle() + if output is None: + print_args.append((output_str,)) + else: + with open(output, 'r' if format_ != 'pickle' else 'rb') as file: + assert output_str == file.read() + os.remove(output) + utils.assert_args_list(print_mock, print_args) + + +def test_analyze(mocker, format_: str | None, output: str | None): + _mock_cli(mocker, format_, output, ['sub-track', 'analyze', '--tf=tracking.sqlite', '--stf=sub-tracking.csv']) + analyzer_mock = mocker.MagicMock(sub_tracking_results=mocker.MagicMock(return_value=PickleableMock())) + SubTrackingAnalyzerMock = mocker.patch('gpu_tracker.__main__.SubTrackingAnalyzer', return_value=analyzer_mock) + _test_process_output(mocker, format_, output) + SubTrackingAnalyzerMock.assert_called_once_with('tracking.sqlite', 'sub-tracking.csv') + analyzer_mock.sub_tracking_results.assert_called_once_with() + + +combine_test_data = [ + (['-p', 'file1.csv', '-p', 'file2.csv', '-p', 'file3.csv'], ['file1.csv', 'file2.csv', 'file3.csv']), + ( + ['-p', 'tests/data/sub-tracking-results/files-to-combine/'], + [ + 
'tests/data/sub-tracking-results/files-to-combine/1723811.sub.tmp.sqlite', + 'tests/data/sub-tracking-results/files-to-combine/1723814.sub.tmp.sqlite', + 'tests/data/sub-tracking-results/files-to-combine/1723815.sub.tmp.sqlite', + 'tests/data/sub-tracking-results/files-to-combine/main.sub.tmp.sqlite' + ] + ) +] + + +@pt.mark.parametrize('argv,files', combine_test_data) +def test_combine(mocker, argv, files: list[str]): + _mock_cli(mocker, None, None, ['sub-track', 'combine', '--stf=sub-tracking.csv'] + argv) + analyzer_mock = mocker.MagicMock(combine_sub_tracking_files=mocker.MagicMock()) + SubTrackingAnalyzerMock = mocker.patch('gpu_tracker.__main__.SubTrackingAnalyzer', return_value=analyzer_mock) + cli.main() + SubTrackingAnalyzerMock.assert_called_once_with(None, 'sub-tracking.csv') + analyzer_mock.combine_sub_tracking_files.assert_called_once_with(files) + + +compare_test_data = [ + (['-m', 'A=file1.pkl', '-m', 'B=file2.pkl'], 'mean'), + (['--cconfig=tests/data/cconfig.json', '--stat=min'], 'min'), + (['--cconfig=tests/data/cconfig.json'], 'max') +] + + +@pt.mark.parametrize('argv,statistic', compare_test_data) +def test_compare(mocker, format_: str | None, output: str | None, argv, statistic: str): + _mock_cli(mocker, format_, output, ['sub-track', 'compare'] + argv) + comparison_mock = mocker.MagicMock(compare=mocker.MagicMock(return_value=PickleableMock())) + TrackingComparisonMock = mocker.patch('gpu_tracker.__main__.TrackingComparison', return_value=comparison_mock) + _test_process_output(mocker, format_, output) + TrackingComparisonMock.assert_called_once_with({'A': 'file1.pkl', 'B': 'file2.pkl'}) + comparison_mock.compare.assert_called_once_with(statistic) diff --git a/tests/test_tracker.py b/tests/test_tracker.py index 6bd17b4..51a16c3 100644 --- a/tests/test_tracker.py +++ b/tests/test_tracker.py @@ -225,7 +225,7 @@ def start_mock(self): expected_measurements = json.load(file) assert expected_measurements == tracker.to_json() if tracking_file is None: 
- assert tracker._tracking_process.tracking_file is None + assert tracker._tracking_process.data_proxy is None else: utils.test_tracking_file(actual_tracking_file=tracking_file, expected_tracking_file=f'{expected_measurements_file}.csv') diff --git a/tests/utils.py b/tests/utils.py index aae3e33..c4296c2 100644 --- a/tests/utils.py +++ b/tests/utils.py @@ -2,7 +2,7 @@ import sqlalchemy as sqlalc import os # noinspection PyProtectedMember -from gpu_tracker._helper_classes import _SQLiteWriter +from gpu_tracker._helper_classes import _SQLiteDataProxy import gpu_tracker as gput @@ -17,7 +17,7 @@ def test_tracking_file( actual_tracking_log = pd.read_csv(actual_tracking_file) else: engine = sqlalc.create_engine(f'sqlite:///{actual_tracking_file}', poolclass=sqlalc.pool.NullPool) - actual_tracking_log = pd.read_sql_table(_SQLiteWriter._DATA_TABLE, engine) + actual_tracking_log = pd.read_sql_table(_SQLiteDataProxy._DATA_TABLE, engine) if excluded_col is not None: actual_tracking_log[excluded_col].apply(excluded_col_test) actual_tracking_log = actual_tracking_log[actual_tracking_log.columns.difference([excluded_col])] From 83a512539ff2a673e2624765edb1031fe0ca55b6 Mon Sep 17 00:00:00 2001 From: erikhuck Date: Thu, 15 May 2025 14:27:48 -0400 Subject: [PATCH 4/7] Completes the analysis test --- src/gpu_tracker/_helper_classes.py | 39 +++-- src/gpu_tracker/sub_tracker.py | 15 +- src/gpu_tracker/tracker.py | 2 +- tests/data/decorated-function-other-file.csv | 4 +- tests/data/decorated-function.csv | 24 +-- tests/data/sub-tracker.csv | 20 +-- ...tmp.sqlite => 1723811.sub-tracking.sqlite} | Bin ...tmp.sqlite => 1723814.sub-tracking.sqlite} | Bin ...tmp.sqlite => 1723815.sub-tracking.sqlite} | Bin ...ub.tmp.sqlite => main.sub-tracking.sqlite} | Bin tests/data/sub-tracking-results/results.json | 1 + tests/data/sub-tracking-results/results.txt | 151 ++++++++++++++++++ .../sub-tracking-results/sub-tracking.csv | 45 ++++++ .../{tmp.sqlite => sub-tracking.sqlite} | Bin 12288 -> 12288 
bytes .../data/sub-tracking-results/sub.tmp.sqlite | Bin 8192 -> 0 bytes tests/data/sub-tracking-results/tracking.csv | 28 ++++ .../data/sub-tracking-results/tracking.sqlite | Bin 0 -> 16384 bytes tests/install.sh | 2 +- tests/test_cli.py | 8 +- tests/test_sub_tracker.py | 48 ++++-- tests/test_tracker.py | 13 +- tests/utils.py | 8 +- 22 files changed, 338 insertions(+), 70 deletions(-) rename tests/data/sub-tracking-results/files-to-combine/{1723811.sub.tmp.sqlite => 1723811.sub-tracking.sqlite} (100%) rename tests/data/sub-tracking-results/files-to-combine/{1723814.sub.tmp.sqlite => 1723814.sub-tracking.sqlite} (100%) rename tests/data/sub-tracking-results/files-to-combine/{1723815.sub.tmp.sqlite => 1723815.sub-tracking.sqlite} (100%) rename tests/data/sub-tracking-results/files-to-combine/{main.sub.tmp.sqlite => main.sub-tracking.sqlite} (100%) create mode 100644 tests/data/sub-tracking-results/results.json create mode 100644 tests/data/sub-tracking-results/results.txt create mode 100644 tests/data/sub-tracking-results/sub-tracking.csv rename tests/data/sub-tracking-results/{tmp.sqlite => sub-tracking.sqlite} (63%) delete mode 100644 tests/data/sub-tracking-results/sub.tmp.sqlite create mode 100644 tests/data/sub-tracking-results/tracking.csv create mode 100644 tests/data/sub-tracking-results/tracking.sqlite diff --git a/src/gpu_tracker/_helper_classes.py b/src/gpu_tracker/_helper_classes.py index 9dc5941..1ae3188 100644 --- a/src/gpu_tracker/_helper_classes.py +++ b/src/gpu_tracker/_helper_classes.py @@ -13,6 +13,10 @@ _SUMMARY_STATS = ['min', 'max', 'mean', 'std'] +def _summary_stats(data: pd.Series | pd.DataFrame) -> pd.Series | pd.DataFrame: + stats = data.describe().loc[_SUMMARY_STATS] + stats.loc['std'] = data.std(ddof=0) + return stats class _GPUQuerier(abc.ABC): command = None @@ -348,7 +352,8 @@ def load_code_block_names(self) -> list[str]: return sorted(self.timestamps.code_block_name.unique()) def _overall_timepoint_results(self, fields: list[str]) -> 
pd.DataFrame: - return self.timepoints[fields].describe().loc[_SUMMARY_STATS].T + stats = _summary_stats(self.timepoints[fields]) + return stats.T class _SQLiteDataProxy(_DataProxy): @@ -441,21 +446,27 @@ def load_timepoints(self, timestamp_pairs: list[tuple[float, float]]) -> pd.Data return self._read_sql(sql) def _overall_timepoint_results(self, fields: list[str]) -> pd.DataFrame: - sql = 'SELECT\n' - std_func = 'sqrt((sum({0} * {0}) - (sum({0}) * sum({0})) / count({0})) / count({0})) AS "STDDEV({0})"' - sql_funcs = 'MIN', 'MAX', 'AVG', 'STDDEV' - field_aggregates = list[str]() - for func in sql_funcs: - for field in fields: - aggregate = f'{func}({field})' if func != 'STDDEV' else std_func.format(field) - field_aggregates.append(aggregate) - sql += ',\n'.join(field_aggregates) - sql += f'\nFROM {_SQLiteDataProxy._DATA_TABLE}' + cte_blocks = list[str]() + selects = list[str]() + for col in fields: + mean_cte = f'mean_{col}' + diff_cte = f'diff_{col}' + cte_blocks.append(f'{mean_cte} AS (SELECT AVG({col}) AS mean FROM {_SQLiteDataProxy._DATA_TABLE})') + cte_blocks.append( + f'{diff_cte} AS (SELECT {col} - (SELECT mean FROM {mean_cte}) AS diff FROM {_SQLiteDataProxy._DATA_TABLE})' + ) + selects.append(f'MIN({col})') + selects.append(f'MAX({col})') + selects.append(f'(SELECT mean FROM {mean_cte}) AS "AVG({col})"') + selects.append(f'(SELECT SQRT(AVG(diff * diff)) FROM {diff_cte}) AS "STDDEV({col})"') + with_clause = "WITH " + ",\n ".join(cte_blocks) + select_clause = "SELECT " + ",\n ".join(selects) + sql = f"{with_clause}\n{select_clause} FROM {_SQLiteDataProxy._DATA_TABLE};" results = self._read_sql(sql).squeeze() reshaped_results = pd.DataFrame() - n_fields = len(fields) - for i, sql_func, index in zip(range(0, len(results), n_fields), sql_funcs, _SUMMARY_STATS): - next_row = results.iloc[i: i + n_fields] + sql_funcs = 'MIN', 'MAX', 'AVG', 'STDDEV' + for sql_func, index in zip(sql_funcs, _SUMMARY_STATS): + next_row = results.loc[[idx.startswith(sql_func) for idx 
in results.index]] next_row.index = [col.replace(sql_func, '').replace('(', '').replace(')', '') for col in next_row.index] reshaped_results.loc[:, index] = next_row return reshaped_results diff --git a/src/gpu_tracker/sub_tracker.py b/src/gpu_tracker/sub_tracker.py index 9140dee..c8c0b8c 100644 --- a/src/gpu_tracker/sub_tracker.py +++ b/src/gpu_tracker/sub_tracker.py @@ -9,7 +9,7 @@ import pickle as pkl import logging as log import typing as typ -from ._helper_classes import _DataProxy, _SubTrackerLog, _SUMMARY_STATS +from ._helper_classes import _DataProxy, _SubTrackerLog, _SUMMARY_STATS, _summary_stats class SubTracker: @@ -168,7 +168,7 @@ def sub_tracking_results(self) -> SubTrackingResults: for code_block_name in code_block_names: time_stamp_pairs = self.load_timestamp_pairs(code_block_name) time_stamp_diffs = pd.Series([stop_time - start_time for (start_time, stop_time) in time_stamp_pairs]) - compute_time_results = time_stamp_diffs.describe()[_SUMMARY_STATS] + compute_time_results = _summary_stats(time_stamp_diffs) compute_time_results['total'] = time_stamp_diffs.sum().item() timepoints = self.load_timepoints(time_stamp_pairs) num_non_empty_calls = sum( @@ -179,11 +179,11 @@ def sub_tracking_results(self) -> SubTrackingResults: ] ) timepoints = timepoints.drop(columns='timestamp') + resource_usage = _summary_stats(timepoints).T code_block_results.append( CodeBlockResults( name=code_block_name, num_timepoints=len(timepoints), num_calls=len(time_stamp_pairs), - num_non_empty_calls=num_non_empty_calls, compute_time=compute_time_results, - resource_usage=timepoints.describe().loc[_SUMMARY_STATS].T + num_non_empty_calls=num_non_empty_calls, compute_time=compute_time_results, resource_usage=resource_usage ) ) return SubTrackingResults(overall_results, static_data, code_block_results) @@ -409,7 +409,10 @@ def _dict_to_str(string: str, results: dict, indent: int, no_title_keys: set[str string = _dict_to_str(string, value, indent + 1, no_title_keys) elif type(value) 
is pd.DataFrame: string += f'{key}:\n' - with pd.option_context('display.max_rows', None, 'display.max_columns', None, 'display.width', 5000): + with pd.option_context( + 'display.max_rows', None, 'display.max_columns', None, 'display.width', 5000, 'display.float_format', + lambda x: f'{float(f"{x:.8f}")}' + ): df_str = str(value) df_str = '\n'.join(indent_str + '\t' + line for line in df_str.splitlines()) string += df_str + '\n' @@ -418,7 +421,7 @@ def _dict_to_str(string: str, results: dict, indent: int, no_title_keys: set[str for value in value: string = _dict_to_str(string, value, indent + 1, no_title_keys) + '\n' else: - value = f'{value:.4f}' if type(value) is float else value + value = f'{value:.8f}' if type(value) is float else value string += f'{key}:{" " * (max_key_len - len(key))} {value}\n' return string diff --git a/src/gpu_tracker/tracker.py b/src/gpu_tracker/tracker.py index 066c120..273c3c3 100644 --- a/src/gpu_tracker/tracker.py +++ b/src/gpu_tracker/tracker.py @@ -65,6 +65,7 @@ def __init__( self._is_linux = platform.system().lower() == 'linux' cannot_connect_warning = ('The {} command is installed but cannot connect to a GPU. 
' 'The GPU RAM and GPU utilization values will remain 0.0.') + self.data_proxy = _DataProxy.create(tracking_file, overwrite) if gpu_brand is None: nvidia_available = _NvidiaQuerier.is_available() nvidia_installed = nvidia_available is not None @@ -119,7 +120,6 @@ def __init__( self._resource_usage = ResourceUsage( max_ram=max_ram, max_gpu_ram=max_gpu_ram, cpu_utilization=cpu_utilization, gpu_utilization=gpu_utilization, compute_time=compute_time) - self.data_proxy = _DataProxy.create(tracking_file, overwrite) if self.data_proxy is not None: static_data = _StaticData( ram_unit, gpu_ram_unit, time_unit, max_ram.system_capacity, max_gpu_ram.system_capacity, system_core_count, diff --git a/tests/data/decorated-function-other-file.csv b/tests/data/decorated-function-other-file.csv index ee8c85a..a27c5d5 100644 --- a/tests/data/decorated-function-other-file.csv +++ b/tests/data/decorated-function-other-file.csv @@ -1,3 +1,3 @@ position,process_id,timestamp -START,1234,12 -STOP,1234,13 +0,1234,12 +1,1234,13 diff --git a/tests/data/decorated-function.csv b/tests/data/decorated-function.csv index 46958f9..4ad09e9 100644 --- a/tests/data/decorated-function.csv +++ b/tests/data/decorated-function.csv @@ -1,13 +1,13 @@ position,process_id,timestamp -START,1234,0 -STOP,1234,1 -START,1234,2 -STOP,1234,3 -START,1234,4 -STOP,1234,5 -START,1234,6 -STOP,1234,7 -START,1234,8 -STOP,1234,9 -START,1234,10 -STOP,1234,11 +0,1234,0 +1,1234,1 +0,1234,2 +1,1234,3 +0,1234,4 +1,1234,5 +0,1234,6 +1,1234,7 +0,1234,8 +1,1234,9 +0,1234,10 +1,1234,11 diff --git a/tests/data/sub-tracker.csv b/tests/data/sub-tracker.csv index 1f134bf..6b57616 100644 --- a/tests/data/sub-tracker.csv +++ b/tests/data/sub-tracker.csv @@ -1,11 +1,11 @@ position,process_id,timestamp -START,1234,0 -STOP,1234,1 -START,1234,2 -STOP,1234,3 -START,1234,4 -STOP,1234,5 -START,1234,6 -STOP,1234,7 -START,1234,8 -STOP,1234,9 +0,1234,0 +1,1234,1 +0,1234,2 +1,1234,3 +0,1234,4 +1,1234,5 +0,1234,6 +1,1234,7 +0,1234,8 +1,1234,9 diff 
--git a/tests/data/sub-tracking-results/files-to-combine/1723811.sub.tmp.sqlite b/tests/data/sub-tracking-results/files-to-combine/1723811.sub-tracking.sqlite similarity index 100% rename from tests/data/sub-tracking-results/files-to-combine/1723811.sub.tmp.sqlite rename to tests/data/sub-tracking-results/files-to-combine/1723811.sub-tracking.sqlite diff --git a/tests/data/sub-tracking-results/files-to-combine/1723814.sub.tmp.sqlite b/tests/data/sub-tracking-results/files-to-combine/1723814.sub-tracking.sqlite similarity index 100% rename from tests/data/sub-tracking-results/files-to-combine/1723814.sub.tmp.sqlite rename to tests/data/sub-tracking-results/files-to-combine/1723814.sub-tracking.sqlite diff --git a/tests/data/sub-tracking-results/files-to-combine/1723815.sub.tmp.sqlite b/tests/data/sub-tracking-results/files-to-combine/1723815.sub-tracking.sqlite similarity index 100% rename from tests/data/sub-tracking-results/files-to-combine/1723815.sub.tmp.sqlite rename to tests/data/sub-tracking-results/files-to-combine/1723815.sub-tracking.sqlite diff --git a/tests/data/sub-tracking-results/files-to-combine/main.sub.tmp.sqlite b/tests/data/sub-tracking-results/files-to-combine/main.sub-tracking.sqlite similarity index 100% rename from tests/data/sub-tracking-results/files-to-combine/main.sub.tmp.sqlite rename to tests/data/sub-tracking-results/files-to-combine/main.sub-tracking.sqlite diff --git a/tests/data/sub-tracking-results/results.json b/tests/data/sub-tracking-results/results.json new file mode 100644 index 0000000..a0a29c4 --- /dev/null +++ b/tests/data/sub-tracking-results/results.json @@ -0,0 +1 @@ +{"overall": {"main_ram": {"min": 0.086913024, "max": 1.576493056, "mean": 0.60060155904, "std": 0.5269852839221265}, "descendants_ram": {"min": 0.0, "max": 11.671842816000002, "mean": 2.50271186944, "std": 3.8823366111559143}, "combined_ram": {"min": 0.27824128, "max": 11.736928256, "mean": 3.0928365158399997, "std": 3.56834760161389}, "system_ram": {"min": 
49.029361664, "max": 60.485840896000006, "mean": 52.59960434688, "std": 3.4679637005170876}, "main_gpu_ram": {"min": 0.0, "max": 0.0, "mean": 0.0, "std": 0.0}, "descendants_gpu_ram": {"min": 0.0, "max": 0.0, "mean": 0.0, "std": 0.0}, "combined_gpu_ram": {"min": 0.0, "max": 0.0, "mean": 0.0, "std": 0.0}, "system_gpu_ram": {"min": 0.216, "max": 0.216, "mean": 0.21600000000000003, "std": 2.7755575615628914e-17}, "gpu_sum_utilization_percent": {"min": 0.0, "max": 0.0, "mean": 0.0, "std": 0.0}, "gpu_hardware_utilization_percent": {"min": 0.0, "max": 0.0, "mean": 0.0, "std": 0.0}, "main_n_threads": {"min": 13.0, "max": 15.0, "mean": 13.96, "std": 0.9991996797437437}, "descendants_n_threads": {"min": 0.0, "max": 36.0, "mean": 17.28, "std": 17.985594235387385}, "combined_n_threads": {"min": 13.0, "max": 51.0, "mean": 31.24, "std": 18.98479391513113}, "cpu_system_sum_utilization_percent": {"min": 115.1, "max": 535.8, "mean": 242.06, "std": 119.4792366898952}, "cpu_system_hardware_utilization_percent": {"min": 9.591666666666663, "max": 44.65, "mean": 20.171666666666667, "std": 9.956603057491266}, "cpu_main_sum_utilization_percent": {"min": 0.0, "max": 101.6, "mean": 48.17199999999999, "std": 46.37932746386045}, "cpu_main_hardware_utilization_percent": {"min": 0.0, "max": 8.466666666666665, "mean": 4.014333333333333, "std": 3.8649439553217038}, "cpu_descendants_sum_utilization_percent": {"min": 0.0, "max": 300.0, "mean": 111.51199999999999, "std": 128.94789744699213}, "cpu_descendants_hardware_utilization_percent": {"min": 0.0, "max": 25.0, "mean": 9.292666666666667, "std": 10.745658120582677}, "cpu_combined_sum_utilization_percent": {"min": 85.59999999999998, "max": 300.0, "mean": 159.684, "std": 89.63866433632307}, "cpu_combined_hardware_utilization_percent": {"min": 7.133333333333333, "max": 25.0, "mean": 13.307, "std": 7.469888694693591}}, "static_data": {"ram_unit": "gigabytes", "gpu_ram_unit": "gigabytes", "time_unit": "hours", "ram_system_capacity": 67.25414912000001, 
"gpu_ram_system_capacity": 16.376, "system_core_count": 12, "n_expected_cores": 12, "system_gpu_count": 1, "n_expected_gpus": 1}, "code_block_results": [{"name": "X", "num_timepoints": 12, "num_calls": 10, "num_non_empty_calls": 10, "compute_time": {"min": 3.612563371658325, "max": 3.8075716495513916, "mean": 3.7359343767166138, "std": 0.06707601591495356, "total": 37.35934376716614}, "resource_usage": {"main_ram": {"min": 0.086913024, "max": 0.08691712, "mean": 0.086916096, "std": 1.7736200269484101e-06}, "descendants_ram": {"min": 0.213147648, "max": 11.671842816000002, "mean": 5.213983061333333, "std": 4.155068934721833}, "combined_ram": {"min": 0.27824128, "max": 11.736928256, "mean": 5.279072256000001, "std": 4.155067901779363}, "system_ram": {"min": 49.029361664, "max": 60.485840896000006, "mean": 54.712742229333344, "std": 4.04883975253843}, "main_gpu_ram": {"min": 0.0, "max": 0.0, "mean": 0.0, "std": 0.0}, "descendants_gpu_ram": {"min": 0.0, "max": 0.0, "mean": 0.0, "std": 0.0}, "combined_gpu_ram": {"min": 0.0, "max": 0.0, "mean": 0.0, "std": 0.0}, "system_gpu_ram": {"min": 0.216, "max": 0.216, "mean": 0.21600000000000005, "std": 5.551115123125783e-17}, "gpu_sum_utilization_percent": {"min": 0.0, "max": 0.0, "mean": 0.0, "std": 0.0}, "gpu_hardware_utilization_percent": {"min": 0.0, "max": 0.0, "mean": 0.0, "std": 0.0}, "main_n_threads": {"min": 15.0, "max": 15.0, "mean": 15.0, "std": 0.0}, "descendants_n_threads": {"min": 36.0, "max": 36.0, "mean": 36.0, "std": 0.0}, "combined_n_threads": {"min": 51.0, "max": 51.0, "mean": 51.0, "std": 0.0}, "cpu_system_sum_utilization_percent": {"min": 122.6, "max": 535.8, "mean": 321.575, "std": 104.04617664767888}, "cpu_system_hardware_utilization_percent": {"min": 10.216666666666669, "max": 44.65, "mean": 26.79791666666667, "std": 8.670514720639908}, "cpu_main_sum_utilization_percent": {"min": 0.0, "max": 0.0, "mean": 0.0, "std": 0.0}, "cpu_main_hardware_utilization_percent": {"min": 0.0, "max": 0.0, "mean": 0.0, "std": 
0.0}, "cpu_descendants_sum_utilization_percent": {"min": 91.4, "max": 300.0, "mean": 232.3166666666667, "std": 81.09138706148487}, "cpu_descendants_hardware_utilization_percent": {"min": 7.616666666666667, "max": 25.0, "mean": 19.35972222222222, "std": 6.757615588457072}, "cpu_combined_sum_utilization_percent": {"min": 91.4, "max": 300.0, "mean": 232.3166666666667, "std": 81.09138706148487}, "cpu_combined_hardware_utilization_percent": {"min": 7.616666666666667, "max": 25.0, "mean": 19.35972222222222, "std": 6.757615588457072}}}, {"name": "Y", "num_timepoints": 2, "num_calls": 1, "num_non_empty_calls": 1, "compute_time": {"min": 1.847283124923706, "max": 1.847283124923706, "mean": 1.847283124923706, "std": 0.0, "total": 1.847283124923706}, "resource_usage": {"main_ram": {"min": 0.481046528, "max": 1.576493056, "mean": 1.0287697919999998, "std": 0.5477232639999999}, "descendants_ram": {"min": 0.0, "max": 0.0, "mean": 0.0, "std": 0.0}, "combined_ram": {"min": 0.481046528, "max": 1.576493056, "mean": 1.0287697919999998, "std": 0.5477232639999999}, "system_ram": {"min": 49.9365888, "max": 50.91651584, "mean": 50.42655232, "std": 0.4899635199999999}, "main_gpu_ram": {"min": 0.0, "max": 0.0, "mean": 0.0, "std": 0.0}, "descendants_gpu_ram": {"min": 0.0, "max": 0.0, "mean": 0.0, "std": 0.0}, "combined_gpu_ram": {"min": 0.0, "max": 0.0, "mean": 0.0, "std": 0.0}, "system_gpu_ram": {"min": 0.216, "max": 0.216, "mean": 0.216, "std": 0.0}, "gpu_sum_utilization_percent": {"min": 0.0, "max": 0.0, "mean": 0.0, "std": 0.0}, "gpu_hardware_utilization_percent": {"min": 0.0, "max": 0.0, "mean": 0.0, "std": 0.0}, "main_n_threads": {"min": 13.0, "max": 13.0, "mean": 13.0, "std": 0.0}, "descendants_n_threads": {"min": 0.0, "max": 0.0, "mean": 0.0, "std": 0.0}, "combined_n_threads": {"min": 13.0, "max": 13.0, "mean": 13.0, "std": 0.0}, "cpu_system_sum_utilization_percent": {"min": 145.3, "max": 158.79999999999998, "mean": 152.05, "std": 6.749999999999986}, 
"cpu_system_hardware_utilization_percent": {"min": 12.108333333333334, "max": 13.233333333333333, "mean": 12.670833333333334, "std": 0.5624999999999991}, "cpu_main_sum_utilization_percent": {"min": 85.59999999999998, "max": 101.6, "mean": 93.6, "std": 8.000000000000007}, "cpu_main_hardware_utilization_percent": {"min": 7.133333333333333, "max": 8.466666666666665, "mean": 7.799999999999999, "std": 0.6666666666666661}, "cpu_descendants_sum_utilization_percent": {"min": 0.0, "max": 0.0, "mean": 0.0, "std": 0.0}, "cpu_descendants_hardware_utilization_percent": {"min": 0.0, "max": 0.0, "mean": 0.0, "std": 0.0}, "cpu_combined_sum_utilization_percent": {"min": 85.59999999999998, "max": 101.6, "mean": 93.6, "std": 8.000000000000007}, "cpu_combined_hardware_utilization_percent": {"min": 7.133333333333333, "max": 8.466666666666665, "mean": 7.799999999999999, "std": 0.6666666666666661}}}, {"name": "tmp.py:38", "num_timepoints": 8, "num_calls": 10, "num_non_empty_calls": 8, "compute_time": {"min": 1.0983691215515137, "max": 1.2065882682800293, "mean": 1.1289910793304443, "std": 0.03344234830705016, "total": 11.289910793304443}, "resource_usage": {"main_ram": {"min": 0.861704192, "max": 1.2588974080000002, "mean": 1.121115648, "std": 0.12433487309462093}, "descendants_ram": {"min": 0.0, "max": 0.0, "mean": 0.0, "std": 0.0}, "combined_ram": {"min": 0.861704192, "max": 1.2588974080000002, "mean": 1.121115648, "std": 0.12433487309462093}, "system_ram": {"min": 50.392174592, "max": 51.044614144, "mean": 50.689142272, "std": 0.1849103280538935}, "main_gpu_ram": {"min": 0.0, "max": 0.0, "mean": 0.0, "std": 0.0}, "descendants_gpu_ram": {"min": 0.0, "max": 0.0, "mean": 0.0, "std": 0.0}, "combined_gpu_ram": {"min": 0.0, "max": 0.0, "mean": 0.0, "std": 0.0}, "system_gpu_ram": {"min": 0.216, "max": 0.216, "mean": 0.216, "std": 0.0}, "gpu_sum_utilization_percent": {"min": 0.0, "max": 0.0, "mean": 0.0, "std": 0.0}, "gpu_hardware_utilization_percent": {"min": 0.0, "max": 0.0, "mean": 0.0, 
"std": 0.0}, "main_n_threads": {"min": 13.0, "max": 13.0, "mean": 13.0, "std": 0.0}, "descendants_n_threads": {"min": 0.0, "max": 0.0, "mean": 0.0, "std": 0.0}, "combined_n_threads": {"min": 13.0, "max": 13.0, "mean": 13.0, "std": 0.0}, "cpu_system_sum_utilization_percent": {"min": 115.1, "max": 405.5, "mean": 190.02499999999998, "std": 93.79466602637913}, "cpu_system_hardware_utilization_percent": {"min": 9.591666666666663, "max": 33.791666666666664, "mean": 15.835416666666665, "std": 7.816222168864927}, "cpu_main_sum_utilization_percent": {"min": 90.4, "max": 97.9, "mean": 93.625, "std": 2.4319488070269912}, "cpu_main_hardware_utilization_percent": {"min": 7.533333333333334, "max": 8.158333333333333, "mean": 7.802083333333333, "std": 0.20266240058558257}, "cpu_descendants_sum_utilization_percent": {"min": 0.0, "max": 0.0, "mean": 0.0, "std": 0.0}, "cpu_descendants_hardware_utilization_percent": {"min": 0.0, "max": 0.0, "mean": 0.0, "std": 0.0}, "cpu_combined_sum_utilization_percent": {"min": 90.4, "max": 97.9, "mean": 93.625, "std": 2.4319488070269912}, "cpu_combined_hardware_utilization_percent": {"min": 7.533333333333334, "max": 8.158333333333333, "mean": 7.802083333333333, "std": 0.20266240058558257}}}, {"name": "tmp.py:9", "num_timepoints": 1, "num_calls": 1, "num_non_empty_calls": 1, "compute_time": {"min": 1.0934793949127197, "max": 1.0934793949127197, "mean": 1.0934793949127197, "std": 0.0, "total": 1.0934793949127197}, "resource_usage": {"main_ram": {"min": 1.21389056, "max": 1.21389056, "mean": 1.21389056, "std": 0.0}, "descendants_ram": {"min": 0.0, "max": 0.0, "mean": 0.0, "std": 0.0}, "combined_ram": {"min": 1.21389056, "max": 1.21389056, "mean": 1.21389056, "std": 0.0}, "system_ram": {"min": 50.539143168, "max": 50.539143168, "mean": 50.539143168, "std": 0.0}, "main_gpu_ram": {"min": 0.0, "max": 0.0, "mean": 0.0, "std": 0.0}, "descendants_gpu_ram": {"min": 0.0, "max": 0.0, "mean": 0.0, "std": 0.0}, "combined_gpu_ram": {"min": 0.0, "max": 0.0, "mean": 
0.0, "std": 0.0}, "system_gpu_ram": {"min": 0.216, "max": 0.216, "mean": 0.216, "std": 0.0}, "gpu_sum_utilization_percent": {"min": 0.0, "max": 0.0, "mean": 0.0, "std": 0.0}, "gpu_hardware_utilization_percent": {"min": 0.0, "max": 0.0, "mean": 0.0, "std": 0.0}, "main_n_threads": {"min": 13.0, "max": 13.0, "mean": 13.0, "std": 0.0}, "descendants_n_threads": {"min": 0.0, "max": 0.0, "mean": 0.0, "std": 0.0}, "combined_n_threads": {"min": 13.0, "max": 13.0, "mean": 13.0, "std": 0.0}, "cpu_system_sum_utilization_percent": {"min": 115.7, "max": 115.7, "mean": 115.7, "std": 0.0}, "cpu_system_hardware_utilization_percent": {"min": 9.641666666666664, "max": 9.641666666666664, "mean": 9.641666666666664, "std": 0.0}, "cpu_main_sum_utilization_percent": {"min": 89.5, "max": 89.5, "mean": 89.5, "std": 0.0}, "cpu_main_hardware_utilization_percent": {"min": 7.458333333333333, "max": 7.458333333333333, "mean": 7.458333333333333, "std": 0.0}, "cpu_descendants_sum_utilization_percent": {"min": 0.0, "max": 0.0, "mean": 0.0, "std": 0.0}, "cpu_descendants_hardware_utilization_percent": {"min": 0.0, "max": 0.0, "mean": 0.0, "std": 0.0}, "cpu_combined_sum_utilization_percent": {"min": 89.5, "max": 89.5, "mean": 89.5, "std": 0.0}, "cpu_combined_hardware_utilization_percent": {"min": 7.458333333333333, "max": 7.458333333333333, "mean": 7.458333333333333, "std": 0.0}}}]} \ No newline at end of file diff --git a/tests/data/sub-tracking-results/results.txt b/tests/data/sub-tracking-results/results.txt new file mode 100644 index 0000000..5712c2f --- /dev/null +++ b/tests/data/sub-tracking-results/results.txt @@ -0,0 +1,151 @@ +Overall: + min max mean std + main_ram 0.08691302 1.57649306 0.60060156 0.52698528 + descendants_ram 0.0 11.67184282 2.50271187 3.88233661 + combined_ram 0.27824128 11.73692826 3.09283652 3.5683476 + system_ram 49.02936166 60.4858409 52.59960435 3.4679637 + main_gpu_ram 0.0 0.0 0.0 0.0 + descendants_gpu_ram 0.0 0.0 0.0 0.0 + combined_gpu_ram 0.0 0.0 0.0 0.0 + 
system_gpu_ram 0.216 0.216 0.216 0.0 + gpu_sum_utilization_percent 0.0 0.0 0.0 0.0 + gpu_hardware_utilization_percent 0.0 0.0 0.0 0.0 + main_n_threads 13.0 15.0 13.96 0.99919968 + descendants_n_threads 0.0 36.0 17.28 17.98559424 + combined_n_threads 13.0 51.0 31.24 18.98479392 + cpu_system_sum_utilization_percent 115.1 535.8 242.06 119.47923669 + cpu_system_hardware_utilization_percent 9.59166667 44.65 20.17166667 9.95660306 + cpu_main_sum_utilization_percent 0.0 101.6 48.172 46.37932746 + cpu_main_hardware_utilization_percent 0.0 8.46666667 4.01433333 3.86494396 + cpu_descendants_sum_utilization_percent 0.0 300.0 111.512 128.94789745 + cpu_descendants_hardware_utilization_percent 0.0 25.0 9.29266667 10.74565812 + cpu_combined_sum_utilization_percent 85.6 300.0 159.684 89.63866434 + cpu_combined_hardware_utilization_percent 7.13333333 25.0 13.307 7.46988869 +Static Data: + ram_unit gpu_ram_unit time_unit ram_system_capacity gpu_ram_system_capacity system_core_count n_expected_cores system_gpu_count n_expected_gpus + gigabytes gigabytes hours 67.25414912 16.376 12 12 1 1 +Code Block Results: + Name: X + Num Timepoints: 12 + Num Calls: 10 + Num Non Empty Calls: 10 + Compute Time: + min max mean std total + 3.61256337 3.80757165 3.73593438 0.06707602 37.35934377 + Resource Usage: + min max mean std + main_ram 0.08691302 0.08691712 0.0869161 1.77e-06 + descendants_ram 0.21314765 11.67184282 5.21398306 4.15506893 + combined_ram 0.27824128 11.73692826 5.27907226 4.1550679 + system_ram 49.02936166 60.4858409 54.71274223 4.04883975 + main_gpu_ram 0.0 0.0 0.0 0.0 + descendants_gpu_ram 0.0 0.0 0.0 0.0 + combined_gpu_ram 0.0 0.0 0.0 0.0 + system_gpu_ram 0.216 0.216 0.216 0.0 + gpu_sum_utilization_percent 0.0 0.0 0.0 0.0 + gpu_hardware_utilization_percent 0.0 0.0 0.0 0.0 + main_n_threads 15.0 15.0 15.0 0.0 + descendants_n_threads 36.0 36.0 36.0 0.0 + combined_n_threads 51.0 51.0 51.0 0.0 + cpu_system_sum_utilization_percent 122.6 535.8 321.575 104.04617665 + 
cpu_system_hardware_utilization_percent 10.21666667 44.65 26.79791667 8.67051472 + cpu_main_sum_utilization_percent 0.0 0.0 0.0 0.0 + cpu_main_hardware_utilization_percent 0.0 0.0 0.0 0.0 + cpu_descendants_sum_utilization_percent 91.4 300.0 232.31666667 81.09138706 + cpu_descendants_hardware_utilization_percent 7.61666667 25.0 19.35972222 6.75761559 + cpu_combined_sum_utilization_percent 91.4 300.0 232.31666667 81.09138706 + cpu_combined_hardware_utilization_percent 7.61666667 25.0 19.35972222 6.75761559 + + Name: Y + Num Timepoints: 2 + Num Calls: 1 + Num Non Empty Calls: 1 + Compute Time: + min max mean std total + 1.84728312 1.84728312 1.84728312 0.0 1.84728312 + Resource Usage: + min max mean std + main_ram 0.48104653 1.57649306 1.02876979 0.54772326 + descendants_ram 0.0 0.0 0.0 0.0 + combined_ram 0.48104653 1.57649306 1.02876979 0.54772326 + system_ram 49.9365888 50.91651584 50.42655232 0.48996352 + main_gpu_ram 0.0 0.0 0.0 0.0 + descendants_gpu_ram 0.0 0.0 0.0 0.0 + combined_gpu_ram 0.0 0.0 0.0 0.0 + system_gpu_ram 0.216 0.216 0.216 0.0 + gpu_sum_utilization_percent 0.0 0.0 0.0 0.0 + gpu_hardware_utilization_percent 0.0 0.0 0.0 0.0 + main_n_threads 13.0 13.0 13.0 0.0 + descendants_n_threads 0.0 0.0 0.0 0.0 + combined_n_threads 13.0 13.0 13.0 0.0 + cpu_system_sum_utilization_percent 145.3 158.8 152.05 6.75 + cpu_system_hardware_utilization_percent 12.10833333 13.23333333 12.67083333 0.5625 + cpu_main_sum_utilization_percent 85.6 101.6 93.6 8.0 + cpu_main_hardware_utilization_percent 7.13333333 8.46666667 7.8 0.66666667 + cpu_descendants_sum_utilization_percent 0.0 0.0 0.0 0.0 + cpu_descendants_hardware_utilization_percent 0.0 0.0 0.0 0.0 + cpu_combined_sum_utilization_percent 85.6 101.6 93.6 8.0 + cpu_combined_hardware_utilization_percent 7.13333333 8.46666667 7.8 0.66666667 + + Name: tmp.py:38 + Num Timepoints: 8 + Num Calls: 10 + Num Non Empty Calls: 8 + Compute Time: + min max mean std total + 1.09836912 1.20658827 1.12899108 0.03344235 11.28991079 + 
Resource Usage: + min max mean std + main_ram 0.86170419 1.25889741 1.12111565 0.12433487 + descendants_ram 0.0 0.0 0.0 0.0 + combined_ram 0.86170419 1.25889741 1.12111565 0.12433487 + system_ram 50.39217459 51.04461414 50.68914227 0.18491033 + main_gpu_ram 0.0 0.0 0.0 0.0 + descendants_gpu_ram 0.0 0.0 0.0 0.0 + combined_gpu_ram 0.0 0.0 0.0 0.0 + system_gpu_ram 0.216 0.216 0.216 0.0 + gpu_sum_utilization_percent 0.0 0.0 0.0 0.0 + gpu_hardware_utilization_percent 0.0 0.0 0.0 0.0 + main_n_threads 13.0 13.0 13.0 0.0 + descendants_n_threads 0.0 0.0 0.0 0.0 + combined_n_threads 13.0 13.0 13.0 0.0 + cpu_system_sum_utilization_percent 115.1 405.5 190.025 93.79466603 + cpu_system_hardware_utilization_percent 9.59166667 33.79166667 15.83541667 7.81622217 + cpu_main_sum_utilization_percent 90.4 97.9 93.625 2.43194881 + cpu_main_hardware_utilization_percent 7.53333333 8.15833333 7.80208333 0.2026624 + cpu_descendants_sum_utilization_percent 0.0 0.0 0.0 0.0 + cpu_descendants_hardware_utilization_percent 0.0 0.0 0.0 0.0 + cpu_combined_sum_utilization_percent 90.4 97.9 93.625 2.43194881 + cpu_combined_hardware_utilization_percent 7.53333333 8.15833333 7.80208333 0.2026624 + + Name: tmp.py:9 + Num Timepoints: 1 + Num Calls: 1 + Num Non Empty Calls: 1 + Compute Time: + min max mean std total + 1.09347939 1.09347939 1.09347939 0.0 1.09347939 + Resource Usage: + min max mean std + main_ram 1.21389056 1.21389056 1.21389056 0.0 + descendants_ram 0.0 0.0 0.0 0.0 + combined_ram 1.21389056 1.21389056 1.21389056 0.0 + system_ram 50.53914317 50.53914317 50.53914317 0.0 + main_gpu_ram 0.0 0.0 0.0 0.0 + descendants_gpu_ram 0.0 0.0 0.0 0.0 + combined_gpu_ram 0.0 0.0 0.0 0.0 + system_gpu_ram 0.216 0.216 0.216 0.0 + gpu_sum_utilization_percent 0.0 0.0 0.0 0.0 + gpu_hardware_utilization_percent 0.0 0.0 0.0 0.0 + main_n_threads 13.0 13.0 13.0 0.0 + descendants_n_threads 0.0 0.0 0.0 0.0 + combined_n_threads 13.0 13.0 13.0 0.0 + cpu_system_sum_utilization_percent 115.7 115.7 115.7 0.0 + 
cpu_system_hardware_utilization_percent 9.64166667 9.64166667 9.64166667 0.0 + cpu_main_sum_utilization_percent 89.5 89.5 89.5 0.0 + cpu_main_hardware_utilization_percent 7.45833333 7.45833333 7.45833333 0.0 + cpu_descendants_sum_utilization_percent 0.0 0.0 0.0 0.0 + cpu_descendants_hardware_utilization_percent 0.0 0.0 0.0 0.0 + cpu_combined_sum_utilization_percent 89.5 89.5 89.5 0.0 + cpu_combined_hardware_utilization_percent 7.45833333 7.45833333 7.45833333 0.0 + diff --git a/tests/data/sub-tracking-results/sub-tracking.csv b/tests/data/sub-tracking-results/sub-tracking.csv new file mode 100644 index 0000000..d20c760 --- /dev/null +++ b/tests/data/sub-tracking-results/sub-tracking.csv @@ -0,0 +1,45 @@ +process_id,code_block_name,position,timestamp +1723811,X,0,1745449609.7528222 +1723811,X,1,1745449613.5325918 +1723811,X,0,1745449613.5606213 +1723811,X,1,1745449617.3617816 +1723811,X,0,1745449617.3804042 +1723811,X,1,1745449621.187976 +1723814,X,0,1745449609.756739 +1723814,X,1,1745449613.5322783 +1723814,X,0,1745449613.5696628 +1723814,X,1,1745449617.3296978 +1723814,X,0,1745449617.347492 +1723814,X,1,1745449621.1066897 +1723815,X,0,1745449609.7629318 +1723815,X,1,1745449613.3849857 +1723815,X,0,1745449613.403655 +1723815,X,1,1745449617.1585994 +1723815,X,0,1745449617.1770504 +1723815,X,1,1745449620.8635588 +1723815,X,0,1745449620.8831315 +1723815,X,1,1745449624.4956949 +1723727,Y,0,1745449624.6481984 +1723727,Y,1,1745449626.4954815 +1723727,tmp.py:9,0,1745449626.5598955 +1723727,tmp.py:9,1,1745449627.653375 +1723727,tmp.py:38,0,1745449627.6730132 +1723727,tmp.py:38,1,1745449628.7713823 +1723727,tmp.py:38,0,1745449628.7894087 +1723727,tmp.py:38,1,1745449629.898063 +1723727,tmp.py:38,0,1745449629.9163494 +1723727,tmp.py:38,1,1745449631.0465028 +1723727,tmp.py:38,0,1745449631.0629628 +1723727,tmp.py:38,1,1745449632.1779408 +1723727,tmp.py:38,0,1745449632.1962223 +1723727,tmp.py:38,1,1745449633.2969272 +1723727,tmp.py:38,0,1745449633.3154583 
+1723727,tmp.py:38,1,1745449634.4144106 +1723727,tmp.py:38,0,1745449634.4333444 +1723727,tmp.py:38,1,1745449635.5694306 +1723727,tmp.py:38,0,1745449635.6578789 +1723727,tmp.py:38,1,1745449636.7815094 +1723727,tmp.py:38,0,1745449636.799852 +1723727,tmp.py:38,1,1745449637.9716456 +1723727,tmp.py:38,0,1745449637.9889736 +1723727,tmp.py:38,1,1745449639.195562 diff --git a/tests/data/sub-tracking-results/tmp.sqlite b/tests/data/sub-tracking-results/sub-tracking.sqlite similarity index 63% rename from tests/data/sub-tracking-results/tmp.sqlite rename to tests/data/sub-tracking-results/sub-tracking.sqlite index 45f423679e55c8bc3485fe482e1e73330824d3bf..46038aa7d28f2cc28b43ff09bb6f8631e5b14662 100644 GIT binary patch literal 12288 zcmeH~T}V?=9Ki2&uDN;dJsxZ7G0oeDpNW}1XoBF(8Kx!awI+pQn~PTNLw6&;FhYf( z@=bz#h`gIx^x%U%MASp{@t_Yv`JfRLeH*<+6!g&DN3cr=d=2>@IPCI2zuz`~90qGY z*5Z-f;%R@M*CC6QOd7+oOr0n)41?9Ji*HtaHK{vU-G5hPw$Z{&+1X`YrC2n|piz{+ zkpSmV017|>C;$bZ02F`%Pyh-*0Vn_kHfez&){tp7vzIz$hqK4s?T{Ujopi9xYLTp> zWT|hlijj#}WHJW&{4RGeX!mrB^-YJGj!LDX$>{QTyY0>%zw4ac=kU5k$!d$v1OA{# z_V|4XsO<5&gR;XL5F1-sEt07?^549a6BzUjt)b^=0o_6qXb7D_9jG4dLpjJO{1Dy= z%fdt9j&NNV6MBU%p+P7S5dWQDb*&NXnw>Kns? 
z0#E=7KmjNK1)u;FfC5n9KP^B?^}0NxA^S+H>JsD8;5sVO~Q`2VT6vT%4MTey?!jOBScbsn{rVytWmaG*qaAJjF&0rF5S^ ziBmL`;!In`1`RbG@Oki-IHklsvBj{2^>HdT3fXK|aI%I9ohbBSorY3eZ>w>VhEkm7 t-@Mqe*oC`$SJ)*M zqbbqGwm#Z)#4&9Z$pdK3SfevBsjU`C0mU*hC5dfD8jZ+k3{9+9YwtPtvfjP8LTCC% z|G2-|%kTWoIrn_d`JUf7yOb0c*cgkprp{@n7;T1Zj*K8=pw-G`G7bC8VxLKqvxT6^ zfc1o+9H^*~rGIZAjBS;N9F>J%VE;%aDIh5zDIh5zDIh5zDIh5zDIh8E-zd-$D_2BE zMG-q=7^=F~Vx|}hJ+pGw=Vq7YYD=?M7vySDjW$)SvQsvP!AaS*YYWz8moCw&Rc4FJ zWO0}&2jlW@FxA*{snEc;+)28uy&}z`_li(fGm_W{&a6HzS6x_sjr{&2Diql)awI z33b}mC4?e6A%S=~9-j`jjj@>w=r{<9a=)`8Xh27XJuL>e!^UXKv)AY7vT=^^**T>i z;ipuh0r-qCQT3F`#(4ZQ_K_VX->hRZ+EnLevt5v@4uhq!-eO|dQ)*&6_~o(SAU*0m zb}7Es)M@B{b0VTw#(pG|6p$2<6p$2<6p$2<6p$2<6p$2<6p$2<6p$47|5qS`P{+qd zD-;BwQdwGe#LE%N@SWjv z!hRYyKlI&D2ssm?Q}?MisIIFTRkM|Ql<|sA#fsqW;F8&&&b9|lu&+oaDIh5zDe%8g zfR2e)C>08&QprAql2EY5mK6$eJUF)I&##`I^q~L2vC&<}4w1cwM@nv=8x9KtPbHoK zz-ML1pdz>i8N?mUKqii}Kay9yb>@ROdVV0}&7mjv94z{RKcVzB+eHi0te`21yIKRh99=E{s7_*$1K+BU}IU zJ{=hddCUF6K;*>%lJ}t3ceah4{GkA8v~fcxU0%f66Hpn0;#Sbk#U_q1a^1&m^kadL zUmiGkXmt0_NZy0KH1n5Ni@y*cO(rf~(uNUgA5?mfW0A!?(m%$?pvv>~ygO=$dsTp$eD&)-XlvQM6~zeo#(Pm!^xS~Rl+Sr)v)j)cN>Xfglzv?%d>QNAmp7DMLip`Z<4$Ry}QU9yWJ^3nuhVRU~vOB zD8q}c0Szy#D8|UWAIj*kK*+y-*kH64zew^Pbhj@j7F;n2kkx6Nkg1sn*$B_`NG-aI z*b7e-W2Ej>O>HRqd(?NW6YRCl-tG$4k?%rA{wMwUpuBR=cz(wqsA}`o&VQ%Fb2-f| z2d)_>(GiImu%R8ntdQuytSX2@m+spgz5=2hzL&1?hD@a)`~zBWapy}bLi!+hXkzgv zTT(%PspuEC@7xs9jJvq~OOE3A4~xyF5Z1a8%Kc-^?#jFGRZqhv$DSKZK0ADZykS)J zmvtn8?!NVO!{$;U+nI<`sUMa<&EEq@xir>db^}!U$Cz!@rh8S>up#N!g{yy8;(?^| zzv@a|x(mKNy8dEc$b2EYF@$4*0>hyGYrMtq%mMBu7R8u7@5vHI>CcvPU)uuBHOrb( zHn#l+H0~$<_3VaAP;fQZb#L<@1#F@ivUs)W-CUk!)^bXSVnl8G{fZk#g+CSLm7I_h zzMUp-X*91bKdBN9f@5UR8$6ZZ zpG(Q#nb)~8M+M1EHIKJxwNRLmTGf7p6cU&=ZWSwAxD2l6ucv2RoYtZkv*X_x^3I-) zz2Gkz@10{Q5WC>?y571nDA2{d_R7bxLiVO|ZvR&GaQ71ZIscR8Q_hQG#6Hw}>#jFw z8a8<~{A*8I&k6Ec;#2X*Ta!Rv*F3s^v`ff#KgO*uD+cZ6KA`t=ai-^?F=l&0C%m%) zvdJ5JyOtb*l#07OQLpZV6i?w>;VKFWzqM33c;qL3Yzi@2A!{q%RiICzJ?MkXcBF?z 
z9AkEB?G~?mI(AI)>hAJ0yCG)p=iBp6WkLS?dBtT{_6ykzW>{e*mV5^|*Kqs4{JY$j ztSCn8gBM2*dWmV+WUsDv&XFJen7nZAx7(U8^nh;LAL2I`+lA~cAE5Qsz}gZ{Yx@|- O&KQ4ajM?A+XyiX&8PqQT diff --git a/tests/data/sub-tracking-results/sub.tmp.sqlite b/tests/data/sub-tracking-results/sub.tmp.sqlite deleted file mode 100644 index ac7612301f94cd71343cd6e6f1b9c85e28a2eaa8..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 8192 zcmeH~T}V?=9Ki2&Tl3@Ib3E44W16=wbtY>1pcR6e3(A$G*P0ZLeMKv`UEPhEi4iD7 zAHGSD50Q797Crc24-xecg%A25ln)v~(YMiCL_rVTeFz>p&}+~+?7;Rvzu(>XabDJW z#O_yp(#cSNz@iPhCUva>j`}^TYXgr+$?A=h4MB2dy@GO9JTQaFuEsn*oHzUz1g-5P(mg16JHnf~PH zEp$s5{e&gDmE&>a5~?-%)I5eaQ?1D-KR@DPx-}3xkBjJ5B=QPxqFc2i_i!QI+SMat zOts=)^5O*rCPVSsyRkcQHR7Ka-WTNSCu^@p>O@LkJR-*1b5006}RE zZ5)XNHK9Czjf6B6PyVjh{ueyTNmHp&^k`!{$|k627k-Aa2ukywJ&rO7N^|EfAu~Zu zC_z73m!`DTH`>kf$dsm1qo~7i6&VRCdaNvn3Lg3{cUUx+8D38kv>H`g+5JY)ww`1XwwA8v{fVzpf!^kGowjuV$l>ISSwSKm}aEWh?K<8#EP}{o^!72-HR*y(do3E z^UW^b{q~%Df9L$p;jokx7g%_+wzko3HSpSWMW}*d6rj~A6pBT{+gE%uLJk(Mg@glr zHeD=Iq<(KODC7l&Pe8dMur;7OuywjyXgf7P4NwEr05w1jPy^HeHSoV?U}LPmG9oUH zdCHGB)HIkKyn(kE^(F&vkX-s?Kc16SnxievT2YXrl{9FRHEO%Ts&BSgcx`D;W$8k# zMqSs`te^5BZ?T#^yP+-)r-L_J^+rRJ!D!)~+EoRsvpk;RwZrX(M!Q*WY;3mi+7-H7 zT_L{NrZ=}XnT@>JBsAI04oQ=E8yez0?h=}B-=s!Psc6vDkz$dUO{D=Q!&Bo7Ncw_4n?I2X52z^>(wtblWJ}BGRGX zt=&ENyEdX>l8xhSwC;UmywmrdWA?ZK4|_(B#yw~LVCl4H^nB?JGsn2?&4}UdJu?9M zMHWr~YZKYxniS#0tN%}&dO-q&2^mlbFB;SUH9!qe1JnRDKn+j>)BrU=4NwEr05$O6 zZy=q~#KuM_l?MQD2wV!H-DpuL6T{|*COWQ0U&Fdi14}6`Ov_iWzjKZ?yC6Pe_n@=ZivgLi~+G zLL8RQN?s%6D}Q-;B^T}uIkwMOvvoetiWzkJk$|sGU6mqxPY7`m5~9(p&O;d%n+DO5 zXeckpYmDst{o7oaH{{jkHG^U223awKPS@KtcKC-9WVHo7`b9{%FkgrVp)wMcouHkC zLtbO#>JPiP`@JE*+`VV-==PtmVg_Al#?LPme=b29jbyu|3=>qPlVokNi1e&6(zo(7 zH`^O>qOQ%fTK_34X3&+px&~ibC_x&s2{OrdB?_nrB%))`(jGb_uMzU)L*Z+4kXBs{B7sOT1V>=qpI8Y8(KX(vG>a>%m>xQo7JbE`6H+s z@-_B>u;k&;?rSmH(Atp8$x|i1u^K*eXYlRfoS_~rbf8}rqU4p4$Z%~ z_sOLJ1K>Y2x!|J>DWE%F^s{T%-M^ZMR=tg^Ut+kpept4!g|O>ZDEF)}yD#^aOEV3d 
z9eeC-;<4ek*zs!h$+DgV$h%ed-n#Xr(wW-h$zEFWFggc9q4Eu~jkTEFBCj#KTAS)p zPs4_UUly+TO^Fi{PXD4WdGR**_UPm1P6W)6vReZP3zQfJOhhPQqr2Arsp!1#UapXBiE|gseu? zuHP;luU2|e(Z2Fwd>Hpivsa^{o?miU&3>79@N#?C5zvi(HTSJ2ha^;J!3TdQEWdyc z2WDZU@WKS!$XS>FRD$PTO8n03-euV;NNlTppi8TT!t~^-?)|Kkz;}^ZEbAaHo5bVk z5eIQCt1&zF^&ywvbnLl*igH~)kOcGQzPEayu?!0GVxHgp;XEn3rJOLAA1CJ$_nG*W zerjv&m4oiP4C98FSbhA8{R?l ztA$l1#I<#du+t~*T4VMPJ{ZB9f&B21T+>TxfEu6%r~zt#8lVQK0cwC6pa!S`YJeK} HKQizyht<)I literal 0 HcmV?d00001 diff --git a/tests/install.sh b/tests/install.sh index 7a9a002..12cdaa2 100644 --- a/tests/install.sh +++ b/tests/install.sh @@ -4,5 +4,5 @@ echo "Creating new .env/ directory..." python3 -m venv .env/ source .env/bin/activate python3 -m pip install --upgrade pip -python3 -m pip install pytest pytest-mock pytest-cov sphinx sphinx-rtd-theme notebook +python3 -m pip install pytest pytest-mock pytest-cov sphinx sphinx-rtd-theme notebook deepdiff python3 -m pip install -e . diff --git a/tests/test_cli.py b/tests/test_cli.py index dd23414..9bce8f7 100644 --- a/tests/test_cli.py +++ b/tests/test_cli.py @@ -151,10 +151,10 @@ def test_analyze(mocker, format_: str | None, output: str | None): ( ['-p', 'tests/data/sub-tracking-results/files-to-combine/'], [ - 'tests/data/sub-tracking-results/files-to-combine/1723811.sub.tmp.sqlite', - 'tests/data/sub-tracking-results/files-to-combine/1723814.sub.tmp.sqlite', - 'tests/data/sub-tracking-results/files-to-combine/1723815.sub.tmp.sqlite', - 'tests/data/sub-tracking-results/files-to-combine/main.sub.tmp.sqlite' + 'tests/data/sub-tracking-results/files-to-combine/1723811.sub-tracking.sqlite', + 'tests/data/sub-tracking-results/files-to-combine/1723814.sub-tracking.sqlite', + 'tests/data/sub-tracking-results/files-to-combine/1723815.sub-tracking.sqlite', + 'tests/data/sub-tracking-results/files-to-combine/main.sub-tracking.sqlite' ] ) ] diff --git a/tests/test_sub_tracker.py b/tests/test_sub_tracker.py index a3e54fb..2a0af33 100644 --- a/tests/test_sub_tracker.py +++ 
b/tests/test_sub_tracker.py @@ -1,4 +1,6 @@ import pytest as pt +import json +import deepdiff as deepd import gpu_tracker as gput import utils @@ -8,25 +10,23 @@ def get_code_block_name(request) -> str | None: yield request.param -@pt.fixture(name='sub_tracking_file', params=['sub-tracking-file.csv', 'sub-tracking-file.sqlite', None]) +@pt.fixture(name='sub_tracking_file', params=['sub-tracking-file.csv', 'sub-tracking-file.sqlite']) def get_sub_tracking_file(request) -> str | None: yield request.param -def test_sub_tracker(mocker, code_block_name: str | None, sub_tracking_file: str | None): +def test_sub_tracker(mocker, code_block_name: str | None, sub_tracking_file: str): + sub_tracking_file = f'{code_block_name}_{sub_tracking_file}' n_iterations = 5 getpid_mock = mocker.patch('gpu_tracker.sub_tracker.os.getpid', side_effect=[1234] * n_iterations) time_mock = mocker.patch( 'gpu_tracker.sub_tracker.time', time=mocker.MagicMock(side_effect=range(n_iterations * 2))) - default_code_block_end = 'test_sub_tracker.py:23' + default_code_block_end = 'test_sub_tracker.py:26' for _ in range(n_iterations): with gput.SubTracker(code_block_name=code_block_name, sub_tracking_file=sub_tracking_file) as sub_tracker: if code_block_name is None: assert sub_tracker.code_block_name.endswith(default_code_block_end) - if sub_tracking_file is None: - assert sub_tracker.sub_tracking_file == '1234.csv' - if sub_tracking_file is None: - assert len(getpid_mock.call_args_list) == n_iterations + assert len(getpid_mock.call_args_list) == n_iterations assert len(time_mock.time.call_args_list) == n_iterations * 2 def code_block_name_test(val: str): @@ -36,7 +36,7 @@ def code_block_name_test(val: str): assert val == code_block_name utils.test_tracking_file( actual_tracking_file=sub_tracker.sub_tracking_file, expected_tracking_file='tests/data/sub-tracker.csv', - excluded_col='code_block_name', excluded_col_test=code_block_name_test + excluded_col='code_block_name', 
excluded_col_test=code_block_name_test, is_sub_tracking=True ) @@ -46,7 +46,9 @@ def get_code_block_attribute(request): def test_decorator(mocker, code_block_name: str | None, code_block_attribute: str | None): - @gput.sub_track(code_block_name=code_block_name, code_block_attribute=code_block_attribute) + sub_tracking_file = f'{code_block_name}_{code_block_attribute}.csv' + + @gput.sub_track(code_block_name=code_block_name, code_block_attribute=code_block_attribute, sub_tracking_file=sub_tracking_file) def decorated_function(arg1: int, arg2: int, kwarg1: int = 1, kwarg2: int = 2) -> int: return arg1 + arg2 - (kwarg1 + kwarg2) getpid_mock = mocker.patch('gpu_tracker.sub_tracker.os.getpid', return_value=1234) @@ -69,8 +71,8 @@ def code_block_name_test(val): else: assert val == code_block_name utils.test_tracking_file( - actual_tracking_file='1234.csv', expected_tracking_file=f'tests/data/decorated-function.csv', - excluded_col='code_block_name', excluded_col_test=code_block_name_test + actual_tracking_file=sub_tracking_file, expected_tracking_file=f'tests/data/decorated-function.csv', + excluded_col='code_block_name', excluded_col_test=code_block_name_test, is_sub_tracking=True ) if code_block_name is None and code_block_attribute is None: return_val = utils.function_in_other_file(1, 2, 3, kw1=4, kw2=5) @@ -81,6 +83,26 @@ def code_block_name_test(val): def code_block_name_test(val): assert val.endswith('utils.py:function_in_other_file') utils.test_tracking_file( - actual_tracking_file='1234.csv', expected_tracking_file='tests/data/decorated-function-other-file.csv', - excluded_col='code_block_name', excluded_col_test=code_block_name_test + actual_tracking_file=f'1234.csv', expected_tracking_file='tests/data/decorated-function-other-file.csv', + excluded_col='code_block_name', excluded_col_test=code_block_name_test, is_sub_tracking=True ) + + +@pt.fixture(name='format_', params=['csv', 'sqlite']) +def get_format(request): + yield request.param + + +def 
test_analysis(format_): + folder = 'tests/data/sub-tracking-results' + tracking_file = f'{folder}/tracking.{format_}' + sub_tracking_file = f'{folder}/sub-tracking.{format_}' + analyzer = gput.SubTrackingAnalyzer(tracking_file, sub_tracking_file) + actual_results = analyzer.sub_tracking_results() + with open(f'{folder}/results.json', 'r') as file: + expected_json_results = json.load(file) + diff = deepd.DeepDiff(expected_json_results, actual_results.to_json(), significant_digits=12) + assert not diff + with open(f'{folder}/results.txt', 'r') as file: + expected_str_results = file.read() + assert expected_str_results == str(actual_results) diff --git a/tests/test_tracker.py b/tests/test_tracker.py index 51a16c3..4c9542e 100644 --- a/tests/test_tracker.py +++ b/tests/test_tracker.py @@ -227,7 +227,8 @@ def start_mock(self): if tracking_file is None: assert tracker._tracking_process.data_proxy is None else: - utils.test_tracking_file(actual_tracking_file=tracking_file, expected_tracking_file=f'{expected_measurements_file}.csv') + utils.test_tracking_file( + actual_tracking_file=tracking_file, expected_tracking_file=f'{expected_measurements_file}.csv') def test_cannot_connect_warnings(mocker, caplog): @@ -341,15 +342,17 @@ def test_validate_arguments(mocker): gput.Tracker(ram_unit='milibytes') assert str(error.value) == '"milibytes" is not a valid RAM unit. 
Valid values are bytes, gigabytes, kilobytes, megabytes, terabytes' subprocess_mock = mocker.patch( - 'gpu_tracker._helper_classes.subp', check_output=mocker.MagicMock( - side_effect=[b'', b'', b'uuid ,memory.total [MiB] \ngpu-id1,2048 MiB\ngpu-id2,2048 MiB', b'', b'', b'uuid ,memory.total [MiB] '])) + 'gpu_tracker._helper_classes.subp.check_output', side_effect=[ + b'', b'', b'uuid ,memory.total [MiB] \ngpu-id1,2048 MiB\ngpu-id2,2048 MiB', b'', b'', b'uuid ,memory.total [MiB] ' + ] + ) with pt.raises(ValueError) as error: gput.Tracker(gpu_uuids={'invalid-id'}) - assert len(subprocess_mock.check_output.call_args_list) == 3 + assert len(subprocess_mock.call_args_list) == 3 assert str(error.value) == 'GPU UUID of invalid-id is not valid. Available UUIDs are: gpu-id1, gpu-id2' with pt.raises(ValueError) as error: gput.Tracker(gpu_uuids=set[str]()) - assert len(subprocess_mock.check_output.call_args_list) == 6 + assert len(subprocess_mock.call_args_list) == 6 assert str(error.value) == 'gpu_uuids is not None but the set is empty. Please provide a set of at least one GPU UUID.' 
with pt.raises(ValueError) as error: gput.Tracker(gpu_brand='invalid-brand') diff --git a/tests/utils.py b/tests/utils.py index c4296c2..75fa4fa 100644 --- a/tests/utils.py +++ b/tests/utils.py @@ -12,9 +12,13 @@ def assert_args_list(mock, expected_args_list: list[tuple | dict], use_kwargs: b def test_tracking_file( - actual_tracking_file: str, expected_tracking_file: str, excluded_col: str | None = None, excluded_col_test=None): + actual_tracking_file: str, expected_tracking_file: str, excluded_col: str | None = None, excluded_col_test=None, + is_sub_tracking: bool = False): if actual_tracking_file.endswith('.csv'): - actual_tracking_log = pd.read_csv(actual_tracking_file) + if is_sub_tracking: + actual_tracking_log = pd.read_csv(actual_tracking_file) + else: + actual_tracking_log = pd.read_csv(actual_tracking_file, skiprows=2) else: engine = sqlalc.create_engine(f'sqlite:///{actual_tracking_file}', poolclass=sqlalc.pool.NullPool) actual_tracking_log = pd.read_sql_table(_SQLiteDataProxy._DATA_TABLE, engine) From bca3b2668a2c8c465fbb435314bad457f0ed81e1 Mon Sep 17 00:00:00 2001 From: erikhuck Date: Thu, 15 May 2025 17:42:54 -0400 Subject: [PATCH 5/7] Completes the test for combining sub-tracking files --- .../files-to-combine/1723811.sub-tracking.csv | 7 ++++ .../1723811.sub-tracking.sqlite | Bin 8192 -> 8192 bytes .../files-to-combine/1723814.sub-tracking.csv | 7 ++++ .../1723814.sub-tracking.sqlite | Bin 8192 -> 8192 bytes .../files-to-combine/1723815.sub-tracking.csv | 9 +++++ .../1723815.sub-tracking.sqlite | Bin 8192 -> 8192 bytes .../files-to-combine/main.sub-tracking.csv | 25 +++++++++++++ .../files-to-combine/main.sub-tracking.sqlite | Bin 8192 -> 8192 bytes tests/test_cli.py | 6 +++- tests/test_sub_tracker.py | 33 ++++++++++++++++-- 10 files changed, 84 insertions(+), 3 deletions(-) create mode 100644 tests/data/sub-tracking-results/files-to-combine/1723811.sub-tracking.csv create mode 100644 
tests/data/sub-tracking-results/files-to-combine/1723814.sub-tracking.csv create mode 100644 tests/data/sub-tracking-results/files-to-combine/1723815.sub-tracking.csv create mode 100644 tests/data/sub-tracking-results/files-to-combine/main.sub-tracking.csv diff --git a/tests/data/sub-tracking-results/files-to-combine/1723811.sub-tracking.csv b/tests/data/sub-tracking-results/files-to-combine/1723811.sub-tracking.csv new file mode 100644 index 0000000..54cf550 --- /dev/null +++ b/tests/data/sub-tracking-results/files-to-combine/1723811.sub-tracking.csv @@ -0,0 +1,7 @@ +process_id,code_block_name,position,timestamp +1723811,X,0,1745449609.7528222 +1723811,X,1,1745449613.5325918 +1723811,X,0,1745449613.5606213 +1723811,X,1,1745449617.3617816 +1723811,X,0,1745449617.3804042 +1723811,X,1,1745449621.187976 diff --git a/tests/data/sub-tracking-results/files-to-combine/1723811.sub-tracking.sqlite b/tests/data/sub-tracking-results/files-to-combine/1723811.sub-tracking.sqlite index 9738eb51bb4b6b013b8d3c973b5c8d597ab1b558..405a6241ede4d39425cda82e4b7e1e4c3d1d69fe 100644 GIT binary patch delta 117 zcmZp0XmFSyEy%^dz`z8=Fu*)f$C#gsK`-k9FHne)znOu*d9$K`C_iU4BfGe$C}U&f zaZypm z#>mO-{Cb8?p6;H0Avy|NoXPnqsqslU`N`Swd5O8H3L&l$VDW?oF{5%DS(#@~= rck(ieNs3Qaloy@6N7j=0n54+$3v!~9dF8s8DWY>FaiK%F(WPj diff --git a/tests/test_cli.py b/tests/test_cli.py index 9bce8f7..f5cc7fb 100644 --- a/tests/test_cli.py +++ b/tests/test_cli.py @@ -154,7 +154,11 @@ def test_analyze(mocker, format_: str | None, output: str | None): 'tests/data/sub-tracking-results/files-to-combine/1723811.sub-tracking.sqlite', 'tests/data/sub-tracking-results/files-to-combine/1723814.sub-tracking.sqlite', 'tests/data/sub-tracking-results/files-to-combine/1723815.sub-tracking.sqlite', - 'tests/data/sub-tracking-results/files-to-combine/main.sub-tracking.sqlite' + 'tests/data/sub-tracking-results/files-to-combine/main.sub-tracking.sqlite', + 'tests/data/sub-tracking-results/files-to-combine/1723811.sub-tracking.csv', + 
'tests/data/sub-tracking-results/files-to-combine/1723814.sub-tracking.csv', + 'tests/data/sub-tracking-results/files-to-combine/1723815.sub-tracking.csv', + 'tests/data/sub-tracking-results/files-to-combine/main.sub-tracking.csv' ] ) ] diff --git a/tests/test_sub_tracker.py b/tests/test_sub_tracker.py index 2a0af33..e9e8ea6 100644 --- a/tests/test_sub_tracker.py +++ b/tests/test_sub_tracker.py @@ -1,5 +1,8 @@ import pytest as pt import json +import os +import pandas as pd +import sqlalchemy as sqlalc import deepdiff as deepd import gpu_tracker as gput import utils @@ -21,7 +24,7 @@ def test_sub_tracker(mocker, code_block_name: str | None, sub_tracking_file: str getpid_mock = mocker.patch('gpu_tracker.sub_tracker.os.getpid', side_effect=[1234] * n_iterations) time_mock = mocker.patch( 'gpu_tracker.sub_tracker.time', time=mocker.MagicMock(side_effect=range(n_iterations * 2))) - default_code_block_end = 'test_sub_tracker.py:26' + default_code_block_end = 'test_sub_tracker.py:29' for _ in range(n_iterations): with gput.SubTracker(code_block_name=code_block_name, sub_tracking_file=sub_tracking_file) as sub_tracker: if code_block_name is None: @@ -93,7 +96,7 @@ def get_format(request): yield request.param -def test_analysis(format_): +def test_analysis(format_: str): folder = 'tests/data/sub-tracking-results' tracking_file = f'{folder}/tracking.{format_}' sub_tracking_file = f'{folder}/sub-tracking.{format_}' @@ -106,3 +109,29 @@ def test_analysis(format_): with open(f'{folder}/results.txt', 'r') as file: expected_str_results = file.read() assert expected_str_results == str(actual_results) + + +def test_combine(format_: str): + folder = 'tests/data/sub-tracking-results' + files = [f'{folder}/files-to-combine/{name}' for name in os.listdir(f'{folder}/files-to-combine') if name.endswith(format_)] + with pt.raises(ValueError) as error: + wrong_extension = "csv" if format_ == "sqlite" else "sqlite" + invalid_file = f'wrong-extension.{wrong_extension}' + analyzer = 
gput.SubTrackingAnalyzer(None, invalid_file) + analyzer.combine_sub_tracking_files(files) + assert str(error.value) == f'File {files[0]} does not end with the same extension as {invalid_file}. Must end in ".{wrong_extension}".' + sub_tracking_file = f'combined.{format_}' + analyzer = gput.SubTrackingAnalyzer(None, sub_tracking_file) + analyzer.combine_sub_tracking_files(files) + expected_path = f'{folder}/sub-tracking.{format_}' + if format_ == 'csv': + expected_results = pd.read_csv(expected_path) + actual_results = pd.read_csv(sub_tracking_file) + else: + expected_results = pd.read_sql('data', sqlalc.create_engine(f'sqlite:///{expected_path}')) + actual_results = pd.read_sql('data', sqlalc.create_engine(f'sqlite:///{sub_tracking_file}')) + pd.testing.assert_frame_equal(expected_results, actual_results, atol=1e-10, rtol=1e-10) + with pt.raises(ValueError) as error: + analyzer.combine_sub_tracking_files(files) + assert str(error.value) == f'Cannot create sub-tracking file {sub_tracking_file}. File already exists.' 
+ os.remove(sub_tracking_file) From 530abcadaf47816cbd876a48f17d2249fbb81632 Mon Sep 17 00:00:00 2001 From: erikhuck Date: Mon, 19 May 2025 18:29:21 -0400 Subject: [PATCH 6/7] Completes tests for the next release --- .github/workflows/tests.yml | 2 +- requirements.txt | 1 + src/gpu_tracker/_helper_classes.py | 4 +- src/gpu_tracker/sub_tracker.py | 12 +- src/gpu_tracker/tracker.py | 5 + .../sub-tracking-results/comparison_max.json | 1 + .../sub-tracking-results/comparison_max.txt | 351 ++++++++++++++++++ .../sub-tracking-results/comparison_mean.json | 1 + .../sub-tracking-results/comparison_mean.txt | 351 ++++++++++++++++++ .../sub-tracking-results/comparison_min.json | 1 + .../sub-tracking-results/comparison_min.txt | 351 ++++++++++++++++++ .../sub-tracking-results/comparison_std.json | 1 + .../sub-tracking-results/comparison_std.txt | 351 ++++++++++++++++++ .../files-to-combine/main.sub-tracking.csv | 1 + tests/data/sub-tracking-results/invalid1.csv | 6 + tests/data/sub-tracking-results/invalid2.csv | 4 + tests/data/sub-tracking-results/results-A.pkl | Bin 11883 -> 12036 bytes tests/data/sub-tracking-results/results-B.pkl | Bin 11883 -> 12072 bytes tests/data/sub-tracking-results/results-C.pkl | Bin 0 -> 12084 bytes tests/data/sub-tracking-results/results-D.pkl | Bin 0 -> 12117 bytes tests/data/sub-tracking-results/results-E.pkl | Bin 0 -> 12117 bytes tests/data/sub-tracking-results/results-F.pkl | Bin 0 -> 3842 bytes tests/data/sub-tracking-results/results-G.pkl | Bin 0 -> 3842 bytes .../sub-tracking-results/sub-tracking.csv | 1 + tests/test_sub_tracker.py | 81 +++- tests/test_tracker.py | 22 +- tests/utils.py | 6 + 27 files changed, 1532 insertions(+), 21 deletions(-) create mode 100644 tests/data/sub-tracking-results/comparison_max.json create mode 100644 tests/data/sub-tracking-results/comparison_max.txt create mode 100644 tests/data/sub-tracking-results/comparison_mean.json create mode 100644 tests/data/sub-tracking-results/comparison_mean.txt create mode 
100644 tests/data/sub-tracking-results/comparison_min.json create mode 100644 tests/data/sub-tracking-results/comparison_min.txt create mode 100644 tests/data/sub-tracking-results/comparison_std.json create mode 100644 tests/data/sub-tracking-results/comparison_std.txt create mode 100644 tests/data/sub-tracking-results/invalid1.csv create mode 100644 tests/data/sub-tracking-results/invalid2.csv create mode 100644 tests/data/sub-tracking-results/results-C.pkl create mode 100644 tests/data/sub-tracking-results/results-D.pkl create mode 100644 tests/data/sub-tracking-results/results-E.pkl create mode 100644 tests/data/sub-tracking-results/results-F.pkl create mode 100644 tests/data/sub-tracking-results/results-G.pkl diff --git a/.github/workflows/tests.yml b/.github/workflows/tests.yml index e7bf827..77c876f 100644 --- a/.github/workflows/tests.yml +++ b/.github/workflows/tests.yml @@ -24,7 +24,7 @@ jobs: - name: Install testing environment run: | python3 -m pip install --upgrade pip - python3 -m pip install pytest pytest-mock pytest-cov + python3 -m pip install pytest pytest-mock pytest-cov deepdiff - name: Install package uses: Wandalen/wretry.action@master with: diff --git a/requirements.txt b/requirements.txt index 3fae670..b8f6ba3 100644 --- a/requirements.txt +++ b/requirements.txt @@ -2,3 +2,4 @@ psutil>=6.0.0 docopt>=0.6.2 pandas>=2.2.3 SQLAlchemy>=2.0.39 +tqdm>=4.67.1 diff --git a/src/gpu_tracker/_helper_classes.py b/src/gpu_tracker/_helper_classes.py index 1ae3188..5d91276 100644 --- a/src/gpu_tracker/_helper_classes.py +++ b/src/gpu_tracker/_helper_classes.py @@ -260,7 +260,7 @@ def load_timestamp_pairs(self, code_block_name: str) -> list[tuple[float, float] error_prefix = f'Sub-tracking file is invalid. 
Detected timestamp pair ({start_time}, {stop_time})' if pid1 != pid2: raise ValueError(f'{error_prefix} with differing process IDs: {pid1} and {pid2}.') - if start_time > stop_time: + if timestamp1.position > timestamp2.position: raise ValueError(f'{error_prefix} of process ID {pid1} with a start time greater than the stop time.') timestamp_pairs.append((start_time, stop_time)) return timestamp_pairs @@ -306,8 +306,6 @@ def timepoints(self): return self._timepoints def _write_static_data(self, data: _StaticData): - if self._file_name in _DataProxy._files_w_data: - raise RuntimeError('The static data for a CSV file must be created before the dynamic data.') static_data = dclass.asdict(data) self._create_table(static_data) self._write_data(static_data) diff --git a/src/gpu_tracker/sub_tracker.py b/src/gpu_tracker/sub_tracker.py index c8c0b8c..28b2858 100644 --- a/src/gpu_tracker/sub_tracker.py +++ b/src/gpu_tracker/sub_tracker.py @@ -204,9 +204,7 @@ def __init__(self, file_path_map: dict[str, str]): :param file_path_map: Mapping of the name of each tracking session to the path of the pickle file containing the ``SubTrackingResults`` of the corresponding tracking sessions. Used to construct the ``results_map`` attribute. :raises ValueError: Raised if the code block results of each tracking session don't match. """ - for name in file_path_map.keys(): - self._name1 = name - break + [self._name1] = sorted(file_path_map.keys())[:1] self.results_map = dict[str, SubTrackingResults]() for name, file in file_path_map.items(): with open(file, 'rb') as file: @@ -221,7 +219,7 @@ def __init__(self, file_path_map: dict[str, str]): code_block_results2 = TrackingComparison._sort_code_block_results(results) if len(code_block_results1) != len(code_block_results2): raise ValueError( - f'All sub-tracking results must have the same number of code blocks. First has {len(code_block_results1)}' + f'All sub-tracking results must have the same number of code blocks. 
The first has {len(code_block_results1)}' f' code blocks but tracking session "{name2}" has {len(code_block_results2)} code blocks.' ) for code_block_results1_, code_block_results2_ in zip(code_block_results1, code_block_results2): @@ -273,7 +271,7 @@ def compare(self, statistic: str = 'mean') -> ComparisonResults: ) code_block_compute_times = TrackingComparison._get_code_block_comparisons( self.results_map, lambda code_block_result: code_block_result.compute_time[statistic].item() - ) if results1.code_block_results else dict() + ) if results1.code_block_results else dict[str, pd.Series]() return ComparisonResults( overall_resource_usage=overall_resource_usages, code_block_resource_usage=code_block_resource_usages, code_block_compute_time=code_block_compute_times @@ -319,7 +317,9 @@ def _get_code_block_comparisons(name_to_results: dict[str, SubTrackingResults], ] for name, results in name_to_results.items() ] ): - code_block_name = f'{" -> ".join({code_block_results.name for _, code_block_results in matching_code_block_results})}' + code_block_name = f'{" -> ".join( + sorted({code_block_results.name for _, code_block_results in matching_code_block_results}) + )}' code_block_comparison = { name: get_statistic(code_block_results) for name, code_block_results in matching_code_block_results } diff --git a/src/gpu_tracker/tracker.py b/src/gpu_tracker/tracker.py index 273c3c3..f115a39 100644 --- a/src/gpu_tracker/tracker.py +++ b/src/gpu_tracker/tracker.py @@ -468,6 +468,11 @@ def to_json(self) -> dict[str, dict]: """ Constructs a dictionary of the computational-resource-usage measurements and their units. """ + if self.resource_usage is None: + raise RuntimeError( + 'Cannot display the tracker in string or JSON format before tracking completes. 
Exit the content manager or call the ' + 'stop() method before calling to_json() or str()' + ) return dclass.asdict(self.resource_usage) diff --git a/tests/data/sub-tracking-results/comparison_max.json b/tests/data/sub-tracking-results/comparison_max.json new file mode 100644 index 0000000..00aa5d1 --- /dev/null +++ b/tests/data/sub-tracking-results/comparison_max.json @@ -0,0 +1 @@ +{"overall_resource_usage": {"main_ram": {"A": 1.5764930560000001, "B": 1.9856220160000002}, "descendants_ram": {"B": 11.457716224, "A": 11.671842816000002}, "combined_ram": {"B": 11.522973696, "A": 11.736928256}, "system_ram": {"A": 60.485840896000006, "B": 61.126602752000004}, "main_gpu_ram": {"A": 0.0, "B": 0.0}, "descendants_gpu_ram": {"A": 0.0, "B": 0.0}, "combined_gpu_ram": {"A": 0.0, "B": 0.0}, "system_gpu_ram": {"A": 0.216, "B": 0.216}, "gpu_sum_utilization_percent": {"A": 0.0, "B": 0.0}, "gpu_hardware_utilization_percent": {"A": 0.0, "B": 0.0}, "main_n_threads": {"A": 15, "B": 15}, "descendants_n_threads": {"A": 36, "B": 36}, "combined_n_threads": {"A": 51, "B": 51}, "cpu_system_sum_utilization_percent": {"B": 486.1, "A": 535.8}, "cpu_system_hardware_utilization_percent": {"B": 40.50833333333333, "A": 44.65}, "cpu_main_sum_utilization_percent": {"B": 99.1, "A": 101.6}, "cpu_main_hardware_utilization_percent": {"B": 8.258333333333333, "A": 8.466666666666667}, "cpu_descendants_sum_utilization_percent": {"B": 284.70000000000005, "A": 300.0}, "cpu_descendants_hardware_utilization_percent": {"B": 23.725000000000005, "A": 25.0}, "cpu_combined_sum_utilization_percent": {"B": 284.70000000000005, "A": 300.0}, "cpu_combined_hardware_utilization_percent": {"B": 23.725000000000005, "A": 25.0}}, "code_block_resource_usage": {"main_ram": {"X": {"A": 0.08691712, "B": 0.087109632}, "Y": {"A": 1.5764930560000001, "B": 1.9856220160000002}, "tmp.py:7 -> tmp.py:9": {"B": 0.8887336960000001, "A": 1.21389056}, "tmp.py:37 -> tmp.py:38": {"A": 1.2588974080000002, "B": 1.2777676800000002}}, 
"descendants_ram": {"X": {"B": 11.457716224, "A": 11.671842816000002}, "Y": {"A": 0.0, "B": 0.0}, "tmp.py:7 -> tmp.py:9": {"A": 0.0, "B": 0.0}, "tmp.py:37 -> tmp.py:38": {"A": 0.0, "B": 0.0}}, "combined_ram": {"X": {"B": 11.522973696, "A": 11.736928256}, "Y": {"A": 1.5764930560000001, "B": 1.9856220160000002}, "tmp.py:7 -> tmp.py:9": {"B": 0.8887336960000001, "A": 1.21389056}, "tmp.py:37 -> tmp.py:38": {"A": 1.2588974080000002, "B": 1.2777676800000002}}, "system_ram": {"X": {"A": 60.485840896000006, "B": 61.126602752000004}, "Y": {"A": 50.91651584, "B": 53.325389824000005}, "tmp.py:7 -> tmp.py:9": {"A": 50.539143168, "B": 52.042661888000005}, "tmp.py:37 -> tmp.py:38": {"A": 51.044614144, "B": 52.36287488000001}}, "main_gpu_ram": {"X": {"A": 0.0, "B": 0.0}, "Y": {"A": 0.0, "B": 0.0}, "tmp.py:7 -> tmp.py:9": {"A": 0.0, "B": 0.0}, "tmp.py:37 -> tmp.py:38": {"A": 0.0, "B": 0.0}}, "descendants_gpu_ram": {"X": {"A": 0.0, "B": 0.0}, "Y": {"A": 0.0, "B": 0.0}, "tmp.py:7 -> tmp.py:9": {"A": 0.0, "B": 0.0}, "tmp.py:37 -> tmp.py:38": {"A": 0.0, "B": 0.0}}, "combined_gpu_ram": {"X": {"A": 0.0, "B": 0.0}, "Y": {"A": 0.0, "B": 0.0}, "tmp.py:7 -> tmp.py:9": {"A": 0.0, "B": 0.0}, "tmp.py:37 -> tmp.py:38": {"A": 0.0, "B": 0.0}}, "system_gpu_ram": {"X": {"A": 0.216, "B": 0.216}, "Y": {"A": 0.216, "B": 0.216}, "tmp.py:7 -> tmp.py:9": {"A": 0.216, "B": 0.216}, "tmp.py:37 -> tmp.py:38": {"A": 0.216, "B": 0.216}}, "gpu_sum_utilization_percent": {"X": {"A": 0.0, "B": 0.0}, "Y": {"A": 0.0, "B": 0.0}, "tmp.py:7 -> tmp.py:9": {"A": 0.0, "B": 0.0}, "tmp.py:37 -> tmp.py:38": {"A": 0.0, "B": 0.0}}, "gpu_hardware_utilization_percent": {"X": {"A": 0.0, "B": 0.0}, "Y": {"A": 0.0, "B": 0.0}, "tmp.py:7 -> tmp.py:9": {"A": 0.0, "B": 0.0}, "tmp.py:37 -> tmp.py:38": {"A": 0.0, "B": 0.0}}, "main_n_threads": {"X": {"A": 15.0, "B": 15.0}, "Y": {"A": 13.0, "B": 13.0}, "tmp.py:7 -> tmp.py:9": {"A": 13.0, "B": 13.0}, "tmp.py:37 -> tmp.py:38": {"A": 13.0, "B": 13.0}}, "descendants_n_threads": {"X": {"A": 
36.0, "B": 36.0}, "Y": {"A": 0.0, "B": 0.0}, "tmp.py:7 -> tmp.py:9": {"A": 0.0, "B": 0.0}, "tmp.py:37 -> tmp.py:38": {"A": 0.0, "B": 0.0}}, "combined_n_threads": {"X": {"A": 51.0, "B": 51.0}, "Y": {"A": 13.0, "B": 13.0}, "tmp.py:7 -> tmp.py:9": {"A": 13.0, "B": 13.0}, "tmp.py:37 -> tmp.py:38": {"A": 13.0, "B": 13.0}}, "cpu_system_sum_utilization_percent": {"X": {"B": 486.1, "A": 535.8}, "Y": {"B": 149.20000000000002, "A": 158.79999999999998}, "tmp.py:7 -> tmp.py:9": {"A": 115.69999999999999, "B": 116.5}, "tmp.py:37 -> tmp.py:38": {"B": 165.8, "A": 405.5}}, "cpu_system_hardware_utilization_percent": {"X": {"B": 40.50833333333333, "A": 44.65}, "Y": {"B": 12.433333333333335, "A": 13.233333333333333}, "tmp.py:7 -> tmp.py:9": {"A": 9.641666666666666, "B": 9.708333333333334}, "tmp.py:37 -> tmp.py:38": {"B": 13.816666666666668, "A": 33.791666666666664}}, "cpu_main_sum_utilization_percent": {"X": {"A": 0.0, "B": 0.0}, "Y": {"B": 99.1, "A": 101.6}, "tmp.py:7 -> tmp.py:9": {"A": 89.5, "B": 94.1}, "tmp.py:37 -> tmp.py:38": {"B": 97.7, "A": 97.9}}, "cpu_main_hardware_utilization_percent": {"X": {"A": 0.0, "B": 0.0}, "Y": {"B": 8.258333333333333, "A": 8.466666666666667}, "tmp.py:7 -> tmp.py:9": {"A": 7.458333333333333, "B": 7.841666666666666}, "tmp.py:37 -> tmp.py:38": {"B": 8.141666666666667, "A": 8.158333333333333}}, "cpu_descendants_sum_utilization_percent": {"X": {"B": 284.70000000000005, "A": 300.0}, "Y": {"A": 0.0, "B": 0.0}, "tmp.py:7 -> tmp.py:9": {"A": 0.0, "B": 0.0}, "tmp.py:37 -> tmp.py:38": {"A": 0.0, "B": 0.0}}, "cpu_descendants_hardware_utilization_percent": {"X": {"B": 23.725000000000005, "A": 25.0}, "Y": {"A": 0.0, "B": 0.0}, "tmp.py:7 -> tmp.py:9": {"A": 0.0, "B": 0.0}, "tmp.py:37 -> tmp.py:38": {"A": 0.0, "B": 0.0}}, "cpu_combined_sum_utilization_percent": {"X": {"B": 284.70000000000005, "A": 300.0}, "Y": {"B": 99.1, "A": 101.6}, "tmp.py:7 -> tmp.py:9": {"A": 89.5, "B": 94.1}, "tmp.py:37 -> tmp.py:38": {"B": 97.7, "A": 97.9}}, 
"cpu_combined_hardware_utilization_percent": {"X": {"B": 23.725000000000005, "A": 25.0}, "Y": {"B": 8.258333333333333, "A": 8.466666666666667}, "tmp.py:7 -> tmp.py:9": {"A": 7.458333333333333, "B": 7.841666666666666}, "tmp.py:37 -> tmp.py:38": {"B": 8.141666666666667, "A": 8.158333333333333}}}, "code_block_compute_time": {"X": {"A": 3.8075716495513916, "B": 3.8075716495513916}, "Y": {"A": 1.847283124923706, "B": 1.847283124923706}, "tmp.py:7 -> tmp.py:9": {"A": 1.0934793949127197, "B": 1.0934793949127197}, "tmp.py:37 -> tmp.py:38": {"A": 1.2065885066986084, "B": 1.2065885066986084}}} \ No newline at end of file diff --git a/tests/data/sub-tracking-results/comparison_max.txt b/tests/data/sub-tracking-results/comparison_max.txt new file mode 100644 index 0000000..c2d23da --- /dev/null +++ b/tests/data/sub-tracking-results/comparison_max.txt @@ -0,0 +1,351 @@ +Overall Resource Usage: + Main Ram: + A B + 1.57649306 1.98562202 + Descendants Ram: + B A + 11.45771622 11.67184282 + Combined Ram: + B A + 11.5229737 11.73692826 + System Ram: + A B + 60.4858409 61.12660275 + Main Gpu Ram: + A B + 0.0 0.0 + Descendants Gpu Ram: + A B + 0.0 0.0 + Combined Gpu Ram: + A B + 0.0 0.0 + System Gpu Ram: + A B + 0.216 0.216 + Gpu Sum Utilization Percent: + A B + 0.0 0.0 + Gpu Hardware Utilization Percent: + A B + 0.0 0.0 + Main N Threads: + A B + 15 15 + Descendants N Threads: + A B + 36 36 + Combined N Threads: + A B + 51 51 + Cpu System Sum Utilization Percent: + B A + 486.1 535.8 + Cpu System Hardware Utilization Percent: + B A + 40.50833333 44.65 + Cpu Main Sum Utilization Percent: + B A + 99.1 101.6 + Cpu Main Hardware Utilization Percent: + B A + 8.25833333 8.46666667 + Cpu Descendants Sum Utilization Percent: + B A + 284.7 300.0 + Cpu Descendants Hardware Utilization Percent: + B A + 23.725 25.0 + Cpu Combined Sum Utilization Percent: + B A + 284.7 300.0 + Cpu Combined Hardware Utilization Percent: + B A + 23.725 25.0 +Code Block Resource Usage: + Main Ram: + X: + A B + 
0.08691712 0.08710963 + Y: + A B + 1.57649306 1.98562202 + tmp.py:7 -> tmp.py:9: + B A + 0.8887337 1.21389056 + tmp.py:37 -> tmp.py:38: + A B + 1.25889741 1.27776768 + Descendants Ram: + X: + B A + 11.45771622 11.67184282 + Y: + A B + 0.0 0.0 + tmp.py:7 -> tmp.py:9: + A B + 0.0 0.0 + tmp.py:37 -> tmp.py:38: + A B + 0.0 0.0 + Combined Ram: + X: + B A + 11.5229737 11.73692826 + Y: + A B + 1.57649306 1.98562202 + tmp.py:7 -> tmp.py:9: + B A + 0.8887337 1.21389056 + tmp.py:37 -> tmp.py:38: + A B + 1.25889741 1.27776768 + System Ram: + X: + A B + 60.4858409 61.12660275 + Y: + A B + 50.91651584 53.32538982 + tmp.py:7 -> tmp.py:9: + A B + 50.53914317 52.04266189 + tmp.py:37 -> tmp.py:38: + A B + 51.04461414 52.36287488 + Main Gpu Ram: + X: + A B + 0.0 0.0 + Y: + A B + 0.0 0.0 + tmp.py:7 -> tmp.py:9: + A B + 0.0 0.0 + tmp.py:37 -> tmp.py:38: + A B + 0.0 0.0 + Descendants Gpu Ram: + X: + A B + 0.0 0.0 + Y: + A B + 0.0 0.0 + tmp.py:7 -> tmp.py:9: + A B + 0.0 0.0 + tmp.py:37 -> tmp.py:38: + A B + 0.0 0.0 + Combined Gpu Ram: + X: + A B + 0.0 0.0 + Y: + A B + 0.0 0.0 + tmp.py:7 -> tmp.py:9: + A B + 0.0 0.0 + tmp.py:37 -> tmp.py:38: + A B + 0.0 0.0 + System Gpu Ram: + X: + A B + 0.216 0.216 + Y: + A B + 0.216 0.216 + tmp.py:7 -> tmp.py:9: + A B + 0.216 0.216 + tmp.py:37 -> tmp.py:38: + A B + 0.216 0.216 + Gpu Sum Utilization Percent: + X: + A B + 0.0 0.0 + Y: + A B + 0.0 0.0 + tmp.py:7 -> tmp.py:9: + A B + 0.0 0.0 + tmp.py:37 -> tmp.py:38: + A B + 0.0 0.0 + Gpu Hardware Utilization Percent: + X: + A B + 0.0 0.0 + Y: + A B + 0.0 0.0 + tmp.py:7 -> tmp.py:9: + A B + 0.0 0.0 + tmp.py:37 -> tmp.py:38: + A B + 0.0 0.0 + Main N Threads: + X: + A B + 15.0 15.0 + Y: + A B + 13.0 13.0 + tmp.py:7 -> tmp.py:9: + A B + 13.0 13.0 + tmp.py:37 -> tmp.py:38: + A B + 13.0 13.0 + Descendants N Threads: + X: + A B + 36.0 36.0 + Y: + A B + 0.0 0.0 + tmp.py:7 -> tmp.py:9: + A B + 0.0 0.0 + tmp.py:37 -> tmp.py:38: + A B + 0.0 0.0 + Combined N Threads: + X: + A B + 51.0 51.0 + Y: + A B + 13.0 13.0 + 
tmp.py:7 -> tmp.py:9: + A B + 13.0 13.0 + tmp.py:37 -> tmp.py:38: + A B + 13.0 13.0 + Cpu System Sum Utilization Percent: + X: + B A + 486.1 535.8 + Y: + B A + 149.2 158.8 + tmp.py:7 -> tmp.py:9: + A B + 115.7 116.5 + tmp.py:37 -> tmp.py:38: + B A + 165.8 405.5 + Cpu System Hardware Utilization Percent: + X: + B A + 40.50833333 44.65 + Y: + B A + 12.43333333 13.23333333 + tmp.py:7 -> tmp.py:9: + A B + 9.64166667 9.70833333 + tmp.py:37 -> tmp.py:38: + B A + 13.81666667 33.79166667 + Cpu Main Sum Utilization Percent: + X: + A B + 0.0 0.0 + Y: + B A + 99.1 101.6 + tmp.py:7 -> tmp.py:9: + A B + 89.5 94.1 + tmp.py:37 -> tmp.py:38: + B A + 97.7 97.9 + Cpu Main Hardware Utilization Percent: + X: + A B + 0.0 0.0 + Y: + B A + 8.25833333 8.46666667 + tmp.py:7 -> tmp.py:9: + A B + 7.45833333 7.84166667 + tmp.py:37 -> tmp.py:38: + B A + 8.14166667 8.15833333 + Cpu Descendants Sum Utilization Percent: + X: + B A + 284.7 300.0 + Y: + A B + 0.0 0.0 + tmp.py:7 -> tmp.py:9: + A B + 0.0 0.0 + tmp.py:37 -> tmp.py:38: + A B + 0.0 0.0 + Cpu Descendants Hardware Utilization Percent: + X: + B A + 23.725 25.0 + Y: + A B + 0.0 0.0 + tmp.py:7 -> tmp.py:9: + A B + 0.0 0.0 + tmp.py:37 -> tmp.py:38: + A B + 0.0 0.0 + Cpu Combined Sum Utilization Percent: + X: + B A + 284.7 300.0 + Y: + B A + 99.1 101.6 + tmp.py:7 -> tmp.py:9: + A B + 89.5 94.1 + tmp.py:37 -> tmp.py:38: + B A + 97.7 97.9 + Cpu Combined Hardware Utilization Percent: + X: + B A + 23.725 25.0 + Y: + B A + 8.25833333 8.46666667 + tmp.py:7 -> tmp.py:9: + A B + 7.45833333 7.84166667 + tmp.py:37 -> tmp.py:38: + B A + 8.14166667 8.15833333 +Code Block Compute Time: + X: + A B + 3.80757165 3.80757165 + Y: + A B + 1.84728312 1.84728312 + tmp.py:7 -> tmp.py:9: + A B + 1.09347939 1.09347939 + tmp.py:37 -> tmp.py:38: + A B + 1.20658851 1.20658851 diff --git a/tests/data/sub-tracking-results/comparison_mean.json b/tests/data/sub-tracking-results/comparison_mean.json new file mode 100644 index 0000000..ef90c75 --- /dev/null +++ 
b/tests/data/sub-tracking-results/comparison_mean.json @@ -0,0 +1 @@ +{"overall_resource_usage": {"main_ram": {"A": 0.6006015590399999, "B": 0.61011197952}, "descendants_ram": {"B": 2.43691864064, "A": 2.50271186944}, "combined_ram": {"B": 3.036548300799999, "A": 3.09283651584}, "system_ram": {"A": 52.59960434688001, "B": 53.58121992192}, "main_gpu_ram": {"A": 0.0, "B": 0.0}, "descendants_gpu_ram": {"A": 0.0, "B": 0.0}, "combined_gpu_ram": {"A": 0.0, "B": 0.0}, "system_gpu_ram": {"A": 0.2160000000000001, "B": 0.2160000000000001}, "gpu_sum_utilization_percent": {"A": 0.0, "B": 0.0}, "gpu_hardware_utilization_percent": {"A": 0.0, "B": 0.0}, "main_n_threads": {"A": 13.96, "B": 13.96}, "descendants_n_threads": {"A": 17.28, "B": 17.28}, "combined_n_threads": {"A": 31.24, "B": 31.24}, "cpu_system_sum_utilization_percent": {"B": 230.76, "A": 242.06}, "cpu_system_hardware_utilization_percent": {"B": 19.230000000000004, "A": 20.17166666666667}, "cpu_main_sum_utilization_percent": {"A": 48.172, "B": 48.72}, "cpu_main_hardware_utilization_percent": {"A": 4.014333333333333, "B": 4.06}, "cpu_descendants_sum_utilization_percent": {"B": 110.17999999999998, "A": 111.512}, "cpu_descendants_hardware_utilization_percent": {"B": 9.181666666666667, "A": 9.292666666666667}, "cpu_combined_sum_utilization_percent": {"B": 158.89999999999998, "A": 159.68400000000003}, "cpu_combined_hardware_utilization_percent": {"B": 13.241666666666669, "A": 13.307000000000002}}, "code_block_resource_usage": {"main_ram": {"X": {"A": 0.086916096, "B": 0.08710894933333334}, "Y": {"A": 1.028769792, "B": 1.1594874880000001}, "tmp.py:7 -> tmp.py:9": {"B": 0.8887336960000001, "A": 1.21389056}, "tmp.py:37 -> tmp.py:38": {"B": 1.0999783424000003, "A": 1.1211156480000002}}, "descendants_ram": {"X": {"B": 5.076913834666667, "A": 5.213983061333333}, "Y": {"A": 0.0, "B": 0.0}, "tmp.py:7 -> tmp.py:9": {"A": 0.0, "B": 0.0}, "tmp.py:37 -> tmp.py:38": {"A": 0.0, "B": 0.0}}, "combined_ram": {"X": {"B": 5.142184618666666, 
"A": 5.279072256000001}, "Y": {"A": 1.028769792, "B": 1.1594874880000001}, "tmp.py:7 -> tmp.py:9": {"B": 0.8887336960000001, "A": 1.21389056}, "tmp.py:37 -> tmp.py:38": {"B": 1.0999783424000003, "A": 1.1211156480000002}}, "system_ram": {"X": {"A": 54.712742229333344, "B": 55.171221503999995}, "Y": {"A": 50.42655232, "B": 52.71240704}, "tmp.py:7 -> tmp.py:9": {"A": 50.539143168, "B": 52.042661888000005}, "tmp.py:37 -> tmp.py:38": {"A": 50.689142272, "B": 52.000836403200005}}, "main_gpu_ram": {"X": {"A": 0.0, "B": 0.0}, "Y": {"A": 0.0, "B": 0.0}, "tmp.py:7 -> tmp.py:9": {"A": 0.0, "B": 0.0}, "tmp.py:37 -> tmp.py:38": {"A": 0.0, "B": 0.0}}, "descendants_gpu_ram": {"X": {"A": 0.0, "B": 0.0}, "Y": {"A": 0.0, "B": 0.0}, "tmp.py:7 -> tmp.py:9": {"A": 0.0, "B": 0.0}, "tmp.py:37 -> tmp.py:38": {"A": 0.0, "B": 0.0}}, "combined_gpu_ram": {"X": {"A": 0.0, "B": 0.0}, "Y": {"A": 0.0, "B": 0.0}, "tmp.py:7 -> tmp.py:9": {"A": 0.0, "B": 0.0}, "tmp.py:37 -> tmp.py:38": {"A": 0.0, "B": 0.0}}, "system_gpu_ram": {"X": {"A": 0.21600000000000005, "B": 0.21600000000000005}, "Y": {"A": 0.216, "B": 0.216}, "tmp.py:7 -> tmp.py:9": {"A": 0.216, "B": 0.216}, "tmp.py:37 -> tmp.py:38": {"A": 0.216, "B": 0.21600000000000003}}, "gpu_sum_utilization_percent": {"X": {"A": 0.0, "B": 0.0}, "Y": {"A": 0.0, "B": 0.0}, "tmp.py:7 -> tmp.py:9": {"A": 0.0, "B": 0.0}, "tmp.py:37 -> tmp.py:38": {"A": 0.0, "B": 0.0}}, "gpu_hardware_utilization_percent": {"X": {"A": 0.0, "B": 0.0}, "Y": {"A": 0.0, "B": 0.0}, "tmp.py:7 -> tmp.py:9": {"A": 0.0, "B": 0.0}, "tmp.py:37 -> tmp.py:38": {"A": 0.0, "B": 0.0}}, "main_n_threads": {"X": {"A": 15.0, "B": 15.0}, "Y": {"A": 13.0, "B": 13.0}, "tmp.py:7 -> tmp.py:9": {"A": 13.0, "B": 13.0}, "tmp.py:37 -> tmp.py:38": {"A": 13.0, "B": 13.0}}, "descendants_n_threads": {"X": {"A": 36.0, "B": 36.0}, "Y": {"A": 0.0, "B": 0.0}, "tmp.py:7 -> tmp.py:9": {"A": 0.0, "B": 0.0}, "tmp.py:37 -> tmp.py:38": {"A": 0.0, "B": 0.0}}, "combined_n_threads": {"X": {"A": 51.0, "B": 51.0}, "Y": {"A": 
13.0, "B": 13.0}, "tmp.py:7 -> tmp.py:9": {"A": 13.0, "B": 13.0}, "tmp.py:37 -> tmp.py:38": {"A": 13.0, "B": 13.0}}, "cpu_system_sum_utilization_percent": {"X": {"A": 321.575, "B": 330.06666666666666}, "Y": {"B": 144.35000000000002, "A": 152.05}, "tmp.py:7 -> tmp.py:9": {"A": 115.69999999999999, "B": 116.5}, "tmp.py:37 -> tmp.py:38": {"B": 140.3, "A": 190.02499999999998}}, "cpu_system_hardware_utilization_percent": {"X": {"A": 26.797916666666666, "B": 27.50555555555556}, "Y": {"B": 12.029166666666669, "A": 12.670833333333334}, "tmp.py:7 -> tmp.py:9": {"A": 9.641666666666666, "B": 9.708333333333334}, "tmp.py:37 -> tmp.py:38": {"B": 11.691666666666666, "A": 15.835416666666665}}, "cpu_main_sum_utilization_percent": {"X": {"A": 0.0, "B": 0.0}, "Y": {"B": 92.85, "A": 93.6}, "tmp.py:7 -> tmp.py:9": {"A": 89.5, "B": 94.1}, "tmp.py:37 -> tmp.py:38": {"A": 93.625, "B": 93.82}}, "cpu_main_hardware_utilization_percent": {"X": {"A": 0.0, "B": 0.0}, "Y": {"B": 7.737499999999999, "A": 7.8}, "tmp.py:7 -> tmp.py:9": {"A": 7.458333333333333, "B": 7.841666666666666}, "tmp.py:37 -> tmp.py:38": {"A": 7.802083333333333, "B": 7.818333333333333}}, "cpu_descendants_sum_utilization_percent": {"X": {"B": 229.54166666666663, "A": 232.3166666666667}, "Y": {"A": 0.0, "B": 0.0}, "tmp.py:7 -> tmp.py:9": {"A": 0.0, "B": 0.0}, "tmp.py:37 -> tmp.py:38": {"A": 0.0, "B": 0.0}}, "cpu_descendants_hardware_utilization_percent": {"X": {"B": 19.128472222222225, "A": 19.35972222222222}, "Y": {"A": 0.0, "B": 0.0}, "tmp.py:7 -> tmp.py:9": {"A": 0.0, "B": 0.0}, "tmp.py:37 -> tmp.py:38": {"A": 0.0, "B": 0.0}}, "cpu_combined_sum_utilization_percent": {"X": {"B": 229.54166666666663, "A": 232.3166666666667}, "Y": {"B": 92.85, "A": 93.6}, "tmp.py:7 -> tmp.py:9": {"A": 89.5, "B": 94.1}, "tmp.py:37 -> tmp.py:38": {"A": 93.625, "B": 93.82}}, "cpu_combined_hardware_utilization_percent": {"X": {"B": 19.128472222222225, "A": 19.35972222222222}, "Y": {"B": 7.737499999999999, "A": 7.8}, "tmp.py:7 -> tmp.py:9": {"A": 
7.458333333333333, "B": 7.841666666666666}, "tmp.py:37 -> tmp.py:38": {"A": 7.802083333333333, "B": 7.818333333333333}}}, "code_block_compute_time": {"X": {"A": 3.735934352874756, "B": 3.735934352874756}, "Y": {"A": 1.847283124923706, "B": 1.847283124923706}, "tmp.py:7 -> tmp.py:9": {"A": 1.0934793949127197, "B": 1.0934793949127197}, "tmp.py:37 -> tmp.py:38": {"A": 1.1289910554885865, "B": 1.1289910554885865}}} \ No newline at end of file diff --git a/tests/data/sub-tracking-results/comparison_mean.txt b/tests/data/sub-tracking-results/comparison_mean.txt new file mode 100644 index 0000000..793ea82 --- /dev/null +++ b/tests/data/sub-tracking-results/comparison_mean.txt @@ -0,0 +1,351 @@ +Overall Resource Usage: + Main Ram: + A B + 0.60060156 0.61011198 + Descendants Ram: + B A + 2.43691864 2.50271187 + Combined Ram: + B A + 3.0365483 3.09283652 + System Ram: + A B + 52.59960435 53.58121992 + Main Gpu Ram: + A B + 0.0 0.0 + Descendants Gpu Ram: + A B + 0.0 0.0 + Combined Gpu Ram: + A B + 0.0 0.0 + System Gpu Ram: + A B + 0.216 0.216 + Gpu Sum Utilization Percent: + A B + 0.0 0.0 + Gpu Hardware Utilization Percent: + A B + 0.0 0.0 + Main N Threads: + A B + 13.96 13.96 + Descendants N Threads: + A B + 17.28 17.28 + Combined N Threads: + A B + 31.24 31.24 + Cpu System Sum Utilization Percent: + B A + 230.76 242.06 + Cpu System Hardware Utilization Percent: + B A + 19.23 20.17166667 + Cpu Main Sum Utilization Percent: + A B + 48.172 48.72 + Cpu Main Hardware Utilization Percent: + A B + 4.01433333 4.06 + Cpu Descendants Sum Utilization Percent: + B A + 110.18 111.512 + Cpu Descendants Hardware Utilization Percent: + B A + 9.18166667 9.29266667 + Cpu Combined Sum Utilization Percent: + B A + 158.9 159.684 + Cpu Combined Hardware Utilization Percent: + B A + 13.24166667 13.307 +Code Block Resource Usage: + Main Ram: + X: + A B + 0.0869161 0.08710895 + Y: + A B + 1.02876979 1.15948749 + tmp.py:7 -> tmp.py:9: + B A + 0.8887337 1.21389056 + tmp.py:37 -> tmp.py:38: + B A + 
1.09997834 1.12111565 + Descendants Ram: + X: + B A + 5.07691383 5.21398306 + Y: + A B + 0.0 0.0 + tmp.py:7 -> tmp.py:9: + A B + 0.0 0.0 + tmp.py:37 -> tmp.py:38: + A B + 0.0 0.0 + Combined Ram: + X: + B A + 5.14218462 5.27907226 + Y: + A B + 1.02876979 1.15948749 + tmp.py:7 -> tmp.py:9: + B A + 0.8887337 1.21389056 + tmp.py:37 -> tmp.py:38: + B A + 1.09997834 1.12111565 + System Ram: + X: + A B + 54.71274223 55.1712215 + Y: + A B + 50.42655232 52.71240704 + tmp.py:7 -> tmp.py:9: + A B + 50.53914317 52.04266189 + tmp.py:37 -> tmp.py:38: + A B + 50.68914227 52.0008364 + Main Gpu Ram: + X: + A B + 0.0 0.0 + Y: + A B + 0.0 0.0 + tmp.py:7 -> tmp.py:9: + A B + 0.0 0.0 + tmp.py:37 -> tmp.py:38: + A B + 0.0 0.0 + Descendants Gpu Ram: + X: + A B + 0.0 0.0 + Y: + A B + 0.0 0.0 + tmp.py:7 -> tmp.py:9: + A B + 0.0 0.0 + tmp.py:37 -> tmp.py:38: + A B + 0.0 0.0 + Combined Gpu Ram: + X: + A B + 0.0 0.0 + Y: + A B + 0.0 0.0 + tmp.py:7 -> tmp.py:9: + A B + 0.0 0.0 + tmp.py:37 -> tmp.py:38: + A B + 0.0 0.0 + System Gpu Ram: + X: + A B + 0.216 0.216 + Y: + A B + 0.216 0.216 + tmp.py:7 -> tmp.py:9: + A B + 0.216 0.216 + tmp.py:37 -> tmp.py:38: + A B + 0.216 0.216 + Gpu Sum Utilization Percent: + X: + A B + 0.0 0.0 + Y: + A B + 0.0 0.0 + tmp.py:7 -> tmp.py:9: + A B + 0.0 0.0 + tmp.py:37 -> tmp.py:38: + A B + 0.0 0.0 + Gpu Hardware Utilization Percent: + X: + A B + 0.0 0.0 + Y: + A B + 0.0 0.0 + tmp.py:7 -> tmp.py:9: + A B + 0.0 0.0 + tmp.py:37 -> tmp.py:38: + A B + 0.0 0.0 + Main N Threads: + X: + A B + 15.0 15.0 + Y: + A B + 13.0 13.0 + tmp.py:7 -> tmp.py:9: + A B + 13.0 13.0 + tmp.py:37 -> tmp.py:38: + A B + 13.0 13.0 + Descendants N Threads: + X: + A B + 36.0 36.0 + Y: + A B + 0.0 0.0 + tmp.py:7 -> tmp.py:9: + A B + 0.0 0.0 + tmp.py:37 -> tmp.py:38: + A B + 0.0 0.0 + Combined N Threads: + X: + A B + 51.0 51.0 + Y: + A B + 13.0 13.0 + tmp.py:7 -> tmp.py:9: + A B + 13.0 13.0 + tmp.py:37 -> tmp.py:38: + A B + 13.0 13.0 + Cpu System Sum Utilization Percent: + X: + A B + 321.575 
330.06666667 + Y: + B A + 144.35 152.05 + tmp.py:7 -> tmp.py:9: + A B + 115.7 116.5 + tmp.py:37 -> tmp.py:38: + B A + 140.3 190.025 + Cpu System Hardware Utilization Percent: + X: + A B + 26.79791667 27.50555556 + Y: + B A + 12.02916667 12.67083333 + tmp.py:7 -> tmp.py:9: + A B + 9.64166667 9.70833333 + tmp.py:37 -> tmp.py:38: + B A + 11.69166667 15.83541667 + Cpu Main Sum Utilization Percent: + X: + A B + 0.0 0.0 + Y: + B A + 92.85 93.6 + tmp.py:7 -> tmp.py:9: + A B + 89.5 94.1 + tmp.py:37 -> tmp.py:38: + A B + 93.625 93.82 + Cpu Main Hardware Utilization Percent: + X: + A B + 0.0 0.0 + Y: + B A + 7.7375 7.8 + tmp.py:7 -> tmp.py:9: + A B + 7.45833333 7.84166667 + tmp.py:37 -> tmp.py:38: + A B + 7.80208333 7.81833333 + Cpu Descendants Sum Utilization Percent: + X: + B A + 229.54166667 232.31666667 + Y: + A B + 0.0 0.0 + tmp.py:7 -> tmp.py:9: + A B + 0.0 0.0 + tmp.py:37 -> tmp.py:38: + A B + 0.0 0.0 + Cpu Descendants Hardware Utilization Percent: + X: + B A + 19.12847222 19.35972222 + Y: + A B + 0.0 0.0 + tmp.py:7 -> tmp.py:9: + A B + 0.0 0.0 + tmp.py:37 -> tmp.py:38: + A B + 0.0 0.0 + Cpu Combined Sum Utilization Percent: + X: + B A + 229.54166667 232.31666667 + Y: + B A + 92.85 93.6 + tmp.py:7 -> tmp.py:9: + A B + 89.5 94.1 + tmp.py:37 -> tmp.py:38: + A B + 93.625 93.82 + Cpu Combined Hardware Utilization Percent: + X: + B A + 19.12847222 19.35972222 + Y: + B A + 7.7375 7.8 + tmp.py:7 -> tmp.py:9: + A B + 7.45833333 7.84166667 + tmp.py:37 -> tmp.py:38: + A B + 7.80208333 7.81833333 +Code Block Compute Time: + X: + A B + 3.73593435 3.73593435 + Y: + A B + 1.84728312 1.84728312 + tmp.py:7 -> tmp.py:9: + A B + 1.09347939 1.09347939 + tmp.py:37 -> tmp.py:38: + A B + 1.12899106 1.12899106 diff --git a/tests/data/sub-tracking-results/comparison_min.json b/tests/data/sub-tracking-results/comparison_min.json new file mode 100644 index 0000000..ddbdebc --- /dev/null +++ b/tests/data/sub-tracking-results/comparison_min.json @@ -0,0 +1 @@ +{"overall_resource_usage": 
{"main_ram": {"A": 0.086913024, "B": 0.087105536}, "descendants_ram": {"A": 0.0, "B": 0.0}, "combined_ram": {"B": 0.277377024, "A": 0.27824128000000004}, "system_ram": {"A": 49.029361664, "B": 49.073319936000004}, "main_gpu_ram": {"A": 0.0, "B": 0.0}, "descendants_gpu_ram": {"A": 0.0, "B": 0.0}, "combined_gpu_ram": {"A": 0.0, "B": 0.0}, "system_gpu_ram": {"A": 0.216, "B": 0.216}, "gpu_sum_utilization_percent": {"A": 0.0, "B": 0.0}, "gpu_hardware_utilization_percent": {"A": 0.0, "B": 0.0}, "main_n_threads": {"A": 13, "B": 13}, "descendants_n_threads": {"A": 0, "B": 0}, "combined_n_threads": {"A": 13, "B": 13}, "cpu_system_sum_utilization_percent": {"A": 115.10000000000001, "B": 116.5}, "cpu_system_hardware_utilization_percent": {"A": 9.591666666666667, "B": 9.708333333333334}, "cpu_main_sum_utilization_percent": {"A": 0.0, "B": 0.0}, "cpu_main_hardware_utilization_percent": {"A": 0.0, "B": 0.0}, "cpu_descendants_sum_utilization_percent": {"A": 0.0, "B": 0.0}, "cpu_descendants_hardware_utilization_percent": {"A": 0.0, "B": 0.0}, "cpu_combined_sum_utilization_percent": {"A": 85.6, "B": 86.6}, "cpu_combined_hardware_utilization_percent": {"A": 7.133333333333333, "B": 7.216666666666666}}, "code_block_resource_usage": {"main_ram": {"X": {"A": 0.086913024, "B": 0.087105536}, "Y": {"B": 0.33335296000000003, "A": 0.48104652800000003}, "tmp.py:7 -> tmp.py:9": {"B": 0.8887336960000001, "A": 1.21389056}, "tmp.py:37 -> tmp.py:38": {"A": 0.861704192, "B": 0.8678359040000001}}, "descendants_ram": {"X": {"B": 0.211996672, "A": 0.21314764800000002}, "Y": {"A": 0.0, "B": 0.0}, "tmp.py:7 -> tmp.py:9": {"A": 0.0, "B": 0.0}, "tmp.py:37 -> tmp.py:38": {"A": 0.0, "B": 0.0}}, "combined_ram": {"X": {"B": 0.277377024, "A": 0.27824128000000004}, "Y": {"B": 0.33335296000000003, "A": 0.48104652800000003}, "tmp.py:7 -> tmp.py:9": {"B": 0.8887336960000001, "A": 1.21389056}, "tmp.py:37 -> tmp.py:38": {"A": 0.861704192, "B": 0.8678359040000001}}, "system_ram": {"X": {"A": 49.029361664, "B": 
49.073319936000004}, "Y": {"A": 49.9365888, "B": 52.099424256000006}, "tmp.py:7 -> tmp.py:9": {"A": 50.539143168, "B": 52.042661888000005}, "tmp.py:37 -> tmp.py:38": {"A": 50.392174592, "B": 51.668799488000005}}, "main_gpu_ram": {"X": {"A": 0.0, "B": 0.0}, "Y": {"A": 0.0, "B": 0.0}, "tmp.py:7 -> tmp.py:9": {"A": 0.0, "B": 0.0}, "tmp.py:37 -> tmp.py:38": {"A": 0.0, "B": 0.0}}, "descendants_gpu_ram": {"X": {"A": 0.0, "B": 0.0}, "Y": {"A": 0.0, "B": 0.0}, "tmp.py:7 -> tmp.py:9": {"A": 0.0, "B": 0.0}, "tmp.py:37 -> tmp.py:38": {"A": 0.0, "B": 0.0}}, "combined_gpu_ram": {"X": {"A": 0.0, "B": 0.0}, "Y": {"A": 0.0, "B": 0.0}, "tmp.py:7 -> tmp.py:9": {"A": 0.0, "B": 0.0}, "tmp.py:37 -> tmp.py:38": {"A": 0.0, "B": 0.0}}, "system_gpu_ram": {"X": {"A": 0.216, "B": 0.216}, "Y": {"A": 0.216, "B": 0.216}, "tmp.py:7 -> tmp.py:9": {"A": 0.216, "B": 0.216}, "tmp.py:37 -> tmp.py:38": {"A": 0.216, "B": 0.216}}, "gpu_sum_utilization_percent": {"X": {"A": 0.0, "B": 0.0}, "Y": {"A": 0.0, "B": 0.0}, "tmp.py:7 -> tmp.py:9": {"A": 0.0, "B": 0.0}, "tmp.py:37 -> tmp.py:38": {"A": 0.0, "B": 0.0}}, "gpu_hardware_utilization_percent": {"X": {"A": 0.0, "B": 0.0}, "Y": {"A": 0.0, "B": 0.0}, "tmp.py:7 -> tmp.py:9": {"A": 0.0, "B": 0.0}, "tmp.py:37 -> tmp.py:38": {"A": 0.0, "B": 0.0}}, "main_n_threads": {"X": {"A": 15.0, "B": 15.0}, "Y": {"A": 13.0, "B": 13.0}, "tmp.py:7 -> tmp.py:9": {"A": 13.0, "B": 13.0}, "tmp.py:37 -> tmp.py:38": {"A": 13.0, "B": 13.0}}, "descendants_n_threads": {"X": {"A": 36.0, "B": 36.0}, "Y": {"A": 0.0, "B": 0.0}, "tmp.py:7 -> tmp.py:9": {"A": 0.0, "B": 0.0}, "tmp.py:37 -> tmp.py:38": {"A": 0.0, "B": 0.0}}, "combined_n_threads": {"X": {"A": 51.0, "B": 51.0}, "Y": {"A": 13.0, "B": 13.0}, "tmp.py:7 -> tmp.py:9": {"A": 13.0, "B": 13.0}, "tmp.py:37 -> tmp.py:38": {"A": 13.0, "B": 13.0}}, "cpu_system_sum_utilization_percent": {"X": {"A": 122.60000000000001, "B": 157.3}, "Y": {"B": 139.5, "A": 145.3}, "tmp.py:7 -> tmp.py:9": {"A": 115.69999999999999, "B": 116.5}, "tmp.py:37 -> 
tmp.py:38": {"A": 115.10000000000001, "B": 128.0}}, "cpu_system_hardware_utilization_percent": {"X": {"A": 10.216666666666667, "B": 13.108333333333334}, "Y": {"B": 11.625, "A": 12.108333333333334}, "tmp.py:7 -> tmp.py:9": {"A": 9.641666666666666, "B": 9.708333333333334}, "tmp.py:37 -> tmp.py:38": {"A": 9.591666666666667, "B": 10.666666666666666}}, "cpu_main_sum_utilization_percent": {"X": {"A": 0.0, "B": 0.0}, "Y": {"A": 85.6, "B": 86.6}, "tmp.py:7 -> tmp.py:9": {"A": 89.5, "B": 94.1}, "tmp.py:37 -> tmp.py:38": {"B": 89.6, "A": 90.4}}, "cpu_main_hardware_utilization_percent": {"X": {"A": 0.0, "B": 0.0}, "Y": {"A": 7.133333333333333, "B": 7.216666666666666}, "tmp.py:7 -> tmp.py:9": {"A": 7.458333333333333, "B": 7.841666666666666}, "tmp.py:37 -> tmp.py:38": {"B": 7.466666666666666, "A": 7.533333333333334}}, "cpu_descendants_sum_utilization_percent": {"X": {"B": 91.1, "A": 91.4}, "Y": {"A": 0.0, "B": 0.0}, "tmp.py:7 -> tmp.py:9": {"A": 0.0, "B": 0.0}, "tmp.py:37 -> tmp.py:38": {"A": 0.0, "B": 0.0}}, "cpu_descendants_hardware_utilization_percent": {"X": {"B": 7.591666666666666, "A": 7.616666666666667}, "Y": {"A": 0.0, "B": 0.0}, "tmp.py:7 -> tmp.py:9": {"A": 0.0, "B": 0.0}, "tmp.py:37 -> tmp.py:38": {"A": 0.0, "B": 0.0}}, "cpu_combined_sum_utilization_percent": {"X": {"B": 91.1, "A": 91.4}, "Y": {"A": 85.6, "B": 86.6}, "tmp.py:7 -> tmp.py:9": {"A": 89.5, "B": 94.1}, "tmp.py:37 -> tmp.py:38": {"B": 89.6, "A": 90.4}}, "cpu_combined_hardware_utilization_percent": {"X": {"B": 7.591666666666666, "A": 7.616666666666667}, "Y": {"A": 7.133333333333333, "B": 7.216666666666666}, "tmp.py:7 -> tmp.py:9": {"A": 7.458333333333333, "B": 7.841666666666666}, "tmp.py:37 -> tmp.py:38": {"B": 7.466666666666666, "A": 7.533333333333334}}}, "code_block_compute_time": {"X": {"A": 3.612563133239746, "B": 3.612563133239746}, "Y": {"A": 1.847283124923706, "B": 1.847283124923706}, "tmp.py:7 -> tmp.py:9": {"A": 1.0934793949127197, "B": 1.0934793949127197}, "tmp.py:37 -> tmp.py:38": {"A": 
1.0983691215515137, "B": 1.0983691215515137}}} \ No newline at end of file diff --git a/tests/data/sub-tracking-results/comparison_min.txt b/tests/data/sub-tracking-results/comparison_min.txt new file mode 100644 index 0000000..08bd1ad --- /dev/null +++ b/tests/data/sub-tracking-results/comparison_min.txt @@ -0,0 +1,351 @@ +Overall Resource Usage: + Main Ram: + A B + 0.08691302 0.08710554 + Descendants Ram: + A B + 0.0 0.0 + Combined Ram: + B A + 0.27737702 0.27824128 + System Ram: + A B + 49.02936166 49.07331994 + Main Gpu Ram: + A B + 0.0 0.0 + Descendants Gpu Ram: + A B + 0.0 0.0 + Combined Gpu Ram: + A B + 0.0 0.0 + System Gpu Ram: + A B + 0.216 0.216 + Gpu Sum Utilization Percent: + A B + 0.0 0.0 + Gpu Hardware Utilization Percent: + A B + 0.0 0.0 + Main N Threads: + A B + 13 13 + Descendants N Threads: + A B + 0 0 + Combined N Threads: + A B + 13 13 + Cpu System Sum Utilization Percent: + A B + 115.1 116.5 + Cpu System Hardware Utilization Percent: + A B + 9.59166667 9.70833333 + Cpu Main Sum Utilization Percent: + A B + 0.0 0.0 + Cpu Main Hardware Utilization Percent: + A B + 0.0 0.0 + Cpu Descendants Sum Utilization Percent: + A B + 0.0 0.0 + Cpu Descendants Hardware Utilization Percent: + A B + 0.0 0.0 + Cpu Combined Sum Utilization Percent: + A B + 85.6 86.6 + Cpu Combined Hardware Utilization Percent: + A B + 7.13333333 7.21666667 +Code Block Resource Usage: + Main Ram: + X: + A B + 0.08691302 0.08710554 + Y: + B A + 0.33335296 0.48104653 + tmp.py:7 -> tmp.py:9: + B A + 0.8887337 1.21389056 + tmp.py:37 -> tmp.py:38: + A B + 0.86170419 0.8678359 + Descendants Ram: + X: + B A + 0.21199667 0.21314765 + Y: + A B + 0.0 0.0 + tmp.py:7 -> tmp.py:9: + A B + 0.0 0.0 + tmp.py:37 -> tmp.py:38: + A B + 0.0 0.0 + Combined Ram: + X: + B A + 0.27737702 0.27824128 + Y: + B A + 0.33335296 0.48104653 + tmp.py:7 -> tmp.py:9: + B A + 0.8887337 1.21389056 + tmp.py:37 -> tmp.py:38: + A B + 0.86170419 0.8678359 + System Ram: + X: + A B + 49.02936166 49.07331994 + Y: + A B + 
49.9365888 52.09942426 + tmp.py:7 -> tmp.py:9: + A B + 50.53914317 52.04266189 + tmp.py:37 -> tmp.py:38: + A B + 50.39217459 51.66879949 + Main Gpu Ram: + X: + A B + 0.0 0.0 + Y: + A B + 0.0 0.0 + tmp.py:7 -> tmp.py:9: + A B + 0.0 0.0 + tmp.py:37 -> tmp.py:38: + A B + 0.0 0.0 + Descendants Gpu Ram: + X: + A B + 0.0 0.0 + Y: + A B + 0.0 0.0 + tmp.py:7 -> tmp.py:9: + A B + 0.0 0.0 + tmp.py:37 -> tmp.py:38: + A B + 0.0 0.0 + Combined Gpu Ram: + X: + A B + 0.0 0.0 + Y: + A B + 0.0 0.0 + tmp.py:7 -> tmp.py:9: + A B + 0.0 0.0 + tmp.py:37 -> tmp.py:38: + A B + 0.0 0.0 + System Gpu Ram: + X: + A B + 0.216 0.216 + Y: + A B + 0.216 0.216 + tmp.py:7 -> tmp.py:9: + A B + 0.216 0.216 + tmp.py:37 -> tmp.py:38: + A B + 0.216 0.216 + Gpu Sum Utilization Percent: + X: + A B + 0.0 0.0 + Y: + A B + 0.0 0.0 + tmp.py:7 -> tmp.py:9: + A B + 0.0 0.0 + tmp.py:37 -> tmp.py:38: + A B + 0.0 0.0 + Gpu Hardware Utilization Percent: + X: + A B + 0.0 0.0 + Y: + A B + 0.0 0.0 + tmp.py:7 -> tmp.py:9: + A B + 0.0 0.0 + tmp.py:37 -> tmp.py:38: + A B + 0.0 0.0 + Main N Threads: + X: + A B + 15.0 15.0 + Y: + A B + 13.0 13.0 + tmp.py:7 -> tmp.py:9: + A B + 13.0 13.0 + tmp.py:37 -> tmp.py:38: + A B + 13.0 13.0 + Descendants N Threads: + X: + A B + 36.0 36.0 + Y: + A B + 0.0 0.0 + tmp.py:7 -> tmp.py:9: + A B + 0.0 0.0 + tmp.py:37 -> tmp.py:38: + A B + 0.0 0.0 + Combined N Threads: + X: + A B + 51.0 51.0 + Y: + A B + 13.0 13.0 + tmp.py:7 -> tmp.py:9: + A B + 13.0 13.0 + tmp.py:37 -> tmp.py:38: + A B + 13.0 13.0 + Cpu System Sum Utilization Percent: + X: + A B + 122.6 157.3 + Y: + B A + 139.5 145.3 + tmp.py:7 -> tmp.py:9: + A B + 115.7 116.5 + tmp.py:37 -> tmp.py:38: + A B + 115.1 128.0 + Cpu System Hardware Utilization Percent: + X: + A B + 10.21666667 13.10833333 + Y: + B A + 11.625 12.10833333 + tmp.py:7 -> tmp.py:9: + A B + 9.64166667 9.70833333 + tmp.py:37 -> tmp.py:38: + A B + 9.59166667 10.66666667 + Cpu Main Sum Utilization Percent: + X: + A B + 0.0 0.0 + Y: + A B + 85.6 86.6 + tmp.py:7 -> 
tmp.py:9: + A B + 89.5 94.1 + tmp.py:37 -> tmp.py:38: + B A + 89.6 90.4 + Cpu Main Hardware Utilization Percent: + X: + A B + 0.0 0.0 + Y: + A B + 7.13333333 7.21666667 + tmp.py:7 -> tmp.py:9: + A B + 7.45833333 7.84166667 + tmp.py:37 -> tmp.py:38: + B A + 7.46666667 7.53333333 + Cpu Descendants Sum Utilization Percent: + X: + B A + 91.1 91.4 + Y: + A B + 0.0 0.0 + tmp.py:7 -> tmp.py:9: + A B + 0.0 0.0 + tmp.py:37 -> tmp.py:38: + A B + 0.0 0.0 + Cpu Descendants Hardware Utilization Percent: + X: + B A + 7.59166667 7.61666667 + Y: + A B + 0.0 0.0 + tmp.py:7 -> tmp.py:9: + A B + 0.0 0.0 + tmp.py:37 -> tmp.py:38: + A B + 0.0 0.0 + Cpu Combined Sum Utilization Percent: + X: + B A + 91.1 91.4 + Y: + A B + 85.6 86.6 + tmp.py:7 -> tmp.py:9: + A B + 89.5 94.1 + tmp.py:37 -> tmp.py:38: + B A + 89.6 90.4 + Cpu Combined Hardware Utilization Percent: + X: + B A + 7.59166667 7.61666667 + Y: + A B + 7.13333333 7.21666667 + tmp.py:7 -> tmp.py:9: + A B + 7.45833333 7.84166667 + tmp.py:37 -> tmp.py:38: + B A + 7.46666667 7.53333333 +Code Block Compute Time: + X: + A B + 3.61256313 3.61256313 + Y: + A B + 1.84728312 1.84728312 + tmp.py:7 -> tmp.py:9: + A B + 1.09347939 1.09347939 + tmp.py:37 -> tmp.py:38: + A B + 1.09836912 1.09836912 diff --git a/tests/data/sub-tracking-results/comparison_std.json b/tests/data/sub-tracking-results/comparison_std.json new file mode 100644 index 0000000..bead327 --- /dev/null +++ b/tests/data/sub-tracking-results/comparison_std.json @@ -0,0 +1 @@ +{"overall_resource_usage": {"main_ram": {"A": 0.5269852839221266, "B": 0.5615874566864882}, "descendants_ram": {"B": 3.784130954436396, "A": 3.8823366111559143}, "combined_ram": {"B": 3.4701130113065624, "A": 3.568347601613889}, "system_ram": {"B": 3.1267382538015376, "A": 3.467963700517139}, "main_gpu_ram": {"A": 0.0, "B": 0.0}, "descendants_gpu_ram": {"A": 0.0, "B": 0.0}, "combined_gpu_ram": {"A": 0.0, "B": 0.0}, "system_gpu_ram": {"A": 0.0, "B": 0.0}, "gpu_sum_utilization_percent": {"A": 0.0, "B": 0.0}, 
"gpu_hardware_utilization_percent": {"A": 0.0, "B": 0.0}, "main_n_threads": {"A": 1.0, "B": 1.0}, "descendants_n_threads": {"A": 17.97220075561143, "B": 17.97220075561143}, "combined_n_threads": {"A": 18.973665961010276, "B": 18.973665961010276}, "cpu_system_sum_utilization_percent": {"B": 112.8486916184676, "A": 119.47923668989523}, "cpu_system_hardware_utilization_percent": {"B": 9.404057634872295, "A": 9.956603057491263}, "cpu_main_sum_utilization_percent": {"A": 46.379327463860456, "B": 46.86729349983846}, "cpu_main_hardware_utilization_percent": {"A": 3.8649439553217033, "B": 3.905607791653205}, "cpu_descendants_sum_utilization_percent": {"B": 124.94461172855756, "A": 128.9478974469921}, "cpu_descendants_hardware_utilization_percent": {"B": 10.412050977379797, "A": 10.745658120582675}, "cpu_combined_sum_utilization_percent": {"B": 84.09375719992553, "A": 89.63866433632305}, "cpu_combined_hardware_utilization_percent": {"B": 7.007813099993776, "A": 7.469888694693585}}, "code_block_resource_usage": {"main_ram": {"X": {"A": 1.8524857010835653e-06, "B": 1.8524857010835653e-06}, "Y": {"A": 0.7745976683760593, "B": 0.7745976683760593}, "tmp.py:7 -> tmp.py:9": {"A": 0.0, "B": 0.0}, "tmp.py:37 -> tmp.py:38": {"A": 0.1329195703851683, "B": 0.1329195703851683}}, "descendants_ram": {"X": {"A": 4.339827962944292, "B": 4.339827962944292}, "Y": {"A": 0.0, "B": 0.0}, "tmp.py:7 -> tmp.py:9": {"A": 0.0, "B": 0.0}, "tmp.py:37 -> tmp.py:38": {"A": 0.0, "B": 0.0}}, "combined_ram": {"X": {"A": 4.3398268840710665, "B": 4.3398268840710665}, "Y": {"A": 0.7745976683760593, "B": 0.7745976683760593}, "tmp.py:7 -> tmp.py:9": {"A": 0.0, "B": 0.0}, "tmp.py:37 -> tmp.py:38": {"A": 0.1329195703851683, "B": 0.1329195703851683}}, "system_ram": {"X": {"A": 4.2288752007728245, "B": 4.2288752007728245}, "Y": {"A": 0.692913055052061, "B": 0.692913055052061}, "tmp.py:7 -> tmp.py:9": {"A": 0.0, "B": 0.0}, "tmp.py:37 -> tmp.py:38": {"A": 0.19767745567246933, "B": 0.19767745567246933}}, 
"main_gpu_ram": {"X": {"A": 0.0, "B": 0.0}, "Y": {"A": 0.0, "B": 0.0}, "tmp.py:7 -> tmp.py:9": {"A": 0.0, "B": 0.0}, "tmp.py:37 -> tmp.py:38": {"A": 0.0, "B": 0.0}}, "descendants_gpu_ram": {"X": {"A": 0.0, "B": 0.0}, "Y": {"A": 0.0, "B": 0.0}, "tmp.py:7 -> tmp.py:9": {"A": 0.0, "B": 0.0}, "tmp.py:37 -> tmp.py:38": {"A": 0.0, "B": 0.0}}, "combined_gpu_ram": {"X": {"A": 0.0, "B": 0.0}, "Y": {"A": 0.0, "B": 0.0}, "tmp.py:7 -> tmp.py:9": {"A": 0.0, "B": 0.0}, "tmp.py:37 -> tmp.py:38": {"A": 0.0, "B": 0.0}}, "system_gpu_ram": {"X": {"A": 5.797950651443767e-17, "B": 5.797950651443767e-17}, "Y": {"A": 0.0, "B": 0.0}, "tmp.py:7 -> tmp.py:9": {"A": 0.0, "B": 0.0}, "tmp.py:37 -> tmp.py:38": {"A": 0.0, "B": 0.0}}, "gpu_sum_utilization_percent": {"X": {"A": 0.0, "B": 0.0}, "Y": {"A": 0.0, "B": 0.0}, "tmp.py:7 -> tmp.py:9": {"A": 0.0, "B": 0.0}, "tmp.py:37 -> tmp.py:38": {"A": 0.0, "B": 0.0}}, "gpu_hardware_utilization_percent": {"X": {"A": 0.0, "B": 0.0}, "Y": {"A": 0.0, "B": 0.0}, "tmp.py:7 -> tmp.py:9": {"A": 0.0, "B": 0.0}, "tmp.py:37 -> tmp.py:38": {"A": 0.0, "B": 0.0}}, "main_n_threads": {"X": {"A": 0.0, "B": 0.0}, "Y": {"A": 0.0, "B": 0.0}, "tmp.py:7 -> tmp.py:9": {"A": 0.0, "B": 0.0}, "tmp.py:37 -> tmp.py:38": {"A": 0.0, "B": 0.0}}, "descendants_n_threads": {"X": {"A": 0.0, "B": 0.0}, "Y": {"A": 0.0, "B": 0.0}, "tmp.py:7 -> tmp.py:9": {"A": 0.0, "B": 0.0}, "tmp.py:37 -> tmp.py:38": {"A": 0.0, "B": 0.0}}, "combined_n_threads": {"X": {"A": 0.0, "B": 0.0}, "Y": {"A": 0.0, "B": 0.0}, "tmp.py:7 -> tmp.py:9": {"A": 0.0, "B": 0.0}, "tmp.py:37 -> tmp.py:38": {"A": 0.0, "B": 0.0}}, "cpu_system_sum_utilization_percent": {"X": {"A": 108.67268725188244, "B": 108.67268725188244}, "Y": {"A": 9.545941546018371, "B": 9.545941546018371}, "tmp.py:7 -> tmp.py:9": {"A": 0.0, "B": 0.0}, "tmp.py:37 -> tmp.py:38": {"A": 100.27071570788466, "B": 100.27071570788466}}, "cpu_system_hardware_utilization_percent": {"X": {"A": 9.056057270990204, "B": 9.056057270990204}, "Y": {"A": 
0.7954951288348647, "B": 0.7954951288348647}, "tmp.py:7 -> tmp.py:9": {"A": 0.0, "B": 0.0}, "tmp.py:37 -> tmp.py:38": {"A": 8.355892975657055, "B": 8.355892975657055}}, "cpu_main_sum_utilization_percent": {"X": {"A": 0.0, "B": 0.0}, "Y": {"A": 11.313708498984761, "B": 11.313708498984761}, "tmp.py:7 -> tmp.py:9": {"A": 0.0, "B": 0.0}, "tmp.py:37 -> tmp.py:38": {"A": 2.5998626337338906, "B": 2.5998626337338906}}, "cpu_main_hardware_utilization_percent": {"X": {"A": 0.0, "B": 0.0}, "Y": {"A": 0.9428090415820638, "B": 0.9428090415820638}, "tmp.py:7 -> tmp.py:9": {"A": 0.0, "B": 0.0}, "tmp.py:37 -> tmp.py:38": {"A": 0.21665521947782385, "B": 0.21665521947782385}}, "cpu_descendants_sum_utilization_percent": {"X": {"A": 84.69719146715694, "B": 84.69719146715694}, "Y": {"A": 0.0, "B": 0.0}, "tmp.py:7 -> tmp.py:9": {"A": 0.0, "B": 0.0}, "tmp.py:37 -> tmp.py:38": {"A": 0.0, "B": 0.0}}, "cpu_descendants_hardware_utilization_percent": {"X": {"A": 7.0580992889297445, "B": 7.0580992889297445}, "Y": {"A": 0.0, "B": 0.0}, "tmp.py:7 -> tmp.py:9": {"A": 0.0, "B": 0.0}, "tmp.py:37 -> tmp.py:38": {"A": 0.0, "B": 0.0}}, "cpu_combined_sum_utilization_percent": {"X": {"A": 84.69719146715694, "B": 84.69719146715694}, "Y": {"A": 11.313708498984761, "B": 11.313708498984761}, "tmp.py:7 -> tmp.py:9": {"A": 0.0, "B": 0.0}, "tmp.py:37 -> tmp.py:38": {"A": 2.5998626337338906, "B": 2.5998626337338906}}, "cpu_combined_hardware_utilization_percent": {"X": {"A": 7.0580992889297445, "B": 7.0580992889297445}, "Y": {"A": 0.9428090415820638, "B": 0.9428090415820638}, "tmp.py:7 -> tmp.py:9": {"A": 0.0, "B": 0.0}, "tmp.py:37 -> tmp.py:38": {"A": 0.21665521947782385, "B": 0.21665521947782385}}}, "code_block_compute_time": {"X": {"A": 0.07070437511070823, "B": 0.07070437511070823}, "Y": {"A": 0.0, "B": 0.0}, "tmp.py:7 -> tmp.py:9": {"A": 0.0, "B": 0.0}, "tmp.py:37 -> tmp.py:38": {"A": 0.035251379040035336, "B": 0.035251379040035336}}} \ No newline at end of file diff --git 
a/tests/data/sub-tracking-results/comparison_std.txt b/tests/data/sub-tracking-results/comparison_std.txt new file mode 100644 index 0000000..70dbf99 --- /dev/null +++ b/tests/data/sub-tracking-results/comparison_std.txt @@ -0,0 +1,351 @@ +Overall Resource Usage: + Main Ram: + A B + 0.52698528 0.56158746 + Descendants Ram: + B A + 3.78413095 3.88233661 + Combined Ram: + B A + 3.47011301 3.5683476 + System Ram: + B A + 3.12673825 3.4679637 + Main Gpu Ram: + A B + 0.0 0.0 + Descendants Gpu Ram: + A B + 0.0 0.0 + Combined Gpu Ram: + A B + 0.0 0.0 + System Gpu Ram: + A B + 0.0 0.0 + Gpu Sum Utilization Percent: + A B + 0.0 0.0 + Gpu Hardware Utilization Percent: + A B + 0.0 0.0 + Main N Threads: + A B + 1.0 1.0 + Descendants N Threads: + A B + 17.97220076 17.97220076 + Combined N Threads: + A B + 18.97366596 18.97366596 + Cpu System Sum Utilization Percent: + B A + 112.84869162 119.47923669 + Cpu System Hardware Utilization Percent: + B A + 9.40405763 9.95660306 + Cpu Main Sum Utilization Percent: + A B + 46.37932746 46.8672935 + Cpu Main Hardware Utilization Percent: + A B + 3.86494396 3.90560779 + Cpu Descendants Sum Utilization Percent: + B A + 124.94461173 128.94789745 + Cpu Descendants Hardware Utilization Percent: + B A + 10.41205098 10.74565812 + Cpu Combined Sum Utilization Percent: + B A + 84.0937572 89.63866434 + Cpu Combined Hardware Utilization Percent: + B A + 7.0078131 7.46988869 +Code Block Resource Usage: + Main Ram: + X: + A B + 1.85e-06 1.85e-06 + Y: + A B + 0.77459767 0.77459767 + tmp.py:7 -> tmp.py:9: + A B + 0.0 0.0 + tmp.py:37 -> tmp.py:38: + A B + 0.13291957 0.13291957 + Descendants Ram: + X: + A B + 4.33982796 4.33982796 + Y: + A B + 0.0 0.0 + tmp.py:7 -> tmp.py:9: + A B + 0.0 0.0 + tmp.py:37 -> tmp.py:38: + A B + 0.0 0.0 + Combined Ram: + X: + A B + 4.33982688 4.33982688 + Y: + A B + 0.77459767 0.77459767 + tmp.py:7 -> tmp.py:9: + A B + 0.0 0.0 + tmp.py:37 -> tmp.py:38: + A B + 0.13291957 0.13291957 + System Ram: + X: + A B + 4.2288752 
4.2288752 + Y: + A B + 0.69291306 0.69291306 + tmp.py:7 -> tmp.py:9: + A B + 0.0 0.0 + tmp.py:37 -> tmp.py:38: + A B + 0.19767746 0.19767746 + Main Gpu Ram: + X: + A B + 0.0 0.0 + Y: + A B + 0.0 0.0 + tmp.py:7 -> tmp.py:9: + A B + 0.0 0.0 + tmp.py:37 -> tmp.py:38: + A B + 0.0 0.0 + Descendants Gpu Ram: + X: + A B + 0.0 0.0 + Y: + A B + 0.0 0.0 + tmp.py:7 -> tmp.py:9: + A B + 0.0 0.0 + tmp.py:37 -> tmp.py:38: + A B + 0.0 0.0 + Combined Gpu Ram: + X: + A B + 0.0 0.0 + Y: + A B + 0.0 0.0 + tmp.py:7 -> tmp.py:9: + A B + 0.0 0.0 + tmp.py:37 -> tmp.py:38: + A B + 0.0 0.0 + System Gpu Ram: + X: + A B + 0.0 0.0 + Y: + A B + 0.0 0.0 + tmp.py:7 -> tmp.py:9: + A B + 0.0 0.0 + tmp.py:37 -> tmp.py:38: + A B + 0.0 0.0 + Gpu Sum Utilization Percent: + X: + A B + 0.0 0.0 + Y: + A B + 0.0 0.0 + tmp.py:7 -> tmp.py:9: + A B + 0.0 0.0 + tmp.py:37 -> tmp.py:38: + A B + 0.0 0.0 + Gpu Hardware Utilization Percent: + X: + A B + 0.0 0.0 + Y: + A B + 0.0 0.0 + tmp.py:7 -> tmp.py:9: + A B + 0.0 0.0 + tmp.py:37 -> tmp.py:38: + A B + 0.0 0.0 + Main N Threads: + X: + A B + 0.0 0.0 + Y: + A B + 0.0 0.0 + tmp.py:7 -> tmp.py:9: + A B + 0.0 0.0 + tmp.py:37 -> tmp.py:38: + A B + 0.0 0.0 + Descendants N Threads: + X: + A B + 0.0 0.0 + Y: + A B + 0.0 0.0 + tmp.py:7 -> tmp.py:9: + A B + 0.0 0.0 + tmp.py:37 -> tmp.py:38: + A B + 0.0 0.0 + Combined N Threads: + X: + A B + 0.0 0.0 + Y: + A B + 0.0 0.0 + tmp.py:7 -> tmp.py:9: + A B + 0.0 0.0 + tmp.py:37 -> tmp.py:38: + A B + 0.0 0.0 + Cpu System Sum Utilization Percent: + X: + A B + 108.67268725 108.67268725 + Y: + A B + 9.54594155 9.54594155 + tmp.py:7 -> tmp.py:9: + A B + 0.0 0.0 + tmp.py:37 -> tmp.py:38: + A B + 100.27071571 100.27071571 + Cpu System Hardware Utilization Percent: + X: + A B + 9.05605727 9.05605727 + Y: + A B + 0.79549513 0.79549513 + tmp.py:7 -> tmp.py:9: + A B + 0.0 0.0 + tmp.py:37 -> tmp.py:38: + A B + 8.35589298 8.35589298 + Cpu Main Sum Utilization Percent: + X: + A B + 0.0 0.0 + Y: + A B + 11.3137085 11.3137085 + tmp.py:7 -> 
tmp.py:9: + A B + 0.0 0.0 + tmp.py:37 -> tmp.py:38: + A B + 2.59986263 2.59986263 + Cpu Main Hardware Utilization Percent: + X: + A B + 0.0 0.0 + Y: + A B + 0.94280904 0.94280904 + tmp.py:7 -> tmp.py:9: + A B + 0.0 0.0 + tmp.py:37 -> tmp.py:38: + A B + 0.21665522 0.21665522 + Cpu Descendants Sum Utilization Percent: + X: + A B + 84.69719147 84.69719147 + Y: + A B + 0.0 0.0 + tmp.py:7 -> tmp.py:9: + A B + 0.0 0.0 + tmp.py:37 -> tmp.py:38: + A B + 0.0 0.0 + Cpu Descendants Hardware Utilization Percent: + X: + A B + 7.05809929 7.05809929 + Y: + A B + 0.0 0.0 + tmp.py:7 -> tmp.py:9: + A B + 0.0 0.0 + tmp.py:37 -> tmp.py:38: + A B + 0.0 0.0 + Cpu Combined Sum Utilization Percent: + X: + A B + 84.69719147 84.69719147 + Y: + A B + 11.3137085 11.3137085 + tmp.py:7 -> tmp.py:9: + A B + 0.0 0.0 + tmp.py:37 -> tmp.py:38: + A B + 2.59986263 2.59986263 + Cpu Combined Hardware Utilization Percent: + X: + A B + 7.05809929 7.05809929 + Y: + A B + 0.94280904 0.94280904 + tmp.py:7 -> tmp.py:9: + A B + 0.0 0.0 + tmp.py:37 -> tmp.py:38: + A B + 0.21665522 0.21665522 +Code Block Compute Time: + X: + A B + 0.07070438 0.07070438 + Y: + A B + 0.0 0.0 + tmp.py:7 -> tmp.py:9: + A B + 0.0 0.0 + tmp.py:37 -> tmp.py:38: + A B + 0.03525138 0.03525138 diff --git a/tests/data/sub-tracking-results/files-to-combine/main.sub-tracking.csv b/tests/data/sub-tracking-results/files-to-combine/main.sub-tracking.csv index d71cf27..de77e34 100644 --- a/tests/data/sub-tracking-results/files-to-combine/main.sub-tracking.csv +++ b/tests/data/sub-tracking-results/files-to-combine/main.sub-tracking.csv @@ -1,6 +1,7 @@ process_id,code_block_name,position,timestamp 1723727,Y,0,1745449624.6481984 1723727,Y,1,1745449626.4954815 +1723727,Y,0,1745449627.6054815 1723727,tmp.py:9,0,1745449626.5598955 1723727,tmp.py:9,1,1745449627.653375 1723727,tmp.py:38,0,1745449627.6730132 diff --git a/tests/data/sub-tracking-results/invalid1.csv b/tests/data/sub-tracking-results/invalid1.csv new file mode 100644 index 
0000000..1fb2c72 --- /dev/null +++ b/tests/data/sub-tracking-results/invalid1.csv @@ -0,0 +1,6 @@ +process_id,code_block_name,position,timestamp +1723811,X,0,1745449609.7528221 +1723811,X,1,1745449609.7528222 +1723812,X,0,1745449609.7528223 +1723812,X,1,1745449613.5325918 +1723811,X,1,1745449613.5325919 diff --git a/tests/data/sub-tracking-results/invalid2.csv b/tests/data/sub-tracking-results/invalid2.csv new file mode 100644 index 0000000..189d8f5 --- /dev/null +++ b/tests/data/sub-tracking-results/invalid2.csv @@ -0,0 +1,4 @@ +process_id,code_block_name,position,timestamp +1723811,X,0,1745449613.5388888 +1723811,X,0,1745449613.5325918 +1723811,X,1,1745449609.7528222 diff --git a/tests/data/sub-tracking-results/results-A.pkl b/tests/data/sub-tracking-results/results-A.pkl index 1999d1b60b403dc193ee96f797ed43f467b8b274..d692f0fd14f1e49ff85cc99ccc23b614c1b14146 100644 GIT binary patch literal 12036 zcmeHMYjhMv7M?s!NDN8LLj)ouJd(hUgs6y!QzWc_1A!33V?&tqOghuk%q!E~CQ$=H zRCV;QA zsX3>r>(;$hx2kT{cW+Hw{OWUN6G?Ke(=XY?0OMG|2F!wF*S{4xvr4kxg&SO6=S)_R z+@heYQi>KUtxBPMmG5pgz_{ItoZ)A@^^9P4_yVlCA;5T8MNXc`h|C@2J17xv^Ed;F zoJX^}ydoR$GH$`_VZ4l!4G4;yTITjS7EDv?FkqpaqlwtuF1uh3$|!PQo89sxNJG0UTpERSSWV-ElT?ewLMObxlgI6sJQbxNF=jbM8)1-s}ypX za-!g1+)Mz3xI8(wAw=vpgO~}#%9A#a+_dVXWvfM52E%j1xT5kTb(2gciq!GOq9rf9 z{sztEscgOeNLO=e`22um z&Xi^kvMqbeff_T7&hHAR|6(kK%Ni1xboMe|=U|(J^+62Mv za$p0ony}em`>@91oXg;{xLj;kT)$AO8cghj?D8iQ=6B2(dyZan`N8atuV)XZ6=0In z{#elxLi)|-14*N9ub{aiOeQ+grENTHSYNGwYFmedlS&OF*b1`7JKAXNOL3~YgC-HK zhPgC}Z2rkXG>P=SD-cF9jSVAp!-cn%3&0M>_IMA^^xyUJpj(d8dsr1X-MKO$o>t(l zvT>P%cO=oIu9CkW+&T45nxrk2Nng>Z&Gzsm9bV}EMCFTlOOKDBm6-hAJy}mrDxpc^ z`@Ffkf*WK{jVwI%rVXQSSx;-pvFW>N9!#Cs84@75w; z&Fx3_epG-q9bvmxj+%tV{&V?u=kYXDcPQ}})p>QOt@HU~ds>I0GLAX&Z*CTvb!2f{ z@p}W2qS|MiHJ+ZEr~u-~*Mo5>w%}@~F^|g+z1nreljA%tuL3#KtjO^mmcbQ4tXIfY8Slk@L4|~ojn9Sg z`F@bW(0qR=!w1e@s_^;Hg6~K<$-}t3Ht;3@@#|T^!2)1=MM0&cI(#0x%gffQ)jmRt zAhI5Am2J&wdZ(wyCQJOr}`*W!Y`7KN~N+ahO06Qa!;FY#2a-yxl%{a+tE;zovCc#!O`2qGBk+r!QJL?vd72rV2DS`+F 
z;;@k}EvLErF(5`v!8Wf&9Q4Qgfvp;HB7QEB?+_A z1P#`})JC}dU_m+JrOC-omy@x#hzMWlb)$kL z@Rm|F4AHSYG70v8B#JA9gw+R~qq$0Hv)~s%FT59LjW`o=)`YWraMp^mHk>Vi%(z7O z42&|D+-NQ#5nwz60Hg3W;FJ?d_$2@iVKhm+n{i9H?bo<13Bo=bL}#!}0q~BKRpgQw zNx%nk8WGpd1wS2X_|U1w$?%m>Z&mmzwWx+-g}`rv!fJwp26tt>7WqMJrv|8i7o15> zFNYR#kc@#VxdG&uIf|SH;hzm#v)>0HrJz_-k5Wv3+((PquA=g9ohgNP*cTaxJq6g)ABo1pBG3h~8#72`Dsq`|n zY@32ewh}N!h;ry{RkPgj4%}qGvA!FUz^^Ua^ZMEp(q#i@+f#dm}Lce;d zryk1b@mpwbS`7j(^=qtJQuQGp>>rp9dM=n+RG0U57W&HzV^@iZZxREv9_K zXFDEwq&92k?SBr)?|fa8f%?fq6BX}7AE$?@p{sTH*6kk+-um!udi`Q$%^Uxy zxOzkl8h`WA_U8xWBO`z1lV>`f7+H;S7ENLnjvIiC^~mwXG8|t-12qsX1iki1_nF}a zh^EWlK)}`;2z>v60P#ck3b@&c55mvDUy-~I$u8OX-O#Gyh2~DoRrptdW{u_d;39_K zs}>zle4h?NL|l#J%3|*T9y&1g?`nSN8;tSBQokz*9bW`LB=(oZ!a^Cae>$dhhyDC1 zyu8?FXiY z7ui)5A5)0bC(ml6KRFu*Ft+RO$2y;053J~4KSP>g7*FRinR==xb~e@=av>^Fi*@&Qj z>Gc=T^L+ayqWqWQA5s_Z`!F~?y`I|G_1)}|$AWa@GWE3X`qjPbm*y6raRt*$pA$c_ zbd6io@Wk(iA^xjx54Vmu88(iwy%*)ld!hEk0g)i}d_W!wd!PF*@QnF0f9|_Hlus5t zezU*j*3w%vv`e`xFfViUkxGfNFO z^Yf4q5sd9w8G9aY9CA9Sr(R#X<>8OFEWL7i5a6>U~o<;1gx2_Dcl(11}vL zaev(I;5O*>gyl~?vvAtJAe{_%8CmJsbIz)axu|K~`p3p>9fZC)__w_ebjG8J1GBf( zY;L`1NN%j`1j?zn&M%m^ewFsTa1 zF^zFNJtn7#CsuoMtcf+%+G_l3>dz5Ut+lC*rWhNKe>MKZh)LDn_hx7Fc35B)V{=Y< zd(M0F?z{KCH*@cM-@R{ZwQEat8eGJl7PnyJJ+yft>DvPLp!mPy$m zCTWEn#A1~F{$>Uwbke8&~+}vmHL&23n&r7EI5-H6Z zFNSkw+D>~w$eJvMmiviKR*7jqEIVTB@GWbu>UZd+V9C_9K54weuAz1<aXPOOkQ?*7`ccUXJ1HoZ~7>b zG^Oh&8{AX$BxyF1o9ZM7`BbJD}1w{EpcY6H9WtwETVTK$7$i^p3a8B&l-G+fR=8MTSiB+A!46<$-N@w*2mt*fUL}4M&}rUK8HE zJ2~6hy2cZCRz{PgMb(@4nfFAIq=&F!ZmVa0tIDpCYo%-AhkV}bXR#$8r4G-!Gn!o3 z@cNQtn?EZdNv`6jn+{#{J50AUF^#K6PA9o0{B6Z<%jFo7RC+4>M+M2HB&oLHgYyTg z2a}{ME8X-@>s*pFw`pl@#@T){$!oDL1}$kZUIH8yd-;b8WlSZ*=}a^SK}J`jlc_M~ zIDrNSFPj&l(THJ67sV*rGKfN?mglb)4r6@!id`Hwrvx=!DT!JKL!%DImr3||*E&%w zlPe%JX7fQiTL2X(W(%QhsW#E4Ho`&MoJI(LAVeu+I5PtghLh)HO0?PKFxi|;ncVHl zRdGDy@Qz_Y6p>-!9;C338tYTwBgJ@?_y~Q_6DK%~0&lb1UZ8oK%V~5o9#{%5i79B* zN_)yGXb%&ZB?hm=Y2>XQhAsnyOC;Aq(BplpA&3LbXnOg@T3y9Zf~oDrq##bbpjMR) 
z_OZ0B3QtPDN~;=&BaI|y+lmJvcHg&XRcpE*TbnA86S5|7HQ5CjrrRw2fdJC0*KHU_L8o-aQr(Y!Uv8Vv~oWCnId6|X~rkX91ZhKclTn4a7sJ8d^x z8Ap{49(9bJae$}7#8Ha`7Y};{@5MEe7y%mtqJA4M<>Gz09=?wl?e+T>1ChMV0S>NW z?}2o9z0*v)X|s*5BGuuPs2!d-MIN#qFuMTTidayr)5ui188ccrN<=unL1MGEknB!K z!drO-Rjxpn1US{c``iD48rWiRN|t$(R7gYRTpHlb zsu5$eVFYg>b&ieqz8F#@i7}9=8j&lzU6B29k|A1*L?lQsVGkR+iak)z2}eBRaPw8) z(Gk|T1=#LLSYlVha^(Eo8pW=aJ3;Q%FlDc@B_PKxK$Q_yrBI1syo;yp7&eI$E1;Vq zMd|g~Y2YxmE6zt#(574vVyMQ3265E6ZdB6g!>_;Bdz&8U&G)usQBv@})8-o4sL()u z!k&lqBTT=5UPcQ%>_vFO-oai$w0K1OH#}jV2y5vR>~+M3{DHlJs#ZNA7aSo5tPi`; z45tzY7ao!|LNSOnCQz=Z#WxV?by@3S8OjBLq4BgKliHyl^u@7$1EIaXXZ7lmguPF{ zx@v2zea2C}k~}V)nO>UwX#(|ok9hjq;y6mVI#4pnKYSgZ{HK12RBv&xgvOP71SPGk z-Ib9#sn|db`EX~}f=jW;R^Mw;Qc$J+)SJ$nnYjDnfE}+*el(NoC|QrXRkUY$$JMQO zyf#N3SGO>X?Irf}Zs97HKu?le>yCQ?(LI#>Te-+{7T z(DSh6jdf5?&84tBq`9dBp>%and2io3{MPUSs1@gh1{l2(7&>5pT;)BuG8Ib#;_s)R4wOpu!0XdEe*yTP1sA~-Y z1e0K5@4%>h-h07)p2Xe-njw_Ehnf)fzT9XK=zL#r9S{lwg_HzV@2hXlV^P*T+`p?e-*v-$OZKM+AQtK zo6pZ2^!e=>l&7vw*(@Q4V#ke{w(hAMDtb}l{lekr+tuIZe*HQ+XZDTAy{U(s`btsk z4gG~#lQfml?;v`bOB(WcbLxfwIDP7Wi_-;{oSPHYT-GPV9ei`qps(~HPiB6dbnUwS z#E0jHz1(GgyXSO!@@X$#J~(=G4z*$XYnQe!%c1_0U3zS6Q4STh;+^-GJa?pB{lOw# zYkX>aR{E{+KO9}bRdpMzHnp76_vX>?e?Qh#R7EwQvd=*mUBRgtX6T1eeJ`R7AFZP3bj@xWv%4wbwVS*@?*fdxkfFg^cDI0V}J9@ z&>!be>Q)&KsMbfscJlRFKD0lsz_PlPb+s?wa!!(>$Sbg_Zk2IThJ04W)pAO|xdy}X zmH9F}6RoVJxdx}hGg9TWm}r%8+?IN;tJN*o7sLC>elk20t)INy2~aER4={vD-f{K3 ZNP7bK8jR8B`8!;6sUzLhlj`dqJmcGKXdP_vv)Uf4HWe>lXGV7 z{PWL0|IGX|-~Z31F?r1uS92t3J!=9XUJVM)xnj_!gdFr+*SgFIIc~-cZePuGQ3-ie zMPIECTBtA33$@Gqw~Il+70lZ`qISg7?dyYL>jL$QVX=vrq!){J|1NAnZsN)5t-9j)Z)Pr0;C_4QvnFjUY6j{ zm+I5?LVE)Ibzh=O-L({#S|1Q`r^G2E^`>P;dy25o5XPpX=nYG758r6 z&vG4qs919}O+XA+jE?b<-hTD-87=Ggu;SzB2s3muGn58LE{ie0PNpEL=A`KP0m;fm zS0TTeo!=vWf+Y84NpYx2WdSQyA~tCbP8%v&@kdDNV~|AHFfGjya)W*Oz-UtxkK2h2 z#Li)}!S-QK#JQ9!b(4BxyOMfETGds=PRK6b%1!xI@15t_HCGOu+&$~m09Ju7&rQ4T zZ@#Z>to99ZdYrd${z>k>%-2TG&18(=fL9wT2{C z0pEo854>HT#*#XX=pl=*l(VFE@8OYl9bVXq52ihxxA^oBR*4Bm=XcvYzKA8=knz?Z z0ymVfB;0MX 
zVJAy^?iomT3^qvS9wyq8;@iVPO=TNLeVtyqn&*x4e~@M7PgrRT0e<=zedo;v&%wCGdcAOHDk0jfJatUk#< z9({KGO_R3V*cokfZC|90;Zg6h#;UV_?}u(bd`%vgT8WzWl@)SnaQ7JYnPg99=cej_ zINIfr!0Hm`iHgl3C;-P9yics-C;ET}SCrg6f#b9kTE%Fd>;k;jqi?EYxVGSGXE9GI zh`ib<;%P}juzO*HZGsuUCj z7XU_gRu_ieC9DlYEO6rCjfu6Y#XyFsZDsN?CsC+XVg17_ZHpp8XG z59%CCnNk+`+u&$>M2q_i{&rz?TxTYoJu^L(T94pyN}{*k2A?+3BYMGDVeHJ>kY9!4 zLQdprT}y+r0e-Yw)ho#9lwgUpEYtL(8q!s_7i`?Z-WAI*4b&+F1gBfAXT^~q<6C%g zX@7_#VCO?{vKWPOe7ra>AUd(;*tJ(88kzGN)w46x6J}+0v{-{s9pm$gE3*&%iKS(Y3ZB5&7I2%|~r?SdzS+wQ?_X%P1D5S_s?1;IN`H;^kNgcN)rCzI~xxsazo4Ietg zI0bnc)EDUT45O%oVwoa81BIP~g9dk{%Z&UWwle}$zzd1fa>}5E93*StN^Srxp-R`X zA^hX9H3$3!jfvf+(*^`hF14_AI13CwyD!_iS!D!(8I^jN3;PFA1(&#=)*|FlbB=QiHL+zo1AD~9i$P1 z&31ze=SBeaQ$*s|>O5qv@1MA~&whAzpdlxXGih-7jiku0cIgS>9*yxLrEwe$&S>0b ztv@wu^~Rl`XLVAS#vK*i(3(fa9Q^glZfJdKw_l!GV@Fnbw0_78ifBEuj9yQo-$N?8 z-19;Y&%)6tlAq&lu7vcM2d!usxx6L67+LG4lg`B%fXK(dRv-rX`1zG9tGoU6#%)3E z@Rzf5Q7^5(j>`ehJzaY1shlV^s9HxxJ$v%1t@n+g^=~U@?|-AB(~#NdhT$zuf9X?z zto+k{cjl!BhgPB!NUJOSRlTNT!9T0`zi8NoW=4`LXMC_egRqyI5Az0S-QLvX!b;T7cLUy-A3^e z6c@{hh#Piexx3hTfX5K*8f>4p{44ANSW9|W5IVjHE~RTwo(KzNaSdi-T3fDx{5f7; zvR@dJWA2x1It226fFmv^Ku&>in+VG1I4d50xbgsAEs|)x;au;3ZObZ0CON8T8$GQk zZq@^WArB2SP#j*U>9e$T0D|&NJAGrOe6)VPY%{-M)5X)X1`oW!f!-yG6`I~g6{2sp zbbMM8J>hwmwy&JDa(n47J_)ZUtv{b94jeZ7;^|4ei%bXl=tD@^rk=xRqHFL&@~3%- z>PJhWC)3_el>c0PIBVg{C&JUC^{m>x=VuN*9;VY_+grMScE^UrJ^P~3eWw&ZuAVH} zJNm9U5B`1tlE3)+$bumsMvY@_??8F-rf5EcK(xp_F_6c^jwiuZo;rWzPl6YQ^69EK zU<=fbDjs2?eTISd83x*C$Uh{sHzSWixMLbmulg9@3EQu(__PGx0zRn9`K08;)Y06$ ztaX@P=-1)5$V&|PPsITI7r%GFfA8v&y;F9c4&R`(K7HJ?BiA07h+ws1rgi~7cN@ad zW@Tl614!qXVf>LRkaZ9OP)z&2oo_9Ew)V5Ip0s`{_sT6n+r?mvf2EwS&P2Ob@k7szyVAnXan|;B0{-jI9UpRc(hK2jp!Jlc>$WsZemP7hbH9MB z`0Tlw8M(7i-TDm=jM$owzB=&Q?nTYXXzUf;pPc>7o~UuG?Hz#M0r-(WYyO}726$nB zpJoC+#ruRGL3EGaYarLHwWPNXpxW);AbkhYcOX3=OPde)V#s0pFMXmJc>n+a literal 11883 zcmeHN32+q06OcfY&REW7$4iFQra2f|l8S;B(XXzcSq(#EmRZ**| z|IF|I|NNQ$-S7Wim4>gKrq{_$Y+LN|P=cE_PiNeE-eXd)B{6ZF$21-f*tp`c4DYcE 
zytGD2UnI?!(!>{>GZ{B+w@YFR7tL8{UT=1~8GVtPb}*6{J%kqMVe++4Bb;&+yCpH1 z6t{5#H_E8e9OzqNzL=5mEsZgJYfk;y>dqZahqy4?+>%&_=wWwY&dkQQoha*O2Pw$eQm5EUlFK z6;cYx_uKM2Xl!Sa6@;6rEf7_m2+S(NZBqehzFKbW6&3>%CPtY&Hi$0{f;NJ;+srsX zj5-`PI6jP_xVN^jwz9Uxab$*|vk`6p3zd3*Q%E8H3yOIw4_raL{ zpT>40SugKCySU%#e3I4Ruh6iGhY4$5b6UI9WMybr@ZEp)7BqSR>3s{_1`i=w@>F~_ ziDV7IZE*_8>X!lS@C=f*9-H4gkYv>-g{Rf(h8YZf>N#9?uX<(l(u?FAwqGzLyxMRqDWS=fo)jkP#(!@@Ele*Xz{{STpzP1dC9dNq!YR4=d{{BybsBGCFaZb zT(1~N7FHW`%1*N8f!g^&86<1!s{QxUruuDAime1v>MC|(`4p1%n*qmcVTGwA>vP=B znMu~iA3>YjU153WFw_|K)N?rKQFQjw`{&6y%-xa3Um6-uKJC$; zs<3F1)qpw7uXWF_)z~f4S9)P$*9-UkW^BP7@H7x{$1y=lUk=BKy8%~;-j#W07Blc1+(OJcZ#q45nbSR{FS59e?$lkdT3%#H)=Yyos&n4Ji; zrrFj$vr!J(#!-;|K#F2vcrybT1}E?eC(i72m~0$lQEp@S5?){&-Z5DbXJoQ+OH$d# zP4F4;vEsc(e2gS~#d{o-N3hv#b7;Zl(w7^hy+Qis-sSGlHF_sq~K+KeELQEuA9M8CI49tGy zIORgk=B=^TI4BUHGO#n6d>smew8EekOtf#owB={mX}j6VI7;+zsb}nr10oe396Q6~ z6kx5$YjKSvM!~{>tluU``SQAa2fmLO=Z*Us1JQ!b0RgTNZ;5Sqqti^gXtPZyA~gT!L3q1l|!gtzi4YC?f746v&``I^zi zwqn{;BH)Q5uOqBZ4}^w-GCA9}Jj^*Zn%t)k8V8mSp4QvPg4`}nleGnA!5qj<&8b1k zkA>X_pM%*%eQ&|O2PW7}xU0n7R^08t-5+qb3wL{P_Yrj3?}MYix4ut5eIL144W|J$ zBz7H0iji`q$OE`wh=`m?+dX(}KOT#M3YCIF626fe68UHajy$x7$4haEtnOVA>@v8+ z%T0+)nk|R>`4YQaAuE8C@$6k7Mjb3M*d;CAA{EL|rIrSGv+Bfz92k+8khaE_*S;7s zNfP6sQl)SxyPQz{@{%!5jK(Y|FkuZFW5kyDj)N_pakzw%Cwz=GE)OhstSqq=;Eqzi zTVvU^%1w}KH9}phY$52euj7uw-8AUL2*D}Pb{RH_9V=j%VngZWfc`oiIuOwe0iAZ@ zW;}voV8n+#;?c=%iMxAD-emwjH~wrkl7f$&I(=kgLj%PLdkOrX#L*(Aqb4_viuR>N$d1R&I=J^^2=(ji{>f%}#Bm zB;dLFUYlBis`RY4oT(nP?@FiLtFxZX!}Un9qou0#Z11?1ddEvgDC1fR!*W4eX~wZE z7q)UW`>ue#%Y1F{X7>a%dls~T-H}@5wzL1GjeXHi(WY;Uw)Vf#L9y$7OhbrLDo;Ra znLEA!+DDZQF!q`fa$ok|l#tg}3pFw!m$L$PsSg3lYC`~z$zx(4z^Jm{d%=CO z#C``nV<`IwlMwc?LUb5(zAv~A2!(-CN&>4(dvADzF0s@t@HxSp|Ni?0Ck^U(ZpFxos_Yea3}?2Sm^*jp9sK?3Z1w3cc#w2+Z(JUFoU@(U zRGEkVoXm9VJtYs#kD%JF`1o4A_Un9j5Q8c%YiSUwPa)DBDZ*MBtO>22dhgYD(Wn>$ z-6oTQld&e0o;duR=uA?ci@Kq&WS~q#w{Ad-PeTIT>vCb<1%r%_@T-7=lamxWU?SR6 z)Us;swoVGZR<}p$`E*}=D1A2>ZOCvIJgoRvqvxlw&u_wKIHQqI)|w8?{!b3f8i&u( 
zg2SbEl?=)l=*8(61*c;aoQ`3CbU3ZMvi3IKXG$x&-og9Tu+-zX4f?GwrY@gdhWV%1 zq41;P^Q@^q4}jCBe^{LUXD`E%Q3tLWjve@N$-Wui7&=wnxcJ6fw+-8VKkkojpR1~O zpRP}T?9hFg>rakAS6g)4;nwG&dwsI4NxD3=GBf7LPmUg~*S;628cekKe8yWxU&zYp zwz|6X8P(A$u4-wpZfo__dsTm$Qb+H#D72hLxs{`b6|7H3xU4NGcnNh*g|?vw!X(wy z4J}DpemXPpwt~||6VMU-Q=xql!aqpP5hkLyKd3&jvwyf3r#qnXGHAIe=(C?wJs%A1 zi2inK9+!RRP^0HZ#_2{pzX_+Ca9aJXt_i38&(s6f(NSKUR$)5i$6iiX^#*u`jZRB} zC707O|GC0+uP``q&>VedVcxSk`hNOy`tr}d-nhth-JqokK*--o#fkv*{z9#udavrw z`0C7UPedNlQgu9lQG!+f^9kVqm-iQHVOqUcgd1;Pv7^GOmIiBItEb+3_2J+0ojMQXE}uUz53mlYU~M^(}TjIWLntxmtdTI&VI%c@H9I7VW|lkZ@Uc)Qmns7f}? z?)FKn;A1?Z)yw!87b}RWl2YdJJLlb{)nUSXB}?yO_qZLRHP}a0`q*V(!0nvpVeJmw z3AmLkpX?1ZS?xp|F9_{s1c7OSaqR-@^!r3fkew3dOdy=9as`M_SXQVu%VM=W#-T1% zr>Xh282Du_QMpVdp-yTFu((s~-M6T1ty+7W67zssQBiT{IY=b4l}f6ktwzn~GL%Ho z$#@t6I&s-bY<)<#+f3ao(5*aaW6_2cCrdY%s!L#cZYWn!o}?X;#X^yGzp?P47hZpZ z=JHN$zV_vZCL)R}LdWPx2VOr_d3eoUdiW?h!V2Bg3c142ZPC_ur9O}$=H&480m;k- zyWpyav}+E%zK>2qvgXmGD8!7sqY*md3G3^t=;2#Qx<(_ha9~Q3Lw19H`9NtCM338v z6~t!2YJ>H|7K?K(o$JT-$9lyL2-T|Vh@OyB{^*XmyQhpkL+`orNapUZXAYwUV3Je+ zP|*}Z`t`>BNh5Espt&L}COXo^9XuRZAANl4SO_*gjsPB6CHXZXdy_Lp;RIZU5nrSN&jvV?eAfazsp zGOpX6M3Xv;|MBLI$#>EueXA_Sjz+Dv2QTXK!tf`iZpmJJd^jz{#P{y&_w0mXnl!f8 zTdM-$;|TNh_|3&k8Y6`K$lTZb(g|xuzdp^W-ass%g^U z6MuF6du1+7`VU^?7fzbgxc%v?NBpKxBk5-tYm1BS3{A#4lIdzIU-w@9*|BK{50w`0 zpRh2oPam|hwR8WOM^e!I=JoB)?Y)t4WZf%2E@c*G41ND}FT z)*yfN?T21Fnuj(VVmp_OoPb9EYw0%E@l;fMAo2RD>{`^)@%)ip%|lQb#~k`EHv`Q$ zw5X-gZDd8OgAoC+d)a+#gME?@Xkf(n0WlVfl3-LZ z-pAJgqO0MyEFKIS>>!PbTyCiGH!x9&^SXU1Tg#Cur=S462!CB`(~hdqZ18Dk$m z5a#CxK?YOv*FqUSapq!`9||qliYiH7#_h9%zX3i$9V$^}O(!Xwh0T@O4Mvj_L$f}<9Muyr{k)Ot?qM9PM^szE#a4QY z5-5n%PKHo2=xZsmf}4fYPps5F4ELGq&82`x4z9R|H95%Ok>}S$b!U?4JJVmB?_oSn zj`cQK;nT``ST86m%-wsw?3duWkQ=!|Rg&OtfT!)2)CzJtJ+MVeimv)$4at(*3p#FM z?~BLK71YTD7^homqK6}4$anCh8~q{DfRmTuW>EsA`Rr_CfOTTYvGXs6G*b69tY;sp zC#*{EXwn9zCW7^Y4dsX*rzE@FF2>O$;j^Q^6F9#N+M#*#i^P9>29Xd>VcMw7&Q8IO$HmgBZ02n_8Izri*M;6Ep8 z$R#tfh%e++(p|q7d?nQIrPGv?;j5tDtnxFpVm1`5BEJI)n*}Z!*vdLRHiYO;3t<5- 
zIE#{24lU#&nL}G*0hE|~R3#MxL_5~zfFFWRQMIKg$(RIj9o%GFFQqT8`M}V#-hk9} zo{kYGAj74{omIXD#L>cLt~bwUl_+m)0(E2g0_e&Y;%qq1iXc-GB)`OX2!v9mE26T( zSmsDHva^#9iq*38ND6{7=R-Lab#`+_rw;Rfcvk|3^eb++x zw_r8l`oup6Wb6dIfLFW^XPY1+TEuU`)pv3BB4k8&5-wSH&cIL=pgO6bFK~68V9G>z znNYbT;|RIbNI_j3ek$PKhu&>vry)0j`Q^(trgwL#nf;N{L0jCjV8lmO+kwU+^ zE2k0c84+ISXnGAoGwo}%R#NrBAM6{H3v$kzTu__+c0cr&FEg@V9h8Y$wif1%skZ6; zwBB_7wjL7EwDaHL`$^;*Afa(Mb=->1^J-stxsZB!(atQ-&a62l$XwSQa7&coho&** z>pt23*kd*QcHI7=puF>SbvhcL3{i18^rbCjbC0EkiD8I!@YZcduiO0SZASgO%IY`% zS#jm?YBcud!)?!BlZ(v!Q=k4~_fthxC~M&aX2F3v_5=d-%bGz}#Z15R%p><&06nT8_mE-&l3PiHN_^ zP^il}TXRt0(f0hv+L!UZYdq=>>wVjlx8#q5`)SnqQ8$iTb@SzulQ=>#PHjJD7U*)$ zM8p9ox%L8bo^QWMpg$%3K4sCK4};4y>M8R(&&@145~Q2Ask?ReJ@>C&+&>SE$-BGc zIq7I==a_}{PyKc%;=lU#VDs>kVe^>Vdw`xi8S1Yb5DZc;2;{A>=f&>=FPT5{7r#pb z`eex)um+lLExAPp{3H$Vduf25#Q&rKpML!4- zYK9hZ=4K-^BADCzrSE!j{@~ApavJs3n;t#3Y4PQglgKfv302|r0n!|mrdcQ*r>Wt9 zPvoT9W3DdwGV5caePk%3{!Mx9zME!UJ~=yZYImWnIISO{?H@N&ll!J{z$d~`?H397 z`(HXT{K2@L!DBG$2}_^(D&qfVv);=+6a}GND=HFj?xFa5o z8dCcuSfZqf7q5oR`LjK~rG{8^N0e`fP_c1?@03ZEt0kOE|(l9`Tj!nW3 z8Q|x{L4a-w@UO?TA2qzDjp@FdL2 H4(tB`bj{1D literal 0 HcmV?d00001 diff --git a/tests/data/sub-tracking-results/results-D.pkl b/tests/data/sub-tracking-results/results-D.pkl new file mode 100644 index 0000000000000000000000000000000000000000..7bd4d9ac370f1beedc36fc4577f5f11fea4b4d14 GIT binary patch literal 12117 zcmeHN3v^V)8NPX%7a`#(2Z%8sERVB-K~NrMOt3%}gMcZaF<$TPUb1_+`(W?Akbp$+ z@JhV}kxN^(wbhnaJ<6$7D#sVv;)|frmWl$$YLvt4P!XvhRr=4|JL~M-O(!@jD>GKW@V-I5Ar)EXCUG1?+S#~u%{@Ez zvRsEBF4f)5lM%y}pks2Rx8L}5X6uGstoS54!VLY)41Ggmm&Kf47fTRTb87tjfMn;Q zt596W&hODbK#~Tqq$Jd&vVfH;k(e|Gr;XLD_(LT1Gf5(Bn4aa<{9s=}Fxm`7_It2_ zxHxP!*gjmTIQJF$diZ){yYlskwW=$LoseC=otyTHzB@0lYpxtP`pWEMgIER5pPPQ| zUvdYtr0cgGST^e3DwfpYRp{8p!-jQ@-`0Wgq*5CR*8PF|Aqh>a_JugzIFTiht$4YH zB~8R>Susl*RSId>JeKqz7QcNAOZqA|pBG-ouq=4z12!(aP>&zov<$Z#Jc| z3Ir#AxbN-CESA(|OfN}%y^EY~rt#67hv>DQz8_|xi2SW*j4 
z2R$t5xo03P8fucPJxq0_CAWvR%Q?~2)m*W8>=)VPE6%=GHhHT4-JQPW=s3@<--4XO-m{Ld|;Z$ryY2Y>&sH%icg z<0Bf=LX*%Z*WWmG>kVDeX77$=>UbXYtyofX_HP5x{DapNahcVqc~6Cd%Yxg#;pAhjb5F%n?mVBZ~ zkXL;XQSbt!^kj8m=-r~)FvJoM9^RZ-n_3KJnA%sS7;_SZ+7xzGl%;)9WKec0ZAu)D zC9<6D3m%BsqifNo)FCl!?TW;9C^^BVOe++zDeCDwxPIW#V*97wEguk7fgFfBfPO($ z!Eaq`=$ZT+S*RzUy}>mOEl_x`76!*aP(@xA+@h=)tH8z9a}^Z~#KRL2dN=l1Dq_Sh z!tQ6zG(V{O?927#fkzImxGdTnWbnuxbxEC>Z1&9bR_4iq?D2_#MkjnaMOh4hvBKE7 z^R$o($Az58b%ve=X9GNKziL#G(<#Fe>3Npv$2DZD{s7pxjlDaTVHv1L2n!y++Q^C{ zVaQkT6w>|>X~52FaIzSM3WB`Y5EeaHbL{*pF^$Z5jqBN!=?Sy4I@+v3s7rAB!GijT zAE#&6`|AaFql)*A{m$@(G_VfSo3D86(a$RO+`>ZFE3PrkH{LNKN&&%zMup(ZB5 z9+1Rv#gMT2pmQ=;DQgx|2&706oM|{~z}X_4HQ{V2&X(b9C1my`!e?NFbL1%JND?Ea z3ji>N;kMPdEeire9^yAxrU>}Y*(P#nf~MdDIh}O3&V@7+ zYWUEZ#wkeEP+x3Fv&>>P6sr{J1t?q`95lEqy;f`p(VrQ@0$%tyy`TbG$U(A)w&VuT zQ)&!79|A-kTXQ%BL8oH4^7L#>g18P&vMWdLj%z`<=*2)-ZTyms5htv{p~syKsSeaJ z!)9NuB$$;XPizKzV%x$`Jwpwtf=qCfxxW%xEC{rRqlrBp2UypVm51{Xl*LE1e6+iz zMIYJP1@$`k>V1#*d7=#HXTNx?oOm2b8X7wr=`C0YX+O^1#@RbKI{;a{QAtk#8G8ax zVxB!XdkQjQO48G~`aaIKLq-fJwv1m-qTj<%Sq>w3{j-RU@H)VoO*uk+PWSu+^{{O zA3R@Mi2CSP8Mqwu+|%W^A1jDcgQ|6C?6XI&+;;DHTK}rDcJDu`x(usD6Gpc-|EXUI zvhz>>&6!sp8eWZh-8D&Q8rKim>xpBK6*vY-296-4h3>LomV1f85(hO{<*iVse#aSW`MRRLzX(zOraALU}vkmDrpt(||*Kv^|?J|oN zC@z%~ku>Zka(6L)cLimd;jMw=b~LR19D4!wl3o{tj_-nR(>th2g|)JI2ep{io_8Rf z$16;>?7#Rc(UwV;Patt1Eh#@h$_CD60`vu*s!f}!_uz|lZ2>=QnP-bb8j~fj7F*yF@XE<#5y> z`mjsKrzO!7-hgTQ+No=Il>hAG=z7xn^F`v|5w(|2PvT(Yy3t1`A>~_oj-G|C!LQAy zixAb1mPAjcy@Npisk$j|>1#)#)1&pgdAl#n8h$uRr^9x%^#5eh#^t>Spm77Hl|G>! 
zE!#crt~n3=b`X+2`{K~zVJG9pvA1^uJ$Y8NUP2&XWL_D_dt&FSU>mQUKk!$ zH4t`&8^@N8u>e2E1pFKm@N=Z^72un}hXd}ICCBeOh3|wNS67`bgXe$~H3c7+9l2#3 z*O2!JraM9g{9ySl1O77!0RPqR9PmH5x@`BfoyVg$D6P-9>DlWZ+&2ZmY9&k^0(|Zc zgagj%>Z?p3onuAvhx#M?AOxV8_B}h_TK??3PojF#`mw?*wnm(n4oJxh#C9(lPy*?Z zVfZnrrA4km&8r|yE{@aF-hSnlldX5(PppqjiPq1IEcf;0+Sc=(b|d?twsD7=FD1>|u<>)8y}g5g|M%w(54$_<#ppKBddA8}wl>}TT9i)a zei2#e*>m+X3umJR8#X>LW?M1(eBYa|ENe+e6Z`jks`i;(apTzAI|08F@MC}3{6F~_ z@ZtbJ#{zu8?XeI)<%bdCqYs+Moog@Y6-4OYHsU+d;zRsvG3`4I@sIrU!wpNrC!$v< zt+L=sq3rEcFS>=a-l?bg4~6#iqU%IUd;8Z# eeDnQmGZpuJFFO&x6Y)C{AHb#Ew|r4^JO2knZY=8n literal 0 HcmV?d00001 diff --git a/tests/data/sub-tracking-results/results-E.pkl b/tests/data/sub-tracking-results/results-E.pkl new file mode 100644 index 0000000000000000000000000000000000000000..098298c27fda48eb0588b8f756de17d31d77c5fe GIT binary patch literal 12117 zcmeHN3v^V)8NPX%7a>6&a=;ja!tyu^Aqom;h6D>_F}zF(4e@$+_mbVq-3NQ`l|&_i zhga$?h+Nw$t*y2o)T5kIrEnBQS_CC1w56iJu^Qzdc&slfXqEmm_s%+dcN3O?uhUG< znYr`NKmYtQ^Ur+$KbuAA4^$23NYeW>gf(7`2%dRj#Hna*`fcbvW@_$RaDzYCFhf){ zSyhZR#^A-qLc^h761rWC2(oPGg<&D+6%?l@6cL?sBSJtl^y~?OD*T9iM|-66frf~o z7qRUApeja!f~+_LLQrTBBZ{HtRmdUFyqnEB4Cv5%TU~hB?^c}AK8D_%*MeccXPzwb zZrlmD_1-}(5N>kvM4bRA?H3{up$X*j5z!M0Dr!XYsF*W@a2on0pgv()W4O4bMrDfI zSZ>TP9Ih1j>$S}A_0luEYExLmol>V>XSA#^+tc)vyNv4U>Z-GlNaiY64R=eu;qdj) zGZl{@3lZq#E7DWv#&jEJ>&8L1%B*WQ&hzS1wv-#oV0hm^-{8tDbCWoZA?<3~wCdg+ z2UxCSkCf@|mPv@=O3*Pm((A8$HnVNrURHb(9btxkW`@2YvCCr4ubU-^syQWoen7Hw z(N!p}XXp3$A0SBsSW*&dQdz)Cl}JpQi_`iVR{T+t`kN#XHcZcQYksh=AQ)|iBKtkq zKwKO)8*Cr0RGj+?eLa1BuwD83##+^t#7@XA-^xw>MZaBV*)>-Vzqfn#iGi#FU!0kK z?O$>Sv83y_9A0wGebp?f)2q1+u#gocxBv|){9)u(`v)UKpG=2h0B3tod zElZk!(~@GAbWItgz4KYpS}cCsXqI#?F1)S22<%|$dGF!!Q%6^3FKc7>uz&M}vR9ka zSOtQU-aYtwRTfL?HoCVYzFftUI=zQSJ9T(rE8d^JwP@+dp{x><-nygblM_o>(hWJU z{Vse%IZMLbrXP}7(p}K)tubXRY0j#hryO%)E+|u00n2p)D{*ffOZxSw{r$V{j}E;(@mDYJ z%tb5y*;KnKEgR7-JHPV7a^dbC1K&PfikkmAU|7YOk*F%L{y)=3%|S~(3I6_Huauw# zCr31;g(jj;uOB~U(~aHH2Jem~>Npu`s{s#C2UQzLg0O>(hF)4YTP{8dEie9stqckb17_iz%N9+QT 
z&X=;Cqtt?^fG8>75)vi!!(WOP;5F4R`|l7`e<;X@#R#aP8hSBq^$8L00wE$MX2~a- z1bNjL5d|+mN-tIyhTbEp4MQyP;Ni`QwX4M-hN)v^iZLfqs9j-KM_D=+MFwT3(yqi| zSR%{WvEZvPdvq<@l{z?vtwWL64kahplxc+`Hbp(12iFfgT5SK+yX6C-Dv$$F56~~D zD)_An4Ly^eD+>+evk$n&p#=)>)xzKy2&%}-f?JdoV+FX_daj~^fp~Z#Lhr#IOGS+M zMcDn!ndS#|pMAN$Jn+cD6_-VugA5+IqduuKlg*x)KFWMqkUc&z(By!QGd*EeR!5sP2=xhWKUh#7 z@#FOD27iO#Zc_2yvELcKkOtOadh?Y}JpOs*{%I_9?RJfB8GqY|C<*+jJPSjphnkoK zdq5Jy6+^=6gU-oZrL0*>9t}*i2lqF7VyHy=>?V0LJpEWv?Vuy zo>FV*`4AxT*qXy32s#zRm8WN862x_Il3h9arMMP^i(U+b)uylM7;(ZH9D3Z@km^Al zGi>(dN`hHQ^2BDaC$>HO)HBqOD#!#!nfoiD#ezV4IGWhwae#F#T7E1KL0NoU%SZcK zTlLbmZm9Q>J@-G^cXK(=Pk;491@SnNG&FWL(rd5~(jlC^j4@;ad(NHa6Bj-dWy}~RPO-3Zp+UP-z3m}ai zVzrxGI5$JAA0ras))ygr{lMh4{SUyy1Pw}QBuc~2uO-EPbxKbN2x$};D~;o5_(mf; zd;QO|*KF7Yde*1)ShTacCwk!Vafg1nx+hwf+4JWo9&jPMJX$|&g-x^`TgI;^(eDuz zT_(TKTV6aC#qx99Ej5t-D5F(v*R5iV9~-?%NJ zANiuL5cSosHgGv``__tEPZY$dLDf1s=GphI+;ZPIT0d7=ci>&8a;P5351F@17_$kL{1_^~5pAG8}^>14j^2L_K?HFic$$UBg} zz$;9)?7#Rc(UwV;Patt1Eh#@h$_CCB0`vu*>c<|dIfz$_BwBAi)9>Hg@~V(Ujw?C_ zP&<%Y^{`;dLxW5phi7d1c5NSk06o)AAEGHAt)H!Yl3%~^;_2Cd2VU<+ZxY2Emcvnt z=)*1@pO!>Vcmt;GtEa5qQSq~nqw7iQ&lQP-M$}zAJ&A*n>qZ}b04d+nbJQ#}62CTo zQiP~}v?O{m?VSYr&(z2A7Qb{nIz3v?o4@butYOEZbUJKjOaD*qT)(u>05o>M)UwU$ zd*%Da-aYr>-ws65=U*LNIP`pJPIPj`Y2Pd^7xTz%8@rQBnyLE!z`f{)9O zPaDfM<~@q(j*tOAS$@lq|4ahN-}9YA{yW3V_f6e(GJ1#7`iz^Ny>9Kn$p}^}Vd@m* zbGIQJa@N#bZ9?f>E0#ZcIkFEzAc|?TdswjW{}cZm5` z(jp+NW{Gy^bJG16H~kd>W2(=fAeHd?T5{X?U-6r(Q>zPfu!YdV^6d9SDIp4l5Wj=jAL^1C2E_P5Rd zlOF;v4Dz#76Y)n|c%Sma2=UQ7&E&?lm-GT6^luyQ9cl3){E g^Tm7f{cHmj_kAzB@V*Q0yYL>srNg&;QFA;02Op6vjQ{`u literal 0 HcmV?d00001 diff --git a/tests/data/sub-tracking-results/results-F.pkl b/tests/data/sub-tracking-results/results-F.pkl new file mode 100644 index 0000000000000000000000000000000000000000..d5d800d365a93274dab43b6337e2b5bbad46e6e9 GIT binary patch literal 3842 zcmbVPeQXp(6u-9Da;3E`g;EeH<-0&VTOkpm(4!@Q99E!wNU$z*dvmvY?Cu@=0X?)( zfE1PPD`1SEc7f)<;Y7zh|6K{Q6+?A~4GK3WgvlDXO6 
zy!YnKy!Yn4>2V)>SmCl8sSU?0!PJqs1M6PH3M9vrRCic`jch{J!W*z*DW*aD>Ad~4 zn>LU!(OpF(sudkcn2>;}nNmP{TSUbv(JOnON75Vt7@g zp)l4BO3GW6sJP=5=NlGmAQKZTf+7bDZ;FSKO2N`%vbaORLVz&=HJPYcYAo&*Y;{yn zT1L8#;viShu^811Q@2EuQ5M^jl#BuOZIXWKcO9f{1p#`PZlDeR0{GP&qEZbh4w~^8 zW=w^zE~b6G&U_Imc#F2Tx3BpZn%((ZOd9CxqzzIvDKSK(ARU;bI#L))V|yus%>`_2 zo>Pqj{r6fvYoUi=d1;O`ug&8a$>rjpOV{@u`1I;^o=RQcJ?-3q)jp0Y2V;KF&8rVP zhEIIOi_e1*b|`Z@l;)=&i?hF{5`vOyuFc*bK$%o<3gHm63$OG0yMGUwcP0G(sOCIrOPL9-E`g_Yc^#Tl!f&3vw;Vf<~cYW|efX zVBuri@WGU`=js;^^ET|z|G0d#*v%`jvGv93>4P2~y3+jhH)qza;i1G)xsoTEx7*%& zqTmJbch`SWcks>vUWwJW_e}VBRWlDQ9sAw!*wPjr+Q9ghYdmyx_LGcM;`S65%#hqpujEUmgV)Y5!o)xMI-O5d^GD>oh;DfjK{K6zFgEb}Ez_IUrl zwV-{~bAJA_*|+!n%*NJ7&-)ttIHB5?~)`awm@{6AUU3Mw{{9T1(6`jKfu-P%n_i4amy*dEqGX&l{c2tg|b zg#j|Lpr8O&4B87NHYqbqa1c?jMUZMfEDfy77_48?;S7@Hvs5OPLq!gyxPmhZWT?m+ zI`fK|Qa)yCjhzZoL}SaT(2Ps+!%G7!Z$V^n3RP(W?uucNY0ip& zBW;q4*DTITjuUp3V9f9a)S2VS__r~$%Rrrgn&J`Dz%4&Qi%|$>)hV7yVu>lV?St* zkBUiwHcY^TF~+Yyj7m_V{6{p#NKhmJgJ_~cfJ7jY2>xNB2xua{*}c2WT|X$DOXg;O z^WK{`^WK~HW|!x++a)f$k;<-wC73!AyRjZHtVnuHNqM^!d5%rUTGx7PSc+-TgLKX- zbSL$b3GuC1M~Xs8SpsQMWCX;xjsu-KQZXgICS;~BdA?ECDlB+teIHXkYWT> zq@gaX8U?IYofSOFxEH#k~2(~&Z zC@mvhM@f(?=va(vhN)Yk$tVkKN=hbx`Zmcy8g}ibp}Yv)OV?9>I1hdmd#F@F3ZiB* zff-Z&+9&AXfHUtVc{^xpYir9rXm%HFG-+h8gZia%QfP=sK{_x=)g-?&i*0!hn+w=N z-bEjFN6DIFjdTwzFV#tNLSDy6E*A%#OddV3@A&sT)%AB9NMx`%$WaZ$I5z0Q+1u^c zJ~+dR9|I%oQ08_h&CNa*XMdAYg3@ZP8M;4!a;eg%P}9Nh@5o2cVl#PY3~Huj0cl!^ z;h|2}4sGDYU$)x}2f|=5;*D4`_?HGwTVN=%$Q&f>Vs69yBb?8AsZ5$GRWiSlre}NA zqqd*eR({d7_Vuch_xLk6F8+3E(~UY_fq(C|E&8fxHV-X5cCmlqtF1gV?o$|_;lW^~ zL(MvCDAbSx!MVTr1~k;iYhT9N$|fGNP4Ue}9%^E(zlMhvHb6VGg@@i^;y+u$L-(U% zW|efXVEzN!aPO_l`+a+^@iv^%*VOP`pPN@eYyS1e3oTwAnzUqE44-b{p>f;r^0J8cB=XP~pF`U|*MU+t%n!TAX2YDzdAO@lSYg_W!B9|I69hV9(9@Nq2m8@UNvS*BpJaG3Hs1MR zR&eX31=X&?4Z*>4A-~HDx0B<+?y!g7y-6V_k%zKVZxr`pBM?CbBJ6v~qSz2Iq;$UEXt)OBP*#Xf3sUMjp)U7)yDHJ*t)MXDVp)`*77(&!aKw*GPEGQ^~6@v~yiA{dA@rLjdpiYQ?< z8GvB`D_8|*g|&;eSaB1=#g52>lz1UFpwgC2+GKfE-+@)krEr_f|gz`Y49GR;}>Z)8n!@j8UFl;eb5r5JO( 
z0d)*>`@w-qb{$82U2+$SBu!>Jeq12ME$|Mfnh$+^D12nzswvTi%;`dIf0>-t%@7rNXdVlgm3 zP`@xxZ@&>e2oem9i9ksT?FX6xd5C3`!mUWL*xY_L=Y@x%0Pi4Wi<6Rgx8<*gzcbKB=A4$ CaAA4? literal 0 HcmV?d00001 diff --git a/tests/data/sub-tracking-results/sub-tracking.csv b/tests/data/sub-tracking-results/sub-tracking.csv index d20c760..f147920 100644 --- a/tests/data/sub-tracking-results/sub-tracking.csv +++ b/tests/data/sub-tracking-results/sub-tracking.csv @@ -21,6 +21,7 @@ process_id,code_block_name,position,timestamp 1723815,X,1,1745449624.4956949 1723727,Y,0,1745449624.6481984 1723727,Y,1,1745449626.4954815 +1723727,Y,0,1745449627.6054815 1723727,tmp.py:9,0,1745449626.5598955 1723727,tmp.py:9,1,1745449627.653375 1723727,tmp.py:38,0,1745449627.6730132 diff --git a/tests/test_sub_tracker.py b/tests/test_sub_tracker.py index e9e8ea6..d1dbbf8 100644 --- a/tests/test_sub_tracker.py +++ b/tests/test_sub_tracker.py @@ -102,11 +102,15 @@ def test_analysis(format_: str): sub_tracking_file = f'{folder}/sub-tracking.{format_}' analyzer = gput.SubTrackingAnalyzer(tracking_file, sub_tracking_file) actual_results = analyzer.sub_tracking_results() - with open(f'{folder}/results.json', 'r') as file: + _assert_results_match(f'{folder}/results.json', f'{folder}/results.txt', actual_results) + + +def _assert_results_match(expected_json_path: str, expected_text_path: str, actual_results): + with open(expected_json_path, 'r') as file: expected_json_results = json.load(file) diff = deepd.DeepDiff(expected_json_results, actual_results.to_json(), significant_digits=12) assert not diff - with open(f'{folder}/results.txt', 'r') as file: + with open(expected_text_path, 'r') as file: expected_str_results = file.read() assert expected_str_results == str(actual_results) @@ -135,3 +139,76 @@ def test_combine(format_: str): analyzer.combine_sub_tracking_files(files) assert str(error.value) == f'Cannot create sub-tracking file {sub_tracking_file}. File already exists.' 
os.remove(sub_tracking_file) + + +@pt.fixture(name='statistic', params=['std', 'min', 'max', 'mean']) +def get_statistic(request): + yield request.param + + +def _get_tracking_comparison(names: tuple[str, str]) -> gput.TrackingComparison: + folder = 'tests/data/sub-tracking-results' + file_path = f'{folder}/results-{{}}.pkl' + file_path_map = {name: file_path.format(name) for name in names} + return gput.TrackingComparison(file_path_map) + + +def test_comparison(caplog, statistic: str): + comparison = _get_tracking_comparison(('A', 'B')) + actual_results = comparison.compare(statistic) + folder = 'tests/data/sub-tracking-results' + _assert_results_match( + f'{folder}/comparison_{statistic}.json', f'{folder}/comparison_{statistic}.txt', + actual_results + ) + expected_warnings = [ + 'Code block name "tmp.py:9" of tracking session "A" matched with code block name "tmp.py:7" of tracking session "B" but they differ by line number. If these code blocks were not meant to match, their comparison will not be valid and their names must be disambiguated.', + 'Code block name "tmp.py:38" of tracking session "A" matched with code block name "tmp.py:37" of tracking session "B" but they differ by line number. If these code blocks were not meant to match, their comparison will not be valid and their names must be disambiguated.' + ] + utils._assert_warnings(caplog, expected_warnings) + + +def test_errors(): + with pt.raises(ValueError) as error: + _get_tracking_comparison(('A', 'C')) + assert str(error.value) == 'All sub-tracking results must have the same number of code blocks. The first has 4 code blocks but tracking session "C" has 5 code blocks.' 
+ with pt.raises(ValueError) as error: + _get_tracking_comparison(('A', 'D')) + assert str(error.value) == 'Code block name "tmp.py:38" of tracking session "A" does not match code block name "tmp.py:abc" of tracking session "D"' + with pt.raises(ValueError) as error: + _get_tracking_comparison(('A', 'E')) + assert str(error.value) == 'Code block name "tmp.py:9" of tracking session "A" does not match code block name "temp.py:123" of tracking session "E"' + comparison = _get_tracking_comparison(('F', 'G')) + comparison.compare() + with pt.raises(ValueError) as error: + comparison.compare('invalid') + assert str(error.value) == "Invalid summary statistic 'invalid'. Valid values are min max mean std." + + +def test_overwrite(): + file_name = 'repeat-file.csv' + open(file_name, 'w').close() + with pt.raises(FileExistsError) as error: + with gput.SubTracker(sub_tracking_file=file_name): + pass # pragma: nocover + assert str(error.value) == 'File repeat-file.csv already exists. Set overwrite to True to overwrite the existing file.' + with gput.SubTracker(sub_tracking_file=file_name, overwrite=True): + pass # pragma: nocover + assert os.path.isfile(file_name) + os.remove(file_name) + with pt.raises(FileNotFoundError) as error: + with gput.SubTracker(sub_tracking_file=file_name): + pass # pragma: nocover + assert str(error.value) == 'The file repeat-file.csv was removed in the middle of writing data to it.' + + +def test_invalid_file(): + file_path = 'tests/data/sub-tracking-results/invalid{}.csv' + analyzer = gput.SubTrackingAnalyzer(None, sub_tracking_file=file_path.format(1)) + with pt.raises(ValueError) as error: + analyzer.load_timestamp_pairs('X') + assert str(error.value) == 'Sub-tracking file is invalid. Detected timestamp pair (1745449613.532592, 1745449609.7528224) with differing process IDs: 1723811 and 1723812.' 
+ analyzer = gput.SubTrackingAnalyzer(None, sub_tracking_file=file_path.format(2)) + with pt.raises(ValueError) as error: + analyzer.load_timestamp_pairs('X') + assert str(error.value) == 'Sub-tracking file is invalid. Detected timestamp pair (1745449609.7528222, 1745449613.5325918) of process ID 1723811 with a start time greater than the stop time.' diff --git a/tests/test_tracker.py b/tests/test_tracker.py index 4c9542e..20908ce 100644 --- a/tests/test_tracker.py +++ b/tests/test_tracker.py @@ -248,7 +248,7 @@ def side_effect_func(command, *_, **__) -> None: mocker.patch('gpu_tracker._helper_classes.subp.check_output', side_effect=side_effect_func) gput.Tracker() gput.Tracker() - _assert_warnings( + utils._assert_warnings( caplog, [ 'The nvidia-smi command is installed but cannot connect to a GPU. The GPU RAM and GPU utilization values will remain 0.0.', @@ -284,13 +284,7 @@ def test_main_process_warnings(mocker, caplog): 'Tracking is stopping and it has been 11.0 seconds since the temporary tracking results file was last updated. ' 'Resource usage was not updated during that time.') assert not os.path.isfile(tracker._resource_usage_file) - _assert_warnings(caplog, expected_warnings) - - -def _assert_warnings(caplog, expected_warnings: list[str]): - for expected_warning, record in zip(expected_warnings, caplog.records): - assert record.levelname == 'WARNING' - assert record.message == expected_warning + utils._assert_warnings(caplog, expected_warnings) @pt.fixture(name='disable_logs', params=[True, False]) @@ -331,7 +325,7 @@ def test_tracking_process_warnings(mocker, disable_logs: bool, caplog): gpu_unavailable_message, 'The target process of ID 666 ended before tracking could begin.', gpu_unavailable_message, 'Failed to track a process (PID: 777) that does not exist. 
This possibly resulted from the process completing before it could be tracked.', 'The following uncaught exception occurred in the tracking process:'] - _assert_warnings(caplog, expected_warnings) + utils._assert_warnings(caplog, expected_warnings) def test_validate_arguments(mocker): @@ -398,3 +392,13 @@ def is_set() -> bool: tracker._tracking_process.run() assert os.path.isfile(file_path) os.remove(file_path) + + +def test_formatting_before_tracking_stops(): + with pt.raises(RuntimeError) as error: + tracker = gput.Tracker() + str(tracker) + assert str(error.value) == ( + 'Cannot display the tracker in string or JSON format before tracking completes. Exit the content manager or call the stop() ' + 'method before calling to_json() or str()' + ) diff --git a/tests/utils.py b/tests/utils.py index 75fa4fa..f1acc37 100644 --- a/tests/utils.py +++ b/tests/utils.py @@ -11,6 +11,12 @@ def assert_args_list(mock, expected_args_list: list[tuple | dict], use_kwargs: b assert actual_args_list == expected_args_list +def _assert_warnings(caplog, expected_warnings: list[str]): + for expected_warning, record in zip(expected_warnings, caplog.records): + assert record.levelname == 'WARNING' + assert record.message == expected_warning + + def test_tracking_file( actual_tracking_file: str, expected_tracking_file: str, excluded_col: str | None = None, excluded_col_test=None, is_sub_tracking: bool = False): From f1a6aa01daebab6fb634ed45aeec5d773922b3a7 Mon Sep 17 00:00:00 2001 From: erikhuck Date: Mon, 19 May 2025 18:29:21 -0400 Subject: [PATCH 7/7] Completes tests for the next release --- .github/workflows/tests.yml | 2 +- requirements.txt | 1 + src/gpu_tracker/__main__.py | 2 +- src/gpu_tracker/_helper_classes.py | 4 +- src/gpu_tracker/sub_tracker.py | 10 +- src/gpu_tracker/tracker.py | 5 + .../sub-tracking-results/comparison_max.json | 1 + .../sub-tracking-results/comparison_max.txt | 351 ++++++++++++++++++ .../sub-tracking-results/comparison_mean.json | 1 + 
.../sub-tracking-results/comparison_mean.txt | 351 ++++++++++++++++++ .../sub-tracking-results/comparison_min.json | 1 + .../sub-tracking-results/comparison_min.txt | 351 ++++++++++++++++++ .../sub-tracking-results/comparison_std.json | 1 + .../sub-tracking-results/comparison_std.txt | 351 ++++++++++++++++++ .../files-to-combine/main.sub-tracking.csv | 1 + tests/data/sub-tracking-results/invalid1.csv | 6 + tests/data/sub-tracking-results/invalid2.csv | 4 + tests/data/sub-tracking-results/results-A.pkl | Bin 11883 -> 12036 bytes tests/data/sub-tracking-results/results-B.pkl | Bin 11883 -> 12072 bytes tests/data/sub-tracking-results/results-C.pkl | Bin 0 -> 12084 bytes tests/data/sub-tracking-results/results-D.pkl | Bin 0 -> 12117 bytes tests/data/sub-tracking-results/results-E.pkl | Bin 0 -> 12117 bytes tests/data/sub-tracking-results/results-F.pkl | Bin 0 -> 3842 bytes tests/data/sub-tracking-results/results-G.pkl | Bin 0 -> 3842 bytes .../sub-tracking-results/sub-tracking.csv | 1 + tests/test_cli.py | 10 +- tests/test_sub_tracker.py | 87 ++++- tests/test_tracker.py | 22 +- tests/utils.py | 6 + 29 files changed, 1539 insertions(+), 30 deletions(-) create mode 100644 tests/data/sub-tracking-results/comparison_max.json create mode 100644 tests/data/sub-tracking-results/comparison_max.txt create mode 100644 tests/data/sub-tracking-results/comparison_mean.json create mode 100644 tests/data/sub-tracking-results/comparison_mean.txt create mode 100644 tests/data/sub-tracking-results/comparison_min.json create mode 100644 tests/data/sub-tracking-results/comparison_min.txt create mode 100644 tests/data/sub-tracking-results/comparison_std.json create mode 100644 tests/data/sub-tracking-results/comparison_std.txt create mode 100644 tests/data/sub-tracking-results/invalid1.csv create mode 100644 tests/data/sub-tracking-results/invalid2.csv create mode 100644 tests/data/sub-tracking-results/results-C.pkl create mode 100644 tests/data/sub-tracking-results/results-D.pkl create 
mode 100644 tests/data/sub-tracking-results/results-E.pkl create mode 100644 tests/data/sub-tracking-results/results-F.pkl create mode 100644 tests/data/sub-tracking-results/results-G.pkl diff --git a/.github/workflows/tests.yml b/.github/workflows/tests.yml index e7bf827..77c876f 100644 --- a/.github/workflows/tests.yml +++ b/.github/workflows/tests.yml @@ -24,7 +24,7 @@ jobs: - name: Install testing environment run: | python3 -m pip install --upgrade pip - python3 -m pip install pytest pytest-mock pytest-cov + python3 -m pip install pytest pytest-mock pytest-cov deepdiff - name: Install package uses: Wandalen/wretry.action@master with: diff --git a/requirements.txt b/requirements.txt index 3fae670..b8f6ba3 100644 --- a/requirements.txt +++ b/requirements.txt @@ -2,3 +2,4 @@ psutil>=6.0.0 docopt>=0.6.2 pandas>=2.2.3 SQLAlchemy>=2.0.39 +tqdm>=4.67.1 diff --git a/src/gpu_tracker/__main__.py b/src/gpu_tracker/__main__.py index c4b6db4..688c918 100644 --- a/src/gpu_tracker/__main__.py +++ b/src/gpu_tracker/__main__.py @@ -60,7 +60,7 @@ def main(): files = args['-p'] if len(files) == 1: [directory] = files - files = [os.path.join(directory, file) for file in os.listdir(directory)] + files = sorted(os.path.join(directory, file) for file in os.listdir(directory)) SubTrackingAnalyzer(None, args['--stf']).combine_sub_tracking_files(files) else: if args['--cconfig'] is not None: diff --git a/src/gpu_tracker/_helper_classes.py b/src/gpu_tracker/_helper_classes.py index 1ae3188..5d91276 100644 --- a/src/gpu_tracker/_helper_classes.py +++ b/src/gpu_tracker/_helper_classes.py @@ -260,7 +260,7 @@ def load_timestamp_pairs(self, code_block_name: str) -> list[tuple[float, float] error_prefix = f'Sub-tracking file is invalid. 
Detected timestamp pair ({start_time}, {stop_time})' if pid1 != pid2: raise ValueError(f'{error_prefix} with differing process IDs: {pid1} and {pid2}.') - if start_time > stop_time: + if timestamp1.position > timestamp2.position: raise ValueError(f'{error_prefix} of process ID {pid1} with a start time greater than the stop time.') timestamp_pairs.append((start_time, stop_time)) return timestamp_pairs @@ -306,8 +306,6 @@ def timepoints(self): return self._timepoints def _write_static_data(self, data: _StaticData): - if self._file_name in _DataProxy._files_w_data: - raise RuntimeError('The static data for a CSV file must be created before the dynamic data.') static_data = dclass.asdict(data) self._create_table(static_data) self._write_data(static_data) diff --git a/src/gpu_tracker/sub_tracker.py b/src/gpu_tracker/sub_tracker.py index c8c0b8c..5d9f633 100644 --- a/src/gpu_tracker/sub_tracker.py +++ b/src/gpu_tracker/sub_tracker.py @@ -204,9 +204,7 @@ def __init__(self, file_path_map: dict[str, str]): :param file_path_map: Mapping of the name of each tracking session to the path of the pickle file containing the ``SubTrackingResults`` of the corresponding tracking sessions. Used to construct the ``results_map`` attribute. :raises ValueError: Raised if the code block results of each tracking session don't match. """ - for name in file_path_map.keys(): - self._name1 = name - break + [self._name1] = sorted(file_path_map.keys())[:1] self.results_map = dict[str, SubTrackingResults]() for name, file in file_path_map.items(): with open(file, 'rb') as file: @@ -221,7 +219,7 @@ def __init__(self, file_path_map: dict[str, str]): code_block_results2 = TrackingComparison._sort_code_block_results(results) if len(code_block_results1) != len(code_block_results2): raise ValueError( - f'All sub-tracking results must have the same number of code blocks. First has {len(code_block_results1)}' + f'All sub-tracking results must have the same number of code blocks. 
The first has {len(code_block_results1)}' f' code blocks but tracking session "{name2}" has {len(code_block_results2)} code blocks.' ) for code_block_results1_, code_block_results2_ in zip(code_block_results1, code_block_results2): @@ -273,7 +271,7 @@ def compare(self, statistic: str = 'mean') -> ComparisonResults: ) code_block_compute_times = TrackingComparison._get_code_block_comparisons( self.results_map, lambda code_block_result: code_block_result.compute_time[statistic].item() - ) if results1.code_block_results else dict() + ) if results1.code_block_results else dict[str, pd.Series]() return ComparisonResults( overall_resource_usage=overall_resource_usages, code_block_resource_usage=code_block_resource_usages, code_block_compute_time=code_block_compute_times @@ -319,7 +317,7 @@ def _get_code_block_comparisons(name_to_results: dict[str, SubTrackingResults], ] for name, results in name_to_results.items() ] ): - code_block_name = f'{" -> ".join({code_block_results.name for _, code_block_results in matching_code_block_results})}' + code_block_name = f'{" -> ".join(sorted({code_block_results.name for _, code_block_results in matching_code_block_results}))}' code_block_comparison = { name: get_statistic(code_block_results) for name, code_block_results in matching_code_block_results } diff --git a/src/gpu_tracker/tracker.py b/src/gpu_tracker/tracker.py index 273c3c3..f115a39 100644 --- a/src/gpu_tracker/tracker.py +++ b/src/gpu_tracker/tracker.py @@ -468,6 +468,11 @@ def to_json(self) -> dict[str, dict]: """ Constructs a dictionary of the computational-resource-usage measurements and their units. """ + if self.resource_usage is None: + raise RuntimeError( + 'Cannot display the tracker in string or JSON format before tracking completes. 
Exit the content manager or call the ' + 'stop() method before calling to_json() or str()' + ) return dclass.asdict(self.resource_usage) diff --git a/tests/data/sub-tracking-results/comparison_max.json b/tests/data/sub-tracking-results/comparison_max.json new file mode 100644 index 0000000..00aa5d1 --- /dev/null +++ b/tests/data/sub-tracking-results/comparison_max.json @@ -0,0 +1 @@ +{"overall_resource_usage": {"main_ram": {"A": 1.5764930560000001, "B": 1.9856220160000002}, "descendants_ram": {"B": 11.457716224, "A": 11.671842816000002}, "combined_ram": {"B": 11.522973696, "A": 11.736928256}, "system_ram": {"A": 60.485840896000006, "B": 61.126602752000004}, "main_gpu_ram": {"A": 0.0, "B": 0.0}, "descendants_gpu_ram": {"A": 0.0, "B": 0.0}, "combined_gpu_ram": {"A": 0.0, "B": 0.0}, "system_gpu_ram": {"A": 0.216, "B": 0.216}, "gpu_sum_utilization_percent": {"A": 0.0, "B": 0.0}, "gpu_hardware_utilization_percent": {"A": 0.0, "B": 0.0}, "main_n_threads": {"A": 15, "B": 15}, "descendants_n_threads": {"A": 36, "B": 36}, "combined_n_threads": {"A": 51, "B": 51}, "cpu_system_sum_utilization_percent": {"B": 486.1, "A": 535.8}, "cpu_system_hardware_utilization_percent": {"B": 40.50833333333333, "A": 44.65}, "cpu_main_sum_utilization_percent": {"B": 99.1, "A": 101.6}, "cpu_main_hardware_utilization_percent": {"B": 8.258333333333333, "A": 8.466666666666667}, "cpu_descendants_sum_utilization_percent": {"B": 284.70000000000005, "A": 300.0}, "cpu_descendants_hardware_utilization_percent": {"B": 23.725000000000005, "A": 25.0}, "cpu_combined_sum_utilization_percent": {"B": 284.70000000000005, "A": 300.0}, "cpu_combined_hardware_utilization_percent": {"B": 23.725000000000005, "A": 25.0}}, "code_block_resource_usage": {"main_ram": {"X": {"A": 0.08691712, "B": 0.087109632}, "Y": {"A": 1.5764930560000001, "B": 1.9856220160000002}, "tmp.py:7 -> tmp.py:9": {"B": 0.8887336960000001, "A": 1.21389056}, "tmp.py:37 -> tmp.py:38": {"A": 1.2588974080000002, "B": 1.2777676800000002}}, 
"descendants_ram": {"X": {"B": 11.457716224, "A": 11.671842816000002}, "Y": {"A": 0.0, "B": 0.0}, "tmp.py:7 -> tmp.py:9": {"A": 0.0, "B": 0.0}, "tmp.py:37 -> tmp.py:38": {"A": 0.0, "B": 0.0}}, "combined_ram": {"X": {"B": 11.522973696, "A": 11.736928256}, "Y": {"A": 1.5764930560000001, "B": 1.9856220160000002}, "tmp.py:7 -> tmp.py:9": {"B": 0.8887336960000001, "A": 1.21389056}, "tmp.py:37 -> tmp.py:38": {"A": 1.2588974080000002, "B": 1.2777676800000002}}, "system_ram": {"X": {"A": 60.485840896000006, "B": 61.126602752000004}, "Y": {"A": 50.91651584, "B": 53.325389824000005}, "tmp.py:7 -> tmp.py:9": {"A": 50.539143168, "B": 52.042661888000005}, "tmp.py:37 -> tmp.py:38": {"A": 51.044614144, "B": 52.36287488000001}}, "main_gpu_ram": {"X": {"A": 0.0, "B": 0.0}, "Y": {"A": 0.0, "B": 0.0}, "tmp.py:7 -> tmp.py:9": {"A": 0.0, "B": 0.0}, "tmp.py:37 -> tmp.py:38": {"A": 0.0, "B": 0.0}}, "descendants_gpu_ram": {"X": {"A": 0.0, "B": 0.0}, "Y": {"A": 0.0, "B": 0.0}, "tmp.py:7 -> tmp.py:9": {"A": 0.0, "B": 0.0}, "tmp.py:37 -> tmp.py:38": {"A": 0.0, "B": 0.0}}, "combined_gpu_ram": {"X": {"A": 0.0, "B": 0.0}, "Y": {"A": 0.0, "B": 0.0}, "tmp.py:7 -> tmp.py:9": {"A": 0.0, "B": 0.0}, "tmp.py:37 -> tmp.py:38": {"A": 0.0, "B": 0.0}}, "system_gpu_ram": {"X": {"A": 0.216, "B": 0.216}, "Y": {"A": 0.216, "B": 0.216}, "tmp.py:7 -> tmp.py:9": {"A": 0.216, "B": 0.216}, "tmp.py:37 -> tmp.py:38": {"A": 0.216, "B": 0.216}}, "gpu_sum_utilization_percent": {"X": {"A": 0.0, "B": 0.0}, "Y": {"A": 0.0, "B": 0.0}, "tmp.py:7 -> tmp.py:9": {"A": 0.0, "B": 0.0}, "tmp.py:37 -> tmp.py:38": {"A": 0.0, "B": 0.0}}, "gpu_hardware_utilization_percent": {"X": {"A": 0.0, "B": 0.0}, "Y": {"A": 0.0, "B": 0.0}, "tmp.py:7 -> tmp.py:9": {"A": 0.0, "B": 0.0}, "tmp.py:37 -> tmp.py:38": {"A": 0.0, "B": 0.0}}, "main_n_threads": {"X": {"A": 15.0, "B": 15.0}, "Y": {"A": 13.0, "B": 13.0}, "tmp.py:7 -> tmp.py:9": {"A": 13.0, "B": 13.0}, "tmp.py:37 -> tmp.py:38": {"A": 13.0, "B": 13.0}}, "descendants_n_threads": {"X": {"A": 
36.0, "B": 36.0}, "Y": {"A": 0.0, "B": 0.0}, "tmp.py:7 -> tmp.py:9": {"A": 0.0, "B": 0.0}, "tmp.py:37 -> tmp.py:38": {"A": 0.0, "B": 0.0}}, "combined_n_threads": {"X": {"A": 51.0, "B": 51.0}, "Y": {"A": 13.0, "B": 13.0}, "tmp.py:7 -> tmp.py:9": {"A": 13.0, "B": 13.0}, "tmp.py:37 -> tmp.py:38": {"A": 13.0, "B": 13.0}}, "cpu_system_sum_utilization_percent": {"X": {"B": 486.1, "A": 535.8}, "Y": {"B": 149.20000000000002, "A": 158.79999999999998}, "tmp.py:7 -> tmp.py:9": {"A": 115.69999999999999, "B": 116.5}, "tmp.py:37 -> tmp.py:38": {"B": 165.8, "A": 405.5}}, "cpu_system_hardware_utilization_percent": {"X": {"B": 40.50833333333333, "A": 44.65}, "Y": {"B": 12.433333333333335, "A": 13.233333333333333}, "tmp.py:7 -> tmp.py:9": {"A": 9.641666666666666, "B": 9.708333333333334}, "tmp.py:37 -> tmp.py:38": {"B": 13.816666666666668, "A": 33.791666666666664}}, "cpu_main_sum_utilization_percent": {"X": {"A": 0.0, "B": 0.0}, "Y": {"B": 99.1, "A": 101.6}, "tmp.py:7 -> tmp.py:9": {"A": 89.5, "B": 94.1}, "tmp.py:37 -> tmp.py:38": {"B": 97.7, "A": 97.9}}, "cpu_main_hardware_utilization_percent": {"X": {"A": 0.0, "B": 0.0}, "Y": {"B": 8.258333333333333, "A": 8.466666666666667}, "tmp.py:7 -> tmp.py:9": {"A": 7.458333333333333, "B": 7.841666666666666}, "tmp.py:37 -> tmp.py:38": {"B": 8.141666666666667, "A": 8.158333333333333}}, "cpu_descendants_sum_utilization_percent": {"X": {"B": 284.70000000000005, "A": 300.0}, "Y": {"A": 0.0, "B": 0.0}, "tmp.py:7 -> tmp.py:9": {"A": 0.0, "B": 0.0}, "tmp.py:37 -> tmp.py:38": {"A": 0.0, "B": 0.0}}, "cpu_descendants_hardware_utilization_percent": {"X": {"B": 23.725000000000005, "A": 25.0}, "Y": {"A": 0.0, "B": 0.0}, "tmp.py:7 -> tmp.py:9": {"A": 0.0, "B": 0.0}, "tmp.py:37 -> tmp.py:38": {"A": 0.0, "B": 0.0}}, "cpu_combined_sum_utilization_percent": {"X": {"B": 284.70000000000005, "A": 300.0}, "Y": {"B": 99.1, "A": 101.6}, "tmp.py:7 -> tmp.py:9": {"A": 89.5, "B": 94.1}, "tmp.py:37 -> tmp.py:38": {"B": 97.7, "A": 97.9}}, 
"cpu_combined_hardware_utilization_percent": {"X": {"B": 23.725000000000005, "A": 25.0}, "Y": {"B": 8.258333333333333, "A": 8.466666666666667}, "tmp.py:7 -> tmp.py:9": {"A": 7.458333333333333, "B": 7.841666666666666}, "tmp.py:37 -> tmp.py:38": {"B": 8.141666666666667, "A": 8.158333333333333}}}, "code_block_compute_time": {"X": {"A": 3.8075716495513916, "B": 3.8075716495513916}, "Y": {"A": 1.847283124923706, "B": 1.847283124923706}, "tmp.py:7 -> tmp.py:9": {"A": 1.0934793949127197, "B": 1.0934793949127197}, "tmp.py:37 -> tmp.py:38": {"A": 1.2065885066986084, "B": 1.2065885066986084}}} \ No newline at end of file diff --git a/tests/data/sub-tracking-results/comparison_max.txt b/tests/data/sub-tracking-results/comparison_max.txt new file mode 100644 index 0000000..c2d23da --- /dev/null +++ b/tests/data/sub-tracking-results/comparison_max.txt @@ -0,0 +1,351 @@ +Overall Resource Usage: + Main Ram: + A B + 1.57649306 1.98562202 + Descendants Ram: + B A + 11.45771622 11.67184282 + Combined Ram: + B A + 11.5229737 11.73692826 + System Ram: + A B + 60.4858409 61.12660275 + Main Gpu Ram: + A B + 0.0 0.0 + Descendants Gpu Ram: + A B + 0.0 0.0 + Combined Gpu Ram: + A B + 0.0 0.0 + System Gpu Ram: + A B + 0.216 0.216 + Gpu Sum Utilization Percent: + A B + 0.0 0.0 + Gpu Hardware Utilization Percent: + A B + 0.0 0.0 + Main N Threads: + A B + 15 15 + Descendants N Threads: + A B + 36 36 + Combined N Threads: + A B + 51 51 + Cpu System Sum Utilization Percent: + B A + 486.1 535.8 + Cpu System Hardware Utilization Percent: + B A + 40.50833333 44.65 + Cpu Main Sum Utilization Percent: + B A + 99.1 101.6 + Cpu Main Hardware Utilization Percent: + B A + 8.25833333 8.46666667 + Cpu Descendants Sum Utilization Percent: + B A + 284.7 300.0 + Cpu Descendants Hardware Utilization Percent: + B A + 23.725 25.0 + Cpu Combined Sum Utilization Percent: + B A + 284.7 300.0 + Cpu Combined Hardware Utilization Percent: + B A + 23.725 25.0 +Code Block Resource Usage: + Main Ram: + X: + A B + 
0.08691712 0.08710963 + Y: + A B + 1.57649306 1.98562202 + tmp.py:7 -> tmp.py:9: + B A + 0.8887337 1.21389056 + tmp.py:37 -> tmp.py:38: + A B + 1.25889741 1.27776768 + Descendants Ram: + X: + B A + 11.45771622 11.67184282 + Y: + A B + 0.0 0.0 + tmp.py:7 -> tmp.py:9: + A B + 0.0 0.0 + tmp.py:37 -> tmp.py:38: + A B + 0.0 0.0 + Combined Ram: + X: + B A + 11.5229737 11.73692826 + Y: + A B + 1.57649306 1.98562202 + tmp.py:7 -> tmp.py:9: + B A + 0.8887337 1.21389056 + tmp.py:37 -> tmp.py:38: + A B + 1.25889741 1.27776768 + System Ram: + X: + A B + 60.4858409 61.12660275 + Y: + A B + 50.91651584 53.32538982 + tmp.py:7 -> tmp.py:9: + A B + 50.53914317 52.04266189 + tmp.py:37 -> tmp.py:38: + A B + 51.04461414 52.36287488 + Main Gpu Ram: + X: + A B + 0.0 0.0 + Y: + A B + 0.0 0.0 + tmp.py:7 -> tmp.py:9: + A B + 0.0 0.0 + tmp.py:37 -> tmp.py:38: + A B + 0.0 0.0 + Descendants Gpu Ram: + X: + A B + 0.0 0.0 + Y: + A B + 0.0 0.0 + tmp.py:7 -> tmp.py:9: + A B + 0.0 0.0 + tmp.py:37 -> tmp.py:38: + A B + 0.0 0.0 + Combined Gpu Ram: + X: + A B + 0.0 0.0 + Y: + A B + 0.0 0.0 + tmp.py:7 -> tmp.py:9: + A B + 0.0 0.0 + tmp.py:37 -> tmp.py:38: + A B + 0.0 0.0 + System Gpu Ram: + X: + A B + 0.216 0.216 + Y: + A B + 0.216 0.216 + tmp.py:7 -> tmp.py:9: + A B + 0.216 0.216 + tmp.py:37 -> tmp.py:38: + A B + 0.216 0.216 + Gpu Sum Utilization Percent: + X: + A B + 0.0 0.0 + Y: + A B + 0.0 0.0 + tmp.py:7 -> tmp.py:9: + A B + 0.0 0.0 + tmp.py:37 -> tmp.py:38: + A B + 0.0 0.0 + Gpu Hardware Utilization Percent: + X: + A B + 0.0 0.0 + Y: + A B + 0.0 0.0 + tmp.py:7 -> tmp.py:9: + A B + 0.0 0.0 + tmp.py:37 -> tmp.py:38: + A B + 0.0 0.0 + Main N Threads: + X: + A B + 15.0 15.0 + Y: + A B + 13.0 13.0 + tmp.py:7 -> tmp.py:9: + A B + 13.0 13.0 + tmp.py:37 -> tmp.py:38: + A B + 13.0 13.0 + Descendants N Threads: + X: + A B + 36.0 36.0 + Y: + A B + 0.0 0.0 + tmp.py:7 -> tmp.py:9: + A B + 0.0 0.0 + tmp.py:37 -> tmp.py:38: + A B + 0.0 0.0 + Combined N Threads: + X: + A B + 51.0 51.0 + Y: + A B + 13.0 13.0 + 
tmp.py:7 -> tmp.py:9: + A B + 13.0 13.0 + tmp.py:37 -> tmp.py:38: + A B + 13.0 13.0 + Cpu System Sum Utilization Percent: + X: + B A + 486.1 535.8 + Y: + B A + 149.2 158.8 + tmp.py:7 -> tmp.py:9: + A B + 115.7 116.5 + tmp.py:37 -> tmp.py:38: + B A + 165.8 405.5 + Cpu System Hardware Utilization Percent: + X: + B A + 40.50833333 44.65 + Y: + B A + 12.43333333 13.23333333 + tmp.py:7 -> tmp.py:9: + A B + 9.64166667 9.70833333 + tmp.py:37 -> tmp.py:38: + B A + 13.81666667 33.79166667 + Cpu Main Sum Utilization Percent: + X: + A B + 0.0 0.0 + Y: + B A + 99.1 101.6 + tmp.py:7 -> tmp.py:9: + A B + 89.5 94.1 + tmp.py:37 -> tmp.py:38: + B A + 97.7 97.9 + Cpu Main Hardware Utilization Percent: + X: + A B + 0.0 0.0 + Y: + B A + 8.25833333 8.46666667 + tmp.py:7 -> tmp.py:9: + A B + 7.45833333 7.84166667 + tmp.py:37 -> tmp.py:38: + B A + 8.14166667 8.15833333 + Cpu Descendants Sum Utilization Percent: + X: + B A + 284.7 300.0 + Y: + A B + 0.0 0.0 + tmp.py:7 -> tmp.py:9: + A B + 0.0 0.0 + tmp.py:37 -> tmp.py:38: + A B + 0.0 0.0 + Cpu Descendants Hardware Utilization Percent: + X: + B A + 23.725 25.0 + Y: + A B + 0.0 0.0 + tmp.py:7 -> tmp.py:9: + A B + 0.0 0.0 + tmp.py:37 -> tmp.py:38: + A B + 0.0 0.0 + Cpu Combined Sum Utilization Percent: + X: + B A + 284.7 300.0 + Y: + B A + 99.1 101.6 + tmp.py:7 -> tmp.py:9: + A B + 89.5 94.1 + tmp.py:37 -> tmp.py:38: + B A + 97.7 97.9 + Cpu Combined Hardware Utilization Percent: + X: + B A + 23.725 25.0 + Y: + B A + 8.25833333 8.46666667 + tmp.py:7 -> tmp.py:9: + A B + 7.45833333 7.84166667 + tmp.py:37 -> tmp.py:38: + B A + 8.14166667 8.15833333 +Code Block Compute Time: + X: + A B + 3.80757165 3.80757165 + Y: + A B + 1.84728312 1.84728312 + tmp.py:7 -> tmp.py:9: + A B + 1.09347939 1.09347939 + tmp.py:37 -> tmp.py:38: + A B + 1.20658851 1.20658851 diff --git a/tests/data/sub-tracking-results/comparison_mean.json b/tests/data/sub-tracking-results/comparison_mean.json new file mode 100644 index 0000000..ef90c75 --- /dev/null +++ 
b/tests/data/sub-tracking-results/comparison_mean.json @@ -0,0 +1 @@ +{"overall_resource_usage": {"main_ram": {"A": 0.6006015590399999, "B": 0.61011197952}, "descendants_ram": {"B": 2.43691864064, "A": 2.50271186944}, "combined_ram": {"B": 3.036548300799999, "A": 3.09283651584}, "system_ram": {"A": 52.59960434688001, "B": 53.58121992192}, "main_gpu_ram": {"A": 0.0, "B": 0.0}, "descendants_gpu_ram": {"A": 0.0, "B": 0.0}, "combined_gpu_ram": {"A": 0.0, "B": 0.0}, "system_gpu_ram": {"A": 0.2160000000000001, "B": 0.2160000000000001}, "gpu_sum_utilization_percent": {"A": 0.0, "B": 0.0}, "gpu_hardware_utilization_percent": {"A": 0.0, "B": 0.0}, "main_n_threads": {"A": 13.96, "B": 13.96}, "descendants_n_threads": {"A": 17.28, "B": 17.28}, "combined_n_threads": {"A": 31.24, "B": 31.24}, "cpu_system_sum_utilization_percent": {"B": 230.76, "A": 242.06}, "cpu_system_hardware_utilization_percent": {"B": 19.230000000000004, "A": 20.17166666666667}, "cpu_main_sum_utilization_percent": {"A": 48.172, "B": 48.72}, "cpu_main_hardware_utilization_percent": {"A": 4.014333333333333, "B": 4.06}, "cpu_descendants_sum_utilization_percent": {"B": 110.17999999999998, "A": 111.512}, "cpu_descendants_hardware_utilization_percent": {"B": 9.181666666666667, "A": 9.292666666666667}, "cpu_combined_sum_utilization_percent": {"B": 158.89999999999998, "A": 159.68400000000003}, "cpu_combined_hardware_utilization_percent": {"B": 13.241666666666669, "A": 13.307000000000002}}, "code_block_resource_usage": {"main_ram": {"X": {"A": 0.086916096, "B": 0.08710894933333334}, "Y": {"A": 1.028769792, "B": 1.1594874880000001}, "tmp.py:7 -> tmp.py:9": {"B": 0.8887336960000001, "A": 1.21389056}, "tmp.py:37 -> tmp.py:38": {"B": 1.0999783424000003, "A": 1.1211156480000002}}, "descendants_ram": {"X": {"B": 5.076913834666667, "A": 5.213983061333333}, "Y": {"A": 0.0, "B": 0.0}, "tmp.py:7 -> tmp.py:9": {"A": 0.0, "B": 0.0}, "tmp.py:37 -> tmp.py:38": {"A": 0.0, "B": 0.0}}, "combined_ram": {"X": {"B": 5.142184618666666, 
"A": 5.279072256000001}, "Y": {"A": 1.028769792, "B": 1.1594874880000001}, "tmp.py:7 -> tmp.py:9": {"B": 0.8887336960000001, "A": 1.21389056}, "tmp.py:37 -> tmp.py:38": {"B": 1.0999783424000003, "A": 1.1211156480000002}}, "system_ram": {"X": {"A": 54.712742229333344, "B": 55.171221503999995}, "Y": {"A": 50.42655232, "B": 52.71240704}, "tmp.py:7 -> tmp.py:9": {"A": 50.539143168, "B": 52.042661888000005}, "tmp.py:37 -> tmp.py:38": {"A": 50.689142272, "B": 52.000836403200005}}, "main_gpu_ram": {"X": {"A": 0.0, "B": 0.0}, "Y": {"A": 0.0, "B": 0.0}, "tmp.py:7 -> tmp.py:9": {"A": 0.0, "B": 0.0}, "tmp.py:37 -> tmp.py:38": {"A": 0.0, "B": 0.0}}, "descendants_gpu_ram": {"X": {"A": 0.0, "B": 0.0}, "Y": {"A": 0.0, "B": 0.0}, "tmp.py:7 -> tmp.py:9": {"A": 0.0, "B": 0.0}, "tmp.py:37 -> tmp.py:38": {"A": 0.0, "B": 0.0}}, "combined_gpu_ram": {"X": {"A": 0.0, "B": 0.0}, "Y": {"A": 0.0, "B": 0.0}, "tmp.py:7 -> tmp.py:9": {"A": 0.0, "B": 0.0}, "tmp.py:37 -> tmp.py:38": {"A": 0.0, "B": 0.0}}, "system_gpu_ram": {"X": {"A": 0.21600000000000005, "B": 0.21600000000000005}, "Y": {"A": 0.216, "B": 0.216}, "tmp.py:7 -> tmp.py:9": {"A": 0.216, "B": 0.216}, "tmp.py:37 -> tmp.py:38": {"A": 0.216, "B": 0.21600000000000003}}, "gpu_sum_utilization_percent": {"X": {"A": 0.0, "B": 0.0}, "Y": {"A": 0.0, "B": 0.0}, "tmp.py:7 -> tmp.py:9": {"A": 0.0, "B": 0.0}, "tmp.py:37 -> tmp.py:38": {"A": 0.0, "B": 0.0}}, "gpu_hardware_utilization_percent": {"X": {"A": 0.0, "B": 0.0}, "Y": {"A": 0.0, "B": 0.0}, "tmp.py:7 -> tmp.py:9": {"A": 0.0, "B": 0.0}, "tmp.py:37 -> tmp.py:38": {"A": 0.0, "B": 0.0}}, "main_n_threads": {"X": {"A": 15.0, "B": 15.0}, "Y": {"A": 13.0, "B": 13.0}, "tmp.py:7 -> tmp.py:9": {"A": 13.0, "B": 13.0}, "tmp.py:37 -> tmp.py:38": {"A": 13.0, "B": 13.0}}, "descendants_n_threads": {"X": {"A": 36.0, "B": 36.0}, "Y": {"A": 0.0, "B": 0.0}, "tmp.py:7 -> tmp.py:9": {"A": 0.0, "B": 0.0}, "tmp.py:37 -> tmp.py:38": {"A": 0.0, "B": 0.0}}, "combined_n_threads": {"X": {"A": 51.0, "B": 51.0}, "Y": {"A": 
13.0, "B": 13.0}, "tmp.py:7 -> tmp.py:9": {"A": 13.0, "B": 13.0}, "tmp.py:37 -> tmp.py:38": {"A": 13.0, "B": 13.0}}, "cpu_system_sum_utilization_percent": {"X": {"A": 321.575, "B": 330.06666666666666}, "Y": {"B": 144.35000000000002, "A": 152.05}, "tmp.py:7 -> tmp.py:9": {"A": 115.69999999999999, "B": 116.5}, "tmp.py:37 -> tmp.py:38": {"B": 140.3, "A": 190.02499999999998}}, "cpu_system_hardware_utilization_percent": {"X": {"A": 26.797916666666666, "B": 27.50555555555556}, "Y": {"B": 12.029166666666669, "A": 12.670833333333334}, "tmp.py:7 -> tmp.py:9": {"A": 9.641666666666666, "B": 9.708333333333334}, "tmp.py:37 -> tmp.py:38": {"B": 11.691666666666666, "A": 15.835416666666665}}, "cpu_main_sum_utilization_percent": {"X": {"A": 0.0, "B": 0.0}, "Y": {"B": 92.85, "A": 93.6}, "tmp.py:7 -> tmp.py:9": {"A": 89.5, "B": 94.1}, "tmp.py:37 -> tmp.py:38": {"A": 93.625, "B": 93.82}}, "cpu_main_hardware_utilization_percent": {"X": {"A": 0.0, "B": 0.0}, "Y": {"B": 7.737499999999999, "A": 7.8}, "tmp.py:7 -> tmp.py:9": {"A": 7.458333333333333, "B": 7.841666666666666}, "tmp.py:37 -> tmp.py:38": {"A": 7.802083333333333, "B": 7.818333333333333}}, "cpu_descendants_sum_utilization_percent": {"X": {"B": 229.54166666666663, "A": 232.3166666666667}, "Y": {"A": 0.0, "B": 0.0}, "tmp.py:7 -> tmp.py:9": {"A": 0.0, "B": 0.0}, "tmp.py:37 -> tmp.py:38": {"A": 0.0, "B": 0.0}}, "cpu_descendants_hardware_utilization_percent": {"X": {"B": 19.128472222222225, "A": 19.35972222222222}, "Y": {"A": 0.0, "B": 0.0}, "tmp.py:7 -> tmp.py:9": {"A": 0.0, "B": 0.0}, "tmp.py:37 -> tmp.py:38": {"A": 0.0, "B": 0.0}}, "cpu_combined_sum_utilization_percent": {"X": {"B": 229.54166666666663, "A": 232.3166666666667}, "Y": {"B": 92.85, "A": 93.6}, "tmp.py:7 -> tmp.py:9": {"A": 89.5, "B": 94.1}, "tmp.py:37 -> tmp.py:38": {"A": 93.625, "B": 93.82}}, "cpu_combined_hardware_utilization_percent": {"X": {"B": 19.128472222222225, "A": 19.35972222222222}, "Y": {"B": 7.737499999999999, "A": 7.8}, "tmp.py:7 -> tmp.py:9": {"A": 
7.458333333333333, "B": 7.841666666666666}, "tmp.py:37 -> tmp.py:38": {"A": 7.802083333333333, "B": 7.818333333333333}}}, "code_block_compute_time": {"X": {"A": 3.735934352874756, "B": 3.735934352874756}, "Y": {"A": 1.847283124923706, "B": 1.847283124923706}, "tmp.py:7 -> tmp.py:9": {"A": 1.0934793949127197, "B": 1.0934793949127197}, "tmp.py:37 -> tmp.py:38": {"A": 1.1289910554885865, "B": 1.1289910554885865}}} \ No newline at end of file diff --git a/tests/data/sub-tracking-results/comparison_mean.txt b/tests/data/sub-tracking-results/comparison_mean.txt new file mode 100644 index 0000000..793ea82 --- /dev/null +++ b/tests/data/sub-tracking-results/comparison_mean.txt @@ -0,0 +1,351 @@ +Overall Resource Usage: + Main Ram: + A B + 0.60060156 0.61011198 + Descendants Ram: + B A + 2.43691864 2.50271187 + Combined Ram: + B A + 3.0365483 3.09283652 + System Ram: + A B + 52.59960435 53.58121992 + Main Gpu Ram: + A B + 0.0 0.0 + Descendants Gpu Ram: + A B + 0.0 0.0 + Combined Gpu Ram: + A B + 0.0 0.0 + System Gpu Ram: + A B + 0.216 0.216 + Gpu Sum Utilization Percent: + A B + 0.0 0.0 + Gpu Hardware Utilization Percent: + A B + 0.0 0.0 + Main N Threads: + A B + 13.96 13.96 + Descendants N Threads: + A B + 17.28 17.28 + Combined N Threads: + A B + 31.24 31.24 + Cpu System Sum Utilization Percent: + B A + 230.76 242.06 + Cpu System Hardware Utilization Percent: + B A + 19.23 20.17166667 + Cpu Main Sum Utilization Percent: + A B + 48.172 48.72 + Cpu Main Hardware Utilization Percent: + A B + 4.01433333 4.06 + Cpu Descendants Sum Utilization Percent: + B A + 110.18 111.512 + Cpu Descendants Hardware Utilization Percent: + B A + 9.18166667 9.29266667 + Cpu Combined Sum Utilization Percent: + B A + 158.9 159.684 + Cpu Combined Hardware Utilization Percent: + B A + 13.24166667 13.307 +Code Block Resource Usage: + Main Ram: + X: + A B + 0.0869161 0.08710895 + Y: + A B + 1.02876979 1.15948749 + tmp.py:7 -> tmp.py:9: + B A + 0.8887337 1.21389056 + tmp.py:37 -> tmp.py:38: + B A + 
1.09997834 1.12111565 + Descendants Ram: + X: + B A + 5.07691383 5.21398306 + Y: + A B + 0.0 0.0 + tmp.py:7 -> tmp.py:9: + A B + 0.0 0.0 + tmp.py:37 -> tmp.py:38: + A B + 0.0 0.0 + Combined Ram: + X: + B A + 5.14218462 5.27907226 + Y: + A B + 1.02876979 1.15948749 + tmp.py:7 -> tmp.py:9: + B A + 0.8887337 1.21389056 + tmp.py:37 -> tmp.py:38: + B A + 1.09997834 1.12111565 + System Ram: + X: + A B + 54.71274223 55.1712215 + Y: + A B + 50.42655232 52.71240704 + tmp.py:7 -> tmp.py:9: + A B + 50.53914317 52.04266189 + tmp.py:37 -> tmp.py:38: + A B + 50.68914227 52.0008364 + Main Gpu Ram: + X: + A B + 0.0 0.0 + Y: + A B + 0.0 0.0 + tmp.py:7 -> tmp.py:9: + A B + 0.0 0.0 + tmp.py:37 -> tmp.py:38: + A B + 0.0 0.0 + Descendants Gpu Ram: + X: + A B + 0.0 0.0 + Y: + A B + 0.0 0.0 + tmp.py:7 -> tmp.py:9: + A B + 0.0 0.0 + tmp.py:37 -> tmp.py:38: + A B + 0.0 0.0 + Combined Gpu Ram: + X: + A B + 0.0 0.0 + Y: + A B + 0.0 0.0 + tmp.py:7 -> tmp.py:9: + A B + 0.0 0.0 + tmp.py:37 -> tmp.py:38: + A B + 0.0 0.0 + System Gpu Ram: + X: + A B + 0.216 0.216 + Y: + A B + 0.216 0.216 + tmp.py:7 -> tmp.py:9: + A B + 0.216 0.216 + tmp.py:37 -> tmp.py:38: + A B + 0.216 0.216 + Gpu Sum Utilization Percent: + X: + A B + 0.0 0.0 + Y: + A B + 0.0 0.0 + tmp.py:7 -> tmp.py:9: + A B + 0.0 0.0 + tmp.py:37 -> tmp.py:38: + A B + 0.0 0.0 + Gpu Hardware Utilization Percent: + X: + A B + 0.0 0.0 + Y: + A B + 0.0 0.0 + tmp.py:7 -> tmp.py:9: + A B + 0.0 0.0 + tmp.py:37 -> tmp.py:38: + A B + 0.0 0.0 + Main N Threads: + X: + A B + 15.0 15.0 + Y: + A B + 13.0 13.0 + tmp.py:7 -> tmp.py:9: + A B + 13.0 13.0 + tmp.py:37 -> tmp.py:38: + A B + 13.0 13.0 + Descendants N Threads: + X: + A B + 36.0 36.0 + Y: + A B + 0.0 0.0 + tmp.py:7 -> tmp.py:9: + A B + 0.0 0.0 + tmp.py:37 -> tmp.py:38: + A B + 0.0 0.0 + Combined N Threads: + X: + A B + 51.0 51.0 + Y: + A B + 13.0 13.0 + tmp.py:7 -> tmp.py:9: + A B + 13.0 13.0 + tmp.py:37 -> tmp.py:38: + A B + 13.0 13.0 + Cpu System Sum Utilization Percent: + X: + A B + 321.575 
330.06666667 + Y: + B A + 144.35 152.05 + tmp.py:7 -> tmp.py:9: + A B + 115.7 116.5 + tmp.py:37 -> tmp.py:38: + B A + 140.3 190.025 + Cpu System Hardware Utilization Percent: + X: + A B + 26.79791667 27.50555556 + Y: + B A + 12.02916667 12.67083333 + tmp.py:7 -> tmp.py:9: + A B + 9.64166667 9.70833333 + tmp.py:37 -> tmp.py:38: + B A + 11.69166667 15.83541667 + Cpu Main Sum Utilization Percent: + X: + A B + 0.0 0.0 + Y: + B A + 92.85 93.6 + tmp.py:7 -> tmp.py:9: + A B + 89.5 94.1 + tmp.py:37 -> tmp.py:38: + A B + 93.625 93.82 + Cpu Main Hardware Utilization Percent: + X: + A B + 0.0 0.0 + Y: + B A + 7.7375 7.8 + tmp.py:7 -> tmp.py:9: + A B + 7.45833333 7.84166667 + tmp.py:37 -> tmp.py:38: + A B + 7.80208333 7.81833333 + Cpu Descendants Sum Utilization Percent: + X: + B A + 229.54166667 232.31666667 + Y: + A B + 0.0 0.0 + tmp.py:7 -> tmp.py:9: + A B + 0.0 0.0 + tmp.py:37 -> tmp.py:38: + A B + 0.0 0.0 + Cpu Descendants Hardware Utilization Percent: + X: + B A + 19.12847222 19.35972222 + Y: + A B + 0.0 0.0 + tmp.py:7 -> tmp.py:9: + A B + 0.0 0.0 + tmp.py:37 -> tmp.py:38: + A B + 0.0 0.0 + Cpu Combined Sum Utilization Percent: + X: + B A + 229.54166667 232.31666667 + Y: + B A + 92.85 93.6 + tmp.py:7 -> tmp.py:9: + A B + 89.5 94.1 + tmp.py:37 -> tmp.py:38: + A B + 93.625 93.82 + Cpu Combined Hardware Utilization Percent: + X: + B A + 19.12847222 19.35972222 + Y: + B A + 7.7375 7.8 + tmp.py:7 -> tmp.py:9: + A B + 7.45833333 7.84166667 + tmp.py:37 -> tmp.py:38: + A B + 7.80208333 7.81833333 +Code Block Compute Time: + X: + A B + 3.73593435 3.73593435 + Y: + A B + 1.84728312 1.84728312 + tmp.py:7 -> tmp.py:9: + A B + 1.09347939 1.09347939 + tmp.py:37 -> tmp.py:38: + A B + 1.12899106 1.12899106 diff --git a/tests/data/sub-tracking-results/comparison_min.json b/tests/data/sub-tracking-results/comparison_min.json new file mode 100644 index 0000000..ddbdebc --- /dev/null +++ b/tests/data/sub-tracking-results/comparison_min.json @@ -0,0 +1 @@ +{"overall_resource_usage": 
{"main_ram": {"A": 0.086913024, "B": 0.087105536}, "descendants_ram": {"A": 0.0, "B": 0.0}, "combined_ram": {"B": 0.277377024, "A": 0.27824128000000004}, "system_ram": {"A": 49.029361664, "B": 49.073319936000004}, "main_gpu_ram": {"A": 0.0, "B": 0.0}, "descendants_gpu_ram": {"A": 0.0, "B": 0.0}, "combined_gpu_ram": {"A": 0.0, "B": 0.0}, "system_gpu_ram": {"A": 0.216, "B": 0.216}, "gpu_sum_utilization_percent": {"A": 0.0, "B": 0.0}, "gpu_hardware_utilization_percent": {"A": 0.0, "B": 0.0}, "main_n_threads": {"A": 13, "B": 13}, "descendants_n_threads": {"A": 0, "B": 0}, "combined_n_threads": {"A": 13, "B": 13}, "cpu_system_sum_utilization_percent": {"A": 115.10000000000001, "B": 116.5}, "cpu_system_hardware_utilization_percent": {"A": 9.591666666666667, "B": 9.708333333333334}, "cpu_main_sum_utilization_percent": {"A": 0.0, "B": 0.0}, "cpu_main_hardware_utilization_percent": {"A": 0.0, "B": 0.0}, "cpu_descendants_sum_utilization_percent": {"A": 0.0, "B": 0.0}, "cpu_descendants_hardware_utilization_percent": {"A": 0.0, "B": 0.0}, "cpu_combined_sum_utilization_percent": {"A": 85.6, "B": 86.6}, "cpu_combined_hardware_utilization_percent": {"A": 7.133333333333333, "B": 7.216666666666666}}, "code_block_resource_usage": {"main_ram": {"X": {"A": 0.086913024, "B": 0.087105536}, "Y": {"B": 0.33335296000000003, "A": 0.48104652800000003}, "tmp.py:7 -> tmp.py:9": {"B": 0.8887336960000001, "A": 1.21389056}, "tmp.py:37 -> tmp.py:38": {"A": 0.861704192, "B": 0.8678359040000001}}, "descendants_ram": {"X": {"B": 0.211996672, "A": 0.21314764800000002}, "Y": {"A": 0.0, "B": 0.0}, "tmp.py:7 -> tmp.py:9": {"A": 0.0, "B": 0.0}, "tmp.py:37 -> tmp.py:38": {"A": 0.0, "B": 0.0}}, "combined_ram": {"X": {"B": 0.277377024, "A": 0.27824128000000004}, "Y": {"B": 0.33335296000000003, "A": 0.48104652800000003}, "tmp.py:7 -> tmp.py:9": {"B": 0.8887336960000001, "A": 1.21389056}, "tmp.py:37 -> tmp.py:38": {"A": 0.861704192, "B": 0.8678359040000001}}, "system_ram": {"X": {"A": 49.029361664, "B": 
49.073319936000004}, "Y": {"A": 49.9365888, "B": 52.099424256000006}, "tmp.py:7 -> tmp.py:9": {"A": 50.539143168, "B": 52.042661888000005}, "tmp.py:37 -> tmp.py:38": {"A": 50.392174592, "B": 51.668799488000005}}, "main_gpu_ram": {"X": {"A": 0.0, "B": 0.0}, "Y": {"A": 0.0, "B": 0.0}, "tmp.py:7 -> tmp.py:9": {"A": 0.0, "B": 0.0}, "tmp.py:37 -> tmp.py:38": {"A": 0.0, "B": 0.0}}, "descendants_gpu_ram": {"X": {"A": 0.0, "B": 0.0}, "Y": {"A": 0.0, "B": 0.0}, "tmp.py:7 -> tmp.py:9": {"A": 0.0, "B": 0.0}, "tmp.py:37 -> tmp.py:38": {"A": 0.0, "B": 0.0}}, "combined_gpu_ram": {"X": {"A": 0.0, "B": 0.0}, "Y": {"A": 0.0, "B": 0.0}, "tmp.py:7 -> tmp.py:9": {"A": 0.0, "B": 0.0}, "tmp.py:37 -> tmp.py:38": {"A": 0.0, "B": 0.0}}, "system_gpu_ram": {"X": {"A": 0.216, "B": 0.216}, "Y": {"A": 0.216, "B": 0.216}, "tmp.py:7 -> tmp.py:9": {"A": 0.216, "B": 0.216}, "tmp.py:37 -> tmp.py:38": {"A": 0.216, "B": 0.216}}, "gpu_sum_utilization_percent": {"X": {"A": 0.0, "B": 0.0}, "Y": {"A": 0.0, "B": 0.0}, "tmp.py:7 -> tmp.py:9": {"A": 0.0, "B": 0.0}, "tmp.py:37 -> tmp.py:38": {"A": 0.0, "B": 0.0}}, "gpu_hardware_utilization_percent": {"X": {"A": 0.0, "B": 0.0}, "Y": {"A": 0.0, "B": 0.0}, "tmp.py:7 -> tmp.py:9": {"A": 0.0, "B": 0.0}, "tmp.py:37 -> tmp.py:38": {"A": 0.0, "B": 0.0}}, "main_n_threads": {"X": {"A": 15.0, "B": 15.0}, "Y": {"A": 13.0, "B": 13.0}, "tmp.py:7 -> tmp.py:9": {"A": 13.0, "B": 13.0}, "tmp.py:37 -> tmp.py:38": {"A": 13.0, "B": 13.0}}, "descendants_n_threads": {"X": {"A": 36.0, "B": 36.0}, "Y": {"A": 0.0, "B": 0.0}, "tmp.py:7 -> tmp.py:9": {"A": 0.0, "B": 0.0}, "tmp.py:37 -> tmp.py:38": {"A": 0.0, "B": 0.0}}, "combined_n_threads": {"X": {"A": 51.0, "B": 51.0}, "Y": {"A": 13.0, "B": 13.0}, "tmp.py:7 -> tmp.py:9": {"A": 13.0, "B": 13.0}, "tmp.py:37 -> tmp.py:38": {"A": 13.0, "B": 13.0}}, "cpu_system_sum_utilization_percent": {"X": {"A": 122.60000000000001, "B": 157.3}, "Y": {"B": 139.5, "A": 145.3}, "tmp.py:7 -> tmp.py:9": {"A": 115.69999999999999, "B": 116.5}, "tmp.py:37 -> 
tmp.py:38": {"A": 115.10000000000001, "B": 128.0}}, "cpu_system_hardware_utilization_percent": {"X": {"A": 10.216666666666667, "B": 13.108333333333334}, "Y": {"B": 11.625, "A": 12.108333333333334}, "tmp.py:7 -> tmp.py:9": {"A": 9.641666666666666, "B": 9.708333333333334}, "tmp.py:37 -> tmp.py:38": {"A": 9.591666666666667, "B": 10.666666666666666}}, "cpu_main_sum_utilization_percent": {"X": {"A": 0.0, "B": 0.0}, "Y": {"A": 85.6, "B": 86.6}, "tmp.py:7 -> tmp.py:9": {"A": 89.5, "B": 94.1}, "tmp.py:37 -> tmp.py:38": {"B": 89.6, "A": 90.4}}, "cpu_main_hardware_utilization_percent": {"X": {"A": 0.0, "B": 0.0}, "Y": {"A": 7.133333333333333, "B": 7.216666666666666}, "tmp.py:7 -> tmp.py:9": {"A": 7.458333333333333, "B": 7.841666666666666}, "tmp.py:37 -> tmp.py:38": {"B": 7.466666666666666, "A": 7.533333333333334}}, "cpu_descendants_sum_utilization_percent": {"X": {"B": 91.1, "A": 91.4}, "Y": {"A": 0.0, "B": 0.0}, "tmp.py:7 -> tmp.py:9": {"A": 0.0, "B": 0.0}, "tmp.py:37 -> tmp.py:38": {"A": 0.0, "B": 0.0}}, "cpu_descendants_hardware_utilization_percent": {"X": {"B": 7.591666666666666, "A": 7.616666666666667}, "Y": {"A": 0.0, "B": 0.0}, "tmp.py:7 -> tmp.py:9": {"A": 0.0, "B": 0.0}, "tmp.py:37 -> tmp.py:38": {"A": 0.0, "B": 0.0}}, "cpu_combined_sum_utilization_percent": {"X": {"B": 91.1, "A": 91.4}, "Y": {"A": 85.6, "B": 86.6}, "tmp.py:7 -> tmp.py:9": {"A": 89.5, "B": 94.1}, "tmp.py:37 -> tmp.py:38": {"B": 89.6, "A": 90.4}}, "cpu_combined_hardware_utilization_percent": {"X": {"B": 7.591666666666666, "A": 7.616666666666667}, "Y": {"A": 7.133333333333333, "B": 7.216666666666666}, "tmp.py:7 -> tmp.py:9": {"A": 7.458333333333333, "B": 7.841666666666666}, "tmp.py:37 -> tmp.py:38": {"B": 7.466666666666666, "A": 7.533333333333334}}}, "code_block_compute_time": {"X": {"A": 3.612563133239746, "B": 3.612563133239746}, "Y": {"A": 1.847283124923706, "B": 1.847283124923706}, "tmp.py:7 -> tmp.py:9": {"A": 1.0934793949127197, "B": 1.0934793949127197}, "tmp.py:37 -> tmp.py:38": {"A": 
1.0983691215515137, "B": 1.0983691215515137}}} \ No newline at end of file diff --git a/tests/data/sub-tracking-results/comparison_min.txt b/tests/data/sub-tracking-results/comparison_min.txt new file mode 100644 index 0000000..08bd1ad --- /dev/null +++ b/tests/data/sub-tracking-results/comparison_min.txt @@ -0,0 +1,351 @@ +Overall Resource Usage: + Main Ram: + A B + 0.08691302 0.08710554 + Descendants Ram: + A B + 0.0 0.0 + Combined Ram: + B A + 0.27737702 0.27824128 + System Ram: + A B + 49.02936166 49.07331994 + Main Gpu Ram: + A B + 0.0 0.0 + Descendants Gpu Ram: + A B + 0.0 0.0 + Combined Gpu Ram: + A B + 0.0 0.0 + System Gpu Ram: + A B + 0.216 0.216 + Gpu Sum Utilization Percent: + A B + 0.0 0.0 + Gpu Hardware Utilization Percent: + A B + 0.0 0.0 + Main N Threads: + A B + 13 13 + Descendants N Threads: + A B + 0 0 + Combined N Threads: + A B + 13 13 + Cpu System Sum Utilization Percent: + A B + 115.1 116.5 + Cpu System Hardware Utilization Percent: + A B + 9.59166667 9.70833333 + Cpu Main Sum Utilization Percent: + A B + 0.0 0.0 + Cpu Main Hardware Utilization Percent: + A B + 0.0 0.0 + Cpu Descendants Sum Utilization Percent: + A B + 0.0 0.0 + Cpu Descendants Hardware Utilization Percent: + A B + 0.0 0.0 + Cpu Combined Sum Utilization Percent: + A B + 85.6 86.6 + Cpu Combined Hardware Utilization Percent: + A B + 7.13333333 7.21666667 +Code Block Resource Usage: + Main Ram: + X: + A B + 0.08691302 0.08710554 + Y: + B A + 0.33335296 0.48104653 + tmp.py:7 -> tmp.py:9: + B A + 0.8887337 1.21389056 + tmp.py:37 -> tmp.py:38: + A B + 0.86170419 0.8678359 + Descendants Ram: + X: + B A + 0.21199667 0.21314765 + Y: + A B + 0.0 0.0 + tmp.py:7 -> tmp.py:9: + A B + 0.0 0.0 + tmp.py:37 -> tmp.py:38: + A B + 0.0 0.0 + Combined Ram: + X: + B A + 0.27737702 0.27824128 + Y: + B A + 0.33335296 0.48104653 + tmp.py:7 -> tmp.py:9: + B A + 0.8887337 1.21389056 + tmp.py:37 -> tmp.py:38: + A B + 0.86170419 0.8678359 + System Ram: + X: + A B + 49.02936166 49.07331994 + Y: + A B + 
49.9365888 52.09942426 + tmp.py:7 -> tmp.py:9: + A B + 50.53914317 52.04266189 + tmp.py:37 -> tmp.py:38: + A B + 50.39217459 51.66879949 + Main Gpu Ram: + X: + A B + 0.0 0.0 + Y: + A B + 0.0 0.0 + tmp.py:7 -> tmp.py:9: + A B + 0.0 0.0 + tmp.py:37 -> tmp.py:38: + A B + 0.0 0.0 + Descendants Gpu Ram: + X: + A B + 0.0 0.0 + Y: + A B + 0.0 0.0 + tmp.py:7 -> tmp.py:9: + A B + 0.0 0.0 + tmp.py:37 -> tmp.py:38: + A B + 0.0 0.0 + Combined Gpu Ram: + X: + A B + 0.0 0.0 + Y: + A B + 0.0 0.0 + tmp.py:7 -> tmp.py:9: + A B + 0.0 0.0 + tmp.py:37 -> tmp.py:38: + A B + 0.0 0.0 + System Gpu Ram: + X: + A B + 0.216 0.216 + Y: + A B + 0.216 0.216 + tmp.py:7 -> tmp.py:9: + A B + 0.216 0.216 + tmp.py:37 -> tmp.py:38: + A B + 0.216 0.216 + Gpu Sum Utilization Percent: + X: + A B + 0.0 0.0 + Y: + A B + 0.0 0.0 + tmp.py:7 -> tmp.py:9: + A B + 0.0 0.0 + tmp.py:37 -> tmp.py:38: + A B + 0.0 0.0 + Gpu Hardware Utilization Percent: + X: + A B + 0.0 0.0 + Y: + A B + 0.0 0.0 + tmp.py:7 -> tmp.py:9: + A B + 0.0 0.0 + tmp.py:37 -> tmp.py:38: + A B + 0.0 0.0 + Main N Threads: + X: + A B + 15.0 15.0 + Y: + A B + 13.0 13.0 + tmp.py:7 -> tmp.py:9: + A B + 13.0 13.0 + tmp.py:37 -> tmp.py:38: + A B + 13.0 13.0 + Descendants N Threads: + X: + A B + 36.0 36.0 + Y: + A B + 0.0 0.0 + tmp.py:7 -> tmp.py:9: + A B + 0.0 0.0 + tmp.py:37 -> tmp.py:38: + A B + 0.0 0.0 + Combined N Threads: + X: + A B + 51.0 51.0 + Y: + A B + 13.0 13.0 + tmp.py:7 -> tmp.py:9: + A B + 13.0 13.0 + tmp.py:37 -> tmp.py:38: + A B + 13.0 13.0 + Cpu System Sum Utilization Percent: + X: + A B + 122.6 157.3 + Y: + B A + 139.5 145.3 + tmp.py:7 -> tmp.py:9: + A B + 115.7 116.5 + tmp.py:37 -> tmp.py:38: + A B + 115.1 128.0 + Cpu System Hardware Utilization Percent: + X: + A B + 10.21666667 13.10833333 + Y: + B A + 11.625 12.10833333 + tmp.py:7 -> tmp.py:9: + A B + 9.64166667 9.70833333 + tmp.py:37 -> tmp.py:38: + A B + 9.59166667 10.66666667 + Cpu Main Sum Utilization Percent: + X: + A B + 0.0 0.0 + Y: + A B + 85.6 86.6 + tmp.py:7 -> 
tmp.py:9: + A B + 89.5 94.1 + tmp.py:37 -> tmp.py:38: + B A + 89.6 90.4 + Cpu Main Hardware Utilization Percent: + X: + A B + 0.0 0.0 + Y: + A B + 7.13333333 7.21666667 + tmp.py:7 -> tmp.py:9: + A B + 7.45833333 7.84166667 + tmp.py:37 -> tmp.py:38: + B A + 7.46666667 7.53333333 + Cpu Descendants Sum Utilization Percent: + X: + B A + 91.1 91.4 + Y: + A B + 0.0 0.0 + tmp.py:7 -> tmp.py:9: + A B + 0.0 0.0 + tmp.py:37 -> tmp.py:38: + A B + 0.0 0.0 + Cpu Descendants Hardware Utilization Percent: + X: + B A + 7.59166667 7.61666667 + Y: + A B + 0.0 0.0 + tmp.py:7 -> tmp.py:9: + A B + 0.0 0.0 + tmp.py:37 -> tmp.py:38: + A B + 0.0 0.0 + Cpu Combined Sum Utilization Percent: + X: + B A + 91.1 91.4 + Y: + A B + 85.6 86.6 + tmp.py:7 -> tmp.py:9: + A B + 89.5 94.1 + tmp.py:37 -> tmp.py:38: + B A + 89.6 90.4 + Cpu Combined Hardware Utilization Percent: + X: + B A + 7.59166667 7.61666667 + Y: + A B + 7.13333333 7.21666667 + tmp.py:7 -> tmp.py:9: + A B + 7.45833333 7.84166667 + tmp.py:37 -> tmp.py:38: + B A + 7.46666667 7.53333333 +Code Block Compute Time: + X: + A B + 3.61256313 3.61256313 + Y: + A B + 1.84728312 1.84728312 + tmp.py:7 -> tmp.py:9: + A B + 1.09347939 1.09347939 + tmp.py:37 -> tmp.py:38: + A B + 1.09836912 1.09836912 diff --git a/tests/data/sub-tracking-results/comparison_std.json b/tests/data/sub-tracking-results/comparison_std.json new file mode 100644 index 0000000..bead327 --- /dev/null +++ b/tests/data/sub-tracking-results/comparison_std.json @@ -0,0 +1 @@ +{"overall_resource_usage": {"main_ram": {"A": 0.5269852839221266, "B": 0.5615874566864882}, "descendants_ram": {"B": 3.784130954436396, "A": 3.8823366111559143}, "combined_ram": {"B": 3.4701130113065624, "A": 3.568347601613889}, "system_ram": {"B": 3.1267382538015376, "A": 3.467963700517139}, "main_gpu_ram": {"A": 0.0, "B": 0.0}, "descendants_gpu_ram": {"A": 0.0, "B": 0.0}, "combined_gpu_ram": {"A": 0.0, "B": 0.0}, "system_gpu_ram": {"A": 0.0, "B": 0.0}, "gpu_sum_utilization_percent": {"A": 0.0, "B": 0.0}, 
"gpu_hardware_utilization_percent": {"A": 0.0, "B": 0.0}, "main_n_threads": {"A": 1.0, "B": 1.0}, "descendants_n_threads": {"A": 17.97220075561143, "B": 17.97220075561143}, "combined_n_threads": {"A": 18.973665961010276, "B": 18.973665961010276}, "cpu_system_sum_utilization_percent": {"B": 112.8486916184676, "A": 119.47923668989523}, "cpu_system_hardware_utilization_percent": {"B": 9.404057634872295, "A": 9.956603057491263}, "cpu_main_sum_utilization_percent": {"A": 46.379327463860456, "B": 46.86729349983846}, "cpu_main_hardware_utilization_percent": {"A": 3.8649439553217033, "B": 3.905607791653205}, "cpu_descendants_sum_utilization_percent": {"B": 124.94461172855756, "A": 128.9478974469921}, "cpu_descendants_hardware_utilization_percent": {"B": 10.412050977379797, "A": 10.745658120582675}, "cpu_combined_sum_utilization_percent": {"B": 84.09375719992553, "A": 89.63866433632305}, "cpu_combined_hardware_utilization_percent": {"B": 7.007813099993776, "A": 7.469888694693585}}, "code_block_resource_usage": {"main_ram": {"X": {"A": 1.8524857010835653e-06, "B": 1.8524857010835653e-06}, "Y": {"A": 0.7745976683760593, "B": 0.7745976683760593}, "tmp.py:7 -> tmp.py:9": {"A": 0.0, "B": 0.0}, "tmp.py:37 -> tmp.py:38": {"A": 0.1329195703851683, "B": 0.1329195703851683}}, "descendants_ram": {"X": {"A": 4.339827962944292, "B": 4.339827962944292}, "Y": {"A": 0.0, "B": 0.0}, "tmp.py:7 -> tmp.py:9": {"A": 0.0, "B": 0.0}, "tmp.py:37 -> tmp.py:38": {"A": 0.0, "B": 0.0}}, "combined_ram": {"X": {"A": 4.3398268840710665, "B": 4.3398268840710665}, "Y": {"A": 0.7745976683760593, "B": 0.7745976683760593}, "tmp.py:7 -> tmp.py:9": {"A": 0.0, "B": 0.0}, "tmp.py:37 -> tmp.py:38": {"A": 0.1329195703851683, "B": 0.1329195703851683}}, "system_ram": {"X": {"A": 4.2288752007728245, "B": 4.2288752007728245}, "Y": {"A": 0.692913055052061, "B": 0.692913055052061}, "tmp.py:7 -> tmp.py:9": {"A": 0.0, "B": 0.0}, "tmp.py:37 -> tmp.py:38": {"A": 0.19767745567246933, "B": 0.19767745567246933}}, 
"main_gpu_ram": {"X": {"A": 0.0, "B": 0.0}, "Y": {"A": 0.0, "B": 0.0}, "tmp.py:7 -> tmp.py:9": {"A": 0.0, "B": 0.0}, "tmp.py:37 -> tmp.py:38": {"A": 0.0, "B": 0.0}}, "descendants_gpu_ram": {"X": {"A": 0.0, "B": 0.0}, "Y": {"A": 0.0, "B": 0.0}, "tmp.py:7 -> tmp.py:9": {"A": 0.0, "B": 0.0}, "tmp.py:37 -> tmp.py:38": {"A": 0.0, "B": 0.0}}, "combined_gpu_ram": {"X": {"A": 0.0, "B": 0.0}, "Y": {"A": 0.0, "B": 0.0}, "tmp.py:7 -> tmp.py:9": {"A": 0.0, "B": 0.0}, "tmp.py:37 -> tmp.py:38": {"A": 0.0, "B": 0.0}}, "system_gpu_ram": {"X": {"A": 5.797950651443767e-17, "B": 5.797950651443767e-17}, "Y": {"A": 0.0, "B": 0.0}, "tmp.py:7 -> tmp.py:9": {"A": 0.0, "B": 0.0}, "tmp.py:37 -> tmp.py:38": {"A": 0.0, "B": 0.0}}, "gpu_sum_utilization_percent": {"X": {"A": 0.0, "B": 0.0}, "Y": {"A": 0.0, "B": 0.0}, "tmp.py:7 -> tmp.py:9": {"A": 0.0, "B": 0.0}, "tmp.py:37 -> tmp.py:38": {"A": 0.0, "B": 0.0}}, "gpu_hardware_utilization_percent": {"X": {"A": 0.0, "B": 0.0}, "Y": {"A": 0.0, "B": 0.0}, "tmp.py:7 -> tmp.py:9": {"A": 0.0, "B": 0.0}, "tmp.py:37 -> tmp.py:38": {"A": 0.0, "B": 0.0}}, "main_n_threads": {"X": {"A": 0.0, "B": 0.0}, "Y": {"A": 0.0, "B": 0.0}, "tmp.py:7 -> tmp.py:9": {"A": 0.0, "B": 0.0}, "tmp.py:37 -> tmp.py:38": {"A": 0.0, "B": 0.0}}, "descendants_n_threads": {"X": {"A": 0.0, "B": 0.0}, "Y": {"A": 0.0, "B": 0.0}, "tmp.py:7 -> tmp.py:9": {"A": 0.0, "B": 0.0}, "tmp.py:37 -> tmp.py:38": {"A": 0.0, "B": 0.0}}, "combined_n_threads": {"X": {"A": 0.0, "B": 0.0}, "Y": {"A": 0.0, "B": 0.0}, "tmp.py:7 -> tmp.py:9": {"A": 0.0, "B": 0.0}, "tmp.py:37 -> tmp.py:38": {"A": 0.0, "B": 0.0}}, "cpu_system_sum_utilization_percent": {"X": {"A": 108.67268725188244, "B": 108.67268725188244}, "Y": {"A": 9.545941546018371, "B": 9.545941546018371}, "tmp.py:7 -> tmp.py:9": {"A": 0.0, "B": 0.0}, "tmp.py:37 -> tmp.py:38": {"A": 100.27071570788466, "B": 100.27071570788466}}, "cpu_system_hardware_utilization_percent": {"X": {"A": 9.056057270990204, "B": 9.056057270990204}, "Y": {"A": 
0.7954951288348647, "B": 0.7954951288348647}, "tmp.py:7 -> tmp.py:9": {"A": 0.0, "B": 0.0}, "tmp.py:37 -> tmp.py:38": {"A": 8.355892975657055, "B": 8.355892975657055}}, "cpu_main_sum_utilization_percent": {"X": {"A": 0.0, "B": 0.0}, "Y": {"A": 11.313708498984761, "B": 11.313708498984761}, "tmp.py:7 -> tmp.py:9": {"A": 0.0, "B": 0.0}, "tmp.py:37 -> tmp.py:38": {"A": 2.5998626337338906, "B": 2.5998626337338906}}, "cpu_main_hardware_utilization_percent": {"X": {"A": 0.0, "B": 0.0}, "Y": {"A": 0.9428090415820638, "B": 0.9428090415820638}, "tmp.py:7 -> tmp.py:9": {"A": 0.0, "B": 0.0}, "tmp.py:37 -> tmp.py:38": {"A": 0.21665521947782385, "B": 0.21665521947782385}}, "cpu_descendants_sum_utilization_percent": {"X": {"A": 84.69719146715694, "B": 84.69719146715694}, "Y": {"A": 0.0, "B": 0.0}, "tmp.py:7 -> tmp.py:9": {"A": 0.0, "B": 0.0}, "tmp.py:37 -> tmp.py:38": {"A": 0.0, "B": 0.0}}, "cpu_descendants_hardware_utilization_percent": {"X": {"A": 7.0580992889297445, "B": 7.0580992889297445}, "Y": {"A": 0.0, "B": 0.0}, "tmp.py:7 -> tmp.py:9": {"A": 0.0, "B": 0.0}, "tmp.py:37 -> tmp.py:38": {"A": 0.0, "B": 0.0}}, "cpu_combined_sum_utilization_percent": {"X": {"A": 84.69719146715694, "B": 84.69719146715694}, "Y": {"A": 11.313708498984761, "B": 11.313708498984761}, "tmp.py:7 -> tmp.py:9": {"A": 0.0, "B": 0.0}, "tmp.py:37 -> tmp.py:38": {"A": 2.5998626337338906, "B": 2.5998626337338906}}, "cpu_combined_hardware_utilization_percent": {"X": {"A": 7.0580992889297445, "B": 7.0580992889297445}, "Y": {"A": 0.9428090415820638, "B": 0.9428090415820638}, "tmp.py:7 -> tmp.py:9": {"A": 0.0, "B": 0.0}, "tmp.py:37 -> tmp.py:38": {"A": 0.21665521947782385, "B": 0.21665521947782385}}}, "code_block_compute_time": {"X": {"A": 0.07070437511070823, "B": 0.07070437511070823}, "Y": {"A": 0.0, "B": 0.0}, "tmp.py:7 -> tmp.py:9": {"A": 0.0, "B": 0.0}, "tmp.py:37 -> tmp.py:38": {"A": 0.035251379040035336, "B": 0.035251379040035336}}} \ No newline at end of file diff --git 
a/tests/data/sub-tracking-results/comparison_std.txt b/tests/data/sub-tracking-results/comparison_std.txt new file mode 100644 index 0000000..70dbf99 --- /dev/null +++ b/tests/data/sub-tracking-results/comparison_std.txt @@ -0,0 +1,351 @@ +Overall Resource Usage: + Main Ram: + A B + 0.52698528 0.56158746 + Descendants Ram: + B A + 3.78413095 3.88233661 + Combined Ram: + B A + 3.47011301 3.5683476 + System Ram: + B A + 3.12673825 3.4679637 + Main Gpu Ram: + A B + 0.0 0.0 + Descendants Gpu Ram: + A B + 0.0 0.0 + Combined Gpu Ram: + A B + 0.0 0.0 + System Gpu Ram: + A B + 0.0 0.0 + Gpu Sum Utilization Percent: + A B + 0.0 0.0 + Gpu Hardware Utilization Percent: + A B + 0.0 0.0 + Main N Threads: + A B + 1.0 1.0 + Descendants N Threads: + A B + 17.97220076 17.97220076 + Combined N Threads: + A B + 18.97366596 18.97366596 + Cpu System Sum Utilization Percent: + B A + 112.84869162 119.47923669 + Cpu System Hardware Utilization Percent: + B A + 9.40405763 9.95660306 + Cpu Main Sum Utilization Percent: + A B + 46.37932746 46.8672935 + Cpu Main Hardware Utilization Percent: + A B + 3.86494396 3.90560779 + Cpu Descendants Sum Utilization Percent: + B A + 124.94461173 128.94789745 + Cpu Descendants Hardware Utilization Percent: + B A + 10.41205098 10.74565812 + Cpu Combined Sum Utilization Percent: + B A + 84.0937572 89.63866434 + Cpu Combined Hardware Utilization Percent: + B A + 7.0078131 7.46988869 +Code Block Resource Usage: + Main Ram: + X: + A B + 1.85e-06 1.85e-06 + Y: + A B + 0.77459767 0.77459767 + tmp.py:7 -> tmp.py:9: + A B + 0.0 0.0 + tmp.py:37 -> tmp.py:38: + A B + 0.13291957 0.13291957 + Descendants Ram: + X: + A B + 4.33982796 4.33982796 + Y: + A B + 0.0 0.0 + tmp.py:7 -> tmp.py:9: + A B + 0.0 0.0 + tmp.py:37 -> tmp.py:38: + A B + 0.0 0.0 + Combined Ram: + X: + A B + 4.33982688 4.33982688 + Y: + A B + 0.77459767 0.77459767 + tmp.py:7 -> tmp.py:9: + A B + 0.0 0.0 + tmp.py:37 -> tmp.py:38: + A B + 0.13291957 0.13291957 + System Ram: + X: + A B + 4.2288752 
4.2288752 + Y: + A B + 0.69291306 0.69291306 + tmp.py:7 -> tmp.py:9: + A B + 0.0 0.0 + tmp.py:37 -> tmp.py:38: + A B + 0.19767746 0.19767746 + Main Gpu Ram: + X: + A B + 0.0 0.0 + Y: + A B + 0.0 0.0 + tmp.py:7 -> tmp.py:9: + A B + 0.0 0.0 + tmp.py:37 -> tmp.py:38: + A B + 0.0 0.0 + Descendants Gpu Ram: + X: + A B + 0.0 0.0 + Y: + A B + 0.0 0.0 + tmp.py:7 -> tmp.py:9: + A B + 0.0 0.0 + tmp.py:37 -> tmp.py:38: + A B + 0.0 0.0 + Combined Gpu Ram: + X: + A B + 0.0 0.0 + Y: + A B + 0.0 0.0 + tmp.py:7 -> tmp.py:9: + A B + 0.0 0.0 + tmp.py:37 -> tmp.py:38: + A B + 0.0 0.0 + System Gpu Ram: + X: + A B + 0.0 0.0 + Y: + A B + 0.0 0.0 + tmp.py:7 -> tmp.py:9: + A B + 0.0 0.0 + tmp.py:37 -> tmp.py:38: + A B + 0.0 0.0 + Gpu Sum Utilization Percent: + X: + A B + 0.0 0.0 + Y: + A B + 0.0 0.0 + tmp.py:7 -> tmp.py:9: + A B + 0.0 0.0 + tmp.py:37 -> tmp.py:38: + A B + 0.0 0.0 + Gpu Hardware Utilization Percent: + X: + A B + 0.0 0.0 + Y: + A B + 0.0 0.0 + tmp.py:7 -> tmp.py:9: + A B + 0.0 0.0 + tmp.py:37 -> tmp.py:38: + A B + 0.0 0.0 + Main N Threads: + X: + A B + 0.0 0.0 + Y: + A B + 0.0 0.0 + tmp.py:7 -> tmp.py:9: + A B + 0.0 0.0 + tmp.py:37 -> tmp.py:38: + A B + 0.0 0.0 + Descendants N Threads: + X: + A B + 0.0 0.0 + Y: + A B + 0.0 0.0 + tmp.py:7 -> tmp.py:9: + A B + 0.0 0.0 + tmp.py:37 -> tmp.py:38: + A B + 0.0 0.0 + Combined N Threads: + X: + A B + 0.0 0.0 + Y: + A B + 0.0 0.0 + tmp.py:7 -> tmp.py:9: + A B + 0.0 0.0 + tmp.py:37 -> tmp.py:38: + A B + 0.0 0.0 + Cpu System Sum Utilization Percent: + X: + A B + 108.67268725 108.67268725 + Y: + A B + 9.54594155 9.54594155 + tmp.py:7 -> tmp.py:9: + A B + 0.0 0.0 + tmp.py:37 -> tmp.py:38: + A B + 100.27071571 100.27071571 + Cpu System Hardware Utilization Percent: + X: + A B + 9.05605727 9.05605727 + Y: + A B + 0.79549513 0.79549513 + tmp.py:7 -> tmp.py:9: + A B + 0.0 0.0 + tmp.py:37 -> tmp.py:38: + A B + 8.35589298 8.35589298 + Cpu Main Sum Utilization Percent: + X: + A B + 0.0 0.0 + Y: + A B + 11.3137085 11.3137085 + tmp.py:7 -> 
tmp.py:9: + A B + 0.0 0.0 + tmp.py:37 -> tmp.py:38: + A B + 2.59986263 2.59986263 + Cpu Main Hardware Utilization Percent: + X: + A B + 0.0 0.0 + Y: + A B + 0.94280904 0.94280904 + tmp.py:7 -> tmp.py:9: + A B + 0.0 0.0 + tmp.py:37 -> tmp.py:38: + A B + 0.21665522 0.21665522 + Cpu Descendants Sum Utilization Percent: + X: + A B + 84.69719147 84.69719147 + Y: + A B + 0.0 0.0 + tmp.py:7 -> tmp.py:9: + A B + 0.0 0.0 + tmp.py:37 -> tmp.py:38: + A B + 0.0 0.0 + Cpu Descendants Hardware Utilization Percent: + X: + A B + 7.05809929 7.05809929 + Y: + A B + 0.0 0.0 + tmp.py:7 -> tmp.py:9: + A B + 0.0 0.0 + tmp.py:37 -> tmp.py:38: + A B + 0.0 0.0 + Cpu Combined Sum Utilization Percent: + X: + A B + 84.69719147 84.69719147 + Y: + A B + 11.3137085 11.3137085 + tmp.py:7 -> tmp.py:9: + A B + 0.0 0.0 + tmp.py:37 -> tmp.py:38: + A B + 2.59986263 2.59986263 + Cpu Combined Hardware Utilization Percent: + X: + A B + 7.05809929 7.05809929 + Y: + A B + 0.94280904 0.94280904 + tmp.py:7 -> tmp.py:9: + A B + 0.0 0.0 + tmp.py:37 -> tmp.py:38: + A B + 0.21665522 0.21665522 +Code Block Compute Time: + X: + A B + 0.07070438 0.07070438 + Y: + A B + 0.0 0.0 + tmp.py:7 -> tmp.py:9: + A B + 0.0 0.0 + tmp.py:37 -> tmp.py:38: + A B + 0.03525138 0.03525138 diff --git a/tests/data/sub-tracking-results/files-to-combine/main.sub-tracking.csv b/tests/data/sub-tracking-results/files-to-combine/main.sub-tracking.csv index d71cf27..de77e34 100644 --- a/tests/data/sub-tracking-results/files-to-combine/main.sub-tracking.csv +++ b/tests/data/sub-tracking-results/files-to-combine/main.sub-tracking.csv @@ -1,6 +1,7 @@ process_id,code_block_name,position,timestamp 1723727,Y,0,1745449624.6481984 1723727,Y,1,1745449626.4954815 +1723727,Y,0,1745449627.6054815 1723727,tmp.py:9,0,1745449626.5598955 1723727,tmp.py:9,1,1745449627.653375 1723727,tmp.py:38,0,1745449627.6730132 diff --git a/tests/data/sub-tracking-results/invalid1.csv b/tests/data/sub-tracking-results/invalid1.csv new file mode 100644 index 
0000000..1fb2c72 --- /dev/null +++ b/tests/data/sub-tracking-results/invalid1.csv @@ -0,0 +1,6 @@ +process_id,code_block_name,position,timestamp +1723811,X,0,1745449609.7528221 +1723811,X,1,1745449609.7528222 +1723812,X,0,1745449609.7528223 +1723812,X,1,1745449613.5325918 +1723811,X,1,1745449613.5325919 diff --git a/tests/data/sub-tracking-results/invalid2.csv b/tests/data/sub-tracking-results/invalid2.csv new file mode 100644 index 0000000..189d8f5 --- /dev/null +++ b/tests/data/sub-tracking-results/invalid2.csv @@ -0,0 +1,4 @@ +process_id,code_block_name,position,timestamp +1723811,X,0,1745449613.5388888 +1723811,X,0,1745449613.5325918 +1723811,X,1,1745449609.7528222 diff --git a/tests/data/sub-tracking-results/results-A.pkl b/tests/data/sub-tracking-results/results-A.pkl index 1999d1b60b403dc193ee96f797ed43f467b8b274..d692f0fd14f1e49ff85cc99ccc23b614c1b14146 100644 GIT binary patch literal 12036 zcmeHMYjhMv7M?s!NDN8LLj)ouJd(hUgs6y!QzWc_1A!33V?&tqOghuk%q!E~CQ$=H zRCV;QA zsX3>r>(;$hx2kT{cW+Hw{OWUN6G?Ke(=XY?0OMG|2F!wF*S{4xvr4kxg&SO6=S)_R z+@heYQi>KUtxBPMmG5pgz_{ItoZ)A@^^9P4_yVlCA;5T8MNXc`h|C@2J17xv^Ed;F zoJX^}ydoR$GH$`_VZ4l!4G4;yTITjS7EDv?FkqpaqlwtuF1uh3$|!PQo89sxNJG0UTpERSSWV-ElT?ewLMObxlgI6sJQbxNF=jbM8)1-s}ypX za-!g1+)Mz3xI8(wAw=vpgO~}#%9A#a+_dVXWvfM52E%j1xT5kTb(2gciq!GOq9rf9 z{sztEscgOeNLO=e`22um z&Xi^kvMqbeff_T7&hHAR|6(kK%Ni1xboMe|=U|(J^+62Mv za$p0ony}em`>@91oXg;{xLj;kT)$AO8cghj?D8iQ=6B2(dyZan`N8atuV)XZ6=0In z{#elxLi)|-14*N9ub{aiOeQ+grENTHSYNGwYFmedlS&OF*b1`7JKAXNOL3~YgC-HK zhPgC}Z2rkXG>P=SD-cF9jSVAp!-cn%3&0M>_IMA^^xyUJpj(d8dsr1X-MKO$o>t(l zvT>P%cO=oIu9CkW+&T45nxrk2Nng>Z&Gzsm9bV}EMCFTlOOKDBm6-hAJy}mrDxpc^ z`@Ffkf*WK{jVwI%rVXQSSx;-pvFW>N9!#Cs84@75w; z&Fx3_epG-q9bvmxj+%tV{&V?u=kYXDcPQ}})p>QOt@HU~ds>I0GLAX&Z*CTvb!2f{ z@p}W2qS|MiHJ+ZEr~u-~*Mo5>w%}@~F^|g+z1nreljA%tuL3#KtjO^mmcbQ4tXIfY8Slk@L4|~ojn9Sg z`F@bW(0qR=!w1e@s_^;Hg6~K<$-}t3Ht;3@@#|T^!2)1=MM0&cI(#0x%gffQ)jmRt zAhI5Am2J&wdZ(wyCQJOr}`*W!Y`7KN~N+ahO06Qa!;FY#2a-yxl%{a+tE;zovCc#!O`2qGBk+r!QJL?vd72rV2DS`+F 
z;;@k}EvLErF(5`v!8Wf&9Q4Qgfvp;HB7QEB?+_A z1P#`})JC}dU_m+JrOC-omy@x#hzMWlb)$kL z@Rm|F4AHSYG70v8B#JA9gw+R~qq$0Hv)~s%FT59LjW`o=)`YWraMp^mHk>Vi%(z7O z42&|D+-NQ#5nwz60Hg3W;FJ?d_$2@iVKhm+n{i9H?bo<13Bo=bL}#!}0q~BKRpgQw zNx%nk8WGpd1wS2X_|U1w$?%m>Z&mmzwWx+-g}`rv!fJwp26tt>7WqMJrv|8i7o15> zFNYR#kc@#VxdG&uIf|SH;hzm#v)>0HrJz_-k5Wv3+((PquA=g9ohgNP*cTaxJq6g)ABo1pBG3h~8#72`Dsq`|n zY@32ewh}N!h;ry{RkPgj4%}qGvA!FUz^^Ua^ZMEp(q#i@+f#dm}Lce;d zryk1b@mpwbS`7j(^=qtJQuQGp>>rp9dM=n+RG0U57W&HzV^@iZZxREv9_K zXFDEwq&92k?SBr)?|fa8f%?fq6BX}7AE$?@p{sTH*6kk+-um!udi`Q$%^Uxy zxOzkl8h`WA_U8xWBO`z1lV>`f7+H;S7ENLnjvIiC^~mwXG8|t-12qsX1iki1_nF}a zh^EWlK)}`;2z>v60P#ck3b@&c55mvDUy-~I$u8OX-O#Gyh2~DoRrptdW{u_d;39_K zs}>zle4h?NL|l#J%3|*T9y&1g?`nSN8;tSBQokz*9bW`LB=(oZ!a^Cae>$dhhyDC1 zyu8?FXiY z7ui)5A5)0bC(ml6KRFu*Ft+RO$2y;053J~4KSP>g7*FRinR==xb~e@=av>^Fi*@&Qj z>Gc=T^L+ayqWqWQA5s_Z`!F~?y`I|G_1)}|$AWa@GWE3X`qjPbm*y6raRt*$pA$c_ zbd6io@Wk(iA^xjx54Vmu88(iwy%*)ld!hEk0g)i}d_W!wd!PF*@QnF0f9|_Hlus5t zezU*j*3w%vv`e`xFfViUkxGfNFO z^Yf4q5sd9w8G9aY9CA9Sr(R#X<>8OFEWL7i5a6>U~o<;1gx2_Dcl(11}vL zaev(I;5O*>gyl~?vvAtJAe{_%8CmJsbIz)axu|K~`p3p>9fZC)__w_ebjG8J1GBf( zY;L`1NN%j`1j?zn&M%m^ewFsTa1 zF^zFNJtn7#CsuoMtcf+%+G_l3>dz5Ut+lC*rWhNKe>MKZh)LDn_hx7Fc35B)V{=Y< zd(M0F?z{KCH*@cM-@R{ZwQEat8eGJl7PnyJJ+yft>DvPLp!mPy$m zCTWEn#A1~F{$>Uwbke8&~+}vmHL&23n&r7EI5-H6Z zFNSkw+D>~w$eJvMmiviKR*7jqEIVTB@GWbu>UZd+V9C_9K54weuAz1<aXPOOkQ?*7`ccUXJ1HoZ~7>b zG^Oh&8{AX$BxyF1o9ZM7`BbJD}1w{EpcY6H9WtwETVTK$7$i^p3a8B&l-G+fR=8MTSiB+A!46<$-N@w*2mt*fUL}4M&}rUK8HE zJ2~6hy2cZCRz{PgMb(@4nfFAIq=&F!ZmVa0tIDpCYo%-AhkV}bXR#$8r4G-!Gn!o3 z@cNQtn?EZdNv`6jn+{#{J50AUF^#K6PA9o0{B6Z<%jFo7RC+4>M+M2HB&oLHgYyTg z2a}{ME8X-@>s*pFw`pl@#@T){$!oDL1}$kZUIH8yd-;b8WlSZ*=}a^SK}J`jlc_M~ zIDrNSFPj&l(THJ67sV*rGKfN?mglb)4r6@!id`Hwrvx=!DT!JKL!%DImr3||*E&%w zlPe%JX7fQiTL2X(W(%QhsW#E4Ho`&MoJI(LAVeu+I5PtghLh)HO0?PKFxi|;ncVHl zRdGDy@Qz_Y6p>-!9;C338tYTwBgJ@?_y~Q_6DK%~0&lb1UZ8oK%V~5o9#{%5i79B* zN_)yGXb%&ZB?hm=Y2>XQhAsnyOC;Aq(BplpA&3LbXnOg@T3y9Zf~oDrq##bbpjMR) 
z_OZ0B3QtPDN~;=&BaI|y+lmJvcHg&XRcpE*TbnA86S5|7HQ5CjrrRw2fdJC0*KHU_L8o-aQr(Y!Uv8Vv~oWCnId6|X~rkX91ZhKclTn4a7sJ8d^x z8Ap{49(9bJae$}7#8Ha`7Y};{@5MEe7y%mtqJA4M<>Gz09=?wl?e+T>1ChMV0S>NW z?}2o9z0*v)X|s*5BGuuPs2!d-MIN#qFuMTTidayr)5ui188ccrN<=unL1MGEknB!K z!drO-Rjxpn1US{c``iD48rWiRN|t$(R7gYRTpHlb zsu5$eVFYg>b&ieqz8F#@i7}9=8j&lzU6B29k|A1*L?lQsVGkR+iak)z2}eBRaPw8) z(Gk|T1=#LLSYlVha^(Eo8pW=aJ3;Q%FlDc@B_PKxK$Q_yrBI1syo;yp7&eI$E1;Vq zMd|g~Y2YxmE6zt#(574vVyMQ3265E6ZdB6g!>_;Bdz&8U&G)usQBv@})8-o4sL()u z!k&lqBTT=5UPcQ%>_vFO-oai$w0K1OH#}jV2y5vR>~+M3{DHlJs#ZNA7aSo5tPi`; z45tzY7ao!|LNSOnCQz=Z#WxV?by@3S8OjBLq4BgKliHyl^u@7$1EIaXXZ7lmguPF{ zx@v2zea2C}k~}V)nO>UwX#(|ok9hjq;y6mVI#4pnKYSgZ{HK12RBv&xgvOP71SPGk z-Ib9#sn|db`EX~}f=jW;R^Mw;Qc$J+)SJ$nnYjDnfE}+*el(NoC|QrXRkUY$$JMQO zyf#N3SGO>X?Irf}Zs97HKu?le>yCQ?(LI#>Te-+{7T z(DSh6jdf5?&84tBq`9dBp>%and2io3{MPUSs1@gh1{l2(7&>5pT;)BuG8Ib#;_s)R4wOpu!0XdEe*yTP1sA~-Y z1e0K5@4%>h-h07)p2Xe-njw_Ehnf)fzT9XK=zL#r9S{lwg_HzV@2hXlV^P*T+`p?e-*v-$OZKM+AQtK zo6pZ2^!e=>l&7vw*(@Q4V#ke{w(hAMDtb}l{lekr+tuIZe*HQ+XZDTAy{U(s`btsk z4gG~#lQfml?;v`bOB(WcbLxfwIDP7Wi_-;{oSPHYT-GPV9ei`qps(~HPiB6dbnUwS z#E0jHz1(GgyXSO!@@X$#J~(=G4z*$XYnQe!%c1_0U3zS6Q4STh;+^-GJa?pB{lOw# zYkX>aR{E{+KO9}bRdpMzHnp76_vX>?e?Qh#R7EwQvd=*mUBRgtX6T1eeJ`R7AFZP3bj@xWv%4wbwVS*@?*fdxkfFg^cDI0V}J9@ z&>!be>Q)&KsMbfscJlRFKD0lsz_PlPb+s?wa!!(>$Sbg_Zk2IThJ04W)pAO|xdy}X zmH9F}6RoVJxdx}hGg9TWm}r%8+?IN;tJN*o7sLC>elk20t)INy2~aER4={vD-f{K3 ZNP7bK8jR8B`8!;6sUzLhlj`dqJmcGKXdP_vv)Uf4HWe>lXGV7 z{PWL0|IGX|-~Z31F?r1uS92t3J!=9XUJVM)xnj_!gdFr+*SgFIIc~-cZePuGQ3-ie zMPIECTBtA33$@Gqw~Il+70lZ`qISg7?dyYL>jL$QVX=vrq!){J|1NAnZsN)5t-9j)Z)Pr0;C_4QvnFjUY6j{ zm+I5?LVE)Ibzh=O-L({#S|1Q`r^G2E^`>P;dy25o5XPpX=nYG758r6 z&vG4qs919}O+XA+jE?b<-hTD-87=Ggu;SzB2s3muGn58LE{ie0PNpEL=A`KP0m;fm zS0TTeo!=vWf+Y84NpYx2WdSQyA~tCbP8%v&@kdDNV~|AHFfGjya)W*Oz-UtxkK2h2 z#Li)}!S-QK#JQ9!b(4BxyOMfETGds=PRK6b%1!xI@15t_HCGOu+&$~m09Ju7&rQ4T zZ@#Z>to99ZdYrd${z>k>%-2TG&18(=fL9wT2{C z0pEo854>HT#*#XX=pl=*l(VFE@8OYl9bVXq52ihxxA^oBR*4Bm=XcvYzKA8=knz?Z z0ymVfB;0MX 
zVJAy^?iomT3^qvS9wyq8;@iVPO=TNLeVtyqn&*x4e~@M7PgrRT0e<=zedo;v&%wCGdcAOHDk0jfJatUk#< z9({KGO_R3V*cokfZC|90;Zg6h#;UV_?}u(bd`%vgT8WzWl@)SnaQ7JYnPg99=cej_ zINIfr!0Hm`iHgl3C;-P9yics-C;ET}SCrg6f#b9kTE%Fd>;k;jqi?EYxVGSGXE9GI zh`ib<;%P}juzO*HZGsuUCj z7XU_gRu_ieC9DlYEO6rCjfu6Y#XyFsZDsN?CsC+XVg17_ZHpp8XG z59%CCnNk+`+u&$>M2q_i{&rz?TxTYoJu^L(T94pyN}{*k2A?+3BYMGDVeHJ>kY9!4 zLQdprT}y+r0e-Yw)ho#9lwgUpEYtL(8q!s_7i`?Z-WAI*4b&+F1gBfAXT^~q<6C%g zX@7_#VCO?{vKWPOe7ra>AUd(;*tJ(88kzGN)w46x6J}+0v{-{s9pm$gE3*&%iKS(Y3ZB5&7I2%|~r?SdzS+wQ?_X%P1D5S_s?1;IN`H;^kNgcN)rCzI~xxsazo4Ietg zI0bnc)EDUT45O%oVwoa81BIP~g9dk{%Z&UWwle}$zzd1fa>}5E93*StN^Srxp-R`X zA^hX9H3$3!jfvf+(*^`hF14_AI13CwyD!_iS!D!(8I^jN3;PFA1(&#=)*|FlbB=QiHL+zo1AD~9i$P1 z&31ze=SBeaQ$*s|>O5qv@1MA~&whAzpdlxXGih-7jiku0cIgS>9*yxLrEwe$&S>0b ztv@wu^~Rl`XLVAS#vK*i(3(fa9Q^glZfJdKw_l!GV@Fnbw0_78ifBEuj9yQo-$N?8 z-19;Y&%)6tlAq&lu7vcM2d!usxx6L67+LG4lg`B%fXK(dRv-rX`1zG9tGoU6#%)3E z@Rzf5Q7^5(j>`ehJzaY1shlV^s9HxxJ$v%1t@n+g^=~U@?|-AB(~#NdhT$zuf9X?z zto+k{cjl!BhgPB!NUJOSRlTNT!9T0`zi8NoW=4`LXMC_egRqyI5Az0S-QLvX!b;T7cLUy-A3^e z6c@{hh#Piexx3hTfX5K*8f>4p{44ANSW9|W5IVjHE~RTwo(KzNaSdi-T3fDx{5f7; zvR@dJWA2x1It226fFmv^Ku&>in+VG1I4d50xbgsAEs|)x;au;3ZObZ0CON8T8$GQk zZq@^WArB2SP#j*U>9e$T0D|&NJAGrOe6)VPY%{-M)5X)X1`oW!f!-yG6`I~g6{2sp zbbMM8J>hwmwy&JDa(n47J_)ZUtv{b94jeZ7;^|4ei%bXl=tD@^rk=xRqHFL&@~3%- z>PJhWC)3_el>c0PIBVg{C&JUC^{m>x=VuN*9;VY_+grMScE^UrJ^P~3eWw&ZuAVH} zJNm9U5B`1tlE3)+$bumsMvY@_??8F-rf5EcK(xp_F_6c^jwiuZo;rWzPl6YQ^69EK zU<=fbDjs2?eTISd83x*C$Uh{sHzSWixMLbmulg9@3EQu(__PGx0zRn9`K08;)Y06$ ztaX@P=-1)5$V&|PPsITI7r%GFfA8v&y;F9c4&R`(K7HJ?BiA07h+ws1rgi~7cN@ad zW@Tl614!qXVf>LRkaZ9OP)z&2oo_9Ew)V5Ip0s`{_sT6n+r?mvf2EwS&P2Ob@k7szyVAnXan|;B0{-jI9UpRc(hK2jp!Jlc>$WsZemP7hbH9MB z`0Tlw8M(7i-TDm=jM$owzB=&Q?nTYXXzUf;pPc>7o~UuG?Hz#M0r-(WYyO}726$nB zpJoC+#ruRGL3EGaYarLHwWPNXpxW);AbkhYcOX3=OPde)V#s0pFMXmJc>n+a literal 11883 zcmeHN32+q06OcfY&REW7$4iFQra2f|l8S;B(XXzcSq(#EmRZ**| z|IF|I|NNQ$-S7Wim4>gKrq{_$Y+LN|P=cE_PiNeE-eXd)B{6ZF$21-f*tp`c4DYcE 
zytGD2UnI?!(!>{>GZ{B+w@YFR7tL8{UT=1~8GVtPb}*6{J%kqMVe++4Bb;&+yCpH1 z6t{5#H_E8e9OzqNzL=5mEsZgJYfk;y>dqZahqy4?+>%&_=wWwY&dkQQoha*O2Pw$eQm5EUlFK z6;cYx_uKM2Xl!Sa6@;6rEf7_m2+S(NZBqehzFKbW6&3>%CPtY&Hi$0{f;NJ;+srsX zj5-`PI6jP_xVN^jwz9Uxab$*|vk`6p3zd3*Q%E8H3yOIw4_raL{ zpT>40SugKCySU%#e3I4Ruh6iGhY4$5b6UI9WMybr@ZEp)7BqSR>3s{_1`i=w@>F~_ ziDV7IZE*_8>X!lS@C=f*9-H4gkYv>-g{Rf(h8YZf>N#9?uX<(l(u?FAwqGzLyxMRqDWS=fo)jkP#(!@@Ele*Xz{{STpzP1dC9dNq!YR4=d{{BybsBGCFaZb zT(1~N7FHW`%1*N8f!g^&86<1!s{QxUruuDAime1v>MC|(`4p1%n*qmcVTGwA>vP=B znMu~iA3>YjU153WFw_|K)N?rKQFQjw`{&6y%-xa3Um6-uKJC$; zs<3F1)qpw7uXWF_)z~f4S9)P$*9-UkW^BP7@H7x{$1y=lUk=BKy8%~;-j#W07Blc1+(OJcZ#q45nbSR{FS59e?$lkdT3%#H)=Yyos&n4Ji; zrrFj$vr!J(#!-;|K#F2vcrybT1}E?eC(i72m~0$lQEp@S5?){&-Z5DbXJoQ+OH$d# zP4F4;vEsc(e2gS~#d{o-N3hv#b7;Zl(w7^hy+Qis-sSGlHF_sq~K+KeELQEuA9M8CI49tGy zIORgk=B=^TI4BUHGO#n6d>smew8EekOtf#owB={mX}j6VI7;+zsb}nr10oe396Q6~ z6kx5$YjKSvM!~{>tluU``SQAa2fmLO=Z*Us1JQ!b0RgTNZ;5Sqqti^gXtPZyA~gT!L3q1l|!gtzi4YC?f746v&``I^zi zwqn{;BH)Q5uOqBZ4}^w-GCA9}Jj^*Zn%t)k8V8mSp4QvPg4`}nleGnA!5qj<&8b1k zkA>X_pM%*%eQ&|O2PW7}xU0n7R^08t-5+qb3wL{P_Yrj3?}MYix4ut5eIL144W|J$ zBz7H0iji`q$OE`wh=`m?+dX(}KOT#M3YCIF626fe68UHajy$x7$4haEtnOVA>@v8+ z%T0+)nk|R>`4YQaAuE8C@$6k7Mjb3M*d;CAA{EL|rIrSGv+Bfz92k+8khaE_*S;7s zNfP6sQl)SxyPQz{@{%!5jK(Y|FkuZFW5kyDj)N_pakzw%Cwz=GE)OhstSqq=;Eqzi zTVvU^%1w}KH9}phY$52euj7uw-8AUL2*D}Pb{RH_9V=j%VngZWfc`oiIuOwe0iAZ@ zW;}voV8n+#;?c=%iMxAD-emwjH~wrkl7f$&I(=kgLj%PLdkOrX#L*(Aqb4_viuR>N$d1R&I=J^^2=(ji{>f%}#Bm zB;dLFUYlBis`RY4oT(nP?@FiLtFxZX!}Un9qou0#Z11?1ddEvgDC1fR!*W4eX~wZE z7q)UW`>ue#%Y1F{X7>a%dls~T-H}@5wzL1GjeXHi(WY;Uw)Vf#L9y$7OhbrLDo;Ra znLEA!+DDZQF!q`fa$ok|l#tg}3pFw!m$L$PsSg3lYC`~z$zx(4z^Jm{d%=CO z#C``nV<`IwlMwc?LUb5(zAv~A2!(-CN&>4(dvADzF0s@t@HxSp|Ni?0Ck^U(ZpFxos_Yea3}?2Sm^*jp9sK?3Z1w3cc#w2+Z(JUFoU@(U zRGEkVoXm9VJtYs#kD%JF`1o4A_Un9j5Q8c%YiSUwPa)DBDZ*MBtO>22dhgYD(Wn>$ z-6oTQld&e0o;duR=uA?ci@Kq&WS~q#w{Ad-PeTIT>vCb<1%r%_@T-7=lamxWU?SR6 z)Us;swoVGZR<}p$`E*}=D1A2>ZOCvIJgoRvqvxlw&u_wKIHQqI)|w8?{!b3f8i&u( 
zg2SbEl?=)l=*8(61*c;aoQ`3CbU3ZMvi3IKXG$x&-og9Tu+-zX4f?GwrY@gdhWV%1 zq41;P^Q@^q4}jCBe^{LUXD`E%Q3tLWjve@N$-Wui7&=wnxcJ6fw+-8VKkkojpR1~O zpRP}T?9hFg>rakAS6g)4;nwG&dwsI4NxD3=GBf7LPmUg~*S;628cekKe8yWxU&zYp zwz|6X8P(A$u4-wpZfo__dsTm$Qb+H#D72hLxs{`b6|7H3xU4NGcnNh*g|?vw!X(wy z4J}DpemXPpwt~||6VMU-Q=xql!aqpP5hkLyKd3&jvwyf3r#qnXGHAIe=(C?wJs%A1 zi2inK9+!RRP^0HZ#_2{pzX_+Ca9aJXt_i38&(s6f(NSKUR$)5i$6iiX^#*u`jZRB} zC707O|GC0+uP``q&>VedVcxSk`hNOy`tr}d-nhth-JqokK*--o#fkv*{z9#udavrw z`0C7UPedNlQgu9lQG!+f^9kVqm-iQHVOqUcgd1;Pv7^GOmIiBItEb+3_2J+0ojMQXE}uUz53mlYU~M^(}TjIWLntxmtdTI&VI%c@H9I7VW|lkZ@Uc)Qmns7f}? z?)FKn;A1?Z)yw!87b}RWl2YdJJLlb{)nUSXB}?yO_qZLRHP}a0`q*V(!0nvpVeJmw z3AmLkpX?1ZS?xp|F9_{s1c7OSaqR-@^!r3fkew3dOdy=9as`M_SXQVu%VM=W#-T1% zr>Xh282Du_QMpVdp-yTFu((s~-M6T1ty+7W67zssQBiT{IY=b4l}f6ktwzn~GL%Ho z$#@t6I&s-bY<)<#+f3ao(5*aaW6_2cCrdY%s!L#cZYWn!o}?X;#X^yGzp?P47hZpZ z=JHN$zV_vZCL)R}LdWPx2VOr_d3eoUdiW?h!V2Bg3c142ZPC_ur9O}$=H&480m;k- zyWpyav}+E%zK>2qvgXmGD8!7sqY*md3G3^t=;2#Qx<(_ha9~Q3Lw19H`9NtCM338v z6~t!2YJ>H|7K?K(o$JT-$9lyL2-T|Vh@OyB{^*XmyQhpkL+`orNapUZXAYwUV3Je+ zP|*}Z`t`>BNh5Espt&L}COXo^9XuRZAANl4SO_*gjsPB6CHXZXdy_Lp;RIZU5nrSN&jvV?eAfazsp zGOpX6M3Xv;|MBLI$#>EueXA_Sjz+Dv2QTXK!tf`iZpmJJd^jz{#P{y&_w0mXnl!f8 zTdM-$;|TNh_|3&k8Y6`K$lTZb(g|xuzdp^W-ass%g^U z6MuF6du1+7`VU^?7fzbgxc%v?NBpKxBk5-tYm1BS3{A#4lIdzIU-w@9*|BK{50w`0 zpRh2oPam|hwR8WOM^e!I=JoB)?Y)t4WZf%2E@c*G41ND}FT z)*yfN?T21Fnuj(VVmp_OoPb9EYw0%E@l;fMAo2RD>{`^)@%)ip%|lQb#~k`EHv`Q$ zw5X-gZDd8OgAoC+d)a+#gME?@Xkf(n0WlVfl3-LZ z-pAJgqO0MyEFKIS>>!PbTyCiGH!x9&^SXU1Tg#Cur=S462!CB`(~hdqZ18Dk$m z5a#CxK?YOv*FqUSapq!`9||qliYiH7#_h9%zX3i$9V$^}O(!Xwh0T@O4Mvj_L$f}<9Muyr{k)Ot?qM9PM^szE#a4QY z5-5n%PKHo2=xZsmf}4fYPps5F4ELGq&82`x4z9R|H95%Ok>}S$b!U?4JJVmB?_oSn zj`cQK;nT``ST86m%-wsw?3duWkQ=!|Rg&OtfT!)2)CzJtJ+MVeimv)$4at(*3p#FM z?~BLK71YTD7^homqK6}4$anCh8~q{DfRmTuW>EsA`Rr_CfOTTYvGXs6G*b69tY;sp zC#*{EXwn9zCW7^Y4dsX*rzE@FF2>O$;j^Q^6F9#N+M#*#i^P9>29Xd>VcMw7&Q8IO$HmgBZ02n_8Izri*M;6Ep8 z$R#tfh%e++(p|q7d?nQIrPGv?;j5tDtnxFpVm1`5BEJI)n*}Z!*vdLRHiYO;3t<5- 
zIE#{24lU#&nL}G*0hE|~R3#MxL_5~zfFFWRQMIKg$(RIj9o%GFFQqT8`M}V#-hk9} zo{kYGAj74{omIXD#L>cLt~bwUl_+m)0(E2g0_e&Y;%qq1iXc-GB)`OX2!v9mE26T( zSmsDHva^#9iq*38ND6{7=R-Lab#`+_rw;Rfcvk|3^eb++x zw_r8l`oup6Wb6dIfLFW^XPY1+TEuU`)pv3BB4k8&5-wSH&cIL=pgO6bFK~68V9G>z znNYbT;|RIbNI_j3ek$PKhu&>vry)0j`Q^(trgwL#nf;N{L0jCjV8lmO+kwU+^ zE2k0c84+ISXnGAoGwo}%R#NrBAM6{H3v$kzTu__+c0cr&FEg@V9h8Y$wif1%skZ6; zwBB_7wjL7EwDaHL`$^;*Afa(Mb=->1^J-stxsZB!(atQ-&a62l$XwSQa7&coho&** z>pt23*kd*QcHI7=puF>SbvhcL3{i18^rbCjbC0EkiD8I!@YZcduiO0SZASgO%IY`% zS#jm?YBcud!)?!BlZ(v!Q=k4~_fthxC~M&aX2F3v_5=d-%bGz}#Z15R%p><&06nT8_mE-&l3PiHN_^ zP^il}TXRt0(f0hv+L!UZYdq=>>wVjlx8#q5`)SnqQ8$iTb@SzulQ=>#PHjJD7U*)$ zM8p9ox%L8bo^QWMpg$%3K4sCK4};4y>M8R(&&@145~Q2Ask?ReJ@>C&+&>SE$-BGc zIq7I==a_}{PyKc%;=lU#VDs>kVe^>Vdw`xi8S1Yb5DZc;2;{A>=f&>=FPT5{7r#pb z`eex)um+lLExAPp{3H$Vduf25#Q&rKpML!4- zYK9hZ=4K-^BADCzrSE!j{@~ApavJs3n;t#3Y4PQglgKfv302|r0n!|mrdcQ*r>Wt9 zPvoT9W3DdwGV5caePk%3{!Mx9zME!UJ~=yZYImWnIISO{?H@N&ll!J{z$d~`?H397 z`(HXT{K2@L!DBG$2}_^(D&qfVv);=+6a}GND=HFj?xFa5o z8dCcuSfZqf7q5oR`LjK~rG{8^N0e`fP_c1?@03ZEt0kOE|(l9`Tj!nW3 z8Q|x{L4a-w@UO?TA2qzDjp@FdL2 H4(tB`bj{1D literal 0 HcmV?d00001 diff --git a/tests/data/sub-tracking-results/results-D.pkl b/tests/data/sub-tracking-results/results-D.pkl new file mode 100644 index 0000000000000000000000000000000000000000..7bd4d9ac370f1beedc36fc4577f5f11fea4b4d14 GIT binary patch literal 12117 zcmeHN3v^V)8NPX%7a`#(2Z%8sERVB-K~NrMOt3%}gMcZaF<$TPUb1_+`(W?Akbp$+ z@JhV}kxN^(wbhnaJ<6$7D#sVv;)|frmWl$$YLvt4P!XvhRr=4|JL~M-O(!@jD>GKW@V-I5Ar)EXCUG1?+S#~u%{@Ez zvRsEBF4f)5lM%y}pks2Rx8L}5X6uGstoS54!VLY)41Ggmm&Kf47fTRTb87tjfMn;Q zt596W&hODbK#~Tqq$Jd&vVfH;k(e|Gr;XLD_(LT1Gf5(Bn4aa<{9s=}Fxm`7_It2_ zxHxP!*gjmTIQJF$diZ){yYlskwW=$LoseC=otyTHzB@0lYpxtP`pWEMgIER5pPPQ| zUvdYtr0cgGST^e3DwfpYRp{8p!-jQ@-`0Wgq*5CR*8PF|Aqh>a_JugzIFTiht$4YH zB~8R>Susl*RSId>JeKqz7QcNAOZqA|pBG-ouq=4z12!(aP>&zov<$Z#Jc| z3Ir#AxbN-CESA(|OfN}%y^EY~rt#67hv>DQz8_|xi2SW*j4 
z2R$t5xo03P8fucPJxq0_CAWvR%Q?~2)m*W8>=)VPE6%=GHhHT4-JQPW=s3@<--4XO-m{Ld|;Z$ryY2Y>&sH%icg z<0Bf=LX*%Z*WWmG>kVDeX77$=>UbXYtyofX_HP5x{DapNahcVqc~6Cd%Yxg#;pAhjb5F%n?mVBZ~ zkXL;XQSbt!^kj8m=-r~)FvJoM9^RZ-n_3KJnA%sS7;_SZ+7xzGl%;)9WKec0ZAu)D zC9<6D3m%BsqifNo)FCl!?TW;9C^^BVOe++zDeCDwxPIW#V*97wEguk7fgFfBfPO($ z!Eaq`=$ZT+S*RzUy}>mOEl_x`76!*aP(@xA+@h=)tH8z9a}^Z~#KRL2dN=l1Dq_Sh z!tQ6zG(V{O?927#fkzImxGdTnWbnuxbxEC>Z1&9bR_4iq?D2_#MkjnaMOh4hvBKE7 z^R$o($Az58b%ve=X9GNKziL#G(<#Fe>3Npv$2DZD{s7pxjlDaTVHv1L2n!y++Q^C{ zVaQkT6w>|>X~52FaIzSM3WB`Y5EeaHbL{*pF^$Z5jqBN!=?Sy4I@+v3s7rAB!GijT zAE#&6`|AaFql)*A{m$@(G_VfSo3D86(a$RO+`>ZFE3PrkH{LNKN&&%zMup(ZB5 z9+1Rv#gMT2pmQ=;DQgx|2&706oM|{~z}X_4HQ{V2&X(b9C1my`!e?NFbL1%JND?Ea z3ji>N;kMPdEeire9^yAxrU>}Y*(P#nf~MdDIh}O3&V@7+ zYWUEZ#wkeEP+x3Fv&>>P6sr{J1t?q`95lEqy;f`p(VrQ@0$%tyy`TbG$U(A)w&VuT zQ)&!79|A-kTXQ%BL8oH4^7L#>g18P&vMWdLj%z`<=*2)-ZTyms5htv{p~syKsSeaJ z!)9NuB$$;XPizKzV%x$`Jwpwtf=qCfxxW%xEC{rRqlrBp2UypVm51{Xl*LE1e6+iz zMIYJP1@$`k>V1#*d7=#HXTNx?oOm2b8X7wr=`C0YX+O^1#@RbKI{;a{QAtk#8G8ax zVxB!XdkQjQO48G~`aaIKLq-fJwv1m-qTj<%Sq>w3{j-RU@H)VoO*uk+PWSu+^{{O zA3R@Mi2CSP8Mqwu+|%W^A1jDcgQ|6C?6XI&+;;DHTK}rDcJDu`x(usD6Gpc-|EXUI zvhz>>&6!sp8eWZh-8D&Q8rKim>xpBK6*vY-296-4h3>LomV1f85(hO{<*iVse#aSW`MRRLzX(zOraALU}vkmDrpt(||*Kv^|?J|oN zC@z%~ku>Zka(6L)cLimd;jMw=b~LR19D4!wl3o{tj_-nR(>th2g|)JI2ep{io_8Rf z$16;>?7#Rc(UwV;Patt1Eh#@h$_CD60`vu*s!f}!_uz|lZ2>=QnP-bb8j~fj7F*yF@XE<#5y> z`mjsKrzO!7-hgTQ+No=Il>hAG=z7xn^F`v|5w(|2PvT(Yy3t1`A>~_oj-G|C!LQAy zixAb1mPAjcy@Npisk$j|>1#)#)1&pgdAl#n8h$uRr^9x%^#5eh#^t>Spm77Hl|G>! 
zE!#crt~n3=b`X+2`{K~zVJG9pvA1^uJ$Y8NUP2&XWL_D_dt&FSU>mQUKk!$ zH4t`&8^@N8u>e2E1pFKm@N=Z^72un}hXd}ICCBeOh3|wNS67`bgXe$~H3c7+9l2#3 z*O2!JraM9g{9ySl1O77!0RPqR9PmH5x@`BfoyVg$D6P-9>DlWZ+&2ZmY9&k^0(|Zc zgagj%>Z?p3onuAvhx#M?AOxV8_B}h_TK??3PojF#`mw?*wnm(n4oJxh#C9(lPy*?Z zVfZnrrA4km&8r|yE{@aF-hSnlldX5(PppqjiPq1IEcf;0+Sc=(b|d?twsD7=FD1>|u<>)8y}g5g|M%w(54$_<#ppKBddA8}wl>}TT9i)a zei2#e*>m+X3umJR8#X>LW?M1(eBYa|ENe+e6Z`jks`i;(apTzAI|08F@MC}3{6F~_ z@ZtbJ#{zu8?XeI)<%bdCqYs+Moog@Y6-4OYHsU+d;zRsvG3`4I@sIrU!wpNrC!$v< zt+L=sq3rEcFS>=a-l?bg4~6#iqU%IUd;8Z# eeDnQmGZpuJFFO&x6Y)C{AHb#Ew|r4^JO2knZY=8n literal 0 HcmV?d00001 diff --git a/tests/data/sub-tracking-results/results-E.pkl b/tests/data/sub-tracking-results/results-E.pkl new file mode 100644 index 0000000000000000000000000000000000000000..098298c27fda48eb0588b8f756de17d31d77c5fe GIT binary patch literal 12117 zcmeHN3v^V)8NPX%7a>6&a=;ja!tyu^Aqom;h6D>_F}zF(4e@$+_mbVq-3NQ`l|&_i zhga$?h+Nw$t*y2o)T5kIrEnBQS_CC1w56iJu^Qzdc&slfXqEmm_s%+dcN3O?uhUG< znYr`NKmYtQ^Ur+$KbuAA4^$23NYeW>gf(7`2%dRj#Hna*`fcbvW@_$RaDzYCFhf){ zSyhZR#^A-qLc^h761rWC2(oPGg<&D+6%?l@6cL?sBSJtl^y~?OD*T9iM|-66frf~o z7qRUApeja!f~+_LLQrTBBZ{HtRmdUFyqnEB4Cv5%TU~hB?^c}AK8D_%*MeccXPzwb zZrlmD_1-}(5N>kvM4bRA?H3{up$X*j5z!M0Dr!XYsF*W@a2on0pgv()W4O4bMrDfI zSZ>TP9Ih1j>$S}A_0luEYExLmol>V>XSA#^+tc)vyNv4U>Z-GlNaiY64R=eu;qdj) zGZl{@3lZq#E7DWv#&jEJ>&8L1%B*WQ&hzS1wv-#oV0hm^-{8tDbCWoZA?<3~wCdg+ z2UxCSkCf@|mPv@=O3*Pm((A8$HnVNrURHb(9btxkW`@2YvCCr4ubU-^syQWoen7Hw z(N!p}XXp3$A0SBsSW*&dQdz)Cl}JpQi_`iVR{T+t`kN#XHcZcQYksh=AQ)|iBKtkq zKwKO)8*Cr0RGj+?eLa1BuwD83##+^t#7@XA-^xw>MZaBV*)>-Vzqfn#iGi#FU!0kK z?O$>Sv83y_9A0wGebp?f)2q1+u#gocxBv|){9)u(`v)UKpG=2h0B3tod zElZk!(~@GAbWItgz4KYpS}cCsXqI#?F1)S22<%|$dGF!!Q%6^3FKc7>uz&M}vR9ka zSOtQU-aYtwRTfL?HoCVYzFftUI=zQSJ9T(rE8d^JwP@+dp{x><-nygblM_o>(hWJU z{Vse%IZMLbrXP}7(p}K)tubXRY0j#hryO%)E+|u00n2p)D{*ffOZxSw{r$V{j}E;(@mDYJ z%tb5y*;KnKEgR7-JHPV7a^dbC1K&PfikkmAU|7YOk*F%L{y)=3%|S~(3I6_Huauw# zCr31;g(jj;uOB~U(~aHH2Jem~>Npu`s{s#C2UQzLg0O>(hF)4YTP{8dEie9stqckb17_iz%N9+QT 
z&X=;Cqtt?^fG8>75)vi!!(WOP;5F4R`|l7`e<;X@#R#aP8hSBq^$8L00wE$MX2~a- z1bNjL5d|+mN-tIyhTbEp4MQyP;Ni`QwX4M-hN)v^iZLfqs9j-KM_D=+MFwT3(yqi| zSR%{WvEZvPdvq<@l{z?vtwWL64kahplxc+`Hbp(12iFfgT5SK+yX6C-Dv$$F56~~D zD)_An4Ly^eD+>+evk$n&p#=)>)xzKy2&%}-f?JdoV+FX_daj~^fp~Z#Lhr#IOGS+M zMcDn!ndS#|pMAN$Jn+cD6_-VugA5+IqduuKlg*x)KFWMqkUc&z(By!QGd*EeR!5sP2=xhWKUh#7 z@#FOD27iO#Zc_2yvELcKkOtOadh?Y}JpOs*{%I_9?RJfB8GqY|C<*+jJPSjphnkoK zdq5Jy6+^=6gU-oZrL0*>9t}*i2lqF7VyHy=>?V0LJpEWv?Vuy zo>FV*`4AxT*qXy32s#zRm8WN862x_Il3h9arMMP^i(U+b)uylM7;(ZH9D3Z@km^Al zGi>(dN`hHQ^2BDaC$>HO)HBqOD#!#!nfoiD#ezV4IGWhwae#F#T7E1KL0NoU%SZcK zTlLbmZm9Q>J@-G^cXK(=Pk;491@SnNG&FWL(rd5~(jlC^j4@;ad(NHa6Bj-dWy}~RPO-3Zp+UP-z3m}ai zVzrxGI5$JAA0ras))ygr{lMh4{SUyy1Pw}QBuc~2uO-EPbxKbN2x$};D~;o5_(mf; zd;QO|*KF7Yde*1)ShTacCwk!Vafg1nx+hwf+4JWo9&jPMJX$|&g-x^`TgI;^(eDuz zT_(TKTV6aC#qx99Ej5t-D5F(v*R5iV9~-?%NJ zANiuL5cSosHgGv``__tEPZY$dLDf1s=GphI+;ZPIT0d7=ci>&8a;P5351F@17_$kL{1_^~5pAG8}^>14j^2L_K?HFic$$UBg} zz$;9)?7#Rc(UwV;Patt1Eh#@h$_CCB0`vu*>c<|dIfz$_BwBAi)9>Hg@~V(Ujw?C_ zP&<%Y^{`;dLxW5phi7d1c5NSk06o)AAEGHAt)H!Yl3%~^;_2Cd2VU<+ZxY2Emcvnt z=)*1@pO!>Vcmt;GtEa5qQSq~nqw7iQ&lQP-M$}zAJ&A*n>qZ}b04d+nbJQ#}62CTo zQiP~}v?O{m?VSYr&(z2A7Qb{nIz3v?o4@butYOEZbUJKjOaD*qT)(u>05o>M)UwU$ zd*%Da-aYr>-ws65=U*LNIP`pJPIPj`Y2Pd^7xTz%8@rQBnyLE!z`f{)9O zPaDfM<~@q(j*tOAS$@lq|4ahN-}9YA{yW3V_f6e(GJ1#7`iz^Ny>9Kn$p}^}Vd@m* zbGIQJa@N#bZ9?f>E0#ZcIkFEzAc|?TdswjW{}cZm5` z(jp+NW{Gy^bJG16H~kd>W2(=fAeHd?T5{X?U-6r(Q>zPfu!YdV^6d9SDIp4l5Wj=jAL^1C2E_P5Rd zlOF;v4Dz#76Y)n|c%Sma2=UQ7&E&?lm-GT6^luyQ9cl3){E g^Tm7f{cHmj_kAzB@V*Q0yYL>srNg&;QFA;02Op6vjQ{`u literal 0 HcmV?d00001 diff --git a/tests/data/sub-tracking-results/results-F.pkl b/tests/data/sub-tracking-results/results-F.pkl new file mode 100644 index 0000000000000000000000000000000000000000..d5d800d365a93274dab43b6337e2b5bbad46e6e9 GIT binary patch literal 3842 zcmbVPeQXp(6u-9Da;3E`g;EeH<-0&VTOkpm(4!@Q99E!wNU$z*dvmvY?Cu@=0X?)( zfE1PPD`1SEc7f)<;Y7zh|6K{Q6+?A~4GK3WgvlDXO6 
zy!YnKy!Yn4>2V)>SmCl8sSU?0!PJqs1M6PH3M9vrRCic`jch{J!W*z*DW*aD>Ad~4 zn>LU!(OpF(sudkcn2>;}nNmP{TSUbv(JOnON75Vt7@g zp)l4BO3GW6sJP=5=NlGmAQKZTf+7bDZ;FSKO2N`%vbaORLVz&=HJPYcYAo&*Y;{yn zT1L8#;viShu^811Q@2EuQ5M^jl#BuOZIXWKcO9f{1p#`PZlDeR0{GP&qEZbh4w~^8 zW=w^zE~b6G&U_Imc#F2Tx3BpZn%((ZOd9CxqzzIvDKSK(ARU;bI#L))V|yus%>`_2 zo>Pqj{r6fvYoUi=d1;O`ug&8a$>rjpOV{@u`1I;^o=RQcJ?-3q)jp0Y2V;KF&8rVP zhEIIOi_e1*b|`Z@l;)=&i?hF{5`vOyuFc*bK$%o<3gHm63$OG0yMGUwcP0G(sOCIrOPL9-E`g_Yc^#Tl!f&3vw;Vf<~cYW|efX zVBuri@WGU`=js;^^ET|z|G0d#*v%`jvGv93>4P2~y3+jhH)qza;i1G)xsoTEx7*%& zqTmJbch`SWcks>vUWwJW_e}VBRWlDQ9sAw!*wPjr+Q9ghYdmyx_LGcM;`S65%#hqpujEUmgV)Y5!o)xMI-O5d^GD>oh;DfjK{K6zFgEb}Ez_IUrl zwV-{~bAJA_*|+!n%*NJ7&-)ttIHB5?~)`awm@{6AUU3Mw{{9T1(6`jKfu-P%n_i4amy*dEqGX&l{c2tg|b zg#j|Lpr8O&4B87NHYqbqa1c?jMUZMfEDfy77_48?;S7@Hvs5OPLq!gyxPmhZWT?m+ zI`fK|Qa)yCjhzZoL}SaT(2Ps+!%G7!Z$V^n3RP(W?uucNY0ip& zBW;q4*DTITjuUp3V9f9a)S2VS__r~$%Rrrgn&J`Dz%4&Qi%|$>)hV7yVu>lV?St* zkBUiwHcY^TF~+Yyj7m_V{6{p#NKhmJgJ_~cfJ7jY2>xNB2xua{*}c2WT|X$DOXg;O z^WK{`^WK~HW|!x++a)f$k;<-wC73!AyRjZHtVnuHNqM^!d5%rUTGx7PSc+-TgLKX- zbSL$b3GuC1M~Xs8SpsQMWCX;xjsu-KQZXgICS;~BdA?ECDlB+teIHXkYWT> zq@gaX8U?IYofSOFxEH#k~2(~&Z zC@mvhM@f(?=va(vhN)Yk$tVkKN=hbx`Zmcy8g}ibp}Yv)OV?9>I1hdmd#F@F3ZiB* zff-Z&+9&AXfHUtVc{^xpYir9rXm%HFG-+h8gZia%QfP=sK{_x=)g-?&i*0!hn+w=N z-bEjFN6DIFjdTwzFV#tNLSDy6E*A%#OddV3@A&sT)%AB9NMx`%$WaZ$I5z0Q+1u^c zJ~+dR9|I%oQ08_h&CNa*XMdAYg3@ZP8M;4!a;eg%P}9Nh@5o2cVl#PY3~Huj0cl!^ z;h|2}4sGDYU$)x}2f|=5;*D4`_?HGwTVN=%$Q&f>Vs69yBb?8AsZ5$GRWiSlre}NA zqqd*eR({d7_Vuch_xLk6F8+3E(~UY_fq(C|E&8fxHV-X5cCmlqtF1gV?o$|_;lW^~ zL(MvCDAbSx!MVTr1~k;iYhT9N$|fGNP4Ue}9%^E(zlMhvHb6VGg@@i^;y+u$L-(U% zW|efXVEzN!aPO_l`+a+^@iv^%*VOP`pPN@eYyS1e3oTwAnzUqE44-b{p>f;r^0J8cB=XP~pF`U|*MU+t%n!TAX2YDzdAO@lSYg_W!B9|I69hV9(9@Nq2m8@UNvS*BpJaG3Hs1MR zR&eX31=X&?4Z*>4A-~HDx0B<+?y!g7y-6V_k%zKVZxr`pBM?CbBJ6v~qSz2Iq;$UEXt)OBP*#Xf3sUMjp)U7)yDHJ*t)MXDVp)`*77(&!aKw*GPEGQ^~6@v~yiA{dA@rLjdpiYQ?< z8GvB`D_8|*g|&;eSaB1=#g52>lz1UFpwgC2+GKfE-+@)krEr_f|gz`Y49GR;}>Z)8n!@j8UFl;eb5r5JO( 
z0d)*>`@w-qb{$82U2+$SBu!>Jeq12ME$|Mfnh$+^D12nzswvTi%;`dIf0>-t%@7rNXdVlgm3 zP`@xxZ@&>e2oem9i9ksT?FX6xd5C3`!mUWL*xY_L=Y@x%0Pi4Wi<6Rgx8<*gzcbKB=A4$ CaAA4? literal 0 HcmV?d00001 diff --git a/tests/data/sub-tracking-results/sub-tracking.csv b/tests/data/sub-tracking-results/sub-tracking.csv index d20c760..f147920 100644 --- a/tests/data/sub-tracking-results/sub-tracking.csv +++ b/tests/data/sub-tracking-results/sub-tracking.csv @@ -21,6 +21,7 @@ process_id,code_block_name,position,timestamp 1723815,X,1,1745449624.4956949 1723727,Y,0,1745449624.6481984 1723727,Y,1,1745449626.4954815 +1723727,Y,0,1745449627.6054815 1723727,tmp.py:9,0,1745449626.5598955 1723727,tmp.py:9,1,1745449627.653375 1723727,tmp.py:38,0,1745449627.6730132 diff --git a/tests/test_cli.py b/tests/test_cli.py index f5cc7fb..2c21fbf 100644 --- a/tests/test_cli.py +++ b/tests/test_cli.py @@ -151,14 +151,14 @@ def test_analyze(mocker, format_: str | None, output: str | None): ( ['-p', 'tests/data/sub-tracking-results/files-to-combine/'], [ - 'tests/data/sub-tracking-results/files-to-combine/1723811.sub-tracking.sqlite', - 'tests/data/sub-tracking-results/files-to-combine/1723814.sub-tracking.sqlite', - 'tests/data/sub-tracking-results/files-to-combine/1723815.sub-tracking.sqlite', - 'tests/data/sub-tracking-results/files-to-combine/main.sub-tracking.sqlite', 'tests/data/sub-tracking-results/files-to-combine/1723811.sub-tracking.csv', + 'tests/data/sub-tracking-results/files-to-combine/1723811.sub-tracking.sqlite', 'tests/data/sub-tracking-results/files-to-combine/1723814.sub-tracking.csv', + 'tests/data/sub-tracking-results/files-to-combine/1723814.sub-tracking.sqlite', 'tests/data/sub-tracking-results/files-to-combine/1723815.sub-tracking.csv', - 'tests/data/sub-tracking-results/files-to-combine/main.sub-tracking.csv' + 'tests/data/sub-tracking-results/files-to-combine/1723815.sub-tracking.sqlite', + 'tests/data/sub-tracking-results/files-to-combine/main.sub-tracking.csv', + 
'tests/data/sub-tracking-results/files-to-combine/main.sub-tracking.sqlite' ] ) ] diff --git a/tests/test_sub_tracker.py b/tests/test_sub_tracker.py index e9e8ea6..f568e6f 100644 --- a/tests/test_sub_tracker.py +++ b/tests/test_sub_tracker.py @@ -102,18 +102,22 @@ def test_analysis(format_: str): sub_tracking_file = f'{folder}/sub-tracking.{format_}' analyzer = gput.SubTrackingAnalyzer(tracking_file, sub_tracking_file) actual_results = analyzer.sub_tracking_results() - with open(f'{folder}/results.json', 'r') as file: + _assert_results_match(f'{folder}/results.json', f'{folder}/results.txt', actual_results) + + +def _assert_results_match(expected_json_path: str, expected_text_path: str, actual_results): + with open(expected_json_path, 'r') as file: expected_json_results = json.load(file) diff = deepd.DeepDiff(expected_json_results, actual_results.to_json(), significant_digits=12) assert not diff - with open(f'{folder}/results.txt', 'r') as file: + with open(expected_text_path, 'r') as file: expected_str_results = file.read() assert expected_str_results == str(actual_results) def test_combine(format_: str): folder = 'tests/data/sub-tracking-results' - files = [f'{folder}/files-to-combine/{name}' for name in os.listdir(f'{folder}/files-to-combine') if name.endswith(format_)] + files = [f'{folder}/files-to-combine/{name}' for name in sorted(os.listdir(f'{folder}/files-to-combine')) if name.endswith(format_)] with pt.raises(ValueError) as error: wrong_extension = "csv" if format_ == "sqlite" else "sqlite" invalid_file = f'wrong-extension.{wrong_extension}' @@ -128,10 +132,83 @@ def test_combine(format_: str): expected_results = pd.read_csv(expected_path) actual_results = pd.read_csv(sub_tracking_file) else: - expected_results = pd.read_sql('data', sqlalc.create_engine(f'sqlite:///{expected_path}')) - actual_results = pd.read_sql('data', sqlalc.create_engine(f'sqlite:///{sub_tracking_file}')) + expected_results = pd.read_sql('data', 
sqlalc.create_engine(f'sqlite:///{expected_path}', poolclass=sqlalc.pool.NullPool)) + actual_results = pd.read_sql('data', sqlalc.create_engine(f'sqlite:///{sub_tracking_file}', poolclass=sqlalc.pool.NullPool)) pd.testing.assert_frame_equal(expected_results, actual_results, atol=1e-10, rtol=1e-10) with pt.raises(ValueError) as error: analyzer.combine_sub_tracking_files(files) assert str(error.value) == f'Cannot create sub-tracking file {sub_tracking_file}. File already exists.' os.remove(sub_tracking_file) + + +@pt.fixture(name='statistic', params=['std', 'min', 'max', 'mean']) +def get_statistic(request): + yield request.param + + +def _get_tracking_comparison(names: tuple[str, str]) -> gput.TrackingComparison: + folder = 'tests/data/sub-tracking-results' + file_path = f'{folder}/results-{{}}.pkl' + file_path_map = {name: file_path.format(name) for name in names} + return gput.TrackingComparison(file_path_map) + + +def test_comparison(caplog, statistic: str): + comparison = _get_tracking_comparison(('A', 'B')) + actual_results = comparison.compare(statistic) + folder = 'tests/data/sub-tracking-results' + _assert_results_match( + f'{folder}/comparison_{statistic}.json', f'{folder}/comparison_{statistic}.txt', + actual_results + ) + expected_warnings = [ + 'Code block name "tmp.py:9" of tracking session "A" matched with code block name "tmp.py:7" of tracking session "B" but they differ by line number. If these code blocks were not meant to match, their comparison will not be valid and their names must be disambiguated.', + 'Code block name "tmp.py:38" of tracking session "A" matched with code block name "tmp.py:37" of tracking session "B" but they differ by line number. If these code blocks were not meant to match, their comparison will not be valid and their names must be disambiguated.' 
+ ] + utils._assert_warnings(caplog, expected_warnings) + + +def test_errors(): + with pt.raises(ValueError) as error: + _get_tracking_comparison(('A', 'C')) + assert str(error.value) == 'All sub-tracking results must have the same number of code blocks. The first has 4 code blocks but tracking session "C" has 5 code blocks.' + with pt.raises(ValueError) as error: + _get_tracking_comparison(('A', 'D')) + assert str(error.value) == 'Code block name "tmp.py:38" of tracking session "A" does not match code block name "tmp.py:abc" of tracking session "D"' + with pt.raises(ValueError) as error: + _get_tracking_comparison(('A', 'E')) + assert str(error.value) == 'Code block name "tmp.py:9" of tracking session "A" does not match code block name "temp.py:123" of tracking session "E"' + comparison = _get_tracking_comparison(('F', 'G')) + comparison.compare() + with pt.raises(ValueError) as error: + comparison.compare('invalid') + assert str(error.value) == "Invalid summary statistic 'invalid'. Valid values are min max mean std." + + +def test_overwrite(): + file_name = 'repeat-file.csv' + open(file_name, 'w').close() + with pt.raises(FileExistsError) as error: + with gput.SubTracker(sub_tracking_file=file_name): + pass # pragma: nocover + assert str(error.value) == 'File repeat-file.csv already exists. Set overwrite to True to overwrite the existing file.' + with gput.SubTracker(sub_tracking_file=file_name, overwrite=True): + pass # pragma: nocover + assert os.path.isfile(file_name) + os.remove(file_name) + with pt.raises(FileNotFoundError) as error: + with gput.SubTracker(sub_tracking_file=file_name): + pass # pragma: nocover + assert str(error.value) == 'The file repeat-file.csv was removed in the middle of writing data to it.' 
+ + +def test_invalid_file(): + file_path = 'tests/data/sub-tracking-results/invalid{}.csv' + analyzer = gput.SubTrackingAnalyzer(None, sub_tracking_file=file_path.format(1)) + with pt.raises(ValueError) as error: + analyzer.load_timestamp_pairs('X') + assert str(error.value) == 'Sub-tracking file is invalid. Detected timestamp pair (1745449613.532592, 1745449609.7528224) with differing process IDs: 1723811 and 1723812.' + analyzer = gput.SubTrackingAnalyzer(None, sub_tracking_file=file_path.format(2)) + with pt.raises(ValueError) as error: + analyzer.load_timestamp_pairs('X') + assert str(error.value) == 'Sub-tracking file is invalid. Detected timestamp pair (1745449609.7528222, 1745449613.5325918) of process ID 1723811 with a start time greater than the stop time.' diff --git a/tests/test_tracker.py b/tests/test_tracker.py index 4c9542e..20908ce 100644 --- a/tests/test_tracker.py +++ b/tests/test_tracker.py @@ -248,7 +248,7 @@ def side_effect_func(command, *_, **__) -> None: mocker.patch('gpu_tracker._helper_classes.subp.check_output', side_effect=side_effect_func) gput.Tracker() gput.Tracker() - _assert_warnings( + utils._assert_warnings( caplog, [ 'The nvidia-smi command is installed but cannot connect to a GPU. The GPU RAM and GPU utilization values will remain 0.0.', @@ -284,13 +284,7 @@ def test_main_process_warnings(mocker, caplog): 'Tracking is stopping and it has been 11.0 seconds since the temporary tracking results file was last updated. 
' 'Resource usage was not updated during that time.') assert not os.path.isfile(tracker._resource_usage_file) - _assert_warnings(caplog, expected_warnings) - - -def _assert_warnings(caplog, expected_warnings: list[str]): - for expected_warning, record in zip(expected_warnings, caplog.records): - assert record.levelname == 'WARNING' - assert record.message == expected_warning + utils._assert_warnings(caplog, expected_warnings) @pt.fixture(name='disable_logs', params=[True, False]) @@ -331,7 +325,7 @@ def test_tracking_process_warnings(mocker, disable_logs: bool, caplog): gpu_unavailable_message, 'The target process of ID 666 ended before tracking could begin.', gpu_unavailable_message, 'Failed to track a process (PID: 777) that does not exist. This possibly resulted from the process completing before it could be tracked.', 'The following uncaught exception occurred in the tracking process:'] - _assert_warnings(caplog, expected_warnings) + utils._assert_warnings(caplog, expected_warnings) def test_validate_arguments(mocker): @@ -398,3 +392,13 @@ def is_set() -> bool: tracker._tracking_process.run() assert os.path.isfile(file_path) os.remove(file_path) + + +def test_formatting_before_tracking_stops(): + with pt.raises(RuntimeError) as error: + tracker = gput.Tracker() + str(tracker) + assert str(error.value) == ( + 'Cannot display the tracker in string or JSON format before tracking completes. 
Exit the content manager or call the stop() ' + 'method before calling to_json() or str()' + ) diff --git a/tests/utils.py b/tests/utils.py index 75fa4fa..f1acc37 100644 --- a/tests/utils.py +++ b/tests/utils.py @@ -11,6 +11,12 @@ def assert_args_list(mock, expected_args_list: list[tuple | dict], use_kwargs: b assert actual_args_list == expected_args_list +def _assert_warnings(caplog, expected_warnings: list[str]): + for expected_warning, record in zip(expected_warnings, caplog.records): + assert record.levelname == 'WARNING' + assert record.message == expected_warning + + def test_tracking_file( actual_tracking_file: str, expected_tracking_file: str, excluded_col: str | None = None, excluded_col_test=None, is_sub_tracking: bool = False):