diff --git a/.github/workflows/simod.yml b/.github/workflows/simod.yml index 395ddcd4..ecb35738 100644 --- a/.github/workflows/simod.yml +++ b/.github/workflows/simod.yml @@ -9,6 +9,7 @@ on: - 'tests/**' - '.github/workflows/**' - 'Dockerfile' + - 'pyproject.toml' pull_request: branches: [ master ] paths: @@ -57,7 +58,7 @@ jobs: run: poetry run pylint -j 0 --exit-zero src/simod > pylint.txt - name: Upload PyLint output - uses: actions/upload-artifact@v3 + uses: actions/upload-artifact@v4 with: name: pylint.txt path: ./pylint.txt @@ -114,11 +115,13 @@ jobs: platforms: linux/amd64,linux/arm64 release-pypi: + name: Release, Python ${{ matrix.python-version }} needs: [ test ] runs-on: ubuntu-latest - outputs: - version: ${{ steps.versioning.outputs.version }} if: github.ref == 'refs/heads/master' + strategy: + matrix: + python-version: [ '3.9' ] environment: name: PyPI url: https://pypi.org/p/simod @@ -127,27 +130,37 @@ jobs: contents: write packages: write steps: - - name: Checkout - uses: actions/checkout@v3 + - uses: actions/checkout@v3 with: fetch-depth: 0 + - uses: actions/setup-python@v3 + with: + python-version: ${{ matrix.python-version }} + architecture: x64 + - name: Install poetry + shell: bash run: pip install poetry + - name: Install project + run: | + poetry install + - name: Build - run: poetry install && poetry build + run: | + poetry build - - name: Generate licenses.md + - name: Get version + id: get_version run: | - poetry run pip install pip-licenses - poetry run pip-licenses --with-system --with-urls --format=markdown --output-file=licenses.md + echo "version=$(poetry version --short)" >> "$GITHUB_OUTPUT" - - name: Upload licenses.md - uses: actions/upload-artifact@v3 + - name: Upload artifacts + uses: actions/upload-artifact@v4 with: - name: licenses.md - path: licenses.md + name: simod-${{ steps.get_version.outputs.version }}-py${{ matrix.python-version }} + path: dist - name: Generate changelog run: | @@ -158,22 +171,18 @@ jobs: echo "" >> CHANGELOG.md echo "\`\`\`" >> CHANGELOG.md - - name: Get the version - id: versioning - run: echo "version=$(poetry version --short)" >> "$GITHUB_OUTPUT" - - - name: Assign a version tag + - name: Assign repository tag run: | - git tag ${{ steps.versioning.outputs.version }} + git tag ${{ steps.get_version.outputs.version }} git push --tags - - name: Create a release + - name: Create release uses: softprops/action-gh-release@v1 with: files: | dist/* - tag_name: ${{ steps.versioning.outputs.version }} + tag_name: ${{ steps.get_version.outputs.version }} body_path: CHANGELOG.md - name: Publish to PyPI - uses: pypa/gh-action-pypi-publish@release/v1 \ No newline at end of file + uses: pypa/gh-action-pypi-publish@release/v1 diff --git a/.readthedocs.yaml b/.readthedocs.yaml new file mode 100644 index 00000000..59d8226c --- /dev/null +++ b/.readthedocs.yaml @@ -0,0 +1,22 @@ +# Read the Docs configuration file +# See https://docs.readthedocs.io/en/stable/config-file/v2.html for details + +# Required +version: 2 + +# Set the OS, Python version, and other tools you might need +build: + os: ubuntu-24.04 + tools: + python: "3.9" + +# Build documentation in the "docs/" directory with Sphinx +sphinx: + configuration: docs/source/conf.py + +# Optionally, but recommended, +# declare the Python requirements required to build your documentation +# See https://docs.readthedocs.io/en/stable/guides/reproducible-builds.html +python: + install: + - requirements: docs/requirements.txt diff --git a/README.md b/README.md index d7bdca5c..b8887ddf 100644 --- 
a/README.md +++ b/README.md @@ -2,12 +2,13 @@ ![Simod](https://github.com/AutomatedProcessImprovement/Simod/actions/workflows/simod.yml/badge.svg) ![version](https://img.shields.io/github/v/tag/AutomatedProcessImprovement/simod) +[![Documentation Status](https://readthedocs.org/projects/simod/badge/?version=latest)](https://simod.readthedocs.io/en/latest/) -Simod combines process mining and machine learning techniques to automate the discovery and tuning of Business Process +SIMOD combines process mining and machine learning techniques to automate the discovery and tuning of Business Process Simulation models from event logs extracted from enterprise information systems (ERPs, CRM, case management systems, etc.). -Simod takes as input an event log in CSV format, a configuration file, and (optionally) a BPMN process model, and -returns a business process simulation scenario that can be simulated using +SIMOD takes as input an event log in CSV format, a configuration file, and (optionally) a BPMN process model, and +discovers a business process simulation model that can be simulated using the [Prosimos](https://github.com/AutomatedProcessImprovement/Prosimos) simulator, which is embedded in Simod. ## Dependencies @@ -23,8 +24,7 @@ the [Prosimos](https://github.com/AutomatedProcessImprovement/Prosimos) simulato ### Optional Depending on your CPU architecture, some dependencies might not be pre-compiled for your platform. In that case, you -will -most likely also need the following dependencies: +will most likely also need the following dependencies: | Dependency | Version | Notes | |----------------|---------|--------------------------------------------------| @@ -43,7 +43,7 @@ pip install simod simod --configuration resources/config/configuration_example.yml ``` -Use your own configuration file instead of `resources/config/configuration_example.yml` and specify the path to the +Use your own configuration file instead of `resources/config/configuration_example.yml` and specify the path to the event log in the configuration file itself. Paths are relative to the configuration file, or absolute. PyPI project is available at https://pypi.org/project/simod/. @@ -79,6 +79,10 @@ of each element: - Basic configuration to discover the full BPS model ([here](https://github.com/AutomatedProcessImprovement/Simod/blob/master/resources/config/configuration_example.yml)). +- Basic configuration to discover the full BPS model using fuzzy (probabilistic) resource + calendars ([here](https://github.com/AutomatedProcessImprovement/Simod/blob/master/resources/config/configuration_example_fuzzy.yml)). +- Basic configuration to discover the full BPS model with data-aware branching rules + ([here](https://github.com/AutomatedProcessImprovement/Simod/blob/master/resources/config/configuration_example_data_aware.yml)). - Basic configuration to discover the full BPS model, and evaluate it with a specified event log ([here](https://github.com/AutomatedProcessImprovement/Simod/blob/master/resources/config/configuration_example_with_evaluation.yml)). 
- Basic configuration to discover a BPS model with a provided BPMN process model as starting @@ -108,3 +112,8 @@ Coverage: ```shell poetry run pytest -m "not integration" --cov=simod ``` + +### Documentation + +For more details about the installation, usage, and implementation **visit the documentation here:** +📖 ️ [https://simod.readthedocs.io/en/latest/](https://simod.readthedocs.io/en/latest/) diff --git a/docs/Makefile b/docs/Makefile new file mode 100644 index 00000000..d0c3cbf1 --- /dev/null +++ b/docs/Makefile @@ -0,0 +1,20 @@ +# Minimal makefile for Sphinx documentation +# + +# You can set these variables from the command line, and also +# from the environment for the first two. +SPHINXOPTS ?= +SPHINXBUILD ?= sphinx-build +SOURCEDIR = source +BUILDDIR = build + +# Put it first so that "make" without argument is like "make help". +help: + @$(SPHINXBUILD) -M help "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O) + +.PHONY: help Makefile + +# Catch-all target: route all unknown targets to Sphinx using the new +# "make mode" option. $(O) is meant as a shortcut for $(SPHINXOPTS). +%: Makefile + @$(SPHINXBUILD) -M $@ "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O) diff --git a/docs/make.bat b/docs/make.bat new file mode 100644 index 00000000..dc1312ab --- /dev/null +++ b/docs/make.bat @@ -0,0 +1,35 @@ +@ECHO OFF + +pushd %~dp0 + +REM Command file for Sphinx documentation + +if "%SPHINXBUILD%" == "" ( + set SPHINXBUILD=sphinx-build +) +set SOURCEDIR=source +set BUILDDIR=build + +%SPHINXBUILD% >NUL 2>NUL +if errorlevel 9009 ( + echo. + echo.The 'sphinx-build' command was not found. Make sure you have Sphinx + echo.installed, then set the SPHINXBUILD environment variable to point + echo.to the full path of the 'sphinx-build' executable. Alternatively you + echo.may add the Sphinx directory to PATH. + echo. 
+ echo.If you don't have Sphinx installed, grab it from + echo.https://www.sphinx-doc.org/ + exit /b 1 +) + +if "%1" == "" goto help + +%SPHINXBUILD% -M %1 %SOURCEDIR% %BUILDDIR% %SPHINXOPTS% %O% +goto end + +:help +%SPHINXBUILD% -M help %SOURCEDIR% %BUILDDIR% %SPHINXOPTS% %O% + +:end +popd diff --git a/docs/requirements.txt b/docs/requirements.txt new file mode 100644 index 00000000..2eefa39c --- /dev/null +++ b/docs/requirements.txt @@ -0,0 +1,24 @@ +click==8.1.8 +hyperopt==0.2.7 +lxml==5.3.0 +matplotlib==3.9.4 +networkx==3.2.1 +numpy==1.26.4 +pandas==2.2.3 +pendulum==3.0.0 +pydantic==2.10.6 +python-dotenv==1.0.1 +python-multipart==0.0.12 +pytz==2024.2 +PyYAML==6.0.2 +requests==2.32.3 +scipy==1.13.1 +statistics==1.0.3.5 +tqdm==4.67.1 +xmltodict==0.13.0 +prosimos==2.0.6 +extraneous-activity-delays==2.2.1 +openxes-cli-py==0.1.15 +pix-framework==0.13.17 +log-distance-measures==2.0.2 +sphinx-rtd-theme diff --git a/docs/source/_static/complete_configuration.yml b/docs/source/_static/complete_configuration.yml new file mode 100644 index 00000000..f37603e7 --- /dev/null +++ b/docs/source/_static/complete_configuration.yml @@ -0,0 +1,146 @@ +version: 5 + +########## +# Common # +########## +common: + # Path to the event log in CSV format + train_log_path: ../event_logs/LoanApp_simplified_train.csv.gz + # Specify the name for each of the columns in the CSV file (XES standard by default) + log_ids: + case: "case_id" + activity: "activity" + resource: "resource" + enabled_time: "enabled_time" # If not present in the log, automatically estimated (see preprocessing) + start_time: "start_time" # Should be present, but if not, can be estimated (see preprocessing) + end_time: "end_time" + # Use this process model and skip its discovery + process_model_path: ../models/LoanApp_simplified.bpmn + # Event log to evaluate the discovered BPS model with + test_log_path: ../event_logs/LoanApp_simplified_test.csv.gz + # Flag to perform evaluation (if 'test_log_path' not provided) with a test partition of the input log + perform_final_evaluation: true + # Number of evaluations of the discovered BPS model + num_final_evaluations: 10 + # Metrics to evaluate the discovered BPS model (reported in an output file) + evaluation_metrics: + - 3_gram_distance + - 2_gram_distance + - absolute_event_distribution + - relative_event_distribution + - circadian_event_distribution + - arrival_event_distribution + - cycle_time_distribution + # Whether to simulate the arrival times using the distribution of inter-arrival times observed in the training log, + # or fitting a parameterized probabilistic distribution (e.g., norm, expon) with these observed values. + use_observed_arrival_distribution: false + # Whether to delete all files created during the optimization phases or not + clean_intermediate_files: true + # Whether to discover global/case/event attributes and their update rules or not + discover_data_attributes: false + +################# +# Preprocessing # +################# +preprocessing: + # If the log has start times, threshold to consider two activities as concurrent when computing the enabled time + # (if necessary). Two activities would be considered concurrent if their occurrences happening concurrently divided + # by their total occurrences is higher than this threshold. 
+ enable_time_concurrency_threshold: 0.75 + # If true, preprocess multitasking (i.e., one resource performing more than one activity at the same time) by + # adjusting the timestamps (start/end) of those activities being executed at the same time by the same resource. + multitasking: false + # Thresholds for the heuristics' concurrency oracle (only used to estimate start times if missing). + concurrency_df: 0.9 # Directly-Follows threshold + concurrency_l2l: 0.9 # Length 2 loops threshold + concurrency_l1l: 0.9 # Length 1 loops threshold + +################ +# Control-flow # +################ +control_flow: + # Metric to guide the optimization process (loss function to minimize) + optimization_metric: n_gram_distance + # Number of optimization iterations over the search space + num_iterations: 20 + # Number of times to evaluate each iteration (using the mean of all of them) + num_evaluations_per_iteration: 3 + # Methods for discovering gateway probabilities + gateway_probabilities: + - equiprobable + - discovery + # Discover process model with SplitMiner v1 (options: sm1 or sm2) + mining_algorithm: sm1 + # For Split Miner v1 and v2: Number of concurrent relations between events to be captured (between 0.0 and 1.0) + epsilon: + - 0.05 + - 0.4 + # Only for Split Miner v1: Threshold for filtering the incoming and outgoing edges (between 0.0 and 1.0) + eta: + - 0.2 + - 0.7 + # Only for Split Miner v1: Whether to replace non-trivial OR joins or not (true or false) + replace_or_joins: + - true + - false + # Only for Split Miner v1: Whether to prioritize parallelism over loops or not (true or false) + prioritize_parallelism: + - true + - false + # Discover data-aware branching rules, i.e., BPMN decision points based on value of data attributes + discover_branch_rules: true + # Minimum f-score value to consider the discovered data-aware branching rules + f_score: + - 0.3 + - 0.9 + +################## +# Resource model # +################## +resource_model: + # Metric to guide the optimization process (loss function to minimize) + optimization_metric: circadian_emd + # Number of optimization iterations over the search space + num_iterations: 20 + # Number of times to evaluate each iteration (using the mean of all of them) + num_evaluations_per_iteration: 3 + # Whether to discover prioritization or batching behavior + discover_prioritization_rules: false + discover_batching_rules: false + # Resource profiles configuration + resource_profiles: + # Resource profile discovery type (fuzzy, differentiated, pool, undifferentiated) + discovery_type: differentiated + # Time granularity (in minutes) for the resource calendar (the higher the density of events in the log, the smaller the granularity can be) + granularity: + - 15 + - 60 + # Minimum confidence of the intervals in the discovered calendar of a resource or set of resources (between 0.0 and 1.0) + confidence: + - 0.5 + - 0.85 + # Minimum support of the intervals in the discovered calendar of a resource or set of resources (between 0.0 and 1.0) + support: + - 0.05 + - 0.5 + # Participation of a resource in the process to discover a calendar for them, gathered together otherwise (between 0.0 and 1.0) + participation: + - 0.2 + - 0.5 + # Angle of the fuzzy trapezoid when computing the availability probability for an activity (angle from start to end) + fuzzy_angle: + - 0.1 + - 0.9 + +##################### +# Extraneous delays # +##################### +extraneous_activity_delays: + # Metric to guide the optimization process (loss function to minimize) + 
optimization_metric: relative_emd + # Method to compute the extraneous delay (naive or eclipse-aware) + discovery_method: eclipse-aware + # Number of optimization iterations over the search space (1 = direct discovery, no optimization stage) + num_iterations: 1 + # Number of times to evaluate each iteration (using the mean of all of them) + num_evaluations_per_iteration: 3 diff --git a/docs/source/_static/configuration_example.yml b/docs/source/_static/configuration_example.yml new file mode 100644 index 00000000..186512ea --- /dev/null +++ b/docs/source/_static/configuration_example.yml @@ -0,0 +1,98 @@ +################################################################################################################# +# Simple configuration example with i) no evaluation of the final BPS model, ii) 20 iterations of control-flow # +# discovery, iii) 20 iterations of resource model (differentiated) discovery, and iv) direct discovery of # +# extraneous delays. # +################################################################################################################# +# - Increase the num_iterations to (potentially) improve the quality of that discovered model # +# - Visit 'complete_configuration.yml' example for a description of all configurable parameters # +################################################################################################################# +version: 5 +########## +# Common # +########## +common: + # Path to the event log in CSV format + train_log_path: ../event_logs/LoanApp_simplified_train.csv.gz + # Specify the name for each of the columns in the CSV file (XES standard by default) + log_ids: + case: "case_id" + activity: "activity" + resource: "resource" + enabled_time: "enabled_time" # If not present in the log, automatically computed + start_time: "start_time" + end_time: "end_time" + # Whether to discover case attributes or not + discover_data_attributes: false +################# +# Preprocessing # +################# +preprocessing: + # Threshold to consider two activities as concurrent when computing the enabled time (if necessary) + enable_time_concurrency_threshold: 0.75 +################ +# Control-flow # +################ +control_flow: + # Metric to guide the optimization process (loss function to minimize) + optimization_metric: two_gram_distance + # Number of optimization iterations over the search space + num_iterations: 20 + # Number of times to evaluate each iteration (using the mean of all of them) + num_evaluations_per_iteration: 3 + # Method for discovering gateway probabilities + gateway_probabilities: discovery + # Discover process model with SplitMiner v3 + mining_algorithm: sm1 + # Number of concurrent relations between events to be captured + epsilon: + - 0.05 + - 0.4 + # Threshold for filtering the incoming and outgoing edges + eta: + - 0.2 + - 0.7 + # Whether to replace non-trivial OR joins or not + replace_or_joins: + - true + - false + # Whether to prioritize parallelism over loops or not + prioritize_parallelism: + - true + - false +################## +# Resource model # +################## +resource_model: + # Metric to guide the optimization process (loss function to minimize) + optimization_metric: circadian_emd + # Number of optimization iterations over the search space + num_iterations: 20 + # Number of times to evaluate each iteration (using the mean of all of them) + num_evaluations_per_iteration: 3 + # Whether to discover prioritization or batching behavior + discover_prioritization_rules: false + 
discover_batching_rules: false + # Resource profiles configuration + resource_profiles: + # Resource profile discovery type + discovery_type: differentiated + # Time granularity (in minutes) for the resource calendar (the higher the density of events in the log, the smaller the granularity can be) + granularity: 60 + # Minimum confidence of the intervals in the discovered calendar (of a resource or set of resources) + confidence: + - 0.5 + - 0.85 + # Minimum support of the intervals in the discovered calendar (of a resource or set of resources) + support: + - 0.05 + - 0.5 + # Participation of a resource in the process to discover a calendar for them (gathered together otherwise) + participation: 0.4 +##################### +# Extraneous delays # +##################### +extraneous_activity_delays: + # Method to compute the extraneous delay + discovery_method: eclipse-aware + # Number of optimization iterations over the search space (1 = direct discovery, no optimization stage) + num_iterations: 1 diff --git a/docs/source/_static/configuration_example_data_aware.yml b/docs/source/_static/configuration_example_data_aware.yml new file mode 100644 index 00000000..aeaddffb --- /dev/null +++ b/docs/source/_static/configuration_example_data_aware.yml @@ -0,0 +1,104 @@ +################################################################################################################# +# Simple configuration example with i) no evaluation of the final BPS model, ii) 10 iterations of control-flow # +# discovery (BPMN model provided) with data-aware decision points, iii) 20 iterations of resource model # +# (differentiated) discovery, and iv) no discovery of extraneous delays. # +################################################################################################################# +# - Increase the num_iterations to (potentially) improve the quality of that discovered model # +# - Visit 'complete_configuration.yml' example for a description of all configurable parameters # +################################################################################################################# +version: 5 +########## +# Common # +########## +common: + # Path to the event log in CSV format + train_log_path: ../event_logs/LoanApp_simplified_train.csv.gz + # Specify the name for each of the columns in the CSV file (XES standard by default) + log_ids: + case: "case_id" + activity: "activity" + resource: "resource" + enabled_time: "enabled_time" # If not present in the log, automatically computed + start_time: "start_time" + end_time: "end_time" + # Whether to discover case attributes or not + discover_data_attributes: true +################# +# Preprocessing # +################# +preprocessing: + # Threshold to consider two activities as concurrent when computing the enabled time (if necessary) + enable_time_concurrency_threshold: 0.75 +################ +# Control-flow # +################ +control_flow: + # Metric to guide the optimization process (loss function to minimize) + optimization_metric: two_gram_distance + # Number of optimization iterations over the search space + num_iterations: 20 + # Number of times to evaluate each iteration (using the mean of all of them) + num_evaluations_per_iteration: 3 + # Method for discovering gateway probabilities + gateway_probabilities: discovery + # Discover process model with SplitMiner v3 + mining_algorithm: sm1 + # Number of concurrent relations between events to be captured + epsilon: + - 0.05 + - 0.4 + # Threshold for filtering the incoming and outgoing 
edges + eta: + - 0.2 + - 0.7 + # Whether to replace non-trivial OR joins or not + replace_or_joins: + - true + - false + # Whether to prioritize parallelism over loops or not + prioritize_parallelism: + - true + - false + # Discover data-aware branching rules, i.e., BPMN decision points based on value of data attributes + discover_branch_rules: true + # Minimum f-score value to consider the discovered data-aware branching rules + f_score: + - 0.3 + - 0.9 +################## +# Resource model # +################## +resource_model: + # Metric to guide the optimization process (loss function to minimize) + optimization_metric: circadian_emd + # Number of optimization iterations over the search space + num_iterations: 20 + # Number of times to evaluate each iteration (using the mean of all of them) + num_evaluations_per_iteration: 3 + # Whether to discover prioritization or batching behavior + discover_prioritization_rules: false + discover_batching_rules: false + # Resource profiles configuration + resource_profiles: + # Resource profile discovery type + discovery_type: differentiated + # Time granularity (in minutes) for the resource calendar (the higher the density of events in the log, the smaller the granularity can be) + granularity: 60 + # Minimum confidence of the intervals in the discovered calendar (of a resource or set of resources) + confidence: + - 0.5 + - 0.85 + # Minimum support of the intervals in the discovered calendar (of a resource or set of resources) + support: + - 0.05 + - 0.5 + # Participation of a resource in the process to discover a calendar for them (gathered together otherwise) + participation: 0.4 +##################### +# Extraneous delays # +##################### +extraneous_activity_delays: + # Method to compute the extraneous delay + discovery_method: eclipse-aware + # Number of optimization iterations over the search space (1 = direct discovery, no optimization stage) + num_iterations: 1 diff --git a/docs/source/_static/configuration_example_fuzzy.yml b/docs/source/_static/configuration_example_fuzzy.yml new file mode 100644 index 00000000..5e74418d --- /dev/null +++ b/docs/source/_static/configuration_example_fuzzy.yml @@ -0,0 +1,84 @@ +################################################################################################################# +# Simple configuration example with i) no evaluation of the final BPS model, ii) 20 iterations of control-flow # +# discovery, iii) 10 iterations of resource model (fuzzy availability) discovery, and iv) no discovery of # +# extraneous delays. 
# +################################################################################################################# +# - Increase the num_iterations to (potentially) improve the quality of that discovered model # +# - Visit 'complete_configuration.yml' example for a description of all configurable parameters # +################################################################################################################# +version: 5 +########## +# Common # +########## +common: + # Path to the event log in CSV format + train_log_path: ../event_logs/LoanApp_simplified_train.csv.gz + # Specify the name for each of the columns in the CSV file (XES standard by default) + log_ids: + case: "case_id" + activity: "activity" + resource: "resource" + enabled_time: "enabled_time" # If not present in the log, automatically computed + start_time: "start_time" + end_time: "end_time" + # Whether to discover case attributes or not + discover_data_attributes: false +################# +# Preprocessing # +################# +preprocessing: + # Threshold to consider two activities as concurrent when computing the enabled time (if necessary) + enable_time_concurrency_threshold: 0.75 +################ +# Control-flow # +################ +control_flow: + # Metric to guide the optimization process (loss function to minimize) + optimization_metric: two_gram_distance + # Number of optimization iterations over the search space + num_iterations: 20 + # Number of times to evaluate each iteration (using the mean of all of them) + num_evaluations_per_iteration: 3 + # Method for discovering gateway probabilities + gateway_probabilities: discovery + # Discover process model with SplitMiner v3 + mining_algorithm: sm1 + # Number of concurrent relations between events to be captured + epsilon: + - 0.05 + - 0.4 + # Threshold for filtering the incoming and outgoing edges + eta: + - 0.2 + - 0.7 + # Whether to replace non-trivial OR joins or not + replace_or_joins: + - true + - false + # Whether to prioritize parallelism over loops or not + prioritize_parallelism: + - true + - false +################## +# Resource model # +################## +resource_model: + # Metric to guide the optimization process (loss function to minimize) + optimization_metric: circadian_emd + # Number of optimization iterations over the search space + num_iterations: 10 + # Number of times to evaluate each iteration (using the mean of all of them) + num_evaluations_per_iteration: 3 + # Whether to discover prioritization or batching behavior + discover_prioritization_rules: false + discover_batching_rules: false + # Resource profiles configuration + resource_profiles: + # Resource profile discovery type + discovery_type: differentiated_fuzzy + # Duration of each granule in the resource calendar that will get its own probability + granularity: 60 + # Angle of the fuzzy trapezoid when computing the availability probability for an activity (angle from start to end) + fuzzy_angle: + - 0.1 + - 0.9 diff --git a/docs/source/_static/configuration_example_with_evaluation.yml b/docs/source/_static/configuration_example_with_evaluation.yml new file mode 100644 index 00000000..10f412a7 --- /dev/null +++ b/docs/source/_static/configuration_example_with_evaluation.yml @@ -0,0 +1,109 @@ +################################################################################################################# +# Same simple configuration as 'configuration_example.yml' but evaluation the quality of the final BPS model # 
+################################################################################################################# +# - Increase the num_iterations to (potentially) improve the quality of that discovered model # +# - Visit 'complete_configuration.yml' example for a description of all configurable parameters # +################################################################################################################# +version: 5 +########## +# Common # +########## +common: + # Path to the event log in CSV format + train_log_path: ../event_logs/LoanApp_simplified_train.csv.gz + # Specify the name for each of the columns in the CSV file (XES standard by default) + log_ids: + case: "case_id" + activity: "activity" + resource: "resource" + enabled_time: "enabled_time" # If not present in the log, automatically computed + start_time: "start_time" + end_time: "end_time" + # Event log to evaluate the discovered BPS model with + test_log_path: ../event_logs/LoanApp_simplified_test.csv.gz + # Number of evaluations of the discovered BPS model + num_final_evaluations: 10 + # Metrics to evaluate the discovered BPS model + evaluation_metrics: + - 3_gram_distance + - 2_gram_distance + - absolute_event_distribution + - relative_event_distribution + - circadian_event_distribution + - arrival_event_distribution + - cycle_time_distribution + # Whether to discover case attributes or not + discover_data_attributes: false +################# +# Preprocessing # +################# +preprocessing: + # Threshold to consider two activities as concurrent when computing the enabled time (if necessary) + enable_time_concurrency_threshold: 0.75 +################ +# Control-flow # +################ +control_flow: + # Metric to guide the optimization process (loss function to minimize) + optimization_metric: two_gram_distance + # Number of optimization iterations over the search space + num_iterations: 20 + # Number of times to evaluate each iteration (using the mean of all of them) + num_evaluations_per_iteration: 3 + # Methods for discovering gateway probabilities + gateway_probabilities: discovery + # Discover process model with SplitMiner v3 + mining_algorithm: sm1 + # Number of concurrent relations between events to be captured + epsilon: + - 0.05 + - 0.4 + # Threshold for filtering the incoming and outgoing edges + eta: + - 0.2 + - 0.7 + # Whether to replace non-trivial OR joins or not + replace_or_joins: + - true + - false + # Whether to prioritize parallelism over loops or not + prioritize_parallelism: + - true + - false +################## +# Resource model # +################## +resource_model: + # Metric to guide the optimization process (loss function to minimize) + optimization_metric: circadian_emd + # Number of optimization iterations over the search space + num_iterations: 20 + # Number of times to evaluate each iteration (using the mean of all of them) + num_evaluations_per_iteration: 3 + # Whether to discover prioritization or batching behavior + discover_prioritization_rules: false + discover_batching_rules: false + # Resource profiles configuration + resource_profiles: + # Resource profile discovery type + discovery_type: differentiated + # Time granularity (in minutes) for the resource calendar (the higher the density of events in the log, the smaller the granularity can be) + granularity: 60 + # Minimum confidence of the intervals in the discovered calendar (of a resource or set of resources) + confidence: + - 0.5 + - 0.85 + # Minimum support of the intervals in the discovered calendar (of a 
resource or set of resources) + support: + - 0.05 + - 0.5 + # Participation of a resource in the process to discover a calendar for them (gathered together otherwise) + participation: 0.4 +##################### +# Extraneous delays # +##################### +extraneous_activity_delays: + # Method to compute the extraneous delay + discovery_method: eclipse-aware + # Number of optimization iterations over the search space (1 = direct discovery, no optimization stage) + num_iterations: 1 diff --git a/docs/source/_static/configuration_example_with_provided_process_model.yml b/docs/source/_static/configuration_example_with_provided_process_model.yml new file mode 100644 index 00000000..b490161d --- /dev/null +++ b/docs/source/_static/configuration_example_with_provided_process_model.yml @@ -0,0 +1,80 @@ +################################################################################################################# +# Same simple configuration as 'configuration_example.yml' but providing the BPMN model # +################################################################################################################# +# - Increase the num_iterations to (potentially) improve the quality of that discovered model # +# - Visit 'complete_configuration.yml' example for a description of all configurable parameters # +################################################################################################################# +version: 5 +########## +# Common # +########## +common: + # Path to the event log in CSV format + train_log_path: ../event_logs/LoanApp_simplified_train.csv.gz + # Specify the name for each of the columns in the CSV file (XES standard by default) + log_ids: + case: "case_id" + activity: "activity" + resource: "resource" + enabled_time: "enabled_time" # If not present in the log, automatically computed + start_time: "start_time" + end_time: "end_time" + # Use this process model and skip its discovery + process_model_path: ../models/LoanApp_simplified.bpmn + # Whether to discover case attributes or not + discover_data_attributes: false +################# +# Preprocessing # +################# +preprocessing: + # Threshold to consider two activities as concurrent when computing the enabled time (if necessary) + enable_time_concurrency_threshold: 0.75 +################ +# Control-flow # +################ +control_flow: + # Metric to guide the optimization process (loss function to minimize) + optimization_metric: two_gram_distance + # Number of optimization iterations over the search space + num_iterations: 1 + # Number of times to evaluate each iteration (using the mean of all of them) + num_evaluations_per_iteration: 3 + # Methods for discovering gateway probabilities + gateway_probabilities: discovery +################## +# Resource model # +################## +resource_model: + # Metric to guide the optimization process (loss function to minimize) + optimization_metric: circadian_emd + # Number of optimization iterations over the search space + num_iterations: 20 + # Number of times to evaluate each iteration (using the mean of all of them) + num_evaluations_per_iteration: 3 + # Whether to discover prioritization or batching behavior + discover_prioritization_rules: false + discover_batching_rules: false + # Resource profiles configuration + resource_profiles: + # Resource profile discovery type + discovery_type: pool + # Time granularity (in minutes) for the resource calendar (the higher the density of events in the log, the smaller the granularity can be) + granularity: 60 + 
# Minimum confidence of the intervals in the discovered calendar (of a resource or set of resources) + confidence: + - 0.5 + - 0.85 + # Minimum support of the intervals in the discovered calendar (of a resource or set of resources) + support: + - 0.05 + - 0.5 + # Participation of a resource in the process to discover a calendar for them (gathered together otherwise) + participation: 0.4 +##################### +# Extraneous delays # +##################### +extraneous_activity_delays: + # Method to compute the extraneous delay + discovery_method: eclipse-aware + # Number of optimization iterations over the search space (1 = direct discovery, no optimization stage) + num_iterations: 1 diff --git a/docs/source/_static/configuration_one_shot.yml b/docs/source/_static/configuration_one_shot.yml new file mode 100644 index 00000000..9c45b7e8 --- /dev/null +++ b/docs/source/_static/configuration_one_shot.yml @@ -0,0 +1,69 @@ +################################################################################################################# +# Simple configuration example for running SIMOD without parameter optimization steps. The defined parameters # +# should be individual values and not intervals, as there is no optimization. # +################################################################################################################# +# - Visit 'complete_configuration.yml' example for a description of all configurable parameters # +################################################################################################################# +version: 5 +########## +# Common # +########## +common: + # Path to the event log in CSV format + train_log_path: ../event_logs/LoanApp_simplified_train.csv.gz + # Specify the name for each of the columns in the CSV file (XES standard by default) + log_ids: + case: "case_id" + activity: "activity" + resource: "resource" + enabled_time: "enabled_time" # If not present in the log, automatically computed + start_time: "start_time" + end_time: "end_time" +################ +# Control-flow # +################ +control_flow: + # Number of optimization iterations over the search space + num_iterations: 1 + # Number of times to evaluate each iteration (using the mean of all of them) + num_evaluations_per_iteration: 1 + # Methods for discovering gateway probabilities + gateway_probabilities: discovery + # Discover process model with SplitMiner v3 + mining_algorithm: sm1 + # Number of concurrent relations between events to be captured + epsilon: 0.3 + # Threshold for filtering the incoming and outgoing edges + eta: 0.5 + # Whether to replace non-trivial OR joins or not + replace_or_joins: false + # Whether to prioritize parallelism over loops or not + prioritize_parallelism: true +################## +# Resource model # +################## +resource_model: + # Number of optimization iterations over the search space + num_iterations: 1 + # Number of times to evaluate each iteration (using the mean of all of them) + num_evaluations_per_iteration: 1 + # Resource profiles configuration + resource_profiles: + # Resource profile discovery type + discovery_type: differentiated + # Time granularity (in minutes) for the resource calendar (the higher the density of events in the log, the smaller the granularity can be) + granularity: 60 + # Minimum confidence of the intervals in the discovered calendar (of a resource or set of resources) + confidence: 0.6 + # Minimum support of the intervals in the discovered calendar (of a resource or set of resources) + support: 0.2 + # 
Participation of a resource in the process to discover a calendar for them (gathered together otherwise) + participation: 0.4 +##################### +# Extraneous delays # +##################### +extraneous_activity_delays: + # Method to compute the extraneous delay + discovery_method: eclipse-aware + # Number of optimization iterations over the search space (1 = direct discovery, no optimization stage) + num_iterations: 1 diff --git a/docs/source/_static/simod.png b/docs/source/_static/simod.png new file mode 100644 index 00000000..72296044 Binary files /dev/null and b/docs/source/_static/simod.png differ diff --git a/docs/source/api.rst b/docs/source/api.rst new file mode 100644 index 00000000..4272ae75 --- /dev/null +++ b/docs/source/api.rst @@ -0,0 +1,186 @@ +API Reference +============= + +This section provides an overview of the Simod API. + +Usage +----- +To use Simod in your Python code, import the main components: + +.. code-block:: python + + from pathlib import Path + + from simod.event_log.event_log import EventLog + from simod.settings.simod_settings import SimodSettings + from simod.simod import Simod + + # Initialize 'output' folder and read configuration file + output = Path("<path_to_output_folder>") + configuration_path = Path("<path_to_configuration_file>.yml") + settings = SimodSettings.from_path(configuration_path) + + # Read and preprocess event log + event_log = EventLog.from_path( + log_ids=settings.common.log_ids, + train_log_path=settings.common.train_log_path, + test_log_path=settings.common.test_log_path, + preprocessing_settings=settings.preprocessing, + need_test_partition=settings.common.perform_final_evaluation, + ) + + # Instantiate and run SIMOD + simod = Simod(settings=settings, event_log=event_log, output_dir=output) + simod.run() + +Modules Overview +---------------- + +Simod's codebase is organized into several key modules: + +- **simod**: The main class that orchestrates the overall functionality. +- **settings**: Handles the parsing and validation of configuration files. +- **event_log**: Manages the I/O operations of an event log as well as its preprocessing. +- **control_flow**: Utilities to discover and manage the control-flow model of a BPS model. +- **resource_model**: Utilities to discover and manage the resource model of a BPS model. +- **extraneous_delays**: Utilities to discover and manage the extraneous delays model of a BPS model. +- **simulation**: Manages the data model of a BPS model and its simulation and quality assessment. + +Detailed Module Documentation +----------------------------- + +Below is the detailed documentation for each module: + +SIMOD class +^^^^^^^^^^^ + +.. automodule:: simod.simod + :members: + :undoc-members: + :exclude-members: final_bps_model + +Settings Module +^^^^^^^^^^^^^^^ + +SIMOD settings +"""""""""""""" + +.. automodule:: simod.settings.simod_settings + :members: + :undoc-members: + :exclude-members: model_config, common, preprocessing, control_flow, resource_model, extraneous_activity_delays, version + +Common settings +""""""""""""""" + ..
automodule:: simod.settings.common_settings + :members: + :undoc-members: + :exclude-members: model_config, train_log_path, log_ids, test_log_path, process_model_path, perform_final_evaluation, num_final_evaluations, evaluation_metrics, use_observed_arrival_distribution, clean_intermediate_files, discover_data_attributes, DL, TWO_GRAM_DISTANCE, THREE_GRAM_DISTANCE, CIRCADIAN_EMD, CIRCADIAN_WORKFORCE_EMD, ARRIVAL_EMD, RELATIVE_EMD, ABSOLUTE_EMD, CYCLE_TIME_EMD + +Preprocessing settings +"""""""""""""""""""""" + +.. automodule:: simod.settings.preprocessing_settings + :members: + :undoc-members: + :exclude-members: model_config, multitasking, enable_time_concurrency_threshold, concurrency_thresholds + +Control-flow model settings +""""""""""""""""""""""""""" + +.. automodule:: simod.settings.control_flow_settings + :members: + :undoc-members: + :exclude-members: model_config, SPLIT_MINER_V1, SPLIT_MINER_V2, optimization_metric, num_iterations, num_evaluations_per_iteration, gateway_probabilities, mining_algorithm, epsilon, eta, discover_branch_rules, f_score, replace_or_joins, prioritize_parallelism + +Resource model settings +""""""""""""""""""""""" + +.. automodule:: simod.settings.resource_model_settings + :members: + :undoc-members: + :exclude-members: model_config, optimization_metric, num_iterations, num_evaluations_per_iteration, discovery_type, granularity, confidence, support, participation, discover_prioritization_rules, discover_batching_rules, fuzzy_angle + +Extraneous delays settings +"""""""""""""""""""""""""" + +.. automodule:: simod.settings.extraneous_delays_settings + :members: + :undoc-members: + :exclude-members: model_config, optimization_metric, discovery_method, num_iterations, num_evaluations_per_iteration + +Event Log Module +^^^^^^^^^^^^^^^^ + +.. automodule:: simod.event_log.event_log + :members: + :undoc-members: + :exclude-members: write_xes, train_partition, validation_partition, train_validation_partition, test_partition, log_ids, process_name + +.. automodule:: simod.event_log.preprocessor + :members: + :undoc-members: + :exclude-members: MultitaskingSettings, Settings + +Control-flow Model Module +^^^^^^^^^^^^^^^^^^^^^^^^^ + +.. automodule:: simod.control_flow.settings + :members: + :undoc-members: + :exclude-members: output_dir, provided_model_path, project_name, optimization_metric, gateway_probabilities_method, mining_algorithm, epsilon, eta, replace_or_joins, prioritize_parallelism, f_score, from_hyperopt_dict + +.. automodule:: simod.control_flow.optimizer + :members: + :undoc-members: + :exclude-members: event_log, initial_bps_model, settings, base_directory, best_bps_model, evaluation_measurements, cleanup + +.. automodule:: simod.control_flow.discovery + :members: + :undoc-members: + :exclude-members: add_bpmn_diagram_to_model, SplitMinerV1Settings, SplitMinerV2Settings, discover_process_model_with_split_miner_v1, discover_process_model_with_split_miner_v2 + +Resource Model Module +^^^^^^^^^^^^^^^^^^^^^ + +.. automodule:: simod.resource_model.settings + :members: + :undoc-members: + :exclude-members: output_dir, process_model_path, project_name, optimization_metric, calendar_discovery_params, discover_prioritization_rules, discover_batching_rules, from_hyperopt_dict + +.. automodule:: simod.resource_model.optimizer + :members: + :undoc-members: + :exclude-members: event_log, initial_bps_model, settings, base_directory, best_bps_model, evaluation_measurements, cleanup + +Extraneous Delays Model Module +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +.. 
automodule:: simod.extraneous_delays.optimizer + :members: + :undoc-members: + :exclude-members: cleanup + +.. automodule:: simod.extraneous_delays.types + :members: + :undoc-members: + :exclude-members: activity_name, delay_id, duration_distribution + +.. automodule:: simod.extraneous_delays.utilities + :members: + :undoc-members: + :exclude-members: + +Simulation Module +^^^^^^^^^^^^^^^^^ + +.. automodule:: simod.simulation.parameters.BPS_model + :members: + :undoc-members: + :exclude-members: process_model, gateway_probabilities, case_arrival_model, resource_model, extraneous_delays, case_attributes, global_attributes, event_attributes, prioritization_rules, batching_rules, branch_rules, calendar_granularity + +.. automodule:: simod.simulation.prosimos + :members: + :undoc-members: + :exclude-members: simulate_in_parallel, evaluate_logs, bpmn_path, parameters_path, output_log_path, num_simulation_cases, simulation_start diff --git a/docs/source/citation.rst b/docs/source/citation.rst new file mode 100644 index 00000000..dead9db0 --- /dev/null +++ b/docs/source/citation.rst @@ -0,0 +1,45 @@ +Cite the Paper +============== + +When using SIMOD for a publication, please cite the following article in you paper: + +`[Citation pending] +`_ + +More References +^^^^^^^^^^^^^^^ + +`Camargo, M., Dumas, M., González, O., 2020. "Automated discovery of +business process simulation models from event logs". Decis. Support Syst. +134, 113284. +`_ + +`Chapela-Campa, D., Dumas, M., 2024. "Enhancing business process +simulation models with extraneous activity delays". Inf. Syst. 122, 102346. +`_ + +`Chapela-Campa, D., Benchekroun, I., Baron, O., Dumas, M., Krass, D., +Senderovich, A., 2025. "A framework for measuring the quality of business +process simulation models". Inf. Syst. 127, 102447. +`_ + +`Lashkevich, K., Milani, F., Chapela-Campa, D., Suvorau, I., Dumas, M., +2024. "Unveiling the causes of waiting time in business processes from event +logs". Inf. Syst. 126, 102434. +`_ + +`López-Pintado, O., Dumas, M., Berx, J., 2024a. "Discovery, simulation, and +optimization of business processes with differentiated resources". Inf. Syst. +120, 102289. +`_ + +`López-Pintado, O., Dumas, M., 2023. "Discovery and simulation of business +processes with probabilistic resource availability calendars", in: Proceedings +of the 5th International Conference on Process Mining (ICPM), IEEE. pp. +1–8. +`_ + +`López-Pintado, O., Murashko, S., Dumas, M., 2024b. "Discovery and +simulation of data-aware business processes", in: Proceedings of the 6th +International Conference on Process Mining (ICPM), IEEE. pp. 105–112. +`_ diff --git a/docs/source/conf.py b/docs/source/conf.py new file mode 100644 index 00000000..2f8d4009 --- /dev/null +++ b/docs/source/conf.py @@ -0,0 +1,46 @@ +# Configuration file for the Sphinx documentation builder. 
+# +# For the full list of built-in configuration values, see the documentation: +# https://www.sphinx-doc.org/en/master/usage/configuration.html + +# -- Project information ----------------------------------------------------- +# https://www.sphinx-doc.org/en/master/usage/configuration.html#project-information + +project = 'SIMOD' +copyright = '2025, UT Information Systems Research Group' +author = 'UT Information Systems Research Group' +release = '5.1.2' + +# -- General configuration --------------------------------------------------- +# https://www.sphinx-doc.org/en/master/usage/configuration.html#general-configuration + +import os +import sys + +# Get the absolute path of the project's root directory +sys.path.insert(0, os.path.abspath("../../src")) # Adjust if necessary + +extensions = [ + "sphinx.ext.napoleon", + "sphinx.ext.viewcode", + "sphinx.ext.autosummary", + "sphinx.ext.intersphinx" +] + +intersphinx_mapping = { + "python": ("https://docs.python.org/3.9", None), + "pandas": ("https://pandas.pydata.org/pandas-docs/stable/", None), +} + +templates_path = ['_templates'] +exclude_patterns = [] +autodoc_class_attributes = False + +# -- Options for HTML output ------------------------------------------------- +# https://www.sphinx-doc.org/en/master/usage/configuration.html#options-for-html-output + +html_theme = 'sphinx_rtd_theme' +html_static_path = ['_static'] + +# Automatically generate summaries +autosummary_generate = True diff --git a/docs/source/index.rst b/docs/source/index.rst new file mode 100644 index 00000000..666f98f6 --- /dev/null +++ b/docs/source/index.rst @@ -0,0 +1,43 @@ +.. SIMOD documentation master file, created by + sphinx-quickstart on Mon Jan 27 16:09:16 2025. + You can adapt this file completely to your liking, but it should at least + contain the root `toctree` directive. + +SIMOD: Automated discovery of business process simulation models +================================================================ + +SIMOD combines process mining and machine learning techniques to automate the discovery and tuning of Business Process +Simulation models from event logs extracted from enterprise information systems (ERPs, CRM, case management systems, +etc.). SIMOD takes as input an event log in CSV format, a configuration file, and (optionally) a BPMN process model, +and discovers a business process simulation model that can be simulated using the Prosimos simulator, which is embedded +in SIMOD. + + +.. _fig_simod: +.. figure:: _static/simod.png + :align: center + :scale: 60% + + SIMOD main workflow. + + +In its standard workflow, SIMOD receives an event log and a configuration file, and +runs an iterative process to discover the BPS model that best reflects the behavior captured in the input event log. +This iterative process is designed as a pipeline-based architecture composed of multiple stages that run a +TPE-optimization process to obtain the parameters that lead to the most accurate model. + +Alternatively, SIMOD can also receive as input a BPMN model of the process. In this case, SIMOD skips the +corresponding discovery phase, and builds the BPS model over the input BPMN model. + +.. note:: + This project is under active development. + + ..
toctree:: + :maxdepth: 2 + :caption: Contents: + + installation + usage + api + citation diff --git a/docs/source/installation.rst b/docs/source/installation.rst new file mode 100644 index 00000000..c1292089 --- /dev/null +++ b/docs/source/installation.rst @@ -0,0 +1,88 @@ +Installation Guide +================== + +This guide provides instructions on how to install SIMOD using **pip** (PyPI) or **Docker**. + +Prerequisites +------------- +Before installing SIMOD, ensure you have the following dependencies: + +Dependencies for local installation +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +- **Python 3.9, 3.10, or 3.11**: The recommended version (most extensively tested) is Python 3.9; however, it also works with + Python versions 3.10 and 3.11. +- **Java 1.8**: Ensure Java is installed and added to your system’s PATH (e.g., + `Java.com <https://www.java.com>`_). +- **Rust and Cargo (\*)**: If you are on a system without precompiled dependencies, you may also need Rust + and Cargo to compile them (install both using `rustup.rs <https://rustup.rs>`_). + +Dependencies for Docker installation +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +- **Docker**: If you want to run SIMOD without installing dependencies, you can use the official Docker image (install + Docker from `https://www.docker.com/get-started/ <https://www.docker.com/get-started/>`_). + +Installation via PyPI +--------------------- +The simplest way to install SIMOD is via **pip** from PyPI (`simod project <https://pypi.org/project/simod/>`_): + +.. code-block:: bash + + python -m pip install simod + +Running SIMOD after installation: + +.. code-block:: bash + + simod --help + +Installation via Docker +----------------------- +If you prefer running SIMOD inside a **Docker container**, in an isolated environment without requiring Python or Java +installations, use the following commands: + +.. code-block:: bash + + docker pull nokal/simod + +To start a container: + +.. code-block:: bash + + docker run -it -v /path/to/resources/:/usr/src/Simod/resources -v /path/to/output:/usr/src/Simod/outputs nokal/simod bash + +Use the `resources/` directory to store event logs and configuration files. The `outputs/` directory will contain the +results of SIMOD. + +From inside the container, you can run SIMOD with: + +.. code-block:: bash + + poetry run simod --help + +Docker images for different SIMOD versions are available at `https://hub.docker.com/r/nokal/simod/tags <https://hub.docker.com/r/nokal/simod/tags>`_ + +Installation via source code +---------------------------- +If you prefer to download the source code and install it from source directly (you will need `git`, `python`, and + `poetry` installed), use the following commands: + +.. code-block:: bash + + git clone https://github.com/AutomatedProcessImprovement/Simod.git + + cd Simod + + python -m venv simod-env + + # source simod-env/bin/activate # on Linux/macOS systems + .\simod-env\Scripts\activate.bat # on Windows systems + + poetry install + +Running SIMOD after installation: + +.. code-block:: bash + + simod --help diff --git a/docs/source/usage.rst b/docs/source/usage.rst new file mode 100644 index 00000000..cf837735 --- /dev/null +++ b/docs/source/usage.rst @@ -0,0 +1,83 @@ +Usage Guide +=========== + +This guide provides instructions on how to use SIMOD from the command line to discover a BPS model from an event log in +CSV format. + +Running Simod +------------- + +Once Simod is installed (see `Installation `_), you can run it by specifying a configuration file. + +Installed via PyPI or source code +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + ..
code-block:: bash + + simod --configuration resources/config/configuration_example.yml + +Replace `resources/config/configuration_example.yml` with the path to your own configuration file. Paths can be +relative to the configuration file or absolute. + + +Installed via Docker +^^^^^^^^^^^^^^^^^^^^ + +.. code-block:: bash + + poetry run simod --configuration resources/config/configuration_example.yml + +Replace `resources/config/configuration_example.yml` with the path to your own configuration file. Paths can be +relative to the configuration file or absolute. + +Configuration File +------------------ +The configuration file is a YAML file that specifies various parameters for Simod. Ensure that the path to your event +log is specified in the configuration file. Here are some configuration examples: + +- Basic configuration to discover the full BPS + model (`basic <_static/configuration_example.yml>`_). +- Basic configuration to discover the full BPS model using fuzzy (probabilistic) resource + calendars (`probabilistic <_static/configuration_example_fuzzy.yml>`_). +- Basic configuration to discover the full BPS model with data-aware branching rules + (`data-aware <_static/configuration_example_data_aware.yml>`_). +- Basic configuration to discover the full BPS model, and evaluate it with a specified event + log (`with evaluation <_static/configuration_example_with_evaluation.yml>`_). +- Basic configuration to discover a BPS model with a provided BPMN process model as starting + point (`with BPMN model <_static/configuration_example_with_provided_process_model.yml>`_). +- Basic configuration to discover a BPS model with no optimization process (one-shot) + (`one-shot <_static/configuration_one_shot.yml>`_). +- Complete configuration example with all the possible + parameters (`complete config <_static/complete_configuration.yml>`_). + +Event Log Format +---------------- +Simod takes as input an event log in CSV format. + +.. _tab_event_log: +.. table:: Sample of input event log format. + :align: center + + ======= =========== =================== =================== ======== + case_id activity start_time end_time resource + ======= =========== =================== =================== ======== + 512 Create PO 03/11/2021 08:00:00 03/11/2021 08:31:11 DIO + 513 Create PO 03/11/2021 08:34:21 03/11/2021 09:02:09 DIO + 514 Create PO 03/11/2021 09:11:11 03/11/2021 09:49:51 DIO + 512 Approve PO 03/11/2021 12:13:06 03/11/2021 12:44:21 Joseph + 513 Reject PO 03/11/2021 12:30:51 03/11/2021 13:15:50 Jolyne + 514 Approve PO 03/11/2021 12:59:11 03/11/2021 13:32:36 Joseph + 512 Check Stock 03/11/2021 14:22:10 03/11/2021 14:49:22 DIO + 514 Check Stock 03/11/2021 15:11:01 03/11/2021 15:46:12 DIO + 514 Order Goods 04/11/2021 09:46:12 04/11/2021 10:34:23 Joseph + 512 Pack Goods 04/11/2021 10:46:50 04/11/2021 11:18:02 Giorno + ======= =========== =================== =================== ======== + +The column names can be specified as part of the configuration file (`see here <_static/complete_configuration.yml>`_). + +Output +------ +Simod discovers a business process simulation model that can be simulated using the +`Prosimos simulator `_, which is embedded in Simod. + +Once SIMOD is finished, the discovered BPS model can be found in the `outputs` directory, under the folder `best_result`. 
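Editor's note: as an illustration of the event-log format described in the usage guide above, the following sketch (not part of the diff; the file name, column mapping values, and day-first timestamp format are assumptions) shows how such a CSV could be loaded with pandas and accessed through the column names declared under `common.log_ids` in the configuration file:

.. code-block:: python

    import pandas as pd

    # Column mapping as declared under 'common.log_ids' in the configuration (illustrative values)
    log_ids = {
        "case": "case_id",
        "activity": "activity",
        "resource": "resource",
        "start_time": "start_time",
        "end_time": "end_time",
    }

    # Hypothetical file following the sample table above
    log = pd.read_csv("event_log.csv")

    # Parse the timestamp columns (assumed to be day-first, as in the sample table)
    for column in (log_ids["start_time"], log_ids["end_time"]):
        log[column] = pd.to_datetime(log[column], dayfirst=True)

    # Quick sanity checks on the log before running SIMOD on it
    print(log[[log_ids["case"], log_ids["activity"], log_ids["resource"]]].head())
    print(log.groupby(log_ids["case"]).size().describe())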
diff --git a/poetry.lock b/poetry.lock deleted file mode 100644 index 2e533a96..00000000 --- a/poetry.lock +++ /dev/null @@ -1,2331 +0,0 @@ -# This file is automatically @generated by Poetry 1.5.1 and should not be changed by hand. - -[[package]] -name = "annotated-types" -version = "0.5.0" -description = "Reusable constraint types to use with typing.Annotated" -optional = false -python-versions = ">=3.7" -files = [ - {file = "annotated_types-0.5.0-py3-none-any.whl", hash = "sha256:58da39888f92c276ad970249761ebea80ba544b77acddaa1a4d6cf78287d45fd"}, - {file = "annotated_types-0.5.0.tar.gz", hash = "sha256:47cdc3490d9ac1506ce92c7aaa76c579dc3509ff11e098fc867e5130ab7be802"}, -] - -[[package]] -name = "astroid" -version = "2.15.6" -description = "An abstract syntax tree for Python with inference support." -optional = false -python-versions = ">=3.7.2" -files = [ - {file = "astroid-2.15.6-py3-none-any.whl", hash = "sha256:389656ca57b6108f939cf5d2f9a2a825a3be50ba9d589670f393236e0a03b91c"}, - {file = "astroid-2.15.6.tar.gz", hash = "sha256:903f024859b7c7687d7a7f3a3f73b17301f8e42dfd9cc9df9d4418172d3e2dbd"}, -] - -[package.dependencies] -lazy-object-proxy = ">=1.4.0" -typing-extensions = {version = ">=4.0.0", markers = "python_version < \"3.11\""} -wrapt = [ - {version = ">=1.11,<2", markers = "python_version < \"3.11\""}, - {version = ">=1.14,<2", markers = "python_version >= \"3.11\""}, -] - -[[package]] -name = "certifi" -version = "2023.7.22" -description = "Python package for providing Mozilla's CA Bundle." -optional = false -python-versions = ">=3.6" -files = [ - {file = "certifi-2023.7.22-py3-none-any.whl", hash = "sha256:92d6037539857d8206b8f6ae472e8b77db8058fec5937a1ef3f54304089edbb9"}, - {file = "certifi-2023.7.22.tar.gz", hash = "sha256:539cc1d13202e33ca466e88b2807e29f4c13049d6d87031a3c110744495cb082"}, -] - -[[package]] -name = "charset-normalizer" -version = "3.2.0" -description = "The Real First Universal Charset Detector. Open, modern and actively maintained alternative to Chardet." 
-optional = false -python-versions = ">=3.7.0" -files = [ - {file = "charset-normalizer-3.2.0.tar.gz", hash = "sha256:3bb3d25a8e6c0aedd251753a79ae98a093c7e7b471faa3aa9a93a81431987ace"}, - {file = "charset_normalizer-3.2.0-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:0b87549028f680ca955556e3bd57013ab47474c3124dc069faa0b6545b6c9710"}, - {file = "charset_normalizer-3.2.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:7c70087bfee18a42b4040bb9ec1ca15a08242cf5867c58726530bdf3945672ed"}, - {file = "charset_normalizer-3.2.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:a103b3a7069b62f5d4890ae1b8f0597618f628b286b03d4bc9195230b154bfa9"}, - {file = "charset_normalizer-3.2.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:94aea8eff76ee6d1cdacb07dd2123a68283cb5569e0250feab1240058f53b623"}, - {file = "charset_normalizer-3.2.0-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:db901e2ac34c931d73054d9797383d0f8009991e723dab15109740a63e7f902a"}, - {file = "charset_normalizer-3.2.0-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:b0dac0ff919ba34d4df1b6131f59ce95b08b9065233446be7e459f95554c0dc8"}, - {file = "charset_normalizer-3.2.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:193cbc708ea3aca45e7221ae58f0fd63f933753a9bfb498a3b474878f12caaad"}, - {file = "charset_normalizer-3.2.0-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:09393e1b2a9461950b1c9a45d5fd251dc7c6f228acab64da1c9c0165d9c7765c"}, - {file = "charset_normalizer-3.2.0-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:baacc6aee0b2ef6f3d308e197b5d7a81c0e70b06beae1f1fcacffdbd124fe0e3"}, - {file = "charset_normalizer-3.2.0-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:bf420121d4c8dce6b889f0e8e4ec0ca34b7f40186203f06a946fa0276ba54029"}, - {file = "charset_normalizer-3.2.0-cp310-cp310-musllinux_1_1_ppc64le.whl", hash = "sha256:c04a46716adde8d927adb9457bbe39cf473e1e2c2f5d0a16ceb837e5d841ad4f"}, - {file = "charset_normalizer-3.2.0-cp310-cp310-musllinux_1_1_s390x.whl", hash = "sha256:aaf63899c94de41fe3cf934601b0f7ccb6b428c6e4eeb80da72c58eab077b19a"}, - {file = "charset_normalizer-3.2.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:d62e51710986674142526ab9f78663ca2b0726066ae26b78b22e0f5e571238dd"}, - {file = "charset_normalizer-3.2.0-cp310-cp310-win32.whl", hash = "sha256:04e57ab9fbf9607b77f7d057974694b4f6b142da9ed4a199859d9d4d5c63fe96"}, - {file = "charset_normalizer-3.2.0-cp310-cp310-win_amd64.whl", hash = "sha256:48021783bdf96e3d6de03a6e39a1171ed5bd7e8bb93fc84cc649d11490f87cea"}, - {file = "charset_normalizer-3.2.0-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:4957669ef390f0e6719db3613ab3a7631e68424604a7b448f079bee145da6e09"}, - {file = "charset_normalizer-3.2.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:46fb8c61d794b78ec7134a715a3e564aafc8f6b5e338417cb19fe9f57a5a9bf2"}, - {file = "charset_normalizer-3.2.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:f779d3ad205f108d14e99bb3859aa7dd8e9c68874617c72354d7ecaec2a054ac"}, - {file = "charset_normalizer-3.2.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f25c229a6ba38a35ae6e25ca1264621cc25d4d38dca2942a7fce0b67a4efe918"}, - {file = "charset_normalizer-3.2.0-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:2efb1bd13885392adfda4614c33d3b68dee4921fd0ac1d3988f8cbb7d589e72a"}, - {file = 
"charset_normalizer-3.2.0-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:1f30b48dd7fa1474554b0b0f3fdfdd4c13b5c737a3c6284d3cdc424ec0ffff3a"}, - {file = "charset_normalizer-3.2.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:246de67b99b6851627d945db38147d1b209a899311b1305dd84916f2b88526c6"}, - {file = "charset_normalizer-3.2.0-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:9bd9b3b31adcb054116447ea22caa61a285d92e94d710aa5ec97992ff5eb7cf3"}, - {file = "charset_normalizer-3.2.0-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:8c2f5e83493748286002f9369f3e6607c565a6a90425a3a1fef5ae32a36d749d"}, - {file = "charset_normalizer-3.2.0-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:3170c9399da12c9dc66366e9d14da8bf7147e1e9d9ea566067bbce7bb74bd9c2"}, - {file = "charset_normalizer-3.2.0-cp311-cp311-musllinux_1_1_ppc64le.whl", hash = "sha256:7a4826ad2bd6b07ca615c74ab91f32f6c96d08f6fcc3902ceeedaec8cdc3bcd6"}, - {file = "charset_normalizer-3.2.0-cp311-cp311-musllinux_1_1_s390x.whl", hash = "sha256:3b1613dd5aee995ec6d4c69f00378bbd07614702a315a2cf6c1d21461fe17c23"}, - {file = "charset_normalizer-3.2.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:9e608aafdb55eb9f255034709e20d5a83b6d60c054df0802fa9c9883d0a937aa"}, - {file = "charset_normalizer-3.2.0-cp311-cp311-win32.whl", hash = "sha256:f2a1d0fd4242bd8643ce6f98927cf9c04540af6efa92323e9d3124f57727bfc1"}, - {file = "charset_normalizer-3.2.0-cp311-cp311-win_amd64.whl", hash = "sha256:681eb3d7e02e3c3655d1b16059fbfb605ac464c834a0c629048a30fad2b27489"}, - {file = "charset_normalizer-3.2.0-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:c57921cda3a80d0f2b8aec7e25c8aa14479ea92b5b51b6876d975d925a2ea346"}, - {file = "charset_normalizer-3.2.0-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:41b25eaa7d15909cf3ac4c96088c1f266a9a93ec44f87f1d13d4a0e86c81b982"}, - {file = "charset_normalizer-3.2.0-cp37-cp37m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:f058f6963fd82eb143c692cecdc89e075fa0828db2e5b291070485390b2f1c9c"}, - {file = "charset_normalizer-3.2.0-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:a7647ebdfb9682b7bb97e2a5e7cb6ae735b1c25008a70b906aecca294ee96cf4"}, - {file = "charset_normalizer-3.2.0-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:eef9df1eefada2c09a5e7a40991b9fc6ac6ef20b1372abd48d2794a316dc0449"}, - {file = "charset_normalizer-3.2.0-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:e03b8895a6990c9ab2cdcd0f2fe44088ca1c65ae592b8f795c3294af00a461c3"}, - {file = "charset_normalizer-3.2.0-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:ee4006268ed33370957f55bf2e6f4d263eaf4dc3cfc473d1d90baff6ed36ce4a"}, - {file = "charset_normalizer-3.2.0-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:c4983bf937209c57240cff65906b18bb35e64ae872da6a0db937d7b4af845dd7"}, - {file = "charset_normalizer-3.2.0-cp37-cp37m-musllinux_1_1_ppc64le.whl", hash = "sha256:3bb7fda7260735efe66d5107fb7e6af6a7c04c7fce9b2514e04b7a74b06bf5dd"}, - {file = "charset_normalizer-3.2.0-cp37-cp37m-musllinux_1_1_s390x.whl", hash = "sha256:72814c01533f51d68702802d74f77ea026b5ec52793c791e2da806a3844a46c3"}, - {file = "charset_normalizer-3.2.0-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:70c610f6cbe4b9fce272c407dd9d07e33e6bf7b4aa1b7ffb6f6ded8e634e3592"}, - {file = 
"charset_normalizer-3.2.0-cp37-cp37m-win32.whl", hash = "sha256:a401b4598e5d3f4a9a811f3daf42ee2291790c7f9d74b18d75d6e21dda98a1a1"}, - {file = "charset_normalizer-3.2.0-cp37-cp37m-win_amd64.whl", hash = "sha256:c0b21078a4b56965e2b12f247467b234734491897e99c1d51cee628da9786959"}, - {file = "charset_normalizer-3.2.0-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:95eb302ff792e12aba9a8b8f8474ab229a83c103d74a750ec0bd1c1eea32e669"}, - {file = "charset_normalizer-3.2.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:1a100c6d595a7f316f1b6f01d20815d916e75ff98c27a01ae817439ea7726329"}, - {file = "charset_normalizer-3.2.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:6339d047dab2780cc6220f46306628e04d9750f02f983ddb37439ca47ced7149"}, - {file = "charset_normalizer-3.2.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e4b749b9cc6ee664a3300bb3a273c1ca8068c46be705b6c31cf5d276f8628a94"}, - {file = "charset_normalizer-3.2.0-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:a38856a971c602f98472050165cea2cdc97709240373041b69030be15047691f"}, - {file = "charset_normalizer-3.2.0-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:f87f746ee241d30d6ed93969de31e5ffd09a2961a051e60ae6bddde9ec3583aa"}, - {file = "charset_normalizer-3.2.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:89f1b185a01fe560bc8ae5f619e924407efca2191b56ce749ec84982fc59a32a"}, - {file = "charset_normalizer-3.2.0-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:e1c8a2f4c69e08e89632defbfabec2feb8a8d99edc9f89ce33c4b9e36ab63037"}, - {file = "charset_normalizer-3.2.0-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:2f4ac36d8e2b4cc1aa71df3dd84ff8efbe3bfb97ac41242fbcfc053c67434f46"}, - {file = "charset_normalizer-3.2.0-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:a386ebe437176aab38c041de1260cd3ea459c6ce5263594399880bbc398225b2"}, - {file = "charset_normalizer-3.2.0-cp38-cp38-musllinux_1_1_ppc64le.whl", hash = "sha256:ccd16eb18a849fd8dcb23e23380e2f0a354e8daa0c984b8a732d9cfaba3a776d"}, - {file = "charset_normalizer-3.2.0-cp38-cp38-musllinux_1_1_s390x.whl", hash = "sha256:e6a5bf2cba5ae1bb80b154ed68a3cfa2fa00fde979a7f50d6598d3e17d9ac20c"}, - {file = "charset_normalizer-3.2.0-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:45de3f87179c1823e6d9e32156fb14c1927fcc9aba21433f088fdfb555b77c10"}, - {file = "charset_normalizer-3.2.0-cp38-cp38-win32.whl", hash = "sha256:1000fba1057b92a65daec275aec30586c3de2401ccdcd41f8a5c1e2c87078706"}, - {file = "charset_normalizer-3.2.0-cp38-cp38-win_amd64.whl", hash = "sha256:8b2c760cfc7042b27ebdb4a43a4453bd829a5742503599144d54a032c5dc7e9e"}, - {file = "charset_normalizer-3.2.0-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:855eafa5d5a2034b4621c74925d89c5efef61418570e5ef9b37717d9c796419c"}, - {file = "charset_normalizer-3.2.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:203f0c8871d5a7987be20c72442488a0b8cfd0f43b7973771640fc593f56321f"}, - {file = "charset_normalizer-3.2.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:e857a2232ba53ae940d3456f7533ce6ca98b81917d47adc3c7fd55dad8fab858"}, - {file = "charset_normalizer-3.2.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5e86d77b090dbddbe78867a0275cb4df08ea195e660f1f7f13435a4649e954e5"}, - {file = "charset_normalizer-3.2.0-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = 
"sha256:c4fb39a81950ec280984b3a44f5bd12819953dc5fa3a7e6fa7a80db5ee853952"}, - {file = "charset_normalizer-3.2.0-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:2dee8e57f052ef5353cf608e0b4c871aee320dd1b87d351c28764fc0ca55f9f4"}, - {file = "charset_normalizer-3.2.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8700f06d0ce6f128de3ccdbc1acaea1ee264d2caa9ca05daaf492fde7c2a7200"}, - {file = "charset_normalizer-3.2.0-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:1920d4ff15ce893210c1f0c0e9d19bfbecb7983c76b33f046c13a8ffbd570252"}, - {file = "charset_normalizer-3.2.0-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:c1c76a1743432b4b60ab3358c937a3fe1341c828ae6194108a94c69028247f22"}, - {file = "charset_normalizer-3.2.0-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:f7560358a6811e52e9c4d142d497f1a6e10103d3a6881f18d04dbce3729c0e2c"}, - {file = "charset_normalizer-3.2.0-cp39-cp39-musllinux_1_1_ppc64le.whl", hash = "sha256:c8063cf17b19661471ecbdb3df1c84f24ad2e389e326ccaf89e3fb2484d8dd7e"}, - {file = "charset_normalizer-3.2.0-cp39-cp39-musllinux_1_1_s390x.whl", hash = "sha256:cd6dbe0238f7743d0efe563ab46294f54f9bc8f4b9bcf57c3c666cc5bc9d1299"}, - {file = "charset_normalizer-3.2.0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:1249cbbf3d3b04902ff081ffbb33ce3377fa6e4c7356f759f3cd076cc138d020"}, - {file = "charset_normalizer-3.2.0-cp39-cp39-win32.whl", hash = "sha256:6c409c0deba34f147f77efaa67b8e4bb83d2f11c8806405f76397ae5b8c0d1c9"}, - {file = "charset_normalizer-3.2.0-cp39-cp39-win_amd64.whl", hash = "sha256:7095f6fbfaa55defb6b733cfeb14efaae7a29f0b59d8cf213be4e7ca0b857b80"}, - {file = "charset_normalizer-3.2.0-py3-none-any.whl", hash = "sha256:8e098148dd37b4ce3baca71fb394c81dc5d9c7728c95df695d2dca218edf40e6"}, -] - -[[package]] -name = "click" -version = "8.1.7" -description = "Composable command line interface toolkit" -optional = false -python-versions = ">=3.7" -files = [ - {file = "click-8.1.7-py3-none-any.whl", hash = "sha256:ae74fb96c20a0277a1d615f1e4d73c8414f5a98db8b799a7931d1582f3390c28"}, - {file = "click-8.1.7.tar.gz", hash = "sha256:ca9853ad459e787e2192211578cc907e7594e294c7ccc834310722b41b9ca6de"}, -] - -[package.dependencies] -colorama = {version = "*", markers = "platform_system == \"Windows\""} - -[[package]] -name = "cloudpickle" -version = "2.2.1" -description = "Extended pickling support for Python objects" -optional = false -python-versions = ">=3.6" -files = [ - {file = "cloudpickle-2.2.1-py3-none-any.whl", hash = "sha256:61f594d1f4c295fa5cd9014ceb3a1fc4a70b0de1164b94fbc2d854ccba056f9f"}, - {file = "cloudpickle-2.2.1.tar.gz", hash = "sha256:d89684b8de9e34a2a43b3460fbca07d09d6e25ce858df4d5a44240403b6178f5"}, -] - -[[package]] -name = "colorama" -version = "0.4.6" -description = "Cross-platform colored terminal text." 
-optional = false -python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*,!=3.5.*,!=3.6.*,>=2.7" -files = [ - {file = "colorama-0.4.6-py2.py3-none-any.whl", hash = "sha256:4f1d9991f5acc0ca119f9d443620b77f9d6b33703e51011c16baf57afb285fc6"}, - {file = "colorama-0.4.6.tar.gz", hash = "sha256:08695f5cb7ed6e0531a20572697297273c47b8cae5a63ffc6d6ed5c201be6e44"}, -] - -[[package]] -name = "contourpy" -version = "1.1.0" -description = "Python library for calculating contours of 2D quadrilateral grids" -optional = false -python-versions = ">=3.8" -files = [ - {file = "contourpy-1.1.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:89f06eff3ce2f4b3eb24c1055a26981bffe4e7264acd86f15b97e40530b794bc"}, - {file = "contourpy-1.1.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:dffcc2ddec1782dd2f2ce1ef16f070861af4fb78c69862ce0aab801495dda6a3"}, - {file = "contourpy-1.1.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:25ae46595e22f93592d39a7eac3d638cda552c3e1160255258b695f7b58e5655"}, - {file = "contourpy-1.1.0-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:17cfaf5ec9862bc93af1ec1f302457371c34e688fbd381f4035a06cd47324f48"}, - {file = "contourpy-1.1.0-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:18a64814ae7bce73925131381603fff0116e2df25230dfc80d6d690aa6e20b37"}, - {file = "contourpy-1.1.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:90c81f22b4f572f8a2110b0b741bb64e5a6427e0a198b2cdc1fbaf85f352a3aa"}, - {file = "contourpy-1.1.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:53cc3a40635abedbec7f1bde60f8c189c49e84ac180c665f2cd7c162cc454baa"}, - {file = "contourpy-1.1.0-cp310-cp310-win_amd64.whl", hash = "sha256:1f795597073b09d631782e7245016a4323cf1cf0b4e06eef7ea6627e06a37ff2"}, - {file = "contourpy-1.1.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:0b7b04ed0961647691cfe5d82115dd072af7ce8846d31a5fac6c142dcce8b882"}, - {file = "contourpy-1.1.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:27bc79200c742f9746d7dd51a734ee326a292d77e7d94c8af6e08d1e6c15d545"}, - {file = "contourpy-1.1.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:052cc634bf903c604ef1a00a5aa093c54f81a2612faedaa43295809ffdde885e"}, - {file = "contourpy-1.1.0-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:9382a1c0bc46230fb881c36229bfa23d8c303b889b788b939365578d762b5c18"}, - {file = "contourpy-1.1.0-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:e5cec36c5090e75a9ac9dbd0ff4a8cf7cecd60f1b6dc23a374c7d980a1cd710e"}, - {file = "contourpy-1.1.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1f0cbd657e9bde94cd0e33aa7df94fb73c1ab7799378d3b3f902eb8eb2e04a3a"}, - {file = "contourpy-1.1.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:181cbace49874f4358e2929aaf7ba84006acb76694102e88dd15af861996c16e"}, - {file = "contourpy-1.1.0-cp311-cp311-win_amd64.whl", hash = "sha256:fb3b7d9e6243bfa1efb93ccfe64ec610d85cfe5aec2c25f97fbbd2e58b531256"}, - {file = "contourpy-1.1.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:bcb41692aa09aeb19c7c213411854402f29f6613845ad2453d30bf421fe68fed"}, - {file = "contourpy-1.1.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:5d123a5bc63cd34c27ff9c7ac1cd978909e9c71da12e05be0231c608048bb2ae"}, - {file = "contourpy-1.1.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:62013a2cf68abc80dadfd2307299bfa8f5aa0dcaec5b2954caeb5fa094171103"}, - 
{file = "contourpy-1.1.0-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:0b6616375d7de55797d7a66ee7d087efe27f03d336c27cf1f32c02b8c1a5ac70"}, - {file = "contourpy-1.1.0-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:317267d915490d1e84577924bd61ba71bf8681a30e0d6c545f577363157e5e94"}, - {file = "contourpy-1.1.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d551f3a442655f3dcc1285723f9acd646ca5858834efeab4598d706206b09c9f"}, - {file = "contourpy-1.1.0-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:e7a117ce7df5a938fe035cad481b0189049e8d92433b4b33aa7fc609344aafa1"}, - {file = "contourpy-1.1.0-cp38-cp38-win_amd64.whl", hash = "sha256:d4f26b25b4f86087e7d75e63212756c38546e70f2a92d2be44f80114826e1cd4"}, - {file = "contourpy-1.1.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:bc00bb4225d57bff7ebb634646c0ee2a1298402ec10a5fe7af79df9a51c1bfd9"}, - {file = "contourpy-1.1.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:189ceb1525eb0655ab8487a9a9c41f42a73ba52d6789754788d1883fb06b2d8a"}, - {file = "contourpy-1.1.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9f2931ed4741f98f74b410b16e5213f71dcccee67518970c42f64153ea9313b9"}, - {file = "contourpy-1.1.0-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:30f511c05fab7f12e0b1b7730ebdc2ec8deedcfb505bc27eb570ff47c51a8f15"}, - {file = "contourpy-1.1.0-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:143dde50520a9f90e4a2703f367cf8ec96a73042b72e68fcd184e1279962eb6f"}, - {file = "contourpy-1.1.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e94bef2580e25b5fdb183bf98a2faa2adc5b638736b2c0a4da98691da641316a"}, - {file = "contourpy-1.1.0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:ed614aea8462735e7d70141374bd7650afd1c3f3cb0c2dbbcbe44e14331bf002"}, - {file = "contourpy-1.1.0-cp39-cp39-win_amd64.whl", hash = "sha256:438ba416d02f82b692e371858143970ed2eb6337d9cdbbede0d8ad9f3d7dd17d"}, - {file = "contourpy-1.1.0-pp38-pypy38_pp73-macosx_10_9_x86_64.whl", hash = "sha256:a698c6a7a432789e587168573a864a7ea374c6be8d4f31f9d87c001d5a843493"}, - {file = "contourpy-1.1.0-pp38-pypy38_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:397b0ac8a12880412da3551a8cb5a187d3298a72802b45a3bd1805e204ad8439"}, - {file = "contourpy-1.1.0-pp38-pypy38_pp73-win_amd64.whl", hash = "sha256:a67259c2b493b00e5a4d0f7bfae51fb4b3371395e47d079a4446e9b0f4d70e76"}, - {file = "contourpy-1.1.0-pp39-pypy39_pp73-macosx_10_9_x86_64.whl", hash = "sha256:2b836d22bd2c7bb2700348e4521b25e077255ebb6ab68e351ab5aa91ca27e027"}, - {file = "contourpy-1.1.0-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:084eaa568400cfaf7179b847ac871582199b1b44d5699198e9602ecbbb5f6104"}, - {file = "contourpy-1.1.0-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:911ff4fd53e26b019f898f32db0d4956c9d227d51338fb3b03ec72ff0084ee5f"}, - {file = "contourpy-1.1.0.tar.gz", hash = "sha256:e53046c3863828d21d531cc3b53786e6580eb1ba02477e8681009b6aa0870b21"}, -] - -[package.dependencies] -numpy = ">=1.16" - -[package.extras] -bokeh = ["bokeh", "selenium"] -docs = ["furo", "sphinx-copybutton"] -mypy = ["contourpy[bokeh,docs]", "docutils-stubs", "mypy (==1.2.0)", "types-Pillow"] -test = ["Pillow", "contourpy[test-no-images]", "matplotlib"] -test-no-images = ["pytest", "pytest-cov", "wurlitzer"] - -[[package]] -name = "coverage" -version = "7.3.0" -description = "Code coverage measurement for Python" 
-optional = false -python-versions = ">=3.8" -files = [ - {file = "coverage-7.3.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:db76a1bcb51f02b2007adacbed4c88b6dee75342c37b05d1822815eed19edee5"}, - {file = "coverage-7.3.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:c02cfa6c36144ab334d556989406837336c1d05215a9bdf44c0bc1d1ac1cb637"}, - {file = "coverage-7.3.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:477c9430ad5d1b80b07f3c12f7120eef40bfbf849e9e7859e53b9c93b922d2af"}, - {file = "coverage-7.3.0-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:ce2ee86ca75f9f96072295c5ebb4ef2a43cecf2870b0ca5e7a1cbdd929cf67e1"}, - {file = "coverage-7.3.0-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:68d8a0426b49c053013e631c0cdc09b952d857efa8f68121746b339912d27a12"}, - {file = "coverage-7.3.0-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:b3eb0c93e2ea6445b2173da48cb548364f8f65bf68f3d090404080d338e3a689"}, - {file = "coverage-7.3.0-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:90b6e2f0f66750c5a1178ffa9370dec6c508a8ca5265c42fbad3ccac210a7977"}, - {file = "coverage-7.3.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:96d7d761aea65b291a98c84e1250cd57b5b51726821a6f2f8df65db89363be51"}, - {file = "coverage-7.3.0-cp310-cp310-win32.whl", hash = "sha256:63c5b8ecbc3b3d5eb3a9d873dec60afc0cd5ff9d9f1c75981d8c31cfe4df8527"}, - {file = "coverage-7.3.0-cp310-cp310-win_amd64.whl", hash = "sha256:97c44f4ee13bce914272589b6b41165bbb650e48fdb7bd5493a38bde8de730a1"}, - {file = "coverage-7.3.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:74c160285f2dfe0acf0f72d425f3e970b21b6de04157fc65adc9fd07ee44177f"}, - {file = "coverage-7.3.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:b543302a3707245d454fc49b8ecd2c2d5982b50eb63f3535244fd79a4be0c99d"}, - {file = "coverage-7.3.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ad0f87826c4ebd3ef484502e79b39614e9c03a5d1510cfb623f4a4a051edc6fd"}, - {file = "coverage-7.3.0-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:13c6cbbd5f31211d8fdb477f0f7b03438591bdd077054076eec362cf2207b4a7"}, - {file = "coverage-7.3.0-cp311-cp311-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:fac440c43e9b479d1241fe9d768645e7ccec3fb65dc3a5f6e90675e75c3f3e3a"}, - {file = "coverage-7.3.0-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:3c9834d5e3df9d2aba0275c9f67989c590e05732439b3318fa37a725dff51e74"}, - {file = "coverage-7.3.0-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:4c8e31cf29b60859876474034a83f59a14381af50cbe8a9dbaadbf70adc4b214"}, - {file = "coverage-7.3.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:7a9baf8e230f9621f8e1d00c580394a0aa328fdac0df2b3f8384387c44083c0f"}, - {file = "coverage-7.3.0-cp311-cp311-win32.whl", hash = "sha256:ccc51713b5581e12f93ccb9c5e39e8b5d4b16776d584c0f5e9e4e63381356482"}, - {file = "coverage-7.3.0-cp311-cp311-win_amd64.whl", hash = "sha256:887665f00ea4e488501ba755a0e3c2cfd6278e846ada3185f42d391ef95e7e70"}, - {file = "coverage-7.3.0-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:d000a739f9feed900381605a12a61f7aaced6beae832719ae0d15058a1e81c1b"}, - {file = "coverage-7.3.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:59777652e245bb1e300e620ce2bef0d341945842e4eb888c23a7f1d9e143c446"}, - {file = 
"coverage-7.3.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c9737bc49a9255d78da085fa04f628a310c2332b187cd49b958b0e494c125071"}, - {file = "coverage-7.3.0-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:5247bab12f84a1d608213b96b8af0cbb30d090d705b6663ad794c2f2a5e5b9fe"}, - {file = "coverage-7.3.0-cp312-cp312-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e2ac9a1de294773b9fa77447ab7e529cf4fe3910f6a0832816e5f3d538cfea9a"}, - {file = "coverage-7.3.0-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:85b7335c22455ec12444cec0d600533a238d6439d8d709d545158c1208483873"}, - {file = "coverage-7.3.0-cp312-cp312-musllinux_1_1_i686.whl", hash = "sha256:36ce5d43a072a036f287029a55b5c6a0e9bd73db58961a273b6dc11a2c6eb9c2"}, - {file = "coverage-7.3.0-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:211a4576e984f96d9fce61766ffaed0115d5dab1419e4f63d6992b480c2bd60b"}, - {file = "coverage-7.3.0-cp312-cp312-win32.whl", hash = "sha256:56afbf41fa4a7b27f6635bc4289050ac3ab7951b8a821bca46f5b024500e6321"}, - {file = "coverage-7.3.0-cp312-cp312-win_amd64.whl", hash = "sha256:7f297e0c1ae55300ff688568b04ff26b01c13dfbf4c9d2b7d0cb688ac60df479"}, - {file = "coverage-7.3.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:ac0dec90e7de0087d3d95fa0533e1d2d722dcc008bc7b60e1143402a04c117c1"}, - {file = "coverage-7.3.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:438856d3f8f1e27f8e79b5410ae56650732a0dcfa94e756df88c7e2d24851fcd"}, - {file = "coverage-7.3.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1084393c6bda8875c05e04fce5cfe1301a425f758eb012f010eab586f1f3905e"}, - {file = "coverage-7.3.0-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:49ab200acf891e3dde19e5aa4b0f35d12d8b4bd805dc0be8792270c71bd56c54"}, - {file = "coverage-7.3.0-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a67e6bbe756ed458646e1ef2b0778591ed4d1fcd4b146fc3ba2feb1a7afd4254"}, - {file = "coverage-7.3.0-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:8f39c49faf5344af36042b293ce05c0d9004270d811c7080610b3e713251c9b0"}, - {file = "coverage-7.3.0-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:7df91fb24c2edaabec4e0eee512ff3bc6ec20eb8dccac2e77001c1fe516c0c84"}, - {file = "coverage-7.3.0-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:34f9f0763d5fa3035a315b69b428fe9c34d4fc2f615262d6be3d3bf3882fb985"}, - {file = "coverage-7.3.0-cp38-cp38-win32.whl", hash = "sha256:bac329371d4c0d456e8d5f38a9b0816b446581b5f278474e416ea0c68c47dcd9"}, - {file = "coverage-7.3.0-cp38-cp38-win_amd64.whl", hash = "sha256:b859128a093f135b556b4765658d5d2e758e1fae3e7cc2f8c10f26fe7005e543"}, - {file = "coverage-7.3.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:fc0ed8d310afe013db1eedd37176d0839dc66c96bcfcce8f6607a73ffea2d6ba"}, - {file = "coverage-7.3.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:e61260ec93f99f2c2d93d264b564ba912bec502f679793c56f678ba5251f0393"}, - {file = "coverage-7.3.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:97af9554a799bd7c58c0179cc8dbf14aa7ab50e1fd5fa73f90b9b7215874ba28"}, - {file = "coverage-7.3.0-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:3558e5b574d62f9c46b76120a5c7c16c4612dc2644c3d48a9f4064a705eaee95"}, - {file = 
"coverage-7.3.0-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:37d5576d35fcb765fca05654f66aa71e2808d4237d026e64ac8b397ffa66a56a"}, - {file = "coverage-7.3.0-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:07ea61bcb179f8f05ffd804d2732b09d23a1238642bf7e51dad62082b5019b34"}, - {file = "coverage-7.3.0-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:80501d1b2270d7e8daf1b64b895745c3e234289e00d5f0e30923e706f110334e"}, - {file = "coverage-7.3.0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:4eddd3153d02204f22aef0825409091a91bf2a20bce06fe0f638f5c19a85de54"}, - {file = "coverage-7.3.0-cp39-cp39-win32.whl", hash = "sha256:2d22172f938455c156e9af2612650f26cceea47dc86ca048fa4e0b2d21646ad3"}, - {file = "coverage-7.3.0-cp39-cp39-win_amd64.whl", hash = "sha256:60f64e2007c9144375dd0f480a54d6070f00bb1a28f65c408370544091c9bc9e"}, - {file = "coverage-7.3.0-pp38.pp39.pp310-none-any.whl", hash = "sha256:5492a6ce3bdb15c6ad66cb68a0244854d9917478877a25671d70378bdc8562d0"}, - {file = "coverage-7.3.0.tar.gz", hash = "sha256:49dbb19cdcafc130f597d9e04a29d0a032ceedf729e41b181f51cd170e6ee865"}, -] - -[package.dependencies] -tomli = {version = "*", optional = true, markers = "python_full_version <= \"3.11.0a6\" and extra == \"toml\""} - -[package.extras] -toml = ["tomli"] - -[[package]] -name = "cycler" -version = "0.11.0" -description = "Composable style cycles" -optional = false -python-versions = ">=3.6" -files = [ - {file = "cycler-0.11.0-py3-none-any.whl", hash = "sha256:3a27e95f763a428a739d2add979fa7494c912a32c17c4c38c4d5f082cad165a3"}, - {file = "cycler-0.11.0.tar.gz", hash = "sha256:9c87405839a19696e837b3b818fed3f5f69f16f1eec1a1ad77e043dcea9c772f"}, -] - -[[package]] -name = "dill" -version = "0.3.7" -description = "serialize all of Python" -optional = false -python-versions = ">=3.7" -files = [ - {file = "dill-0.3.7-py3-none-any.whl", hash = "sha256:76b122c08ef4ce2eedcd4d1abd8e641114bfc6c2867f49f3c41facf65bf19f5e"}, - {file = "dill-0.3.7.tar.gz", hash = "sha256:cc1c8b182eb3013e24bd475ff2e9295af86c1a38eb1aff128dac8962a9ce3c03"}, -] - -[package.extras] -graph = ["objgraph (>=1.7.2)"] - -[[package]] -name = "docutils" -version = "0.20.1" -description = "Docutils -- Python Documentation Utilities" -optional = false -python-versions = ">=3.7" -files = [ - {file = "docutils-0.20.1-py3-none-any.whl", hash = "sha256:96f387a2c5562db4476f09f13bbab2192e764cac08ebbf3a34a95d9b1e4a59d6"}, - {file = "docutils-0.20.1.tar.gz", hash = "sha256:f08a4e276c3a1583a86dce3e34aba3fe04d02bba2dd51ed16106244e8a923e3b"}, -] - -[[package]] -name = "exceptiongroup" -version = "1.1.3" -description = "Backport of PEP 654 (exception groups)" -optional = false -python-versions = ">=3.7" -files = [ - {file = "exceptiongroup-1.1.3-py3-none-any.whl", hash = "sha256:343280667a4585d195ca1cf9cef84a4e178c4b6cf2274caef9859782b567d5e3"}, - {file = "exceptiongroup-1.1.3.tar.gz", hash = "sha256:097acd85d473d75af5bb98e41b61ff7fe35efe6675e4f9370ec6ec5126d160e9"}, -] - -[package.extras] -test = ["pytest (>=6)"] - -[[package]] -name = "extraneous-activity-delays" -version = "2.1.19" -description = "" -optional = false -python-versions = ">=3.9,<3.12" -files = [ - {file = "extraneous_activity_delays-2.1.19-py3-none-any.whl", hash = "sha256:3c3fd5b732b18101a7d67a948f147f059aaa8e605d52fec52410802ce52f2512"}, - {file = "extraneous_activity_delays-2.1.19.tar.gz", hash = "sha256:77df24657233faf687ea344756db31a02d91eba50146116f8f732b0ec787eb8e"}, -] - -[package.dependencies] -hyperopt = 
">=0.2.7,<0.3.0" -log-distance-measures = ">=1.0.2,<2.0.0" -lxml = ">=4.9.2,<5.0.0" -pix-framework = ">=0.13.0,<0.14.0" -prosimos = ">=2.0.0,<3.0.0" - -[[package]] -name = "fonttools" -version = "4.42.1" -description = "Tools to manipulate font files" -optional = false -python-versions = ">=3.8" -files = [ - {file = "fonttools-4.42.1-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:ed1a13a27f59d1fc1920394a7f596792e9d546c9ca5a044419dca70c37815d7c"}, - {file = "fonttools-4.42.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:c9b1ce7a45978b821a06d375b83763b27a3a5e8a2e4570b3065abad240a18760"}, - {file = "fonttools-4.42.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f720fa82a11c0f9042376fd509b5ed88dab7e3cd602eee63a1af08883b37342b"}, - {file = "fonttools-4.42.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:db55cbaea02a20b49fefbd8e9d62bd481aaabe1f2301dabc575acc6b358874fa"}, - {file = "fonttools-4.42.1-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:3a35981d90feebeaef05e46e33e6b9e5b5e618504672ca9cd0ff96b171e4bfff"}, - {file = "fonttools-4.42.1-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:68a02bbe020dc22ee0540e040117535f06df9358106d3775e8817d826047f3fd"}, - {file = "fonttools-4.42.1-cp310-cp310-win32.whl", hash = "sha256:12a7c247d1b946829bfa2f331107a629ea77dc5391dfd34fdcd78efa61f354ca"}, - {file = "fonttools-4.42.1-cp310-cp310-win_amd64.whl", hash = "sha256:a398bdadb055f8de69f62b0fc70625f7cbdab436bbb31eef5816e28cab083ee8"}, - {file = "fonttools-4.42.1-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:689508b918332fb40ce117131633647731d098b1b10d092234aa959b4251add5"}, - {file = "fonttools-4.42.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:9e36344e48af3e3bde867a1ca54f97c308735dd8697005c2d24a86054a114a71"}, - {file = "fonttools-4.42.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:19b7db825c8adee96fac0692e6e1ecd858cae9affb3b4812cdb9d934a898b29e"}, - {file = "fonttools-4.42.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:113337c2d29665839b7d90b39f99b3cac731f72a0eda9306165a305c7c31d341"}, - {file = "fonttools-4.42.1-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:37983b6bdab42c501202500a2be3a572f50d4efe3237e0686ee9d5f794d76b35"}, - {file = "fonttools-4.42.1-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:6ed2662a3d9c832afa36405f8748c250be94ae5dfc5283d668308391f2102861"}, - {file = "fonttools-4.42.1-cp311-cp311-win32.whl", hash = "sha256:179737095eb98332a2744e8f12037b2977f22948cf23ff96656928923ddf560a"}, - {file = "fonttools-4.42.1-cp311-cp311-win_amd64.whl", hash = "sha256:f2b82f46917d8722e6b5eafeefb4fb585d23babd15d8246c664cd88a5bddd19c"}, - {file = "fonttools-4.42.1-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:62f481ac772fd68901573956231aea3e4b1ad87b9b1089a61613a91e2b50bb9b"}, - {file = "fonttools-4.42.1-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:f2f806990160d1ce42d287aa419df3ffc42dfefe60d473695fb048355fe0c6a0"}, - {file = "fonttools-4.42.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:db372213d39fa33af667c2aa586a0c1235e88e9c850f5dd5c8e1f17515861868"}, - {file = "fonttools-4.42.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5d18fc642fd0ac29236ff88ecfccff229ec0386090a839dd3f1162e9a7944a40"}, - {file = "fonttools-4.42.1-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:8708b98c278012ad267ee8a7433baeb809948855e81922878118464b274c909d"}, - {file = 
"fonttools-4.42.1-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:c95b0724a6deea2c8c5d3222191783ced0a2f09bd6d33f93e563f6f1a4b3b3a4"}, - {file = "fonttools-4.42.1-cp38-cp38-win32.whl", hash = "sha256:4aa79366e442dbca6e2c8595645a3a605d9eeabdb7a094d745ed6106816bef5d"}, - {file = "fonttools-4.42.1-cp38-cp38-win_amd64.whl", hash = "sha256:acb47f6f8680de24c1ab65ebde39dd035768e2a9b571a07c7b8da95f6c8815fd"}, - {file = "fonttools-4.42.1-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:5fb289b7a815638a7613d46bcf324c9106804725b2bb8ad913c12b6958ffc4ec"}, - {file = "fonttools-4.42.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:53eb5091ddc8b1199330bb7b4a8a2e7995ad5d43376cadce84523d8223ef3136"}, - {file = "fonttools-4.42.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:46a0ec8adbc6ff13494eb0c9c2e643b6f009ce7320cf640de106fb614e4d4360"}, - {file = "fonttools-4.42.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7cc7d685b8eeca7ae69dc6416833fbfea61660684b7089bca666067cb2937dcf"}, - {file = "fonttools-4.42.1-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:be24fcb80493b2c94eae21df70017351851652a37de514de553435b256b2f249"}, - {file = "fonttools-4.42.1-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:515607ec756d7865f23070682622c49d922901943697871fc292277cf1e71967"}, - {file = "fonttools-4.42.1-cp39-cp39-win32.whl", hash = "sha256:0eb79a2da5eb6457a6f8ab904838454accc7d4cccdaff1fd2bd3a0679ea33d64"}, - {file = "fonttools-4.42.1-cp39-cp39-win_amd64.whl", hash = "sha256:7286aed4ea271df9eab8d7a9b29e507094b51397812f7ce051ecd77915a6e26b"}, - {file = "fonttools-4.42.1-py3-none-any.whl", hash = "sha256:9398f244e28e0596e2ee6024f808b06060109e33ed38dcc9bded452fd9bbb853"}, - {file = "fonttools-4.42.1.tar.gz", hash = "sha256:c391cd5af88aacaf41dd7cfb96eeedfad297b5899a39e12f4c2c3706d0a3329d"}, -] - -[package.extras] -all = ["brotli (>=1.0.1)", "brotlicffi (>=0.8.0)", "fs (>=2.2.0,<3)", "lxml (>=4.0,<5)", "lz4 (>=1.7.4.2)", "matplotlib", "munkres", "scipy", "skia-pathops (>=0.5.0)", "sympy", "uharfbuzz (>=0.23.0)", "unicodedata2 (>=15.0.0)", "xattr", "zopfli (>=0.1.4)"] -graphite = ["lz4 (>=1.7.4.2)"] -interpolatable = ["munkres", "scipy"] -lxml = ["lxml (>=4.0,<5)"] -pathops = ["skia-pathops (>=0.5.0)"] -plot = ["matplotlib"] -repacker = ["uharfbuzz (>=0.23.0)"] -symfont = ["sympy"] -type1 = ["xattr"] -ufo = ["fs (>=2.2.0,<3)"] -unicode = ["unicodedata2 (>=15.0.0)"] -woff = ["brotli (>=1.0.1)", "brotlicffi (>=0.8.0)", "zopfli (>=0.1.4)"] - -[[package]] -name = "future" -version = "0.18.3" -description = "Clean single-source support for Python 3 and 2" -optional = false -python-versions = ">=2.6, !=3.0.*, !=3.1.*, !=3.2.*" -files = [ - {file = "future-0.18.3.tar.gz", hash = "sha256:34a17436ed1e96697a86f9de3d15a3b0be01d8bc8de9c1dffd59fb8234ed5307"}, -] - -[[package]] -name = "greenlet" -version = "2.0.2" -description = "Lightweight in-process concurrent programming" -optional = false -python-versions = ">=2.7,!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*" -files = [ - {file = "greenlet-2.0.2-cp27-cp27m-macosx_10_14_x86_64.whl", hash = "sha256:bdfea8c661e80d3c1c99ad7c3ff74e6e87184895bbaca6ee8cc61209f8b9b85d"}, - {file = "greenlet-2.0.2-cp27-cp27m-manylinux2010_x86_64.whl", hash = "sha256:9d14b83fab60d5e8abe587d51c75b252bcc21683f24699ada8fb275d7712f5a9"}, - {file = "greenlet-2.0.2-cp27-cp27m-win32.whl", hash = "sha256:6c3acb79b0bfd4fe733dff8bc62695283b57949ebcca05ae5c129eb606ff2d74"}, - {file = "greenlet-2.0.2-cp27-cp27m-win_amd64.whl", hash = 
"sha256:283737e0da3f08bd637b5ad058507e578dd462db259f7f6e4c5c365ba4ee9343"}, - {file = "greenlet-2.0.2-cp27-cp27mu-manylinux2010_x86_64.whl", hash = "sha256:d27ec7509b9c18b6d73f2f5ede2622441de812e7b1a80bbd446cb0633bd3d5ae"}, - {file = "greenlet-2.0.2-cp310-cp310-macosx_11_0_x86_64.whl", hash = "sha256:30bcf80dda7f15ac77ba5af2b961bdd9dbc77fd4ac6105cee85b0d0a5fcf74df"}, - {file = "greenlet-2.0.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:26fbfce90728d82bc9e6c38ea4d038cba20b7faf8a0ca53a9c07b67318d46088"}, - {file = "greenlet-2.0.2-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:9190f09060ea4debddd24665d6804b995a9c122ef5917ab26e1566dcc712ceeb"}, - {file = "greenlet-2.0.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d75209eed723105f9596807495d58d10b3470fa6732dd6756595e89925ce2470"}, - {file = "greenlet-2.0.2-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:3a51c9751078733d88e013587b108f1b7a1fb106d402fb390740f002b6f6551a"}, - {file = "greenlet-2.0.2-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:76ae285c8104046b3a7f06b42f29c7b73f77683df18c49ab5af7983994c2dd91"}, - {file = "greenlet-2.0.2-cp310-cp310-win_amd64.whl", hash = "sha256:2d4686f195e32d36b4d7cf2d166857dbd0ee9f3d20ae349b6bf8afc8485b3645"}, - {file = "greenlet-2.0.2-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:c4302695ad8027363e96311df24ee28978162cdcdd2006476c43970b384a244c"}, - {file = "greenlet-2.0.2-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c48f54ef8e05f04d6eff74b8233f6063cb1ed960243eacc474ee73a2ea8573ca"}, - {file = "greenlet-2.0.2-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:a1846f1b999e78e13837c93c778dcfc3365902cfb8d1bdb7dd73ead37059f0d0"}, - {file = "greenlet-2.0.2-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3a06ad5312349fec0ab944664b01d26f8d1f05009566339ac6f63f56589bc1a2"}, - {file = "greenlet-2.0.2-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:eff4eb9b7eb3e4d0cae3d28c283dc16d9bed6b193c2e1ace3ed86ce48ea8df19"}, - {file = "greenlet-2.0.2-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:5454276c07d27a740c5892f4907c86327b632127dd9abec42ee62e12427ff7e3"}, - {file = "greenlet-2.0.2-cp311-cp311-win_amd64.whl", hash = "sha256:7cafd1208fdbe93b67c7086876f061f660cfddc44f404279c1585bbf3cdc64c5"}, - {file = "greenlet-2.0.2-cp35-cp35m-macosx_10_14_x86_64.whl", hash = "sha256:910841381caba4f744a44bf81bfd573c94e10b3045ee00de0cbf436fe50673a6"}, - {file = "greenlet-2.0.2-cp35-cp35m-manylinux2010_x86_64.whl", hash = "sha256:18a7f18b82b52ee85322d7a7874e676f34ab319b9f8cce5de06067384aa8ff43"}, - {file = "greenlet-2.0.2-cp35-cp35m-win32.whl", hash = "sha256:03a8f4f3430c3b3ff8d10a2a86028c660355ab637cee9333d63d66b56f09d52a"}, - {file = "greenlet-2.0.2-cp35-cp35m-win_amd64.whl", hash = "sha256:4b58adb399c4d61d912c4c331984d60eb66565175cdf4a34792cd9600f21b394"}, - {file = "greenlet-2.0.2-cp36-cp36m-macosx_10_14_x86_64.whl", hash = "sha256:703f18f3fda276b9a916f0934d2fb6d989bf0b4fb5a64825260eb9bfd52d78f0"}, - {file = "greenlet-2.0.2-cp36-cp36m-manylinux2010_x86_64.whl", hash = "sha256:32e5b64b148966d9cccc2c8d35a671409e45f195864560829f395a54226408d3"}, - {file = "greenlet-2.0.2-cp36-cp36m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2dd11f291565a81d71dab10b7033395b7a3a5456e637cf997a6f33ebdf06f8db"}, - {file = "greenlet-2.0.2-cp36-cp36m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = 
"sha256:e0f72c9ddb8cd28532185f54cc1453f2c16fb417a08b53a855c4e6a418edd099"}, - {file = "greenlet-2.0.2-cp36-cp36m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:cd021c754b162c0fb55ad5d6b9d960db667faad0fa2ff25bb6e1301b0b6e6a75"}, - {file = "greenlet-2.0.2-cp36-cp36m-musllinux_1_1_aarch64.whl", hash = "sha256:3c9b12575734155d0c09d6c3e10dbd81665d5c18e1a7c6597df72fd05990c8cf"}, - {file = "greenlet-2.0.2-cp36-cp36m-musllinux_1_1_x86_64.whl", hash = "sha256:b9ec052b06a0524f0e35bd8790686a1da006bd911dd1ef7d50b77bfbad74e292"}, - {file = "greenlet-2.0.2-cp36-cp36m-win32.whl", hash = "sha256:dbfcfc0218093a19c252ca8eb9aee3d29cfdcb586df21049b9d777fd32c14fd9"}, - {file = "greenlet-2.0.2-cp36-cp36m-win_amd64.whl", hash = "sha256:9f35ec95538f50292f6d8f2c9c9f8a3c6540bbfec21c9e5b4b751e0a7c20864f"}, - {file = "greenlet-2.0.2-cp37-cp37m-macosx_10_15_x86_64.whl", hash = "sha256:d5508f0b173e6aa47273bdc0a0b5ba055b59662ba7c7ee5119528f466585526b"}, - {file = "greenlet-2.0.2-cp37-cp37m-manylinux2010_x86_64.whl", hash = "sha256:f82d4d717d8ef19188687aa32b8363e96062911e63ba22a0cff7802a8e58e5f1"}, - {file = "greenlet-2.0.2-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c9c59a2120b55788e800d82dfa99b9e156ff8f2227f07c5e3012a45a399620b7"}, - {file = "greenlet-2.0.2-cp37-cp37m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:2780572ec463d44c1d3ae850239508dbeb9fed38e294c68d19a24d925d9223ca"}, - {file = "greenlet-2.0.2-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:937e9020b514ceedb9c830c55d5c9872abc90f4b5862f89c0887033ae33c6f73"}, - {file = "greenlet-2.0.2-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:36abbf031e1c0f79dd5d596bfaf8e921c41df2bdf54ee1eed921ce1f52999a86"}, - {file = "greenlet-2.0.2-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:18e98fb3de7dba1c0a852731c3070cf022d14f0d68b4c87a19cc1016f3bb8b33"}, - {file = "greenlet-2.0.2-cp37-cp37m-win32.whl", hash = "sha256:3f6ea9bd35eb450837a3d80e77b517ea5bc56b4647f5502cd28de13675ee12f7"}, - {file = "greenlet-2.0.2-cp37-cp37m-win_amd64.whl", hash = "sha256:7492e2b7bd7c9b9916388d9df23fa49d9b88ac0640db0a5b4ecc2b653bf451e3"}, - {file = "greenlet-2.0.2-cp38-cp38-macosx_10_15_x86_64.whl", hash = "sha256:b864ba53912b6c3ab6bcb2beb19f19edd01a6bfcbdfe1f37ddd1778abfe75a30"}, - {file = "greenlet-2.0.2-cp38-cp38-manylinux2010_x86_64.whl", hash = "sha256:ba2956617f1c42598a308a84c6cf021a90ff3862eddafd20c3333d50f0edb45b"}, - {file = "greenlet-2.0.2-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:fc3a569657468b6f3fb60587e48356fe512c1754ca05a564f11366ac9e306526"}, - {file = "greenlet-2.0.2-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:8eab883b3b2a38cc1e050819ef06a7e6344d4a990d24d45bc6f2cf959045a45b"}, - {file = "greenlet-2.0.2-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:acd2162a36d3de67ee896c43effcd5ee3de247eb00354db411feb025aa319857"}, - {file = "greenlet-2.0.2-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:0bf60faf0bc2468089bdc5edd10555bab6e85152191df713e2ab1fcc86382b5a"}, - {file = "greenlet-2.0.2-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:b0ef99cdbe2b682b9ccbb964743a6aca37905fda5e0452e5ee239b1654d37f2a"}, - {file = "greenlet-2.0.2-cp38-cp38-win32.whl", hash = "sha256:b80f600eddddce72320dbbc8e3784d16bd3fb7b517e82476d8da921f27d4b249"}, - {file = "greenlet-2.0.2-cp38-cp38-win_amd64.whl", hash = "sha256:4d2e11331fc0c02b6e84b0d28ece3a36e0548ee1a1ce9ddde03752d9b79bba40"}, - {file = 
"greenlet-2.0.2-cp39-cp39-macosx_11_0_x86_64.whl", hash = "sha256:88d9ab96491d38a5ab7c56dd7a3cc37d83336ecc564e4e8816dbed12e5aaefc8"}, - {file = "greenlet-2.0.2-cp39-cp39-manylinux2010_x86_64.whl", hash = "sha256:561091a7be172ab497a3527602d467e2b3fbe75f9e783d8b8ce403fa414f71a6"}, - {file = "greenlet-2.0.2-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:971ce5e14dc5e73715755d0ca2975ac88cfdaefcaab078a284fea6cfabf866df"}, - {file = "greenlet-2.0.2-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:be4ed120b52ae4d974aa40215fcdfde9194d63541c7ded40ee12eb4dda57b76b"}, - {file = "greenlet-2.0.2-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:94c817e84245513926588caf1152e3b559ff794d505555211ca041f032abbb6b"}, - {file = "greenlet-2.0.2-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:1a819eef4b0e0b96bb0d98d797bef17dc1b4a10e8d7446be32d1da33e095dbb8"}, - {file = "greenlet-2.0.2-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:7efde645ca1cc441d6dc4b48c0f7101e8d86b54c8530141b09fd31cef5149ec9"}, - {file = "greenlet-2.0.2-cp39-cp39-win32.whl", hash = "sha256:ea9872c80c132f4663822dd2a08d404073a5a9b5ba6155bea72fb2a79d1093b5"}, - {file = "greenlet-2.0.2-cp39-cp39-win_amd64.whl", hash = "sha256:db1a39669102a1d8d12b57de2bb7e2ec9066a6f2b3da35ae511ff93b01b5d564"}, - {file = "greenlet-2.0.2.tar.gz", hash = "sha256:e7c8dc13af7db097bed64a051d2dd49e9f0af495c26995c00a9ee842690d34c0"}, -] - -[package.extras] -docs = ["Sphinx", "docutils (<0.18)"] -test = ["objgraph", "psutil"] - -[[package]] -name = "hyperopt" -version = "0.2.7" -description = "Distributed Asynchronous Hyperparameter Optimization" -optional = false -python-versions = "*" -files = [ - {file = "hyperopt-0.2.7-py2.py3-none-any.whl", hash = "sha256:f3046d91fe4167dbf104365016596856b2524a609d22f047a066fc1ac796427c"}, - {file = "hyperopt-0.2.7.tar.gz", hash = "sha256:1bf89ae58050bbd32c7307199046117feee245c2fd9ab6255c7308522b7ca149"}, -] - -[package.dependencies] -cloudpickle = "*" -future = "*" -networkx = ">=2.2" -numpy = "*" -py4j = "*" -scipy = "*" -six = "*" -tqdm = "*" - -[package.extras] -atpe = ["lightgbm", "scikit-learn"] -dev = ["black", "nose", "pre-commit", "pytest"] -mongotrials = ["pymongo"] -sparktrials = ["pyspark"] - -[[package]] -name = "idna" -version = "3.4" -description = "Internationalized Domain Names in Applications (IDNA)" -optional = false -python-versions = ">=3.5" -files = [ - {file = "idna-3.4-py3-none-any.whl", hash = "sha256:90b77e79eaa3eba6de819a0c442c0b4ceefc341a7a2ab77d7562bf49f425c5c2"}, - {file = "idna-3.4.tar.gz", hash = "sha256:814f528e8dead7d329833b91c5faa87d60bf71824cd12a7530b5526063d02cb4"}, -] - -[[package]] -name = "importlib-resources" -version = "6.0.1" -description = "Read resources from Python packages" -optional = false -python-versions = ">=3.8" -files = [ - {file = "importlib_resources-6.0.1-py3-none-any.whl", hash = "sha256:134832a506243891221b88b4ae1213327eea96ceb4e407a00d790bb0626f45cf"}, - {file = "importlib_resources-6.0.1.tar.gz", hash = "sha256:4359457e42708462b9626a04657c6208ad799ceb41e5c58c57ffa0e6a098a5d4"}, -] - -[package.dependencies] -zipp = {version = ">=3.1.0", markers = "python_version < \"3.10\""} - -[package.extras] -docs = ["furo", "jaraco.packaging (>=9.3)", "jaraco.tidelift (>=1.4)", "rst.linker (>=1.9)", "sphinx (>=3.5)", "sphinx-lint"] -testing = ["pytest (>=6)", "pytest-black (>=0.3.7)", "pytest-checkdocs (>=2.4)", "pytest-cov", "pytest-enabler (>=2.2)", "pytest-mypy (>=0.9.1)", "pytest-ruff"] - 
[poetry.lock hunk: removes the pinned lock entries (package metadata, dependency constraints, and per-wheel hash lists) for iniconfig 2.0.0, isort 5.12.0, jellyfish 0.11.2, joblib 1.3.2, kiwisolver 1.4.5, lazy-object-proxy 1.9.0, log-distance-measures 1.0.2, lxml 4.9.3, matplotlib 3.7.2, mccabe 0.7.0, memory-profiler 0.61.0, networkx 3.1, numpy 1.25.2, openxes-cli-py 0.1.15, packaging 23.1, pandas 2.1.0, pandasql 0.7.3, pendulum 2.1.2, and pillow 10.0.0 (the pillow entry is cut off mid-hash at the end of this hunk)]
"sha256:cd25d2a9d2b36fcb318882481367956d2cf91329f6892fe5d385c346c0649629"}, - {file = "Pillow-10.0.0-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:3b08d4cc24f471b2c8ca24ec060abf4bebc6b144cb89cba638c720546b1cf538"}, - {file = "Pillow-10.0.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:d737a602fbd82afd892ca746392401b634e278cb65d55c4b7a8f48e9ef8d008d"}, - {file = "Pillow-10.0.0-cp311-cp311-win_amd64.whl", hash = "sha256:3a82c40d706d9aa9734289740ce26460a11aeec2d9c79b7af87bb35f0073c12f"}, - {file = "Pillow-10.0.0-cp312-cp312-macosx_10_10_x86_64.whl", hash = "sha256:d80cf684b541685fccdd84c485b31ce73fc5c9b5d7523bf1394ce134a60c6883"}, - {file = "Pillow-10.0.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:76de421f9c326da8f43d690110f0e79fe3ad1e54be811545d7d91898b4c8493e"}, - {file = "Pillow-10.0.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:81ff539a12457809666fef6624684c008e00ff6bf455b4b89fd00a140eecd640"}, - {file = "Pillow-10.0.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ce543ed15570eedbb85df19b0a1a7314a9c8141a36ce089c0a894adbfccb4568"}, - {file = "Pillow-10.0.0-cp312-cp312-manylinux_2_28_aarch64.whl", hash = "sha256:685ac03cc4ed5ebc15ad5c23bc555d68a87777586d970c2c3e216619a5476223"}, - {file = "Pillow-10.0.0-cp312-cp312-manylinux_2_28_x86_64.whl", hash = "sha256:d72e2ecc68a942e8cf9739619b7f408cc7b272b279b56b2c83c6123fcfa5cdff"}, - {file = "Pillow-10.0.0-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:d50b6aec14bc737742ca96e85d6d0a5f9bfbded018264b3b70ff9d8c33485551"}, - {file = "Pillow-10.0.0-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:00e65f5e822decd501e374b0650146063fbb30a7264b4d2744bdd7b913e0cab5"}, - {file = "Pillow-10.0.0-cp312-cp312-win_amd64.whl", hash = "sha256:f31f9fdbfecb042d046f9d91270a0ba28368a723302786c0009ee9b9f1f60199"}, - {file = "Pillow-10.0.0-cp38-cp38-macosx_10_10_x86_64.whl", hash = "sha256:349930d6e9c685c089284b013478d6f76e3a534e36ddfa912cde493f235372f3"}, - {file = "Pillow-10.0.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:3a684105f7c32488f7153905a4e3015a3b6c7182e106fe3c37fbb5ef3e6994c3"}, - {file = "Pillow-10.0.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b4f69b3700201b80bb82c3a97d5e9254084f6dd5fb5b16fc1a7b974260f89f43"}, - {file = "Pillow-10.0.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3f07ea8d2f827d7d2a49ecf1639ec02d75ffd1b88dcc5b3a61bbb37a8759ad8d"}, - {file = "Pillow-10.0.0-cp38-cp38-manylinux_2_28_aarch64.whl", hash = "sha256:040586f7d37b34547153fa383f7f9aed68b738992380ac911447bb78f2abe530"}, - {file = "Pillow-10.0.0-cp38-cp38-manylinux_2_28_x86_64.whl", hash = "sha256:f88a0b92277de8e3ca715a0d79d68dc82807457dae3ab8699c758f07c20b3c51"}, - {file = "Pillow-10.0.0-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:c7cf14a27b0d6adfaebb3ae4153f1e516df54e47e42dcc073d7b3d76111a8d86"}, - {file = "Pillow-10.0.0-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:3400aae60685b06bb96f99a21e1ada7bc7a413d5f49bce739828ecd9391bb8f7"}, - {file = "Pillow-10.0.0-cp38-cp38-win_amd64.whl", hash = "sha256:dbc02381779d412145331789b40cc7b11fdf449e5d94f6bc0b080db0a56ea3f0"}, - {file = "Pillow-10.0.0-cp39-cp39-macosx_10_10_x86_64.whl", hash = "sha256:9211e7ad69d7c9401cfc0e23d49b69ca65ddd898976d660a2fa5904e3d7a9baa"}, - {file = "Pillow-10.0.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:faaf07ea35355b01a35cb442dd950d8f1bb5b040a7787791a535de13db15ed90"}, - {file = 
"Pillow-10.0.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c9f72a021fbb792ce98306ffb0c348b3c9cb967dce0f12a49aa4c3d3fdefa967"}, - {file = "Pillow-10.0.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9f7c16705f44e0504a3a2a14197c1f0b32a95731d251777dcb060aa83022cb2d"}, - {file = "Pillow-10.0.0-cp39-cp39-manylinux_2_28_aarch64.whl", hash = "sha256:76edb0a1fa2b4745fb0c99fb9fb98f8b180a1bbceb8be49b087e0b21867e77d3"}, - {file = "Pillow-10.0.0-cp39-cp39-manylinux_2_28_x86_64.whl", hash = "sha256:368ab3dfb5f49e312231b6f27b8820c823652b7cd29cfbd34090565a015e99ba"}, - {file = "Pillow-10.0.0-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:608bfdee0d57cf297d32bcbb3c728dc1da0907519d1784962c5f0c68bb93e5a3"}, - {file = "Pillow-10.0.0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:5c6e3df6bdd396749bafd45314871b3d0af81ff935b2d188385e970052091017"}, - {file = "Pillow-10.0.0-cp39-cp39-win_amd64.whl", hash = "sha256:7be600823e4c8631b74e4a0d38384c73f680e6105a7d3c6824fcf226c178c7e6"}, - {file = "Pillow-10.0.0-pp310-pypy310_pp73-macosx_10_10_x86_64.whl", hash = "sha256:92be919bbc9f7d09f7ae343c38f5bb21c973d2576c1d45600fce4b74bafa7ac0"}, - {file = "Pillow-10.0.0-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8f8182b523b2289f7c415f589118228d30ac8c355baa2f3194ced084dac2dbba"}, - {file = "Pillow-10.0.0-pp310-pypy310_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:38250a349b6b390ee6047a62c086d3817ac69022c127f8a5dc058c31ccef17f3"}, - {file = "Pillow-10.0.0-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:88af2003543cc40c80f6fca01411892ec52b11021b3dc22ec3bc9d5afd1c5334"}, - {file = "Pillow-10.0.0-pp39-pypy39_pp73-macosx_10_10_x86_64.whl", hash = "sha256:c189af0545965fa8d3b9613cfdb0cd37f9d71349e0f7750e1fd704648d475ed2"}, - {file = "Pillow-10.0.0-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ce7b031a6fc11365970e6a5686d7ba8c63e4c1cf1ea143811acbb524295eabed"}, - {file = "Pillow-10.0.0-pp39-pypy39_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:db24668940f82321e746773a4bc617bfac06ec831e5c88b643f91f122a785684"}, - {file = "Pillow-10.0.0-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:efe8c0681042536e0d06c11f48cebe759707c9e9abf880ee213541c5b46c5bf3"}, - {file = "Pillow-10.0.0.tar.gz", hash = "sha256:9c82b5b3e043c7af0d95792d0d20ccf68f61a1fec6b3530e718b688422727396"}, -] - -[package.extras] -docs = ["furo", "olefile", "sphinx (>=2.4)", "sphinx-copybutton", "sphinx-inline-tabs", "sphinx-removed-in", "sphinxext-opengraph"] -tests = ["check-manifest", "coverage", "defusedxml", "markdown2", "olefile", "packaging", "pyroma", "pytest", "pytest-cov", "pytest-timeout"] - -[[package]] -name = "pix-framework" -version = "0.13.5" -description = "Process Improvement Explorer Framework contains process discovery and improvement modules of the Process Improvement Explorer project." 
-optional = false -python-versions = ">=3.9,<3.12" -files = [ - {file = "pix_framework-0.13.5-py3-none-any.whl", hash = "sha256:39c863368f928776b173e90047e04f38e796cf0eb5d8e278158a096e5e02e9d9"}, - {file = "pix_framework-0.13.5.tar.gz", hash = "sha256:dec86b2876188715a72aad507be83a1080e6c29e8e86e2279438a79e83d26cd6"}, -] - -[package.dependencies] -lxml = ">=4.9.3,<5.0.0" -networkx = ">=3.1,<4.0" -pandas = ">=2.0.1,<3.0.0" -pandasql = ">=0.7.3,<0.8.0" -polars = ">=0.18.15,<0.19.0" -pyarrow = ">=12.0.1,<13.0.0" -pytz = ">=2023.3,<2024.0" -scikit-learn = ">=1.3.0,<2.0.0" -scipy = ">=1.10.1,<2.0.0" -wittgenstein = ">=0.3.4,<0.4.0" - -[[package]] -name = "platformdirs" -version = "3.10.0" -description = "A small Python package for determining appropriate platform-specific dirs, e.g. a \"user data dir\"." -optional = false -python-versions = ">=3.7" -files = [ - {file = "platformdirs-3.10.0-py3-none-any.whl", hash = "sha256:d7c24979f292f916dc9cbf8648319032f551ea8c49a4c9bf2fb556a02070ec1d"}, - {file = "platformdirs-3.10.0.tar.gz", hash = "sha256:b45696dab2d7cc691a3226759c0d3b00c47c8b6e293d96f6436f733303f77f6d"}, -] - -[package.extras] -docs = ["furo (>=2023.7.26)", "proselint (>=0.13)", "sphinx (>=7.1.1)", "sphinx-autodoc-typehints (>=1.24)"] -test = ["appdirs (==1.4.4)", "covdefaults (>=2.3)", "pytest (>=7.4)", "pytest-cov (>=4.1)", "pytest-mock (>=3.11.1)"] - -[[package]] -name = "pluggy" -version = "1.3.0" -description = "plugin and hook calling mechanisms for python" -optional = false -python-versions = ">=3.8" -files = [ - {file = "pluggy-1.3.0-py3-none-any.whl", hash = "sha256:d89c696a773f8bd377d18e5ecda92b7a3793cbe66c87060a6fb58c7b6e1061f7"}, - {file = "pluggy-1.3.0.tar.gz", hash = "sha256:cf61ae8f126ac6f7c451172cf30e3e43d3ca77615509771b3a984a0730651e12"}, -] - -[package.extras] -dev = ["pre-commit", "tox"] -testing = ["pytest", "pytest-benchmark"] - -[[package]] -name = "polars" -version = "0.18.15" -description = "Blazingly fast DataFrame library" -optional = false -python-versions = ">=3.8" -files = [ - {file = "polars-0.18.15-cp38-abi3-macosx_10_7_x86_64.whl", hash = "sha256:f7a4e4108efd2ab728249f792c89d2e7baffd65e0d6cd9f09b6c395363e3fbea"}, - {file = "polars-0.18.15-cp38-abi3-macosx_11_0_arm64.whl", hash = "sha256:7dee57ecc6f6151f1f9b960f6baa5032ba5e967d3a0dc0cda830be20745be58c"}, - {file = "polars-0.18.15-cp38-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2c713610c7c144f41987092e2ab2372937933fbdc494a65c08eea251af91b60f"}, - {file = "polars-0.18.15-cp38-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c48d248891cfe62ee58f852dabf54c6bd85d4fc83b3b61b759534be0d3b6ec81"}, - {file = "polars-0.18.15-cp38-abi3-win_amd64.whl", hash = "sha256:a2f1e3ad546b98601d06340606e90a8788c35e064b4d82d27301069ce744086e"}, - {file = "polars-0.18.15.tar.gz", hash = "sha256:0c483fc1cfb25d07443c0d51eff9a18b94ccdbf6b252212767524412667ca870"}, -] - -[package.extras] -adbc = ["adbc_driver_sqlite"] -all = ["polars[adbc,cloudpickle,connectorx,deltalake,fsspec,matplotlib,numpy,pandas,pyarrow,pydantic,sqlalchemy,timezone,xlsx2csv,xlsxwriter]"] -cloudpickle = ["cloudpickle"] -connectorx = ["connectorx"] -deltalake = ["deltalake (>=0.10.0)"] -fsspec = ["fsspec"] -matplotlib = ["matplotlib"] -numpy = ["numpy (>=1.16.0)"] -pandas = ["pandas", "pyarrow (>=7.0.0)"] -pyarrow = ["pyarrow (>=7.0.0)"] -pydantic = ["pydantic"] -sqlalchemy = ["pandas", "sqlalchemy"] -timezone = ["backports.zoneinfo", "tzdata"] -xlsx2csv = ["xlsx2csv (>=0.8.0)"] -xlsxwriter = ["xlsxwriter"] - 
-[[package]] -name = "prosimos" -version = "2.0.3" -description = "" -optional = false -python-versions = ">=3.9,<3.12" -files = [ - {file = "prosimos-2.0.3-py3-none-any.whl", hash = "sha256:add9595f57daab45b4aed4a57057f22d84a116b8b8fd8eb212ef1dd26ed8740b"}, - {file = "prosimos-2.0.3.tar.gz", hash = "sha256:644a3f73072da11419754282f05ad5320703f74ada36338176324f71f332cdf4"}, -] - -[package.dependencies] -click = ">=8.1.3,<9.0.0" -numpy = ">=1.24.3,<2.0.0" -pandas = ">=2.0.1,<3.0.0" -pix-framework = ">=0.13.0,<0.14.0" -pylint = ">=2.17.4,<3.0.0" -python-dateutil = ">=2.8.2,<3.0.0" -pytz = ">=2023.3,<2024.0" -scipy = ">=1.10.1,<2.0.0" - -[[package]] -name = "psutil" -version = "5.9.5" -description = "Cross-platform lib for process and system monitoring in Python." -optional = false -python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*" -files = [ - {file = "psutil-5.9.5-cp27-cp27m-macosx_10_9_x86_64.whl", hash = "sha256:be8929ce4313f9f8146caad4272f6abb8bf99fc6cf59344a3167ecd74f4f203f"}, - {file = "psutil-5.9.5-cp27-cp27m-manylinux2010_i686.whl", hash = "sha256:ab8ed1a1d77c95453db1ae00a3f9c50227ebd955437bcf2a574ba8adbf6a74d5"}, - {file = "psutil-5.9.5-cp27-cp27m-manylinux2010_x86_64.whl", hash = "sha256:4aef137f3345082a3d3232187aeb4ac4ef959ba3d7c10c33dd73763fbc063da4"}, - {file = "psutil-5.9.5-cp27-cp27mu-manylinux2010_i686.whl", hash = "sha256:ea8518d152174e1249c4f2a1c89e3e6065941df2fa13a1ab45327716a23c2b48"}, - {file = "psutil-5.9.5-cp27-cp27mu-manylinux2010_x86_64.whl", hash = "sha256:acf2aef9391710afded549ff602b5887d7a2349831ae4c26be7c807c0a39fac4"}, - {file = "psutil-5.9.5-cp27-none-win32.whl", hash = "sha256:5b9b8cb93f507e8dbaf22af6a2fd0ccbe8244bf30b1baad6b3954e935157ae3f"}, - {file = "psutil-5.9.5-cp27-none-win_amd64.whl", hash = "sha256:8c5f7c5a052d1d567db4ddd231a9d27a74e8e4a9c3f44b1032762bd7b9fdcd42"}, - {file = "psutil-5.9.5-cp36-abi3-macosx_10_9_x86_64.whl", hash = "sha256:3c6f686f4225553615612f6d9bc21f1c0e305f75d7d8454f9b46e901778e7217"}, - {file = "psutil-5.9.5-cp36-abi3-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:7a7dd9997128a0d928ed4fb2c2d57e5102bb6089027939f3b722f3a210f9a8da"}, - {file = "psutil-5.9.5-cp36-abi3-manylinux_2_12_x86_64.manylinux2010_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:89518112647f1276b03ca97b65cc7f64ca587b1eb0278383017c2a0dcc26cbe4"}, - {file = "psutil-5.9.5-cp36-abi3-win32.whl", hash = "sha256:104a5cc0e31baa2bcf67900be36acde157756b9c44017b86b2c049f11957887d"}, - {file = "psutil-5.9.5-cp36-abi3-win_amd64.whl", hash = "sha256:b258c0c1c9d145a1d5ceffab1134441c4c5113b2417fafff7315a917a026c3c9"}, - {file = "psutil-5.9.5-cp38-abi3-macosx_11_0_arm64.whl", hash = "sha256:c607bb3b57dc779d55e1554846352b4e358c10fff3abf3514a7a6601beebdb30"}, - {file = "psutil-5.9.5.tar.gz", hash = "sha256:5410638e4df39c54d957fc51ce03048acd8e6d60abc0f5107af51e5fb566eb3c"}, -] - -[package.extras] -test = ["enum34", "ipaddress", "mock", "pywin32", "wmi"] - -[[package]] -name = "pulp" -version = "2.7.0" -description = "PuLP is an LP modeler written in python. PuLP can generate MPS or LP files and call GLPK, COIN CLP/CBC, CPLEX, and GUROBI to solve linear problems." 
-optional = false -python-versions = "*" -files = [ - {file = "PuLP-2.7.0-py3-none-any.whl", hash = "sha256:b6de42c929e80325bf44cc7a2997f02535440800c376b9eb8cb7b4670ed53769"}, - {file = "PuLP-2.7.0.tar.gz", hash = "sha256:e73ee6b32d639c9b8cf4b4aded334ba158be5f8313544e056f796ace0a10ae63"}, -] - -[[package]] -name = "py-cpuinfo" -version = "9.0.0" -description = "Get CPU info with pure Python" -optional = false -python-versions = "*" -files = [ - {file = "py-cpuinfo-9.0.0.tar.gz", hash = "sha256:3cdbbf3fac90dc6f118bfd64384f309edeadd902d7c8fb17f02ffa1fc3f49690"}, - {file = "py_cpuinfo-9.0.0-py3-none-any.whl", hash = "sha256:859625bc251f64e21f077d099d4162689c762b5d6a4c3c97553d56241c9674d5"}, -] - -[[package]] -name = "py4j" -version = "0.10.9.7" -description = "Enables Python programs to dynamically access arbitrary Java objects" -optional = false -python-versions = "*" -files = [ - {file = "py4j-0.10.9.7-py2.py3-none-any.whl", hash = "sha256:85defdfd2b2376eb3abf5ca6474b51ab7e0de341c75a02f46dc9b5976f5a5c1b"}, - {file = "py4j-0.10.9.7.tar.gz", hash = "sha256:0b6e5315bb3ada5cf62ac651d107bb2ebc02def3dee9d9548e3baac644ea8dbb"}, -] - -[[package]] -name = "pyarrow" -version = "12.0.1" -description = "Python library for Apache Arrow" -optional = false -python-versions = ">=3.7" -files = [ - {file = "pyarrow-12.0.1-cp310-cp310-macosx_10_14_x86_64.whl", hash = "sha256:6d288029a94a9bb5407ceebdd7110ba398a00412c5b0155ee9813a40d246c5df"}, - {file = "pyarrow-12.0.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:345e1828efdbd9aa4d4de7d5676778aba384a2c3add896d995b23d368e60e5af"}, - {file = "pyarrow-12.0.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8d6009fdf8986332b2169314da482baed47ac053311c8934ac6651e614deacd6"}, - {file = "pyarrow-12.0.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2d3c4cbbf81e6dd23fe921bc91dc4619ea3b79bc58ef10bce0f49bdafb103daf"}, - {file = "pyarrow-12.0.1-cp310-cp310-win_amd64.whl", hash = "sha256:cdacf515ec276709ac8042c7d9bd5be83b4f5f39c6c037a17a60d7ebfd92c890"}, - {file = "pyarrow-12.0.1-cp311-cp311-macosx_10_14_x86_64.whl", hash = "sha256:749be7fd2ff260683f9cc739cb862fb11be376de965a2a8ccbf2693b098db6c7"}, - {file = "pyarrow-12.0.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:6895b5fb74289d055c43db3af0de6e16b07586c45763cb5e558d38b86a91e3a7"}, - {file = "pyarrow-12.0.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1887bdae17ec3b4c046fcf19951e71b6a619f39fa674f9881216173566c8f718"}, - {file = "pyarrow-12.0.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e2c9cb8eeabbadf5fcfc3d1ddea616c7ce893db2ce4dcef0ac13b099ad7ca082"}, - {file = "pyarrow-12.0.1-cp311-cp311-win_amd64.whl", hash = "sha256:ce4aebdf412bd0eeb800d8e47db854f9f9f7e2f5a0220440acf219ddfddd4f63"}, - {file = "pyarrow-12.0.1-cp37-cp37m-macosx_10_14_x86_64.whl", hash = "sha256:e0d8730c7f6e893f6db5d5b86eda42c0a130842d101992b581e2138e4d5663d3"}, - {file = "pyarrow-12.0.1-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:43364daec02f69fec89d2315f7fbfbeec956e0d991cbbef471681bd77875c40f"}, - {file = "pyarrow-12.0.1-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:051f9f5ccf585f12d7de836e50965b3c235542cc896959320d9776ab93f3b33d"}, - {file = "pyarrow-12.0.1-cp37-cp37m-win_amd64.whl", hash = "sha256:be2757e9275875d2a9c6e6052ac7957fbbfc7bc7370e4a036a9b893e96fedaba"}, - {file = "pyarrow-12.0.1-cp38-cp38-macosx_10_14_x86_64.whl", hash = 
"sha256:cf812306d66f40f69e684300f7af5111c11f6e0d89d6b733e05a3de44961529d"}, - {file = "pyarrow-12.0.1-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:459a1c0ed2d68671188b2118c63bac91eaef6fc150c77ddd8a583e3c795737bf"}, - {file = "pyarrow-12.0.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:85e705e33eaf666bbe508a16fd5ba27ca061e177916b7a317ba5a51bee43384c"}, - {file = "pyarrow-12.0.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9120c3eb2b1f6f516a3b7a9714ed860882d9ef98c4b17edcdc91d95b7528db60"}, - {file = "pyarrow-12.0.1-cp38-cp38-win_amd64.whl", hash = "sha256:c780f4dc40460015d80fcd6a6140de80b615349ed68ef9adb653fe351778c9b3"}, - {file = "pyarrow-12.0.1-cp39-cp39-macosx_10_14_x86_64.whl", hash = "sha256:a3c63124fc26bf5f95f508f5d04e1ece8cc23a8b0af2a1e6ab2b1ec3fdc91b24"}, - {file = "pyarrow-12.0.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:b13329f79fa4472324f8d32dc1b1216616d09bd1e77cfb13104dec5463632c36"}, - {file = "pyarrow-12.0.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:bb656150d3d12ec1396f6dde542db1675a95c0cc8366d507347b0beed96e87ca"}, - {file = "pyarrow-12.0.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6251e38470da97a5b2e00de5c6a049149f7b2bd62f12fa5dbb9ac674119ba71a"}, - {file = "pyarrow-12.0.1-cp39-cp39-win_amd64.whl", hash = "sha256:3de26da901216149ce086920547dfff5cd22818c9eab67ebc41e863a5883bac7"}, - {file = "pyarrow-12.0.1.tar.gz", hash = "sha256:cce317fc96e5b71107bf1f9f184d5e54e2bd14bbf3f9a3d62819961f0af86fec"}, -] - -[package.dependencies] -numpy = ">=1.16.6" - -[[package]] -name = "pydantic" -version = "2.3.0" -description = "Data validation using Python type hints" -optional = false -python-versions = ">=3.7" -files = [ - {file = "pydantic-2.3.0-py3-none-any.whl", hash = "sha256:45b5e446c6dfaad9444819a293b921a40e1db1aa61ea08aede0522529ce90e81"}, - {file = "pydantic-2.3.0.tar.gz", hash = "sha256:1607cc106602284cd4a00882986570472f193fde9cb1259bceeaedb26aa79a6d"}, -] - -[package.dependencies] -annotated-types = ">=0.4.0" -pydantic-core = "2.6.3" -typing-extensions = ">=4.6.1" - -[package.extras] -email = ["email-validator (>=2.0.0)"] - -[[package]] -name = "pydantic-core" -version = "2.6.3" -description = "" -optional = false -python-versions = ">=3.7" -files = [ - {file = "pydantic_core-2.6.3-cp310-cp310-macosx_10_7_x86_64.whl", hash = "sha256:1a0ddaa723c48af27d19f27f1c73bdc615c73686d763388c8683fe34ae777bad"}, - {file = "pydantic_core-2.6.3-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:5cfde4fab34dd1e3a3f7f3db38182ab6c95e4ea91cf322242ee0be5c2f7e3d2f"}, - {file = "pydantic_core-2.6.3-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5493a7027bfc6b108e17c3383959485087d5942e87eb62bbac69829eae9bc1f7"}, - {file = "pydantic_core-2.6.3-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:84e87c16f582f5c753b7f39a71bd6647255512191be2d2dbf49458c4ef024588"}, - {file = "pydantic_core-2.6.3-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:522a9c4a4d1924facce7270c84b5134c5cabcb01513213662a2e89cf28c1d309"}, - {file = "pydantic_core-2.6.3-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:aaafc776e5edc72b3cad1ccedb5fd869cc5c9a591f1213aa9eba31a781be9ac1"}, - {file = "pydantic_core-2.6.3-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3a750a83b2728299ca12e003d73d1264ad0440f60f4fc9cee54acc489249b728"}, - {file = 
"pydantic_core-2.6.3-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:9e8b374ef41ad5c461efb7a140ce4730661aadf85958b5c6a3e9cf4e040ff4bb"}, - {file = "pydantic_core-2.6.3-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:b594b64e8568cf09ee5c9501ede37066b9fc41d83d58f55b9952e32141256acd"}, - {file = "pydantic_core-2.6.3-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:2a20c533cb80466c1d42a43a4521669ccad7cf2967830ac62c2c2f9cece63e7e"}, - {file = "pydantic_core-2.6.3-cp310-none-win32.whl", hash = "sha256:04fe5c0a43dec39aedba0ec9579001061d4653a9b53a1366b113aca4a3c05ca7"}, - {file = "pydantic_core-2.6.3-cp310-none-win_amd64.whl", hash = "sha256:6bf7d610ac8f0065a286002a23bcce241ea8248c71988bda538edcc90e0c39ad"}, - {file = "pydantic_core-2.6.3-cp311-cp311-macosx_10_7_x86_64.whl", hash = "sha256:6bcc1ad776fffe25ea5c187a028991c031a00ff92d012ca1cc4714087e575973"}, - {file = "pydantic_core-2.6.3-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:df14f6332834444b4a37685810216cc8fe1fe91f447332cd56294c984ecbff1c"}, - {file = "pydantic_core-2.6.3-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a0b7486d85293f7f0bbc39b34e1d8aa26210b450bbd3d245ec3d732864009819"}, - {file = "pydantic_core-2.6.3-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:a892b5b1871b301ce20d40b037ffbe33d1407a39639c2b05356acfef5536d26a"}, - {file = "pydantic_core-2.6.3-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:883daa467865e5766931e07eb20f3e8152324f0adf52658f4d302242c12e2c32"}, - {file = "pydantic_core-2.6.3-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:d4eb77df2964b64ba190eee00b2312a1fd7a862af8918ec70fc2d6308f76ac64"}, - {file = "pydantic_core-2.6.3-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1ce8c84051fa292a5dc54018a40e2a1926fd17980a9422c973e3ebea017aa8da"}, - {file = "pydantic_core-2.6.3-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:22134a4453bd59b7d1e895c455fe277af9d9d9fbbcb9dc3f4a97b8693e7e2c9b"}, - {file = "pydantic_core-2.6.3-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:02e1c385095efbd997311d85c6021d32369675c09bcbfff3b69d84e59dc103f6"}, - {file = "pydantic_core-2.6.3-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:d79f1f2f7ebdb9b741296b69049ff44aedd95976bfee38eb4848820628a99b50"}, - {file = "pydantic_core-2.6.3-cp311-none-win32.whl", hash = "sha256:430ddd965ffd068dd70ef4e4d74f2c489c3a313adc28e829dd7262cc0d2dd1e8"}, - {file = "pydantic_core-2.6.3-cp311-none-win_amd64.whl", hash = "sha256:84f8bb34fe76c68c9d96b77c60cef093f5e660ef8e43a6cbfcd991017d375950"}, - {file = "pydantic_core-2.6.3-cp311-none-win_arm64.whl", hash = "sha256:5a2a3c9ef904dcdadb550eedf3291ec3f229431b0084666e2c2aa8ff99a103a2"}, - {file = "pydantic_core-2.6.3-cp312-cp312-macosx_10_7_x86_64.whl", hash = "sha256:8421cf496e746cf8d6b677502ed9a0d1e4e956586cd8b221e1312e0841c002d5"}, - {file = "pydantic_core-2.6.3-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:bb128c30cf1df0ab78166ded1ecf876620fb9aac84d2413e8ea1594b588c735d"}, - {file = "pydantic_core-2.6.3-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:37a822f630712817b6ecc09ccc378192ef5ff12e2c9bae97eb5968a6cdf3b862"}, - {file = "pydantic_core-2.6.3-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:240a015102a0c0cc8114f1cba6444499a8a4d0333e178bc504a5c2196defd456"}, - {file = 
"pydantic_core-2.6.3-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:3f90e5e3afb11268628c89f378f7a1ea3f2fe502a28af4192e30a6cdea1e7d5e"}, - {file = "pydantic_core-2.6.3-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:340e96c08de1069f3d022a85c2a8c63529fd88709468373b418f4cf2c949fb0e"}, - {file = "pydantic_core-2.6.3-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1480fa4682e8202b560dcdc9eeec1005f62a15742b813c88cdc01d44e85308e5"}, - {file = "pydantic_core-2.6.3-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:f14546403c2a1d11a130b537dda28f07eb6c1805a43dae4617448074fd49c282"}, - {file = "pydantic_core-2.6.3-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:a87c54e72aa2ef30189dc74427421e074ab4561cf2bf314589f6af5b37f45e6d"}, - {file = "pydantic_core-2.6.3-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:f93255b3e4d64785554e544c1c76cd32f4a354fa79e2eeca5d16ac2e7fdd57aa"}, - {file = "pydantic_core-2.6.3-cp312-none-win32.whl", hash = "sha256:f70dc00a91311a1aea124e5f64569ea44c011b58433981313202c46bccbec0e1"}, - {file = "pydantic_core-2.6.3-cp312-none-win_amd64.whl", hash = "sha256:23470a23614c701b37252618e7851e595060a96a23016f9a084f3f92f5ed5881"}, - {file = "pydantic_core-2.6.3-cp312-none-win_arm64.whl", hash = "sha256:1ac1750df1b4339b543531ce793b8fd5c16660a95d13aecaab26b44ce11775e9"}, - {file = "pydantic_core-2.6.3-cp37-cp37m-macosx_10_7_x86_64.whl", hash = "sha256:a53e3195f134bde03620d87a7e2b2f2046e0e5a8195e66d0f244d6d5b2f6d31b"}, - {file = "pydantic_core-2.6.3-cp37-cp37m-macosx_11_0_arm64.whl", hash = "sha256:f2969e8f72c6236c51f91fbb79c33821d12a811e2a94b7aa59c65f8dbdfad34a"}, - {file = "pydantic_core-2.6.3-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:672174480a85386dd2e681cadd7d951471ad0bb028ed744c895f11f9d51b9ebe"}, - {file = "pydantic_core-2.6.3-cp37-cp37m-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:002d0ea50e17ed982c2d65b480bd975fc41086a5a2f9c924ef8fc54419d1dea3"}, - {file = "pydantic_core-2.6.3-cp37-cp37m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:3ccc13afee44b9006a73d2046068d4df96dc5b333bf3509d9a06d1b42db6d8bf"}, - {file = "pydantic_core-2.6.3-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:439a0de139556745ae53f9cc9668c6c2053444af940d3ef3ecad95b079bc9987"}, - {file = "pydantic_core-2.6.3-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d63b7545d489422d417a0cae6f9898618669608750fc5e62156957e609e728a5"}, - {file = "pydantic_core-2.6.3-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:b44c42edc07a50a081672e25dfe6022554b47f91e793066a7b601ca290f71e42"}, - {file = "pydantic_core-2.6.3-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:1c721bfc575d57305dd922e6a40a8fe3f762905851d694245807a351ad255c58"}, - {file = "pydantic_core-2.6.3-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:5e4a2cf8c4543f37f5dc881de6c190de08096c53986381daebb56a355be5dfe6"}, - {file = "pydantic_core-2.6.3-cp37-none-win32.whl", hash = "sha256:d9b4916b21931b08096efed090327f8fe78e09ae8f5ad44e07f5c72a7eedb51b"}, - {file = "pydantic_core-2.6.3-cp37-none-win_amd64.whl", hash = "sha256:a8acc9dedd304da161eb071cc7ff1326aa5b66aadec9622b2574ad3ffe225525"}, - {file = "pydantic_core-2.6.3-cp38-cp38-macosx_10_7_x86_64.whl", hash = "sha256:5e9c068f36b9f396399d43bfb6defd4cc99c36215f6ff33ac8b9c14ba15bdf6b"}, - {file = "pydantic_core-2.6.3-cp38-cp38-macosx_11_0_arm64.whl", hash = 
"sha256:e61eae9b31799c32c5f9b7be906be3380e699e74b2db26c227c50a5fc7988698"}, - {file = "pydantic_core-2.6.3-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d85463560c67fc65cd86153a4975d0b720b6d7725cf7ee0b2d291288433fc21b"}, - {file = "pydantic_core-2.6.3-cp38-cp38-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:9616567800bdc83ce136e5847d41008a1d602213d024207b0ff6cab6753fe645"}, - {file = "pydantic_core-2.6.3-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:9e9b65a55bbabda7fccd3500192a79f6e474d8d36e78d1685496aad5f9dbd92c"}, - {file = "pydantic_core-2.6.3-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:f468d520f47807d1eb5d27648393519655eadc578d5dd862d06873cce04c4d1b"}, - {file = "pydantic_core-2.6.3-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9680dd23055dd874173a3a63a44e7f5a13885a4cfd7e84814be71be24fba83db"}, - {file = "pydantic_core-2.6.3-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:9a718d56c4d55efcfc63f680f207c9f19c8376e5a8a67773535e6f7e80e93170"}, - {file = "pydantic_core-2.6.3-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:8ecbac050856eb6c3046dea655b39216597e373aa8e50e134c0e202f9c47efec"}, - {file = "pydantic_core-2.6.3-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:788be9844a6e5c4612b74512a76b2153f1877cd845410d756841f6c3420230eb"}, - {file = "pydantic_core-2.6.3-cp38-none-win32.whl", hash = "sha256:07a1aec07333bf5adebd8264047d3dc518563d92aca6f2f5b36f505132399efc"}, - {file = "pydantic_core-2.6.3-cp38-none-win_amd64.whl", hash = "sha256:621afe25cc2b3c4ba05fff53525156d5100eb35c6e5a7cf31d66cc9e1963e378"}, - {file = "pydantic_core-2.6.3-cp39-cp39-macosx_10_7_x86_64.whl", hash = "sha256:813aab5bfb19c98ae370952b6f7190f1e28e565909bfc219a0909db168783465"}, - {file = "pydantic_core-2.6.3-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:50555ba3cb58f9861b7a48c493636b996a617db1a72c18da4d7f16d7b1b9952b"}, - {file = "pydantic_core-2.6.3-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:19e20f8baedd7d987bd3f8005c146e6bcbda7cdeefc36fad50c66adb2dd2da48"}, - {file = "pydantic_core-2.6.3-cp39-cp39-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:b0a5d7edb76c1c57b95df719af703e796fc8e796447a1da939f97bfa8a918d60"}, - {file = "pydantic_core-2.6.3-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:f06e21ad0b504658a3a9edd3d8530e8cea5723f6ea5d280e8db8efc625b47e49"}, - {file = "pydantic_core-2.6.3-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:ea053cefa008fda40f92aab937fb9f183cf8752e41dbc7bc68917884454c6362"}, - {file = "pydantic_core-2.6.3-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:171a4718860790f66d6c2eda1d95dd1edf64f864d2e9f9115840840cf5b5713f"}, - {file = "pydantic_core-2.6.3-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:5ed7ceca6aba5331ece96c0e328cd52f0dcf942b8895a1ed2642de50800b79d3"}, - {file = "pydantic_core-2.6.3-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:acafc4368b289a9f291e204d2c4c75908557d4f36bd3ae937914d4529bf62a76"}, - {file = "pydantic_core-2.6.3-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:1aa712ba150d5105814e53cb141412217146fedc22621e9acff9236d77d2a5ef"}, - {file = "pydantic_core-2.6.3-cp39-none-win32.whl", hash = "sha256:44b4f937b992394a2e81a5c5ce716f3dcc1237281e81b80c748b2da6dd5cf29a"}, - {file = "pydantic_core-2.6.3-cp39-none-win_amd64.whl", hash = 
"sha256:9b33bf9658cb29ac1a517c11e865112316d09687d767d7a0e4a63d5c640d1b17"}, - {file = "pydantic_core-2.6.3-pp310-pypy310_pp73-macosx_10_7_x86_64.whl", hash = "sha256:d7050899026e708fb185e174c63ebc2c4ee7a0c17b0a96ebc50e1f76a231c057"}, - {file = "pydantic_core-2.6.3-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:99faba727727b2e59129c59542284efebbddade4f0ae6a29c8b8d3e1f437beb7"}, - {file = "pydantic_core-2.6.3-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5fa159b902d22b283b680ef52b532b29554ea2a7fc39bf354064751369e9dbd7"}, - {file = "pydantic_core-2.6.3-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:046af9cfb5384f3684eeb3f58a48698ddab8dd870b4b3f67f825353a14441418"}, - {file = "pydantic_core-2.6.3-pp310-pypy310_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:930bfe73e665ebce3f0da2c6d64455098aaa67e1a00323c74dc752627879fc67"}, - {file = "pydantic_core-2.6.3-pp310-pypy310_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:85cc4d105747d2aa3c5cf3e37dac50141bff779545ba59a095f4a96b0a460e70"}, - {file = "pydantic_core-2.6.3-pp310-pypy310_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:b25afe9d5c4f60dcbbe2b277a79be114e2e65a16598db8abee2a2dcde24f162b"}, - {file = "pydantic_core-2.6.3-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:e49ce7dc9f925e1fb010fc3d555250139df61fa6e5a0a95ce356329602c11ea9"}, - {file = "pydantic_core-2.6.3-pp37-pypy37_pp73-macosx_10_7_x86_64.whl", hash = "sha256:2dd50d6a1aef0426a1d0199190c6c43ec89812b1f409e7fe44cb0fbf6dfa733c"}, - {file = "pydantic_core-2.6.3-pp37-pypy37_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c6595b0d8c8711e8e1dc389d52648b923b809f68ac1c6f0baa525c6440aa0daa"}, - {file = "pydantic_core-2.6.3-pp37-pypy37_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4ef724a059396751aef71e847178d66ad7fc3fc969a1a40c29f5aac1aa5f8784"}, - {file = "pydantic_core-2.6.3-pp37-pypy37_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:3c8945a105f1589ce8a693753b908815e0748f6279959a4530f6742e1994dcb6"}, - {file = "pydantic_core-2.6.3-pp37-pypy37_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:c8c6660089a25d45333cb9db56bb9e347241a6d7509838dbbd1931d0e19dbc7f"}, - {file = "pydantic_core-2.6.3-pp37-pypy37_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:692b4ff5c4e828a38716cfa92667661a39886e71136c97b7dac26edef18767f7"}, - {file = "pydantic_core-2.6.3-pp37-pypy37_pp73-win_amd64.whl", hash = "sha256:f1a5d8f18877474c80b7711d870db0eeef9442691fcdb00adabfc97e183ee0b0"}, - {file = "pydantic_core-2.6.3-pp38-pypy38_pp73-macosx_10_7_x86_64.whl", hash = "sha256:3796a6152c545339d3b1652183e786df648ecdf7c4f9347e1d30e6750907f5bb"}, - {file = "pydantic_core-2.6.3-pp38-pypy38_pp73-macosx_11_0_arm64.whl", hash = "sha256:b962700962f6e7a6bd77e5f37320cabac24b4c0f76afeac05e9f93cf0c620014"}, - {file = "pydantic_core-2.6.3-pp38-pypy38_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:56ea80269077003eaa59723bac1d8bacd2cd15ae30456f2890811efc1e3d4413"}, - {file = "pydantic_core-2.6.3-pp38-pypy38_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:75c0ebbebae71ed1e385f7dfd9b74c1cff09fed24a6df43d326dd7f12339ec34"}, - {file = "pydantic_core-2.6.3-pp38-pypy38_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:252851b38bad3bfda47b104ffd077d4f9604a10cb06fe09d020016a25107bf98"}, - {file = "pydantic_core-2.6.3-pp38-pypy38_pp73-musllinux_1_1_aarch64.whl", hash = 
"sha256:6656a0ae383d8cd7cc94e91de4e526407b3726049ce8d7939049cbfa426518c8"}, - {file = "pydantic_core-2.6.3-pp38-pypy38_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:d9140ded382a5b04a1c030b593ed9bf3088243a0a8b7fa9f071a5736498c5483"}, - {file = "pydantic_core-2.6.3-pp38-pypy38_pp73-win_amd64.whl", hash = "sha256:d38bbcef58220f9c81e42c255ef0bf99735d8f11edef69ab0b499da77105158a"}, - {file = "pydantic_core-2.6.3-pp39-pypy39_pp73-macosx_10_7_x86_64.whl", hash = "sha256:c9d469204abcca28926cbc28ce98f28e50e488767b084fb3fbdf21af11d3de26"}, - {file = "pydantic_core-2.6.3-pp39-pypy39_pp73-macosx_11_0_arm64.whl", hash = "sha256:48c1ed8b02ffea4d5c9c220eda27af02b8149fe58526359b3c07eb391cb353a2"}, - {file = "pydantic_core-2.6.3-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8b2b1bfed698fa410ab81982f681f5b1996d3d994ae8073286515ac4d165c2e7"}, - {file = "pydantic_core-2.6.3-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bf9d42a71a4d7a7c1f14f629e5c30eac451a6fc81827d2beefd57d014c006c4a"}, - {file = "pydantic_core-2.6.3-pp39-pypy39_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:4292ca56751aebbe63a84bbfc3b5717abb09b14d4b4442cc43fd7c49a1529efd"}, - {file = "pydantic_core-2.6.3-pp39-pypy39_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:7dc2ce039c7290b4ef64334ec7e6ca6494de6eecc81e21cb4f73b9b39991408c"}, - {file = "pydantic_core-2.6.3-pp39-pypy39_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:615a31b1629e12445c0e9fc8339b41aaa6cc60bd53bf802d5fe3d2c0cda2ae8d"}, - {file = "pydantic_core-2.6.3-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:1fa1f6312fb84e8c281f32b39affe81984ccd484da6e9d65b3d18c202c666149"}, - {file = "pydantic_core-2.6.3.tar.gz", hash = "sha256:1508f37ba9e3ddc0189e6ff4e2228bd2d3c3a4641cbe8c07177162f76ed696c7"}, -] - -[package.dependencies] -typing-extensions = ">=4.6.0,<4.7.0 || >4.7.0" - -[[package]] -name = "pylint" -version = "2.17.5" -description = "python code static checker" -optional = false -python-versions = ">=3.7.2" -files = [ - {file = "pylint-2.17.5-py3-none-any.whl", hash = "sha256:73995fb8216d3bed149c8d51bba25b2c52a8251a2c8ac846ec668ce38fab5413"}, - {file = "pylint-2.17.5.tar.gz", hash = "sha256:f7b601cbc06fef7e62a754e2b41294c2aa31f1cb659624b9a85bcba29eaf8252"}, -] - -[package.dependencies] -astroid = ">=2.15.6,<=2.17.0-dev0" -colorama = {version = ">=0.4.5", markers = "sys_platform == \"win32\""} -dill = [ - {version = ">=0.2", markers = "python_version < \"3.11\""}, - {version = ">=0.3.6", markers = "python_version >= \"3.11\""}, -] -isort = ">=4.2.5,<6" -mccabe = ">=0.6,<0.8" -platformdirs = ">=2.2.0" -tomli = {version = ">=1.1.0", markers = "python_version < \"3.11\""} -tomlkit = ">=0.10.1" -typing-extensions = {version = ">=3.10.0", markers = "python_version < \"3.10\""} - -[package.extras] -spelling = ["pyenchant (>=3.2,<4.0)"] -testutils = ["gitpython (>3)"] - -[[package]] -name = "pyparsing" -version = "3.0.9" -description = "pyparsing module - Classes and methods to define and execute parsing grammars" -optional = false -python-versions = ">=3.6.8" -files = [ - {file = "pyparsing-3.0.9-py3-none-any.whl", hash = "sha256:5026bae9a10eeaefb61dab2f09052b9f4307d44aee4eda64b309723d8d206bbc"}, - {file = "pyparsing-3.0.9.tar.gz", hash = "sha256:2b020ecf7d21b687f219b71ecad3631f644a47f01403fa1d1036b0c6416d70fb"}, -] - -[package.extras] -diagrams = ["jinja2", "railroad-diagrams"] - -[[package]] -name = "pytest" -version = "7.4.0" -description = "pytest: simple powerful testing with Python" -optional = false 
-python-versions = ">=3.7" -files = [ - {file = "pytest-7.4.0-py3-none-any.whl", hash = "sha256:78bf16451a2eb8c7a2ea98e32dc119fd2aa758f1d5d66dbf0a59d69a3969df32"}, - {file = "pytest-7.4.0.tar.gz", hash = "sha256:b4bf8c45bd59934ed84001ad51e11b4ee40d40a1229d2c79f9c592b0a3f6bd8a"}, -] - -[package.dependencies] -colorama = {version = "*", markers = "sys_platform == \"win32\""} -exceptiongroup = {version = ">=1.0.0rc8", markers = "python_version < \"3.11\""} -iniconfig = "*" -packaging = "*" -pluggy = ">=0.12,<2.0" -tomli = {version = ">=1.0.0", markers = "python_version < \"3.11\""} - -[package.extras] -testing = ["argcomplete", "attrs (>=19.2.0)", "hypothesis (>=3.56)", "mock", "nose", "pygments (>=2.7.2)", "requests", "setuptools", "xmlschema"] - -[[package]] -name = "pytest-benchmark" -version = "4.0.0" -description = "A ``pytest`` fixture for benchmarking code. It will group the tests into rounds that are calibrated to the chosen timer." -optional = false -python-versions = ">=3.7" -files = [ - {file = "pytest-benchmark-4.0.0.tar.gz", hash = "sha256:fb0785b83efe599a6a956361c0691ae1dbb5318018561af10f3e915caa0048d1"}, - {file = "pytest_benchmark-4.0.0-py3-none-any.whl", hash = "sha256:fdb7db64e31c8b277dff9850d2a2556d8b60bcb0ea6524e36e28ffd7c87f71d6"}, -] - -[package.dependencies] -py-cpuinfo = "*" -pytest = ">=3.8" - -[package.extras] -aspect = ["aspectlib"] -elasticsearch = ["elasticsearch"] -histogram = ["pygal", "pygaljs"] - -[[package]] -name = "pytest-cov" -version = "4.1.0" -description = "Pytest plugin for measuring coverage." -optional = false -python-versions = ">=3.7" -files = [ - {file = "pytest-cov-4.1.0.tar.gz", hash = "sha256:3904b13dfbfec47f003b8e77fd5b589cd11904a21ddf1ab38a64f204d6a10ef6"}, - {file = "pytest_cov-4.1.0-py3-none-any.whl", hash = "sha256:6ba70b9e97e69fcc3fb45bfeab2d0a138fb65c4d0d6a41ef33983ad114be8c3a"}, -] - -[package.dependencies] -coverage = {version = ">=5.2.1", extras = ["toml"]} -pytest = ">=4.6" - -[package.extras] -testing = ["fields", "hunter", "process-tests", "pytest-xdist", "six", "virtualenv"] - -[[package]] -name = "python-dateutil" -version = "2.8.2" -description = "Extensions to the standard Python datetime module" -optional = false -python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,>=2.7" -files = [ - {file = "python-dateutil-2.8.2.tar.gz", hash = "sha256:0123cacc1627ae19ddf3c27a5de5bd67ee4586fbdd6440d9748f8abb483d3e86"}, - {file = "python_dateutil-2.8.2-py2.py3-none-any.whl", hash = "sha256:961d03dc3453ebbc59dbdea9e4e11c5651520a876d0f4db161e8674aae935da9"}, -] - -[package.dependencies] -six = ">=1.5" - -[[package]] -name = "python-dotenv" -version = "1.0.0" -description = "Read key-value pairs from a .env file and set them as environment variables" -optional = false -python-versions = ">=3.8" -files = [ - {file = "python-dotenv-1.0.0.tar.gz", hash = "sha256:a8df96034aae6d2d50a4ebe8216326c61c3eb64836776504fcca410e5937a3ba"}, - {file = "python_dotenv-1.0.0-py3-none-any.whl", hash = "sha256:f5971a9226b701070a4bf2c38c89e5a3f0d64de8debda981d1db98583009122a"}, -] - -[package.extras] -cli = ["click (>=5.0)"] - -[[package]] -name = "python-multipart" -version = "0.0.6" -description = "A streaming multipart parser for Python" -optional = false -python-versions = ">=3.7" -files = [ - {file = "python_multipart-0.0.6-py3-none-any.whl", hash = "sha256:ee698bab5ef148b0a760751c261902cd096e57e10558e11aca17646b74ee1c18"}, - {file = "python_multipart-0.0.6.tar.gz", hash = "sha256:e9925a80bb668529f1b67c7fdb0a5dacdd7cbfc6fb0bff3ea443fe22bdd62132"}, -] - -[package.extras] 
-dev = ["atomicwrites (==1.2.1)", "attrs (==19.2.0)", "coverage (==6.5.0)", "hatch", "invoke (==1.7.3)", "more-itertools (==4.3.0)", "pbr (==4.3.0)", "pluggy (==1.0.0)", "py (==1.11.0)", "pytest (==7.2.0)", "pytest-cov (==4.0.0)", "pytest-timeout (==2.1.0)", "pyyaml (==5.1)"] - -[[package]] -name = "pytz" -version = "2023.3" -description = "World timezone definitions, modern and historical" -optional = false -python-versions = "*" -files = [ - {file = "pytz-2023.3-py2.py3-none-any.whl", hash = "sha256:a151b3abb88eda1d4e34a9814df37de2a80e301e68ba0fd856fb9b46bfbbbffb"}, - {file = "pytz-2023.3.tar.gz", hash = "sha256:1d8ce29db189191fb55338ee6d0387d82ab59f3d00eac103412d64e0ebd0c588"}, -] - -[[package]] -name = "pytzdata" -version = "2020.1" -description = "The Olson timezone database for Python." -optional = false -python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*" -files = [ - {file = "pytzdata-2020.1-py2.py3-none-any.whl", hash = "sha256:e1e14750bcf95016381e4d472bad004eef710f2d6417240904070b3d6654485f"}, - {file = "pytzdata-2020.1.tar.gz", hash = "sha256:3efa13b335a00a8de1d345ae41ec78dd11c9f8807f522d39850f2dd828681540"}, -] - -[[package]] -name = "pyyaml" -version = "6.0.1" -description = "YAML parser and emitter for Python" -optional = false -python-versions = ">=3.6" -files = [ - {file = "PyYAML-6.0.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:d858aa552c999bc8a8d57426ed01e40bef403cd8ccdd0fc5f6f04a00414cac2a"}, - {file = "PyYAML-6.0.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:fd66fc5d0da6d9815ba2cebeb4205f95818ff4b79c3ebe268e75d961704af52f"}, - {file = "PyYAML-6.0.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:69b023b2b4daa7548bcfbd4aa3da05b3a74b772db9e23b982788168117739938"}, - {file = "PyYAML-6.0.1-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:81e0b275a9ecc9c0c0c07b4b90ba548307583c125f54d5b6946cfee6360c733d"}, - {file = "PyYAML-6.0.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ba336e390cd8e4d1739f42dfe9bb83a3cc2e80f567d8805e11b46f4a943f5515"}, - {file = "PyYAML-6.0.1-cp310-cp310-win32.whl", hash = "sha256:bd4af7373a854424dabd882decdc5579653d7868b8fb26dc7d0e99f823aa5924"}, - {file = "PyYAML-6.0.1-cp310-cp310-win_amd64.whl", hash = "sha256:fd1592b3fdf65fff2ad0004b5e363300ef59ced41c2e6b3a99d4089fa8c5435d"}, - {file = "PyYAML-6.0.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:6965a7bc3cf88e5a1c3bd2e0b5c22f8d677dc88a455344035f03399034eb3007"}, - {file = "PyYAML-6.0.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:f003ed9ad21d6a4713f0a9b5a7a0a79e08dd0f221aff4525a2be4c346ee60aab"}, - {file = "PyYAML-6.0.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:42f8152b8dbc4fe7d96729ec2b99c7097d656dc1213a3229ca5383f973a5ed6d"}, - {file = "PyYAML-6.0.1-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:062582fca9fabdd2c8b54a3ef1c978d786e0f6b3a1510e0ac93ef59e0ddae2bc"}, - {file = "PyYAML-6.0.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d2b04aac4d386b172d5b9692e2d2da8de7bfb6c387fa4f801fbf6fb2e6ba4673"}, - {file = "PyYAML-6.0.1-cp311-cp311-win32.whl", hash = "sha256:1635fd110e8d85d55237ab316b5b011de701ea0f29d07611174a1b42f1444741"}, - {file = "PyYAML-6.0.1-cp311-cp311-win_amd64.whl", hash = "sha256:bf07ee2fef7014951eeb99f56f39c9bb4af143d8aa3c21b1677805985307da34"}, - {file = "PyYAML-6.0.1-cp36-cp36m-macosx_10_9_x86_64.whl", hash = 
"sha256:50550eb667afee136e9a77d6dc71ae76a44df8b3e51e41b77f6de2932bfe0f47"}, - {file = "PyYAML-6.0.1-cp36-cp36m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1fe35611261b29bd1de0070f0b2f47cb6ff71fa6595c077e42bd0c419fa27b98"}, - {file = "PyYAML-6.0.1-cp36-cp36m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:704219a11b772aea0d8ecd7058d0082713c3562b4e271b849ad7dc4a5c90c13c"}, - {file = "PyYAML-6.0.1-cp36-cp36m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:afd7e57eddb1a54f0f1a974bc4391af8bcce0b444685d936840f125cf046d5bd"}, - {file = "PyYAML-6.0.1-cp36-cp36m-win32.whl", hash = "sha256:fca0e3a251908a499833aa292323f32437106001d436eca0e6e7833256674585"}, - {file = "PyYAML-6.0.1-cp36-cp36m-win_amd64.whl", hash = "sha256:f22ac1c3cac4dbc50079e965eba2c1058622631e526bd9afd45fedd49ba781fa"}, - {file = "PyYAML-6.0.1-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:b1275ad35a5d18c62a7220633c913e1b42d44b46ee12554e5fd39c70a243d6a3"}, - {file = "PyYAML-6.0.1-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:18aeb1bf9a78867dc38b259769503436b7c72f7a1f1f4c93ff9a17de54319b27"}, - {file = "PyYAML-6.0.1-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:596106435fa6ad000c2991a98fa58eeb8656ef2325d7e158344fb33864ed87e3"}, - {file = "PyYAML-6.0.1-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:baa90d3f661d43131ca170712d903e6295d1f7a0f595074f151c0aed377c9b9c"}, - {file = "PyYAML-6.0.1-cp37-cp37m-win32.whl", hash = "sha256:9046c58c4395dff28dd494285c82ba00b546adfc7ef001486fbf0324bc174fba"}, - {file = "PyYAML-6.0.1-cp37-cp37m-win_amd64.whl", hash = "sha256:4fb147e7a67ef577a588a0e2c17b6db51dda102c71de36f8549b6816a96e1867"}, - {file = "PyYAML-6.0.1-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:1d4c7e777c441b20e32f52bd377e0c409713e8bb1386e1099c2415f26e479595"}, - {file = "PyYAML-6.0.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a0cd17c15d3bb3fa06978b4e8958dcdc6e0174ccea823003a106c7d4d7899ac5"}, - {file = "PyYAML-6.0.1-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:28c119d996beec18c05208a8bd78cbe4007878c6dd15091efb73a30e90539696"}, - {file = "PyYAML-6.0.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7e07cbde391ba96ab58e532ff4803f79c4129397514e1413a7dc761ccd755735"}, - {file = "PyYAML-6.0.1-cp38-cp38-win32.whl", hash = "sha256:184c5108a2aca3c5b3d3bf9395d50893a7ab82a38004c8f61c258d4428e80206"}, - {file = "PyYAML-6.0.1-cp38-cp38-win_amd64.whl", hash = "sha256:1e2722cc9fbb45d9b87631ac70924c11d3a401b2d7f410cc0e3bbf249f2dca62"}, - {file = "PyYAML-6.0.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:9eb6caa9a297fc2c2fb8862bc5370d0303ddba53ba97e71f08023b6cd73d16a8"}, - {file = "PyYAML-6.0.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:c8098ddcc2a85b61647b2590f825f3db38891662cfc2fc776415143f599bb859"}, - {file = "PyYAML-6.0.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5773183b6446b2c99bb77e77595dd486303b4faab2b086e7b17bc6bef28865f6"}, - {file = "PyYAML-6.0.1-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:b786eecbdf8499b9ca1d697215862083bd6d2a99965554781d0d8d1ad31e13a0"}, - {file = "PyYAML-6.0.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bc1bf2925a1ecd43da378f4db9e4f799775d6367bdb94671027b73b393a7c42c"}, - {file = "PyYAML-6.0.1-cp39-cp39-win32.whl", hash = 
"sha256:faca3bdcf85b2fc05d06ff3fbc1f83e1391b3e724afa3feba7d13eeab355484c"}, - {file = "PyYAML-6.0.1-cp39-cp39-win_amd64.whl", hash = "sha256:510c9deebc5c0225e8c96813043e62b680ba2f9c50a08d3724c7f28a747d1486"}, - {file = "PyYAML-6.0.1.tar.gz", hash = "sha256:bfdf460b1736c775f2ba9f6a92bca30bc2095067b8a9d77876d1fad6cc3b4a43"}, -] - -[[package]] -name = "requests" -version = "2.31.0" -description = "Python HTTP for Humans." -optional = false -python-versions = ">=3.7" -files = [ - {file = "requests-2.31.0-py3-none-any.whl", hash = "sha256:58cd2187c01e70e6e26505bca751777aa9f2ee0b7f4300988b709f44e013003f"}, - {file = "requests-2.31.0.tar.gz", hash = "sha256:942c5a758f98d790eaed1a29cb6eefc7ffb0d1cf7af05c3d2791656dbd6ad1e1"}, -] - -[package.dependencies] -certifi = ">=2017.4.17" -charset-normalizer = ">=2,<4" -idna = ">=2.5,<4" -urllib3 = ">=1.21.1,<3" - -[package.extras] -socks = ["PySocks (>=1.5.6,!=1.5.7)"] -use-chardet-on-py3 = ["chardet (>=3.0.2,<6)"] - -[[package]] -name = "scikit-learn" -version = "1.3.0" -description = "A set of python modules for machine learning and data mining" -optional = false -python-versions = ">=3.8" -files = [ - {file = "scikit-learn-1.3.0.tar.gz", hash = "sha256:8be549886f5eda46436b6e555b0e4873b4f10aa21c07df45c4bc1735afbccd7a"}, - {file = "scikit_learn-1.3.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:981287869e576d42c682cf7ca96af0c6ac544ed9316328fd0d9292795c742cf5"}, - {file = "scikit_learn-1.3.0-cp310-cp310-macosx_12_0_arm64.whl", hash = "sha256:436aaaae2c916ad16631142488e4c82f4296af2404f480e031d866863425d2a2"}, - {file = "scikit_learn-1.3.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c7e28d8fa47a0b30ae1bd7a079519dd852764e31708a7804da6cb6f8b36e3630"}, - {file = "scikit_learn-1.3.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ae80c08834a473d08a204d966982a62e11c976228d306a2648c575e3ead12111"}, - {file = "scikit_learn-1.3.0-cp310-cp310-win_amd64.whl", hash = "sha256:552fd1b6ee22900cf1780d7386a554bb96949e9a359999177cf30211e6b20df6"}, - {file = "scikit_learn-1.3.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:79970a6d759eb00a62266a31e2637d07d2d28446fca8079cf9afa7c07b0427f8"}, - {file = "scikit_learn-1.3.0-cp311-cp311-macosx_12_0_arm64.whl", hash = "sha256:850a00b559e636b23901aabbe79b73dc604b4e4248ba9e2d6e72f95063765603"}, - {file = "scikit_learn-1.3.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ee04835fb016e8062ee9fe9074aef9b82e430504e420bff51e3e5fffe72750ca"}, - {file = "scikit_learn-1.3.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9d953531f5d9f00c90c34fa3b7d7cfb43ecff4c605dac9e4255a20b114a27369"}, - {file = "scikit_learn-1.3.0-cp311-cp311-win_amd64.whl", hash = "sha256:151ac2bf65ccf363664a689b8beafc9e6aae36263db114b4ca06fbbbf827444a"}, - {file = "scikit_learn-1.3.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:6a885a9edc9c0a341cab27ec4f8a6c58b35f3d449c9d2503a6fd23e06bbd4f6a"}, - {file = "scikit_learn-1.3.0-cp38-cp38-macosx_12_0_arm64.whl", hash = "sha256:9877af9c6d1b15486e18a94101b742e9d0d2f343d35a634e337411ddb57783f3"}, - {file = "scikit_learn-1.3.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c470f53cea065ff3d588050955c492793bb50c19a92923490d18fcb637f6383a"}, - {file = "scikit_learn-1.3.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:fd6e2d7389542eae01077a1ee0318c4fec20c66c957f45c7aac0c6eb0fe3c612"}, - {file = "scikit_learn-1.3.0-cp38-cp38-win_amd64.whl", 
hash = "sha256:3a11936adbc379a6061ea32fa03338d4ca7248d86dd507c81e13af428a5bc1db"}, - {file = "scikit_learn-1.3.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:998d38fcec96584deee1e79cd127469b3ad6fefd1ea6c2dfc54e8db367eb396b"}, - {file = "scikit_learn-1.3.0-cp39-cp39-macosx_12_0_arm64.whl", hash = "sha256:ded35e810438a527e17623ac6deae3b360134345b7c598175ab7741720d7ffa7"}, - {file = "scikit_learn-1.3.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0e8102d5036e28d08ab47166b48c8d5e5810704daecf3a476a4282d562be9a28"}, - {file = "scikit_learn-1.3.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7617164951c422747e7c32be4afa15d75ad8044f42e7d70d3e2e0429a50e6718"}, - {file = "scikit_learn-1.3.0-cp39-cp39-win_amd64.whl", hash = "sha256:1d54fb9e6038284548072df22fd34777e434153f7ffac72c8596f2d6987110dd"}, -] - -[package.dependencies] -joblib = ">=1.1.1" -numpy = ">=1.17.3" -scipy = ">=1.5.0" -threadpoolctl = ">=2.0.0" - -[package.extras] -benchmark = ["matplotlib (>=3.1.3)", "memory-profiler (>=0.57.0)", "pandas (>=1.0.5)"] -docs = ["Pillow (>=7.1.2)", "matplotlib (>=3.1.3)", "memory-profiler (>=0.57.0)", "numpydoc (>=1.2.0)", "pandas (>=1.0.5)", "plotly (>=5.14.0)", "pooch (>=1.6.0)", "scikit-image (>=0.16.2)", "seaborn (>=0.9.0)", "sphinx (>=6.0.0)", "sphinx-copybutton (>=0.5.2)", "sphinx-gallery (>=0.10.1)", "sphinx-prompt (>=1.3.0)", "sphinxext-opengraph (>=0.4.2)"] -examples = ["matplotlib (>=3.1.3)", "pandas (>=1.0.5)", "plotly (>=5.14.0)", "pooch (>=1.6.0)", "scikit-image (>=0.16.2)", "seaborn (>=0.9.0)"] -tests = ["black (>=23.3.0)", "matplotlib (>=3.1.3)", "mypy (>=1.3)", "numpydoc (>=1.2.0)", "pandas (>=1.0.5)", "pooch (>=1.6.0)", "pyamg (>=4.0.0)", "pytest (>=7.1.2)", "pytest-cov (>=2.9.0)", "ruff (>=0.0.272)", "scikit-image (>=0.16.2)"] - -[[package]] -name = "scipy" -version = "1.11.2" -description = "Fundamental algorithms for scientific computing in Python" -optional = false -python-versions = "<3.13,>=3.9" -files = [ - {file = "scipy-1.11.2-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:2b997a5369e2d30c97995dcb29d638701f8000d04df01b8e947f206e5d0ac788"}, - {file = "scipy-1.11.2-cp310-cp310-macosx_12_0_arm64.whl", hash = "sha256:95763fbda1206bec41157582bea482f50eb3702c85fffcf6d24394b071c0e87a"}, - {file = "scipy-1.11.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e367904a0fec76433bf3fbf3e85bf60dae8e9e585ffd21898ab1085a29a04d16"}, - {file = "scipy-1.11.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d690e1ca993c8f7ede6d22e5637541217fc6a4d3f78b3672a6fe454dbb7eb9a7"}, - {file = "scipy-1.11.2-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:d2b813bfbe8dec6a75164523de650bad41f4405d35b0fa24c2c28ae07fcefb20"}, - {file = "scipy-1.11.2-cp310-cp310-win_amd64.whl", hash = "sha256:afdb0d983f6135d50770dd979df50bf1c7f58b5b33e0eb8cf5c73c70600eae1d"}, - {file = "scipy-1.11.2-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:8d9886f44ef8c9e776cb7527fb01455bf4f4a46c455c4682edc2c2cc8cd78562"}, - {file = "scipy-1.11.2-cp311-cp311-macosx_12_0_arm64.whl", hash = "sha256:1342ca385c673208f32472830c10110a9dcd053cf0c4b7d4cd7026d0335a6c1d"}, - {file = "scipy-1.11.2-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b133f237bd8ba73bad51bc12eb4f2d84cbec999753bf25ba58235e9fc2096d80"}, - {file = "scipy-1.11.2-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3aeb87661de987f8ec56fa6950863994cd427209158255a389fc5aea51fa7055"}, - {file = 
"scipy-1.11.2-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:90d3b1364e751d8214e325c371f0ee0dd38419268bf4888b2ae1040a6b266b2a"}, - {file = "scipy-1.11.2-cp311-cp311-win_amd64.whl", hash = "sha256:f73102f769ee06041a3aa26b5841359b1a93cc364ce45609657751795e8f4a4a"}, - {file = "scipy-1.11.2-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:fa4909c6c20c3d91480533cddbc0e7c6d849e7d9ded692918c76ce5964997898"}, - {file = "scipy-1.11.2-cp312-cp312-macosx_12_0_arm64.whl", hash = "sha256:ac74b1512d38718fb6a491c439aa7b3605b96b1ed3be6599c17d49d6c60fca18"}, - {file = "scipy-1.11.2-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b8425fa963a32936c9773ee3ce44a765d8ff67eed5f4ac81dc1e4a819a238ee9"}, - {file = "scipy-1.11.2-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:542a757e2a6ec409e71df3d8fd20127afbbacb1c07990cb23c5870c13953d899"}, - {file = "scipy-1.11.2-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:ea932570b1c2a30edafca922345854ff2cd20d43cd9123b6dacfdecebfc1a80b"}, - {file = "scipy-1.11.2-cp312-cp312-win_amd64.whl", hash = "sha256:4447ad057d7597476f9862ecbd9285bbf13ba9d73ce25acfa4e4b11c6801b4c9"}, - {file = "scipy-1.11.2-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:b0620240ef445b5ddde52460e6bc3483b7c9c750275369379e5f609a1050911c"}, - {file = "scipy-1.11.2-cp39-cp39-macosx_12_0_arm64.whl", hash = "sha256:f28f1f6cfeb48339c192efc6275749b2a25a7e49c4d8369a28b6591da02fbc9a"}, - {file = "scipy-1.11.2-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:214cdf04bbae7a54784f8431f976704ed607c4bc69ba0d5d5d6a9df84374df76"}, - {file = "scipy-1.11.2-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:10eb6af2f751aa3424762948e5352f707b0dece77288206f227864ddf675aca0"}, - {file = "scipy-1.11.2-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:0f3261f14b767b316d7137c66cc4f33a80ea05841b9c87ad83a726205b901423"}, - {file = "scipy-1.11.2-cp39-cp39-win_amd64.whl", hash = "sha256:2c91cf049ffb5575917f2a01da1da082fd24ed48120d08a6e7297dfcac771dcd"}, - {file = "scipy-1.11.2.tar.gz", hash = "sha256:b29318a5e39bd200ca4381d80b065cdf3076c7d7281c5e36569e99273867f61d"}, -] - -[package.dependencies] -numpy = ">=1.21.6,<1.28.0" - -[package.extras] -dev = ["click", "cython-lint (>=0.12.2)", "doit (>=0.36.0)", "mypy", "pycodestyle", "pydevtool", "rich-click", "ruff", "types-psutil", "typing_extensions"] -doc = ["jupytext", "matplotlib (>2)", "myst-nb", "numpydoc", "pooch", "pydata-sphinx-theme (==0.9.0)", "sphinx (!=4.1.0)", "sphinx-design (>=0.2.0)"] -test = ["asv", "gmpy2", "mpmath", "pooch", "pytest", "pytest-cov", "pytest-timeout", "pytest-xdist", "scikit-umfpack", "threadpoolctl"] - -[[package]] -name = "seaborn" -version = "0.12.2" -description = "Statistical data visualization" -optional = false -python-versions = ">=3.7" -files = [ - {file = "seaborn-0.12.2-py3-none-any.whl", hash = "sha256:ebf15355a4dba46037dfd65b7350f014ceb1f13c05e814eda2c9f5fd731afc08"}, - {file = "seaborn-0.12.2.tar.gz", hash = "sha256:374645f36509d0dcab895cba5b47daf0586f77bfe3b36c97c607db7da5be0139"}, -] - -[package.dependencies] -matplotlib = ">=3.1,<3.6.1 || >3.6.1" -numpy = ">=1.17,<1.24.0 || >1.24.0" -pandas = ">=0.25" - -[package.extras] -dev = ["flake8", "flit", "mypy", "pandas-stubs", "pre-commit", "pytest", "pytest-cov", "pytest-xdist"] -docs = ["ipykernel", "nbconvert", "numpydoc", "pydata_sphinx_theme (==0.10.0rc2)", "pyyaml", "sphinx-copybutton", "sphinx-design", "sphinx-issues"] -stats = ["scipy (>=1.3)", "statsmodels 
(>=0.10)"] - -[[package]] -name = "setuptools" -version = "67.8.0" -description = "Easily download, build, install, upgrade, and uninstall Python packages" -optional = false -python-versions = ">=3.7" -files = [ - {file = "setuptools-67.8.0-py3-none-any.whl", hash = "sha256:5df61bf30bb10c6f756eb19e7c9f3b473051f48db77fddbe06ff2ca307df9a6f"}, - {file = "setuptools-67.8.0.tar.gz", hash = "sha256:62642358adc77ffa87233bc4d2354c4b2682d214048f500964dbe760ccedf102"}, -] - -[package.extras] -docs = ["furo", "jaraco.packaging (>=9)", "jaraco.tidelift (>=1.4)", "pygments-github-lexers (==0.0.5)", "rst.linker (>=1.9)", "sphinx (>=3.5)", "sphinx-favicon", "sphinx-hoverxref (<2)", "sphinx-inline-tabs", "sphinx-lint", "sphinx-notfound-page (==0.8.3)", "sphinx-reredirects", "sphinxcontrib-towncrier"] -testing = ["build[virtualenv]", "filelock (>=3.4.0)", "flake8-2020", "ini2toml[lite] (>=0.9)", "jaraco.envs (>=2.2)", "jaraco.path (>=3.2.0)", "pip (>=19.1)", "pip-run (>=8.8)", "pytest (>=6)", "pytest-black (>=0.3.7)", "pytest-checkdocs (>=2.4)", "pytest-cov", "pytest-enabler (>=1.3)", "pytest-mypy (>=0.9.1)", "pytest-perf", "pytest-ruff", "pytest-timeout", "pytest-xdist", "tomli-w (>=1.0.0)", "virtualenv (>=13.0.0)", "wheel"] -testing-integration = ["build[virtualenv]", "filelock (>=3.4.0)", "jaraco.envs (>=2.2)", "jaraco.path (>=3.2.0)", "pytest", "pytest-enabler", "pytest-xdist", "tomli", "virtualenv (>=13.0.0)", "wheel"] - -[[package]] -name = "six" -version = "1.16.0" -description = "Python 2 and 3 compatibility utilities" -optional = false -python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*" -files = [ - {file = "six-1.16.0-py2.py3-none-any.whl", hash = "sha256:8abb2f1d86890a2dfb989f9a77cfcfd3e47c2a354b01111771326f8aa26e0254"}, - {file = "six-1.16.0.tar.gz", hash = "sha256:1e61c37477a1626458e36f7b1d82aa5c9b094fa4802892072e49de9c60c4c926"}, -] - -[[package]] -name = "snakeviz" -version = "2.2.0" -description = "A web-based viewer for Python profiler output" -optional = false -python-versions = ">=3.7" -files = [ - {file = "snakeviz-2.2.0-py2.py3-none-any.whl", hash = "sha256:569e2d71c47f80a886aa6e70d6405cb6d30aa3520969ad956b06f824c5f02b8e"}, - {file = "snakeviz-2.2.0.tar.gz", hash = "sha256:7bfd00be7ae147eb4a170a471578e1cd3f41f803238958b6b8efcf2c698a6aa9"}, -] - -[package.dependencies] -tornado = ">=2.0" - -[[package]] -name = "sqlalchemy" -version = "2.0.20" -description = "Database Abstraction Library" -optional = false -python-versions = ">=3.7" -files = [ - {file = "SQLAlchemy-2.0.20-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:759b51346aa388c2e606ee206c0bc6f15a5299f6174d1e10cadbe4530d3c7a98"}, - {file = "SQLAlchemy-2.0.20-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:1506e988ebeaaf316f183da601f24eedd7452e163010ea63dbe52dc91c7fc70e"}, - {file = "SQLAlchemy-2.0.20-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5768c268df78bacbde166b48be788b83dddaa2a5974b8810af422ddfe68a9bc8"}, - {file = "SQLAlchemy-2.0.20-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a3f0dd6d15b6dc8b28a838a5c48ced7455c3e1fb47b89da9c79cc2090b072a50"}, - {file = "SQLAlchemy-2.0.20-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:243d0fb261f80a26774829bc2cee71df3222587ac789b7eaf6555c5b15651eed"}, - {file = "SQLAlchemy-2.0.20-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:6eb6d77c31e1bf4268b4d61b549c341cbff9842f8e115ba6904249c20cb78a61"}, - {file = "SQLAlchemy-2.0.20-cp310-cp310-win32.whl", hash = 
"sha256:bcb04441f370cbe6e37c2b8d79e4af9e4789f626c595899d94abebe8b38f9a4d"}, - {file = "SQLAlchemy-2.0.20-cp310-cp310-win_amd64.whl", hash = "sha256:d32b5ffef6c5bcb452723a496bad2d4c52b346240c59b3e6dba279f6dcc06c14"}, - {file = "SQLAlchemy-2.0.20-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:dd81466bdbc82b060c3c110b2937ab65ace41dfa7b18681fdfad2f37f27acdd7"}, - {file = "SQLAlchemy-2.0.20-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:6fe7d61dc71119e21ddb0094ee994418c12f68c61b3d263ebaae50ea8399c4d4"}, - {file = "SQLAlchemy-2.0.20-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e4e571af672e1bb710b3cc1a9794b55bce1eae5aed41a608c0401885e3491179"}, - {file = "SQLAlchemy-2.0.20-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3364b7066b3c7f4437dd345d47271f1251e0cfb0aba67e785343cdbdb0fff08c"}, - {file = "SQLAlchemy-2.0.20-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:1be86ccea0c965a1e8cd6ccf6884b924c319fcc85765f16c69f1ae7148eba64b"}, - {file = "SQLAlchemy-2.0.20-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:1d35d49a972649b5080557c603110620a86aa11db350d7a7cb0f0a3f611948a0"}, - {file = "SQLAlchemy-2.0.20-cp311-cp311-win32.whl", hash = "sha256:27d554ef5d12501898d88d255c54eef8414576f34672e02fe96d75908993cf53"}, - {file = "SQLAlchemy-2.0.20-cp311-cp311-win_amd64.whl", hash = "sha256:411e7f140200c02c4b953b3dbd08351c9f9818d2bd591b56d0fa0716bd014f1e"}, - {file = "SQLAlchemy-2.0.20-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:3c6aceebbc47db04f2d779db03afeaa2c73ea3f8dcd3987eb9efdb987ffa09a3"}, - {file = "SQLAlchemy-2.0.20-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:7d3f175410a6db0ad96b10bfbb0a5530ecd4fcf1e2b5d83d968dd64791f810ed"}, - {file = "SQLAlchemy-2.0.20-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ea8186be85da6587456c9ddc7bf480ebad1a0e6dcbad3967c4821233a4d4df57"}, - {file = "SQLAlchemy-2.0.20-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:c3d99ba99007dab8233f635c32b5cd24fb1df8d64e17bc7df136cedbea427897"}, - {file = "SQLAlchemy-2.0.20-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:76fdfc0f6f5341987474ff48e7a66c3cd2b8a71ddda01fa82fedb180b961630a"}, - {file = "SQLAlchemy-2.0.20-cp37-cp37m-win32.whl", hash = "sha256:d3793dcf5bc4d74ae1e9db15121250c2da476e1af8e45a1d9a52b1513a393459"}, - {file = "SQLAlchemy-2.0.20-cp37-cp37m-win_amd64.whl", hash = "sha256:79fde625a0a55220d3624e64101ed68a059c1c1f126c74f08a42097a72ff66a9"}, - {file = "SQLAlchemy-2.0.20-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:599ccd23a7146e126be1c7632d1d47847fa9f333104d03325c4e15440fc7d927"}, - {file = "SQLAlchemy-2.0.20-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:1a58052b5a93425f656675673ef1f7e005a3b72e3f2c91b8acca1b27ccadf5f4"}, - {file = "SQLAlchemy-2.0.20-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:79543f945be7a5ada9943d555cf9b1531cfea49241809dd1183701f94a748624"}, - {file = "SQLAlchemy-2.0.20-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:63e73da7fb030ae0a46a9ffbeef7e892f5def4baf8064786d040d45c1d6d1dc5"}, - {file = "SQLAlchemy-2.0.20-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:3ce5e81b800a8afc870bb8e0a275d81957e16f8c4b62415a7b386f29a0cb9763"}, - {file = "SQLAlchemy-2.0.20-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:cb0d3e94c2a84215532d9bcf10229476ffd3b08f481c53754113b794afb62d14"}, - {file = "SQLAlchemy-2.0.20-cp38-cp38-win32.whl", hash = 
"sha256:8dd77fd6648b677d7742d2c3cc105a66e2681cc5e5fb247b88c7a7b78351cf74"}, - {file = "SQLAlchemy-2.0.20-cp38-cp38-win_amd64.whl", hash = "sha256:6f8a934f9dfdf762c844e5164046a9cea25fabbc9ec865c023fe7f300f11ca4a"}, - {file = "SQLAlchemy-2.0.20-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:26a3399eaf65e9ab2690c07bd5cf898b639e76903e0abad096cd609233ce5208"}, - {file = "SQLAlchemy-2.0.20-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:4cde2e1096cbb3e62002efdb7050113aa5f01718035ba9f29f9d89c3758e7e4e"}, - {file = "SQLAlchemy-2.0.20-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d1b09ba72e4e6d341bb5bdd3564f1cea6095d4c3632e45dc69375a1dbe4e26ec"}, - {file = "SQLAlchemy-2.0.20-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1b74eeafaa11372627ce94e4dc88a6751b2b4d263015b3523e2b1e57291102f0"}, - {file = "SQLAlchemy-2.0.20-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:77d37c1b4e64c926fa3de23e8244b964aab92963d0f74d98cbc0783a9e04f501"}, - {file = "SQLAlchemy-2.0.20-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:eefebcc5c555803065128401a1e224a64607259b5eb907021bf9b175f315d2a6"}, - {file = "SQLAlchemy-2.0.20-cp39-cp39-win32.whl", hash = "sha256:3423dc2a3b94125094897118b52bdf4d37daf142cbcf26d48af284b763ab90e9"}, - {file = "SQLAlchemy-2.0.20-cp39-cp39-win_amd64.whl", hash = "sha256:5ed61e3463021763b853628aef8bc5d469fe12d95f82c74ef605049d810f3267"}, - {file = "SQLAlchemy-2.0.20-py3-none-any.whl", hash = "sha256:63a368231c53c93e2b67d0c5556a9836fdcd383f7e3026a39602aad775b14acf"}, - {file = "SQLAlchemy-2.0.20.tar.gz", hash = "sha256:ca8a5ff2aa7f3ade6c498aaafce25b1eaeabe4e42b73e25519183e4566a16fc6"}, -] - -[package.dependencies] -greenlet = {version = "!=0.4.17", markers = "platform_machine == \"win32\" or platform_machine == \"WIN32\" or platform_machine == \"AMD64\" or platform_machine == \"amd64\" or platform_machine == \"x86_64\" or platform_machine == \"ppc64le\" or platform_machine == \"aarch64\""} -typing-extensions = ">=4.2.0" - -[package.extras] -aiomysql = ["aiomysql (>=0.2.0)", "greenlet (!=0.4.17)"] -aiosqlite = ["aiosqlite", "greenlet (!=0.4.17)", "typing-extensions (!=3.10.0.1)"] -asyncio = ["greenlet (!=0.4.17)"] -asyncmy = ["asyncmy (>=0.2.3,!=0.2.4,!=0.2.6)", "greenlet (!=0.4.17)"] -mariadb-connector = ["mariadb (>=1.0.1,!=1.1.2,!=1.1.5)"] -mssql = ["pyodbc"] -mssql-pymssql = ["pymssql"] -mssql-pyodbc = ["pyodbc"] -mypy = ["mypy (>=0.910)"] -mysql = ["mysqlclient (>=1.4.0)"] -mysql-connector = ["mysql-connector-python"] -oracle = ["cx-oracle (>=7)"] -oracle-oracledb = ["oracledb (>=1.0.1)"] -postgresql = ["psycopg2 (>=2.7)"] -postgresql-asyncpg = ["asyncpg", "greenlet (!=0.4.17)"] -postgresql-pg8000 = ["pg8000 (>=1.29.1)"] -postgresql-psycopg = ["psycopg (>=3.0.7)"] -postgresql-psycopg2binary = ["psycopg2-binary"] -postgresql-psycopg2cffi = ["psycopg2cffi"] -postgresql-psycopgbinary = ["psycopg[binary] (>=3.0.7)"] -pymysql = ["pymysql"] -sqlcipher = ["sqlcipher3-binary"] - -[[package]] -name = "statistics" -version = "1.0.3.5" -description = "A Python 2.* port of 3.4 Statistics Module" -optional = false -python-versions = "*" -files = [ - {file = "statistics-1.0.3.5.tar.gz", hash = "sha256:2dc379b80b07bf2ddd5488cad06b2b9531da4dd31edb04dc9ec0dc226486c138"}, -] - -[package.dependencies] -docutils = ">=0.3" - -[[package]] -name = "threadpoolctl" -version = "3.2.0" -description = "threadpoolctl" -optional = false -python-versions = ">=3.8" -files = [ - {file = "threadpoolctl-3.2.0-py3-none-any.whl", hash = 
"sha256:2b7818516e423bdaebb97c723f86a7c6b0a83d3f3b0970328d66f4d9104dc032"}, - {file = "threadpoolctl-3.2.0.tar.gz", hash = "sha256:c96a0ba3bdddeaca37dc4cc7344aafad41cdb8c313f74fdfe387a867bba93355"}, -] - -[[package]] -name = "tomli" -version = "2.0.1" -description = "A lil' TOML parser" -optional = false -python-versions = ">=3.7" -files = [ - {file = "tomli-2.0.1-py3-none-any.whl", hash = "sha256:939de3e7a6161af0c887ef91b7d41a53e7c5a1ca976325f429cb46ea9bc30ecc"}, - {file = "tomli-2.0.1.tar.gz", hash = "sha256:de526c12914f0c550d15924c62d72abc48d6fe7364aa87328337a31007fe8a4f"}, -] - -[[package]] -name = "tomlkit" -version = "0.12.1" -description = "Style preserving TOML library" -optional = false -python-versions = ">=3.7" -files = [ - {file = "tomlkit-0.12.1-py3-none-any.whl", hash = "sha256:712cbd236609acc6a3e2e97253dfc52d4c2082982a88f61b640ecf0817eab899"}, - {file = "tomlkit-0.12.1.tar.gz", hash = "sha256:38e1ff8edb991273ec9f6181244a6a391ac30e9f5098e7535640ea6be97a7c86"}, -] - -[[package]] -name = "tornado" -version = "6.3.3" -description = "Tornado is a Python web framework and asynchronous networking library, originally developed at FriendFeed." -optional = false -python-versions = ">= 3.8" -files = [ - {file = "tornado-6.3.3-cp38-abi3-macosx_10_9_universal2.whl", hash = "sha256:502fba735c84450974fec147340016ad928d29f1e91f49be168c0a4c18181e1d"}, - {file = "tornado-6.3.3-cp38-abi3-macosx_10_9_x86_64.whl", hash = "sha256:805d507b1f588320c26f7f097108eb4023bbaa984d63176d1652e184ba24270a"}, - {file = "tornado-6.3.3-cp38-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1bd19ca6c16882e4d37368e0152f99c099bad93e0950ce55e71daed74045908f"}, - {file = "tornado-6.3.3-cp38-abi3-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:7ac51f42808cca9b3613f51ffe2a965c8525cb1b00b7b2d56828b8045354f76a"}, - {file = "tornado-6.3.3-cp38-abi3-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:71a8db65160a3c55d61839b7302a9a400074c9c753040455494e2af74e2501f2"}, - {file = "tornado-6.3.3-cp38-abi3-musllinux_1_1_aarch64.whl", hash = "sha256:ceb917a50cd35882b57600709dd5421a418c29ddc852da8bcdab1f0db33406b0"}, - {file = "tornado-6.3.3-cp38-abi3-musllinux_1_1_i686.whl", hash = "sha256:7d01abc57ea0dbb51ddfed477dfe22719d376119844e33c661d873bf9c0e4a16"}, - {file = "tornado-6.3.3-cp38-abi3-musllinux_1_1_x86_64.whl", hash = "sha256:9dc4444c0defcd3929d5c1eb5706cbe1b116e762ff3e0deca8b715d14bf6ec17"}, - {file = "tornado-6.3.3-cp38-abi3-win32.whl", hash = "sha256:65ceca9500383fbdf33a98c0087cb975b2ef3bfb874cb35b8de8740cf7f41bd3"}, - {file = "tornado-6.3.3-cp38-abi3-win_amd64.whl", hash = "sha256:22d3c2fa10b5793da13c807e6fc38ff49a4f6e1e3868b0a6f4164768bb8e20f5"}, - {file = "tornado-6.3.3.tar.gz", hash = "sha256:e7d8db41c0181c80d76c982aacc442c0783a2c54d6400fe028954201a2e032fe"}, -] - -[[package]] -name = "tqdm" -version = "4.66.1" -description = "Fast, Extensible Progress Meter" -optional = false -python-versions = ">=3.7" -files = [ - {file = "tqdm-4.66.1-py3-none-any.whl", hash = "sha256:d302b3c5b53d47bce91fea46679d9c3c6508cf6332229aa1e7d8653723793386"}, - {file = "tqdm-4.66.1.tar.gz", hash = "sha256:d88e651f9db8d8551a62556d3cff9e3034274ca5d66e93197cf2490e2dcb69c7"}, -] - -[package.dependencies] -colorama = {version = "*", markers = "platform_system == \"Windows\""} - -[package.extras] -dev = ["pytest (>=6)", "pytest-cov", "pytest-timeout", "pytest-xdist"] -notebook = ["ipywidgets (>=6)"] -slack = ["slack-sdk"] 
-telegram = ["requests"] - -[[package]] -name = "typing-extensions" -version = "4.7.1" -description = "Backported and Experimental Type Hints for Python 3.7+" -optional = false -python-versions = ">=3.7" -files = [ - {file = "typing_extensions-4.7.1-py3-none-any.whl", hash = "sha256:440d5dd3af93b060174bf433bccd69b0babc3b15b1a8dca43789fd7f61514b36"}, - {file = "typing_extensions-4.7.1.tar.gz", hash = "sha256:b75ddc264f0ba5615db7ba217daeb99701ad295353c45f9e95963337ceeeffb2"}, -] - -[[package]] -name = "tzdata" -version = "2023.3" -description = "Provider of IANA time zone data" -optional = false -python-versions = ">=2" -files = [ - {file = "tzdata-2023.3-py2.py3-none-any.whl", hash = "sha256:7e65763eef3120314099b6939b5546db7adce1e7d6f2e179e3df563c70511eda"}, - {file = "tzdata-2023.3.tar.gz", hash = "sha256:11ef1e08e54acb0d4f95bdb1be05da659673de4acbd21bf9c69e94cc5e907a3a"}, -] - -[[package]] -name = "urllib3" -version = "2.0.4" -description = "HTTP library with thread-safe connection pooling, file post, and more." -optional = false -python-versions = ">=3.7" -files = [ - {file = "urllib3-2.0.4-py3-none-any.whl", hash = "sha256:de7df1803967d2c2a98e4b11bb7d6bd9210474c46e8a0401514e3a42a75ebde4"}, - {file = "urllib3-2.0.4.tar.gz", hash = "sha256:8d22f86aae8ef5e410d4f539fde9ce6b2113a001bb4d189e0aed70642d602b11"}, -] - -[package.extras] -brotli = ["brotli (>=1.0.9)", "brotlicffi (>=0.8.0)"] -secure = ["certifi", "cryptography (>=1.9)", "idna (>=2.0.0)", "pyopenssl (>=17.1.0)", "urllib3-secure-extra"] -socks = ["pysocks (>=1.5.6,!=1.5.7,<2.0)"] -zstd = ["zstandard (>=0.18.0)"] - -[[package]] -name = "wittgenstein" -version = "0.3.4" -description = "Ruleset covering algorithms for explainable machine learning" -optional = false -python-versions = "*" -files = [ - {file = "wittgenstein-0.3.4-py3-none-any.whl", hash = "sha256:99c771d1917ecbd967b7be5bd008789f95c378207456c5a3a99d3a069f69011e"}, - {file = "wittgenstein-0.3.4.tar.gz", hash = "sha256:ba1715b74c97ed260abf6df850f3d1739b2d4ba45b9306883e65c8f83c683bcc"}, -] - -[package.dependencies] -numpy = "*" -pandas = "*" - -[[package]] -name = "wrapt" -version = "1.15.0" -description = "Module for decorators, wrappers and monkey patching." 
-optional = false -python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*,>=2.7" -files = [ - {file = "wrapt-1.15.0-cp27-cp27m-macosx_10_9_x86_64.whl", hash = "sha256:ca1cccf838cd28d5a0883b342474c630ac48cac5df0ee6eacc9c7290f76b11c1"}, - {file = "wrapt-1.15.0-cp27-cp27m-manylinux1_i686.whl", hash = "sha256:e826aadda3cae59295b95343db8f3d965fb31059da7de01ee8d1c40a60398b29"}, - {file = "wrapt-1.15.0-cp27-cp27m-manylinux1_x86_64.whl", hash = "sha256:5fc8e02f5984a55d2c653f5fea93531e9836abbd84342c1d1e17abc4a15084c2"}, - {file = "wrapt-1.15.0-cp27-cp27m-manylinux2010_i686.whl", hash = "sha256:96e25c8603a155559231c19c0349245eeb4ac0096fe3c1d0be5c47e075bd4f46"}, - {file = "wrapt-1.15.0-cp27-cp27m-manylinux2010_x86_64.whl", hash = "sha256:40737a081d7497efea35ab9304b829b857f21558acfc7b3272f908d33b0d9d4c"}, - {file = "wrapt-1.15.0-cp27-cp27mu-manylinux1_i686.whl", hash = "sha256:f87ec75864c37c4c6cb908d282e1969e79763e0d9becdfe9fe5473b7bb1e5f09"}, - {file = "wrapt-1.15.0-cp27-cp27mu-manylinux1_x86_64.whl", hash = "sha256:1286eb30261894e4c70d124d44b7fd07825340869945c79d05bda53a40caa079"}, - {file = "wrapt-1.15.0-cp27-cp27mu-manylinux2010_i686.whl", hash = "sha256:493d389a2b63c88ad56cdc35d0fa5752daac56ca755805b1b0c530f785767d5e"}, - {file = "wrapt-1.15.0-cp27-cp27mu-manylinux2010_x86_64.whl", hash = "sha256:58d7a75d731e8c63614222bcb21dd992b4ab01a399f1f09dd82af17bbfc2368a"}, - {file = "wrapt-1.15.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:21f6d9a0d5b3a207cdf7acf8e58d7d13d463e639f0c7e01d82cdb671e6cb7923"}, - {file = "wrapt-1.15.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:ce42618f67741d4697684e501ef02f29e758a123aa2d669e2d964ff734ee00ee"}, - {file = "wrapt-1.15.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:41d07d029dd4157ae27beab04d22b8e261eddfc6ecd64ff7000b10dc8b3a5727"}, - {file = "wrapt-1.15.0-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:54accd4b8bc202966bafafd16e69da9d5640ff92389d33d28555c5fd4f25ccb7"}, - {file = "wrapt-1.15.0-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2fbfbca668dd15b744418265a9607baa970c347eefd0db6a518aaf0cfbd153c0"}, - {file = "wrapt-1.15.0-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:76e9c727a874b4856d11a32fb0b389afc61ce8aaf281ada613713ddeadd1cfec"}, - {file = "wrapt-1.15.0-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:e20076a211cd6f9b44a6be58f7eeafa7ab5720eb796975d0c03f05b47d89eb90"}, - {file = "wrapt-1.15.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:a74d56552ddbde46c246b5b89199cb3fd182f9c346c784e1a93e4dc3f5ec9975"}, - {file = "wrapt-1.15.0-cp310-cp310-win32.whl", hash = "sha256:26458da5653aa5b3d8dc8b24192f574a58984c749401f98fff994d41d3f08da1"}, - {file = "wrapt-1.15.0-cp310-cp310-win_amd64.whl", hash = "sha256:75760a47c06b5974aa5e01949bf7e66d2af4d08cb8c1d6516af5e39595397f5e"}, - {file = "wrapt-1.15.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:ba1711cda2d30634a7e452fc79eabcadaffedf241ff206db2ee93dd2c89a60e7"}, - {file = "wrapt-1.15.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:56374914b132c702aa9aa9959c550004b8847148f95e1b824772d453ac204a72"}, - {file = "wrapt-1.15.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a89ce3fd220ff144bd9d54da333ec0de0399b52c9ac3d2ce34b569cf1a5748fb"}, - {file = "wrapt-1.15.0-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = 
"sha256:3bbe623731d03b186b3d6b0d6f51865bf598587c38d6f7b0be2e27414f7f214e"}, - {file = "wrapt-1.15.0-cp311-cp311-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3abbe948c3cbde2689370a262a8d04e32ec2dd4f27103669a45c6929bcdbfe7c"}, - {file = "wrapt-1.15.0-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:b67b819628e3b748fd3c2192c15fb951f549d0f47c0449af0764d7647302fda3"}, - {file = "wrapt-1.15.0-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:7eebcdbe3677e58dd4c0e03b4f2cfa346ed4049687d839adad68cc38bb559c92"}, - {file = "wrapt-1.15.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:74934ebd71950e3db69960a7da29204f89624dde411afbfb3b4858c1409b1e98"}, - {file = "wrapt-1.15.0-cp311-cp311-win32.whl", hash = "sha256:bd84395aab8e4d36263cd1b9308cd504f6cf713b7d6d3ce25ea55670baec5416"}, - {file = "wrapt-1.15.0-cp311-cp311-win_amd64.whl", hash = "sha256:a487f72a25904e2b4bbc0817ce7a8de94363bd7e79890510174da9d901c38705"}, - {file = "wrapt-1.15.0-cp35-cp35m-manylinux1_i686.whl", hash = "sha256:4ff0d20f2e670800d3ed2b220d40984162089a6e2c9646fdb09b85e6f9a8fc29"}, - {file = "wrapt-1.15.0-cp35-cp35m-manylinux1_x86_64.whl", hash = "sha256:9ed6aa0726b9b60911f4aed8ec5b8dd7bf3491476015819f56473ffaef8959bd"}, - {file = "wrapt-1.15.0-cp35-cp35m-manylinux2010_i686.whl", hash = "sha256:896689fddba4f23ef7c718279e42f8834041a21342d95e56922e1c10c0cc7afb"}, - {file = "wrapt-1.15.0-cp35-cp35m-manylinux2010_x86_64.whl", hash = "sha256:75669d77bb2c071333417617a235324a1618dba66f82a750362eccbe5b61d248"}, - {file = "wrapt-1.15.0-cp35-cp35m-win32.whl", hash = "sha256:fbec11614dba0424ca72f4e8ba3c420dba07b4a7c206c8c8e4e73f2e98f4c559"}, - {file = "wrapt-1.15.0-cp35-cp35m-win_amd64.whl", hash = "sha256:fd69666217b62fa5d7c6aa88e507493a34dec4fa20c5bd925e4bc12fce586639"}, - {file = "wrapt-1.15.0-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:b0724f05c396b0a4c36a3226c31648385deb6a65d8992644c12a4963c70326ba"}, - {file = "wrapt-1.15.0-cp36-cp36m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:bbeccb1aa40ab88cd29e6c7d8585582c99548f55f9b2581dfc5ba68c59a85752"}, - {file = "wrapt-1.15.0-cp36-cp36m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:38adf7198f8f154502883242f9fe7333ab05a5b02de7d83aa2d88ea621f13364"}, - {file = "wrapt-1.15.0-cp36-cp36m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:578383d740457fa790fdf85e6d346fda1416a40549fe8db08e5e9bd281c6a475"}, - {file = "wrapt-1.15.0-cp36-cp36m-musllinux_1_1_aarch64.whl", hash = "sha256:a4cbb9ff5795cd66f0066bdf5947f170f5d63a9274f99bdbca02fd973adcf2a8"}, - {file = "wrapt-1.15.0-cp36-cp36m-musllinux_1_1_i686.whl", hash = "sha256:af5bd9ccb188f6a5fdda9f1f09d9f4c86cc8a539bd48a0bfdc97723970348418"}, - {file = "wrapt-1.15.0-cp36-cp36m-musllinux_1_1_x86_64.whl", hash = "sha256:b56d5519e470d3f2fe4aa7585f0632b060d532d0696c5bdfb5e8319e1d0f69a2"}, - {file = "wrapt-1.15.0-cp36-cp36m-win32.whl", hash = "sha256:77d4c1b881076c3ba173484dfa53d3582c1c8ff1f914c6461ab70c8428b796c1"}, - {file = "wrapt-1.15.0-cp36-cp36m-win_amd64.whl", hash = "sha256:077ff0d1f9d9e4ce6476c1a924a3332452c1406e59d90a2cf24aeb29eeac9420"}, - {file = "wrapt-1.15.0-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:5c5aa28df055697d7c37d2099a7bc09f559d5053c3349b1ad0c39000e611d317"}, - {file = "wrapt-1.15.0-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3a8564f283394634a7a7054b7983e47dbf39c07712d7b177b37e03f2467a024e"}, - {file 
= "wrapt-1.15.0-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:780c82a41dc493b62fc5884fb1d3a3b81106642c5c5c78d6a0d4cbe96d62ba7e"}, - {file = "wrapt-1.15.0-cp37-cp37m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e169e957c33576f47e21864cf3fc9ff47c223a4ebca8960079b8bd36cb014fd0"}, - {file = "wrapt-1.15.0-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:b02f21c1e2074943312d03d243ac4388319f2456576b2c6023041c4d57cd7019"}, - {file = "wrapt-1.15.0-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:f2e69b3ed24544b0d3dbe2c5c0ba5153ce50dcebb576fdc4696d52aa22db6034"}, - {file = "wrapt-1.15.0-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:d787272ed958a05b2c86311d3a4135d3c2aeea4fc655705f074130aa57d71653"}, - {file = "wrapt-1.15.0-cp37-cp37m-win32.whl", hash = "sha256:02fce1852f755f44f95af51f69d22e45080102e9d00258053b79367d07af39c0"}, - {file = "wrapt-1.15.0-cp37-cp37m-win_amd64.whl", hash = "sha256:abd52a09d03adf9c763d706df707c343293d5d106aea53483e0ec8d9e310ad5e"}, - {file = "wrapt-1.15.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:cdb4f085756c96a3af04e6eca7f08b1345e94b53af8921b25c72f096e704e145"}, - {file = "wrapt-1.15.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:230ae493696a371f1dbffaad3dafbb742a4d27a0afd2b1aecebe52b740167e7f"}, - {file = "wrapt-1.15.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:63424c681923b9f3bfbc5e3205aafe790904053d42ddcc08542181a30a7a51bd"}, - {file = "wrapt-1.15.0-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d6bcbfc99f55655c3d93feb7ef3800bd5bbe963a755687cbf1f490a71fb7794b"}, - {file = "wrapt-1.15.0-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c99f4309f5145b93eca6e35ac1a988f0dc0a7ccf9ccdcd78d3c0adf57224e62f"}, - {file = "wrapt-1.15.0-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:b130fe77361d6771ecf5a219d8e0817d61b236b7d8b37cc045172e574ed219e6"}, - {file = "wrapt-1.15.0-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:96177eb5645b1c6985f5c11d03fc2dbda9ad24ec0f3a46dcce91445747e15094"}, - {file = "wrapt-1.15.0-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:d5fe3e099cf07d0fb5a1e23d399e5d4d1ca3e6dfcbe5c8570ccff3e9208274f7"}, - {file = "wrapt-1.15.0-cp38-cp38-win32.whl", hash = "sha256:abd8f36c99512755b8456047b7be10372fca271bf1467a1caa88db991e7c421b"}, - {file = "wrapt-1.15.0-cp38-cp38-win_amd64.whl", hash = "sha256:b06fa97478a5f478fb05e1980980a7cdf2712015493b44d0c87606c1513ed5b1"}, - {file = "wrapt-1.15.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:2e51de54d4fb8fb50d6ee8327f9828306a959ae394d3e01a1ba8b2f937747d86"}, - {file = "wrapt-1.15.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:0970ddb69bba00670e58955f8019bec4a42d1785db3faa043c33d81de2bf843c"}, - {file = "wrapt-1.15.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:76407ab327158c510f44ded207e2f76b657303e17cb7a572ffe2f5a8a48aa04d"}, - {file = "wrapt-1.15.0-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:cd525e0e52a5ff16653a3fc9e3dd827981917d34996600bbc34c05d048ca35cc"}, - {file = "wrapt-1.15.0-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9d37ac69edc5614b90516807de32d08cb8e7b12260a285ee330955604ed9dd29"}, - {file = "wrapt-1.15.0-cp39-cp39-musllinux_1_1_aarch64.whl", 
hash = "sha256:078e2a1a86544e644a68422f881c48b84fef6d18f8c7a957ffd3f2e0a74a0d4a"}, - {file = "wrapt-1.15.0-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:2cf56d0e237280baed46f0b5316661da892565ff58309d4d2ed7dba763d984b8"}, - {file = "wrapt-1.15.0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:7dc0713bf81287a00516ef43137273b23ee414fe41a3c14be10dd95ed98a2df9"}, - {file = "wrapt-1.15.0-cp39-cp39-win32.whl", hash = "sha256:46ed616d5fb42f98630ed70c3529541408166c22cdfd4540b88d5f21006b0eff"}, - {file = "wrapt-1.15.0-cp39-cp39-win_amd64.whl", hash = "sha256:eef4d64c650f33347c1f9266fa5ae001440b232ad9b98f1f43dfe7a79435c0a6"}, - {file = "wrapt-1.15.0-py3-none-any.whl", hash = "sha256:64b1df0f83706b4ef4cfb4fb0e4c2669100fd7ecacfb59e091fad300d4e04640"}, - {file = "wrapt-1.15.0.tar.gz", hash = "sha256:d06730c6aed78cee4126234cf2d071e01b44b915e725a6cb439a879ec9754a3a"}, -] - -[[package]] -name = "xmltodict" -version = "0.13.0" -description = "Makes working with XML feel like you are working with JSON" -optional = false -python-versions = ">=3.4" -files = [ - {file = "xmltodict-0.13.0-py2.py3-none-any.whl", hash = "sha256:aa89e8fd76320154a40d19a0df04a4695fb9dc5ba977cbb68ab3e4eb225e7852"}, - {file = "xmltodict-0.13.0.tar.gz", hash = "sha256:341595a488e3e01a85a9d8911d8912fd922ede5fecc4dce437eb4b6c8d037e56"}, -] - -[[package]] -name = "zipp" -version = "3.16.2" -description = "Backport of pathlib-compatible object wrapper for zip files" -optional = false -python-versions = ">=3.8" -files = [ - {file = "zipp-3.16.2-py3-none-any.whl", hash = "sha256:679e51dd4403591b2d6838a48de3d283f3d188412a9782faadf845f298736ba0"}, - {file = "zipp-3.16.2.tar.gz", hash = "sha256:ebc15946aa78bd63458992fc81ec3b6f7b1e92d51c35e6de1c3804e73b799147"}, -] - -[package.extras] -docs = ["furo", "jaraco.packaging (>=9.3)", "jaraco.tidelift (>=1.4)", "rst.linker (>=1.9)", "sphinx (>=3.5)", "sphinx-lint"] -testing = ["big-O", "jaraco.functools", "jaraco.itertools", "more-itertools", "pytest (>=6)", "pytest-black (>=0.3.7)", "pytest-checkdocs (>=2.4)", "pytest-cov", "pytest-enabler (>=2.2)", "pytest-ignore-flaky", "pytest-mypy (>=0.9.1)", "pytest-ruff"] - -[metadata] -lock-version = "2.0" -python-versions = "^3.9,<3.12" -content-hash = "df83035f36e8917cd37c158c2eaa17c33aaaa44c8361f8803ad470799a79472f" diff --git a/pyproject.toml b/pyproject.toml index 11fb22d0..70a0141e 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -4,7 +4,7 @@ build-backend = "poetry.core.masonry.api" [tool.poetry] name = "simod" -version = "3.6.13" +version = "5.1.6" authors = [ "Ihar Suvorau ", "David Chapela ", @@ -18,27 +18,27 @@ packages = [{ include = "simod", from = "src" }] python = "^3.9,<3.12" click = "^8.1.3" hyperopt = "^0.2.7" -lxml = "^4.9.1" +lxml = "^5.3.0" matplotlib = "^3.6.0" -networkx = "^3.1" -numpy = "^1.23.4" +networkx = "^3.2.1" +numpy = "^1.24.23" pandas = "^2.1.0" -pendulum = "^2.1.2" +pendulum = "^3.0.0" pydantic = "^2.3.0" python-dotenv = "^1.0.0" -python-multipart = "^0.0.6" -pytz = "^2023.3" +python-multipart = "^0.0.12" +pytz = "^2024.2" PyYAML = "^6.0" requests = "^2.28.2" -scipy = "^1.9.2" +scipy = "^1.13.0" statistics = "^1.0.3.5" tqdm = "^4.64.1" xmltodict = "^0.13.0" -prosimos = "^2.0.3" -extraneous-activity-delays = "^2.1.19" +prosimos = "^2.0.6" +extraneous-activity-delays = "^2.1.21" openxes-cli-py = "^0.1.15" -pix-framework = "^0.13.5" -log-distance-measures = "^1.0.2" +pix-framework = "^0.13.17" +log-distance-measures = "^2.0.0" [tool.poetry.group.dev.dependencies] pytest = "^7.1.3" diff --git 
a/resources/config/benchmark/benchmark_diff.yml b/resources/config/benchmark/benchmark_diff.yml new file mode 100644 index 00000000..49c2cd3d --- /dev/null +++ b/resources/config/benchmark/benchmark_diff.yml @@ -0,0 +1,94 @@ +version: 5 +########## +# Common # +########## +common: + # Path to the event log in CSV format + train_log_path: ../../event_logs/BPIC_2012_W_train.csv.gz + # Event log to evaluate the discovered BPS model with + test_log_path: ../../event_logs/BPIC_2012_W_test.csv.gz + # Use observed arrival distributions + use_observed_arrival_distribution: false + # Specify the name for each of the columns in the CSV file (XES standard by default) + log_ids: + case: "case_id" + activity: "activity" + resource: "resource" + start_time: "start_time" + end_time: "end_time" + # Number of evaluations of the discovered BPS model + num_final_evaluations: 10 + # Metrics to evaluate the discovered BPS model + evaluation_metrics: + - 3_gram_distance + - 2_gram_distance + - absolute_event_distribution + - relative_event_distribution + - circadian_event_distribution + - arrival_event_distribution + - cycle_time_distribution + # Whether to discover case attributes or not + discover_data_attributes: false +################# +# Preprocessing # +################# +preprocessing: + multitasking: false + enable_time_concurrency_threshold: 0.5 +################ +# Control-flow # +################ +control_flow: + # Metric to guide the optimization process (loss function to minimize) + optimization_metric: two_gram_distance + # Number of optimization iterations over the search space + num_iterations: 30 + # Number of times to evaluate each iteration (using the mean of all of them) + num_evaluations_per_iteration: 5 + # Methods for discovering gateway probabilities + gateway_probabilities: discovery + # Discover process model with SplitMiner v3 + mining_algorithm: sm1 + # Number of concurrent relations between events to be captured + epsilon: + - 0.05 + - 0.4 + # Threshold for filtering the incoming and outgoing edges + eta: + - 0.2 + - 0.7 + # Whether to replace non-trivial OR joins or not + replace_or_joins: + - true + - false + # Whether to prioritize parallelism over loops or not + prioritize_parallelism: true +################## +# Resource model # +################## +resource_model: + # Metric to guide the optimization process (loss function to minimize) + optimization_metric: circadian_emd + # Number of optimization iterations over the search space + num_iterations: 40 + # Number of times to evaluate each iteration (using the mean of all of them) + num_evaluations_per_iteration: 5 + # Whether to discover prioritization or batching behavior + discover_prioritization_rules: false + discover_batching_rules: false + # Resource profiles configuration + resource_profiles: + # Resource profile discovery type + discovery_type: differentiated + # Time granularity (in minutes) for the resource calendar (the higher the density of events in the log, the smaller the granularity can be) + granularity: 60 + # Minimum confidence of the intervals in the discovered calendar (of a resource or set of resources) + confidence: + - 0.5 + - 0.85 + # Minimum support of the intervals in the discovered calendar (of a resource or set of resources) + support: + - 0.05 + - 0.5 + # Participation of a resource in the process to discover a calendar for them (gathered together otherwise) + participation: 0.4 \ No newline at end of file diff --git a/resources/config/benchmark/benchmark_diff_data_aware.yml 
b/resources/config/benchmark/benchmark_diff_data_aware.yml new file mode 100644 index 00000000..1c153804 --- /dev/null +++ b/resources/config/benchmark/benchmark_diff_data_aware.yml @@ -0,0 +1,98 @@ +version: 5 +########## +# Common # +########## +common: + # Path to the event log in CSV format + train_log_path: ../../event_logs/BPIC_2012_W_train.csv.gz + # Event log to evaluate the discovered BPS model with + test_log_path: ../../event_logs/BPIC_2012_W_test.csv.gz + # Specify the name for each of the columns in the CSV file (XES standard by default) + log_ids: + case: "case_id" + activity: "activity" + resource: "resource" + start_time: "start_time" + end_time: "end_time" + # Number of evaluations of the discovered BPS model + num_final_evaluations: 10 + # Metrics to evaluate the discovered BPS model + evaluation_metrics: + - 3_gram_distance + - 2_gram_distance + - absolute_event_distribution + - relative_event_distribution + - circadian_event_distribution + - arrival_event_distribution + - cycle_time_distribution + # Whether to discover case attributes or not + discover_data_attributes: true +################# +# Preprocessing # +################# +preprocessing: + multitasking: false + enable_time_concurrency_threshold: 0.5 +################ +# Control-flow # +################ +control_flow: + # Metric to guide the optimization process (loss function to minimize) + optimization_metric: two_gram_distance + # Number of optimization iterations over the search space + num_iterations: 30 + # Number of times to evaluate each iteration (using the mean of all of them) + num_evaluations_per_iteration: 5 + # Methods for discovering gateway probabilities + gateway_probabilities: discovery + # Discover process model with SplitMiner v3 + mining_algorithm: sm1 + # Number of concurrent relations between events to be captured + epsilon: + - 0.05 + - 0.4 + # Threshold for filtering the incoming and outgoing edges + eta: + - 0.2 + - 0.7 + # Whether to replace non-trivial OR joins or not + replace_or_joins: + - true + - false + # Whether to prioritize parallelism over loops or not + prioritize_parallelism: true + # Discover data-aware branching rules, i.e., BPMN decision points based on value of data attributes + discover_branch_rules: true + # Minimum f-score value to consider the discovered data-aware branching rules + f_score: + - 0.3 + - 0.9 +################## +# Resource model # +################## +resource_model: + # Metric to guide the optimization process (loss function to minimize) + optimization_metric: circadian_emd + # Number of optimization iterations over the search space + num_iterations: 40 + # Number of times to evaluate each iteration (using the mean of all of them) + num_evaluations_per_iteration: 5 + # Whether to discover prioritization or batching behavior + discover_prioritization_rules: false + discover_batching_rules: false + # Resource profiles configuration + resource_profiles: + # Resource profile discovery type + discovery_type: differentiated + # Time granularity (in minutes) for the resource calendar (the higher the density of events in the log, the smaller the granularity can be) + granularity: 60 + # Minimum confidence of the intervals in the discovered calendar (of a resource or set of resources) + confidence: + - 0.5 + - 0.85 + # Minimum support of the intervals in the discovered calendar (of a resource or set of resources) + support: + - 0.05 + - 0.5 + # Participation of a resource in the process to discover a calendar for them (gathered together otherwise) + 
participation: 0.4 \ No newline at end of file diff --git a/resources/config/benchmark/benchmark_diff_extr.yml b/resources/config/benchmark/benchmark_diff_extr.yml new file mode 100644 index 00000000..afb32fde --- /dev/null +++ b/resources/config/benchmark/benchmark_diff_extr.yml @@ -0,0 +1,102 @@ +version: 5 +########## +# Common # +########## +common: + # Path to the event log in CSV format + train_log_path: ../../event_logs/BPIC_2012_W_train.csv.gz + # Event log to evaluate the discovered BPS model with + test_log_path: ../../event_logs/BPIC_2012_W_test.csv.gz + # Specify the name for each of the columns in the CSV file (XES standard by default) + log_ids: + case: "case_id" + activity: "activity" + resource: "resource" + start_time: "start_time" + end_time: "end_time" + # Number of evaluations of the discovered BPS model + num_final_evaluations: 10 + # Metrics to evaluate the discovered BPS model + evaluation_metrics: + - 3_gram_distance + - 2_gram_distance + - absolute_event_distribution + - relative_event_distribution + - circadian_event_distribution + - arrival_event_distribution + - cycle_time_distribution + # Whether to discover case attributes or not + discover_data_attributes: false +################# +# Preprocessing # +################# +preprocessing: + multitasking: false + enable_time_concurrency_threshold: 0.5 +################ +# Control-flow # +################ +control_flow: + # Metric to guide the optimization process (loss function to minimize) + optimization_metric: two_gram_distance + # Number of optimization iterations over the search space + num_iterations: 30 + # Number of times to evaluate each iteration (using the mean of all of them) + num_evaluations_per_iteration: 5 + # Methods for discovering gateway probabilities + gateway_probabilities: discovery + # Discover process model with SplitMiner v3 + mining_algorithm: sm1 + # Number of concurrent relations between events to be captured + epsilon: + - 0.05 + - 0.4 + # Threshold for filtering the incoming and outgoing edges + eta: + - 0.2 + - 0.7 + # Whether to replace non-trivial OR joins or not + replace_or_joins: + - true + - false + # Whether to prioritize parallelism over loops or not + prioritize_parallelism: true +################## +# Resource model # +################## +resource_model: + # Metric to guide the optimization process (loss function to minimize) + optimization_metric: circadian_emd + # Number of optimization iterations over the search space + num_iterations: 40 + # Number of times to evaluate each iteration (using the mean of all of them) + num_evaluations_per_iteration: 5 + # Whether to discover prioritization or batching behavior + discover_prioritization_rules: false + discover_batching_rules: false + # Resource profiles configuration + resource_profiles: + # Resource profile discovery type + discovery_type: differentiated + # Time granularity (in minutes) for the resource calendar (the higher the density of events in the log, the smaller the granularity can be) + granularity: 60 + # Minimum confidence of the intervals in the discovered calendar (of a resource or set of resources) + confidence: + - 0.5 + - 0.85 + # Minimum support of the intervals in the discovered calendar (of a resource or set of resources) + support: + - 0.05 + - 0.5 + # Participation of a resource in the process to discover a calendar for them (gathered together otherwise) + participation: 0.4 +##################### +# Extraneous delays # +##################### +extraneous_activity_delays: + # Method to compute the 
extraneous delay (naive or eclipse-aware) + discovery_method: eclipse-aware + # Metric to guide the optimization process (loss function to minimize) + optimization_metric: relative_emd + # Number of optimization iterations over the search space (1 = direct discovery, no optimization stage) + num_iterations: 20 diff --git a/resources/config/benchmark/benchmark_fuzz.yml b/resources/config/benchmark/benchmark_fuzz.yml new file mode 100644 index 00000000..8c020907 --- /dev/null +++ b/resources/config/benchmark/benchmark_fuzz.yml @@ -0,0 +1,85 @@ +version: 5 +########## +# Common # +########## +common: + # Path to the event log in CSV format + train_log_path: ../../event_logs/BPIC_2012_W_train.csv.gz + # Event log to evaluate the discovered BPS model with + test_log_path: ../../event_logs/BPIC_2012_W_test.csv.gz + # Specify the name for each of the columns in the CSV file (XES standard by default) + log_ids: + case: "case_id" + activity: "activity" + resource: "resource" + start_time: "start_time" + end_time: "end_time" + # Number of evaluations of the discovered BPS model + num_final_evaluations: 10 + # Metrics to evaluate the discovered BPS model + evaluation_metrics: + - 3_gram_distance + - 2_gram_distance + - absolute_event_distribution + - relative_event_distribution + - circadian_event_distribution + - arrival_event_distribution + - cycle_time_distribution + # Whether to discover case attributes or not + discover_data_attributes: false +################# +# Preprocessing # +################# +preprocessing: + multitasking: false + enable_time_concurrency_threshold: 0.5 +################ +# Control-flow # +################ +control_flow: + # Metric to guide the optimization process (loss function to minimize) + optimization_metric: two_gram_distance + # Number of optimization iterations over the search space + num_iterations: 30 + # Number of times to evaluate each iteration (using the mean of all of them) + num_evaluations_per_iteration: 5 + # Methods for discovering gateway probabilities + gateway_probabilities: discovery + # Discover process model with SplitMiner v3 + mining_algorithm: sm1 + # Number of concurrent relations between events to be captured + epsilon: + - 0.05 + - 0.4 + # Threshold for filtering the incoming and outgoing edges + eta: + - 0.2 + - 0.7 + # Whether to replace non-trivial OR joins or not + replace_or_joins: + - true + - false + # Whether to prioritize parallelism over loops or not + prioritize_parallelism: true +################## +# Resource model # +################## +resource_model: + # Metric to guide the optimization process (loss function to minimize) + optimization_metric: circadian_emd + # Number of optimization iterations over the search space + num_iterations: 40 + # Number of times to evaluate each iteration (using the mean of all of them) + num_evaluations_per_iteration: 5 + # Whether to discover prioritization or batching behavior + discover_prioritization_rules: false + discover_batching_rules: false + # Resource profiles configuration + resource_profiles: + # Resource profile discovery type + discovery_type: differentiated_fuzzy + # Time granularity (in minutes) for the resource calendar (the higher the density of events in the log, the smaller the granularity can be) + granularity: 60 + fuzzy_angle: + - 0.1 + - 0.9 diff --git a/resources/config/benchmark/benchmark_fuzz_extr.yml b/resources/config/benchmark/benchmark_fuzz_extr.yml new file mode 100644 index 00000000..dddd2da0 --- /dev/null +++ b/resources/config/benchmark/benchmark_fuzz_extr.yml 
@@ -0,0 +1,95 @@ +version: 5 +########## +# Common # +########## +common: + # Path to the event log in CSV format + train_log_path: ../../event_logs/BPIC_2012_W_train.csv.gz + # Event log to evaluate the discovered BPS model with + test_log_path: ../../event_logs/BPIC_2012_W_test.csv.gz + # Specify the name for each of the columns in the CSV file (XES standard by default) + log_ids: + case: "case_id" + activity: "activity" + resource: "resource" + start_time: "start_time" + end_time: "end_time" + # Number of evaluations of the discovered BPS model + num_final_evaluations: 10 + # Metrics to evaluate the discovered BPS model + evaluation_metrics: + - 3_gram_distance + - 2_gram_distance + - absolute_event_distribution + - relative_event_distribution + - circadian_event_distribution + - arrival_event_distribution + - cycle_time_distribution + # Whether to discover case attributes or not + discover_data_attributes: false +################# +# Preprocessing # +################# +preprocessing: + multitasking: false + enable_time_concurrency_threshold: 0.5 +################ +# Control-flow # +################ +control_flow: + # Metric to guide the optimization process (loss function to minimize) + optimization_metric: two_gram_distance + # Number of optimization iterations over the search space + num_iterations: 30 + # Number of times to evaluate each iteration (using the mean of all of them) + num_evaluations_per_iteration: 5 + # Methods for discovering gateway probabilities + gateway_probabilities: discovery + # Discover process model with SplitMiner v3 + mining_algorithm: sm1 + # Number of concurrent relations between events to be captured + epsilon: + - 0.05 + - 0.4 + # Threshold for filtering the incoming and outgoing edges + eta: + - 0.2 + - 0.7 + # Whether to replace non-trivial OR joins or not + replace_or_joins: + - true + - false + # Whether to prioritize parallelism over loops or not + prioritize_parallelism: true +################## +# Resource model # +################## +resource_model: + # Metric to guide the optimization process (loss function to minimize) + optimization_metric: circadian_emd + # Number of optimization iterations over the search space + num_iterations: 40 + # Number of times to evaluate each iteration (using the mean of all of them) + num_evaluations_per_iteration: 5 + # Whether to discover prioritization or batching behavior + discover_prioritization_rules: false + discover_batching_rules: false + # Resource profiles configuration + resource_profiles: + # Resource profile discovery type + discovery_type: differentiated_fuzzy + # Time granularity (in minutes) for the resource calendar (the higher the density of events in the log, the smaller the granularity can be) + granularity: 60 + fuzzy_angle: + - 0.1 + - 0.9 +##################### +# Extraneous delays # +##################### +extraneous_activity_delays: + # Method to compute the extraneous delay (naive or eclipse-aware) + discovery_method: eclipse-aware + # Metric to guide the optimization process (loss function to minimize) + optimization_metric: relative_emd + # Number of optimization iterations over the search space (1 = direct discovery, no optimization stage) + num_iterations: 20 diff --git a/resources/config/benchmark/benchmark_pool.yml b/resources/config/benchmark/benchmark_pool.yml new file mode 100644 index 00000000..a6be57fb --- /dev/null +++ b/resources/config/benchmark/benchmark_pool.yml @@ -0,0 +1,92 @@ +version: 5 +########## +# Common # +########## +common: + # Path to the event log in CSV 
format + train_log_path: ../../event_logs/BPIC_2012_W_train.csv.gz + # Event log to evaluate the discovered BPS model with + test_log_path: ../../event_logs/BPIC_2012_W_test.csv.gz + # Specify the name for each of the columns in the CSV file (XES standard by default) + log_ids: + case: "case_id" + activity: "activity" + resource: "resource" + start_time: "start_time" + end_time: "end_time" + # Number of evaluations of the discovered BPS model + num_final_evaluations: 10 + # Metrics to evaluate the discovered BPS model + evaluation_metrics: + - 3_gram_distance + - 2_gram_distance + - absolute_event_distribution + - relative_event_distribution + - circadian_event_distribution + - arrival_event_distribution + - cycle_time_distribution + # Whether to discover case attributes or not + discover_data_attributes: false +################# +# Preprocessing # +################# +preprocessing: + multitasking: false + enable_time_concurrency_threshold: 0.5 +################ +# Control-flow # +################ +control_flow: + # Metric to guide the optimization process (loss function to minimize) + optimization_metric: two_gram_distance + # Number of optimization iterations over the search space + num_iterations: 30 + # Number of times to evaluate each iteration (using the mean of all of them) + num_evaluations_per_iteration: 5 + # Methods for discovering gateway probabilities + gateway_probabilities: discovery + # Discover process model with SplitMiner v3 + mining_algorithm: sm1 + # Number of concurrent relations between events to be captured + epsilon: + - 0.05 + - 0.4 + # Threshold for filtering the incoming and outgoing edges + eta: + - 0.2 + - 0.7 + # Whether to replace non-trivial OR joins or not + replace_or_joins: + - true + - false + # Whether to prioritize parallelism over loops or not + prioritize_parallelism: true +################## +# Resource model # +################## +resource_model: + # Metric to guide the optimization process (loss function to minimize) + optimization_metric: circadian_emd + # Number of optimization iterations over the search space + num_iterations: 40 + # Number of times to evaluate each iteration (using the mean of all of them) + num_evaluations_per_iteration: 5 + # Whether to discover prioritization or batching behavior + discover_prioritization_rules: false + discover_batching_rules: false + # Resource profiles configuration + resource_profiles: + # Resource profile discovery type + discovery_type: pool + # Time granularity (in minutes) for the resource calendar (the higher the density of events in the log, the smaller the granularity can be) + granularity: 60 + # Minimum confidence of the intervals in the discovered calendar (of a resource or set of resources) + confidence: + - 0.5 + - 0.85 + # Minimum support of the intervals in the discovered calendar (of a resource or set of resources) + support: + - 0.05 + - 0.5 + # Participation of a resource in the process to discover a calendar for them (gathered together otherwise) + participation: 0.4 diff --git a/resources/config/benchmark_config.yml b/resources/config/benchmark_config.yml deleted file mode 100644 index ebd82b08..00000000 --- a/resources/config/benchmark_config.yml +++ /dev/null @@ -1,63 +0,0 @@ -version: 4 -common: - train_log_path: ../event_logs/BPIC_2012_train.csv.gz - test_log_path: ../event_logs/BPIC_2012_test.csv.gz - num_final_evaluations: 10 - evaluation_metrics: - - 3_gram_distance - - 2_gram_distance - - absolute_event_distribution - - relative_event_distribution - - 
circadian_event_distribution - - arrival_event_distribution - - cycle_time_distribution - log_ids: - case: case_id - activity: activity - resource: resource - start_time: start_time - end_time: end_time - enabled_time: enabled_time -preprocessing: - multitasking: false - enable_time_concurrency_threshold: 0.5 - concurrency_df: 0.75 - concurrency_l2l: 0.9 - concurrency_l1l: 0.9 -control_flow: - optimization_metric: n_gram_distance - num_iterations: 2 - num_evaluations_per_iteration: 5 - gateway_probabilities: discovery - discovery_algorithm: sm1 - epsilon: - - 0.1 - - 1.0 - eta: - - 0.2 - - 0.6 - replace_or_joins: - - true - - false - prioritize_parallelism: - - true - - false -resource_model: - optimization_metric: circadian_event_distribution - num_iterations: 2 - num_evaluations_per_iteration: 5 - resource_profiles: - discovery_type: differentiated - granularity: - - 15 - - 60 - confidence: - - 0.1 - - 1.0 - support: - - 0.1 - - 1.0 - participation: 0.4 -extraneous_activity_delays: - optimization_metric: relative_event_distribution - num_iterations: 1 diff --git a/resources/config/complete_configuration.yml b/resources/config/complete_configuration.yml index 866a768b..f37603e7 100644 --- a/resources/config/complete_configuration.yml +++ b/resources/config/complete_configuration.yml @@ -1,4 +1,4 @@ -version: 4 +version: 5 ########## # Common # @@ -11,7 +11,8 @@ common: case: "case_id" activity: "activity" resource: "resource" - start_time: "start_time" + enabled_time: "enabled_time" # If not present in the log, automatically estimated (see preprocessing) + start_time: "start_time" # Should be present, but if not, can be estimated (see preprocessing) end_time: "end_time" # Use this process model and skip its discovery process_model_path: ../models/LoanApp_simplified.bpmn @@ -35,18 +36,21 @@ common: use_observed_arrival_distribution: false # Whether to delete all files created during the optimization phases or not clean_intermediate_files: true - # Whether to discover case attributes or not - discover_case_attributes: false + # Whether to discover global/case/event attributes and their update rules or not + discover_data_attributes: false ################# # Preprocessing # ################# preprocessing: # If the log has start times, threshold to consider two activities as concurrent when computing the enabled time + # (if necessary). Two activities would be considered concurrent if their occurrences happening concurrently divided + # by their total occurrences is higher than this threshold. enable_time_concurrency_threshold: 0.75 - # If true, remove the multitasking by adjusting the timestamps (start/end) of those activities being executed at the same time by the same resource. + # If true, preprocess multitasking (i.e., one resource performing more than one activity at the same time) by + # adjusting the timestamps (start/end) of those activities being executed at the same time by the same resource. multitasking: false - # Thresholds for the heuristics' concurrency oracle (only used to estimate start times if missing in the log). + # Thresholds for the heuristics' concurrency oracle (only used to estimate start times if missing). 
concurrency_df: 0.9 # Directly-Follows threshold concurrency_l2l: 0.9 # Length 2 loops threshold concurrency_l1l: 0.9 # Length 1 loops threshold @@ -69,12 +73,12 @@ control_flow: mining_algorithm: sm1 # For Split Miner v1 and v2: Number of concurrent relations between events to be captured (between 0.0 and 1.0) epsilon: - - 0.0 - - 1.0 + - 0.05 + - 0.4 # Only for Split Miner v1: Threshold for filtering the incoming and outgoing edges (between 0.0 and 1.0) eta: - - 0.0 - - 1.0 + - 0.2 + - 0.7 # Only for Split Miner v1: Whether to replace non-trivial OR joins or not (true or false) replace_or_joins: - true @@ -83,6 +87,12 @@ control_flow: prioritize_parallelism: - true - false + # Discover data-aware branching rules, i.e., BPMN decision points based on value of data attributes + discover_branch_rules: true + # Minimum f-score value to consider the discovered data-aware branching rules + f_score: + - 0.3 + - 0.9 ################## # Resource model # @@ -99,7 +109,7 @@ resource_model: discover_batching_rules: false # Resource profiles configuration resource_profiles: - # Resource profile discovery type (differentiated, pool, undifferentiated) + # Resource profile discovery type (fuzzy, differentiated, pool, undifferentiated) discovery_type: differentiated # Time granularity (in minutes) for the resource calendar (the higher the density of events in the log, the smaller the granularity can be) granularity: @@ -111,12 +121,16 @@ resource_model: - 0.85 # Minimum support of the intervals in the discovered calendar of a resource or set of resources (between 0.0 and 1.0) support: - - 0.01 - - 0.3 + - 0.05 + - 0.5 # Participation of a resource in the process to discover a calendar for them, gathered together otherwise (between 0.0 and 1.0) participation: - 0.2 - 0.5 + # Angle of the fuzzy trapezoid when computing the availability probability for an activity (angle from start to end) + fuzzy_angle: + - 0.1 + - 0.9 ##################### # Extraneous delays # diff --git a/resources/config/configuration_example.yml b/resources/config/configuration_example.yml index fa7328b0..186512ea 100644 --- a/resources/config/configuration_example.yml +++ b/resources/config/configuration_example.yml @@ -1,4 +1,12 @@ -version: 4 +################################################################################################################# +# Simple configuration example with i) no evaluation of the final BPS model, ii) 20 iterations of control-flow # +# discovery, iii) 20 iterations of resource model (differentiated) discovery, and iv) direct discovery of # +# extraneous delays. 
# +################################################################################################################# +# - Increase the num_iterations to (potentially) improve the quality of that discovered model # +# - Visit 'complete_configuration.yml' example for a description of all configurable parameters # +################################################################################################################# +version: 5 ########## # Common # ########## @@ -10,40 +18,39 @@ common: case: "case_id" activity: "activity" resource: "resource" + enabled_time: "enabled_time" # If not present in the log, automatically computed start_time: "start_time" end_time: "end_time" # Whether to discover case attributes or not - discover_case_attributes: false + discover_data_attributes: false ################# # Preprocessing # ################# preprocessing: - # Threshold to consider two activities as concurrent when computing the enabled time + # Threshold to consider two activities as concurrent when computing the enabled time (if necessary) enable_time_concurrency_threshold: 0.75 ################ # Control-flow # ################ control_flow: # Metric to guide the optimization process (loss function to minimize) - optimization_metric: n_gram_distance + optimization_metric: two_gram_distance # Number of optimization iterations over the search space num_iterations: 20 # Number of times to evaluate each iteration (using the mean of all of them) num_evaluations_per_iteration: 3 - # Methods for discovering gateway probabilities - gateway_probabilities: - - equiprobable - - discovery + # Method for discovering gateway probabilities + gateway_probabilities: discovery # Discover process model with SplitMiner v3 mining_algorithm: sm1 # Number of concurrent relations between events to be captured epsilon: - - 0.0 - - 1.0 + - 0.05 + - 0.4 # Threshold for filtering the incoming and outgoing edges eta: - - 0.0 - - 1.0 + - 0.2 + - 0.7 # Whether to replace non-trivial OR joins or not replace_or_joins: - true @@ -68,26 +75,24 @@ resource_model: # Resource profiles configuration resource_profiles: # Resource profile discovery type - discovery_type: pool + discovery_type: differentiated # Time granularity (in minutes) for the resource calendar (the higher the density of events in the log, the smaller the granularity can be) - granularity: - - 15 - - 60 + granularity: 60 # Minimum confidence of the intervals in the discovered calendar (of a resource or set of resources) confidence: - 0.5 - 0.85 # Minimum support of the intervals in the discovered calendar (of a resource or set of resources) support: - - 0.01 - - 0.3 + - 0.05 + - 0.5 # Participation of a resource in the process to discover a calendar for them (gathered together otherwise) participation: 0.4 ##################### # Extraneous delays # ##################### extraneous_activity_delays: - # Metric to guide the optimization process (loss function to minimize) - optimization_metric: relative_emd + # Method to compute the extraneous delay + discovery_method: eclipse-aware # Number of optimization iterations over the search space (1 = direct discovery, no optimization stage) num_iterations: 1 diff --git a/resources/config/configuration_example_data_aware.yml b/resources/config/configuration_example_data_aware.yml new file mode 100644 index 00000000..aeaddffb --- /dev/null +++ b/resources/config/configuration_example_data_aware.yml @@ -0,0 +1,104 @@ 
+################################################################################################################# +# Simple configuration example with i) no evaluation of the final BPS model, ii) 10 iterations of control-flow # +# discovery (BPMN model provided) with data-aware decision points, iii) 20 iterations of resource model # +# (differentiated) discovery, and iv) no discovery of extraneous delays. # +################################################################################################################# +# - Increase the num_iterations to (potentially) improve the quality of that discovered model # +# - Visit 'complete_configuration.yml' example for a description of all configurable parameters # +################################################################################################################# +version: 5 +########## +# Common # +########## +common: + # Path to the event log in CSV format + train_log_path: ../event_logs/LoanApp_simplified_train.csv.gz + # Specify the name for each of the columns in the CSV file (XES standard by default) + log_ids: + case: "case_id" + activity: "activity" + resource: "resource" + enabled_time: "enabled_time" # If not present in the log, automatically computed + start_time: "start_time" + end_time: "end_time" + # Whether to discover case attributes or not + discover_data_attributes: true +################# +# Preprocessing # +################# +preprocessing: + # Threshold to consider two activities as concurrent when computing the enabled time (if necessary) + enable_time_concurrency_threshold: 0.75 +################ +# Control-flow # +################ +control_flow: + # Metric to guide the optimization process (loss function to minimize) + optimization_metric: two_gram_distance + # Number of optimization iterations over the search space + num_iterations: 20 + # Number of times to evaluate each iteration (using the mean of all of them) + num_evaluations_per_iteration: 3 + # Method for discovering gateway probabilities + gateway_probabilities: discovery + # Discover process model with SplitMiner v3 + mining_algorithm: sm1 + # Number of concurrent relations between events to be captured + epsilon: + - 0.05 + - 0.4 + # Threshold for filtering the incoming and outgoing edges + eta: + - 0.2 + - 0.7 + # Whether to replace non-trivial OR joins or not + replace_or_joins: + - true + - false + # Whether to prioritize parallelism over loops or not + prioritize_parallelism: + - true + - false + # Discover data-aware branching rules, i.e., BPMN decision points based on value of data attributes + discover_branch_rules: true + # Minimum f-score value to consider the discovered data-aware branching rules + f_score: + - 0.3 + - 0.9 +################## +# Resource model # +################## +resource_model: + # Metric to guide the optimization process (loss function to minimize) + optimization_metric: circadian_emd + # Number of optimization iterations over the search space + num_iterations: 20 + # Number of times to evaluate each iteration (using the mean of all of them) + num_evaluations_per_iteration: 3 + # Whether to discover prioritization or batching behavior + discover_prioritization_rules: false + discover_batching_rules: false + # Resource profiles configuration + resource_profiles: + # Resource profile discovery type + discovery_type: differentiated + # Time granularity (in minutes) for the resource calendar (the higher the density of events in the log, the smaller the granularity can be) + granularity: 60 + # Minimum confidence of 
the intervals in the discovered calendar (of a resource or set of resources) + confidence: + - 0.5 + - 0.85 + # Minimum support of the intervals in the discovered calendar (of a resource or set of resources) + support: + - 0.05 + - 0.5 + # Participation of a resource in the process to discover a calendar for them (gathered together otherwise) + participation: 0.4 +##################### +# Extraneous delays # +##################### +extraneous_activity_delays: + # Method to compute the extraneous delay + discovery_method: eclipse-aware + # Number of optimization iterations over the search space (1 = direct discovery, no optimization stage) + num_iterations: 1 diff --git a/resources/config/configuration_example_fuzzy.yml b/resources/config/configuration_example_fuzzy.yml new file mode 100644 index 00000000..5e74418d --- /dev/null +++ b/resources/config/configuration_example_fuzzy.yml @@ -0,0 +1,84 @@ +################################################################################################################# +# Simple configuration example with i) no evaluation of the final BPS model, ii) 20 iterations of control-flow # +# discovery, iii) 10 iterations of resource model (fuzzy availability) discovery, and iv) no discovery of # +# extraneous delays. # +################################################################################################################# +# - Increase the num_iterations to (potentially) improve the quality of that discovered model # +# - Visit 'complete_configuration.yml' example for a description of all configurable parameters # +################################################################################################################# +version: 5 +########## +# Common # +########## +common: + # Path to the event log in CSV format + train_log_path: ../event_logs/LoanApp_simplified_train.csv.gz + # Specify the name for each of the columns in the CSV file (XES standard by default) + log_ids: + case: "case_id" + activity: "activity" + resource: "resource" + enabled_time: "enabled_time" # If not present in the log, automatically computed + start_time: "start_time" + end_time: "end_time" + # Whether to discover case attributes or not + discover_data_attributes: false +################# +# Preprocessing # +################# +preprocessing: + # Threshold to consider two activities as concurrent when computing the enabled time (if necessary) + enable_time_concurrency_threshold: 0.75 +################ +# Control-flow # +################ +control_flow: + # Metric to guide the optimization process (loss function to minimize) + optimization_metric: two_gram_distance + # Number of optimization iterations over the search space + num_iterations: 20 + # Number of times to evaluate each iteration (using the mean of all of them) + num_evaluations_per_iteration: 3 + # Method for discovering gateway probabilities + gateway_probabilities: discovery + # Discover process model with SplitMiner v3 + mining_algorithm: sm1 + # Number of concurrent relations between events to be captured + epsilon: + - 0.05 + - 0.4 + # Threshold for filtering the incoming and outgoing edges + eta: + - 0.2 + - 0.7 + # Whether to replace non-trivial OR joins or not + replace_or_joins: + - true + - false + # Whether to prioritize parallelism over loops or not + prioritize_parallelism: + - true + - false +################## +# Resource model # +################## +resource_model: + # Metric to guide the optimization process (loss function to minimize) + optimization_metric: circadian_emd + # Number of 
optimization iterations over the search space + num_iterations: 10 + # Number of times to evaluate each iteration (using the mean of all of them) + num_evaluations_per_iteration: 3 + # Whether to discover prioritization or batching behavior + discover_prioritization_rules: false + discover_batching_rules: false + # Resource profiles configuration + resource_profiles: + # Resource profile discovery type + discovery_type: differentiated_fuzzy + # Duration of each granule in the resource calendar that will get its own probability + granularity: 60 + # Angle of the fuzzy trapezoid when computing the availability probability for an activity (angle from start to end) + fuzzy_angle: + - 0.1 + - 0.9 diff --git a/resources/config/configuration_example_with_evaluation.yml b/resources/config/configuration_example_with_evaluation.yml index e9294cdc..10f412a7 100644 --- a/resources/config/configuration_example_with_evaluation.yml +++ b/resources/config/configuration_example_with_evaluation.yml @@ -1,4 +1,10 @@ -version: 4 +################################################################################################################# +# Same simple configuration as 'configuration_example.yml' but evaluation the quality of the final BPS model # +################################################################################################################# +# - Increase the num_iterations to (potentially) improve the quality of that discovered model # +# - Visit 'complete_configuration.yml' example for a description of all configurable parameters # +################################################################################################################# +version: 5 ########## # Common # ########## @@ -10,6 +16,7 @@ common: case: "case_id" activity: "activity" resource: "resource" + enabled_time: "enabled_time" # If not present in the log, automatically computed start_time: "start_time" end_time: "end_time" # Event log to evaluate the discovered BPS model with @@ -26,37 +33,35 @@ common: - arrival_event_distribution - cycle_time_distribution # Whether to discover case attributes or not - discover_case_attributes: false + discover_data_attributes: false ################# # Preprocessing # ################# preprocessing: - # Threshold to consider two activities as concurrent when computing the enabled time + # Threshold to consider two activities as concurrent when computing the enabled time (if necessary) enable_time_concurrency_threshold: 0.75 ################ # Control-flow # ################ control_flow: # Metric to guide the optimization process (loss function to minimize) - optimization_metric: n_gram_distance + optimization_metric: two_gram_distance # Number of optimization iterations over the search space num_iterations: 20 # Number of times to evaluate each iteration (using the mean of all of them) num_evaluations_per_iteration: 3 # Methods for discovering gateway probabilities - gateway_probabilities: - - equiprobable - - discovery + gateway_probabilities: discovery # Discover process model with SplitMiner v3 mining_algorithm: sm1 # Number of concurrent relations between events to be captured epsilon: - - 0.0 - - 1.0 + - 0.05 + - 0.4 # Threshold for filtering the incoming and outgoing edges eta: - - 0.0 - - 1.0 + - 0.2 + - 0.7 # Whether to replace non-trivial OR joins or not replace_or_joins: - true @@ -81,26 +86,24 @@ resource_model: # Resource profiles configuration resource_profiles: # Resource profile discovery type - discovery_type: pool + discovery_type: differentiated # Time 
granularity (in minutes) for the resource calendar (the higher the density of events in the log, the smaller the granularity can be) - granularity: - - 15 - - 60 + granularity: 60 # Minimum confidence of the intervals in the discovered calendar (of a resource or set of resources) confidence: - 0.5 - 0.85 # Minimum support of the intervals in the discovered calendar (of a resource or set of resources) support: - - 0.01 - - 0.3 + - 0.05 + - 0.5 # Participation of a resource in the process to discover a calendar for them (gathered together otherwise) participation: 0.4 ##################### # Extraneous delays # ##################### extraneous_activity_delays: - # Metric to guide the optimization process (loss function to minimize) - optimization_metric: relative_emd + # Method to compute the extraneous delay + discovery_method: eclipse-aware # Number of optimization iterations over the search space (1 = direct discovery, no optimization stage) num_iterations: 1 diff --git a/resources/config/configuration_example_with_provided_process_model.yml b/resources/config/configuration_example_with_provided_process_model.yml index e44e0c2b..b490161d 100644 --- a/resources/config/configuration_example_with_provided_process_model.yml +++ b/resources/config/configuration_example_with_provided_process_model.yml @@ -1,4 +1,10 @@ -version: 4 +################################################################################################################# +# Same simple configuration as 'configuration_example.yml' but providing the BPMN model # +################################################################################################################# +# - Increase the num_iterations to (potentially) improve the quality of that discovered model # +# - Visit 'complete_configuration.yml' example for a description of all configurable parameters # +################################################################################################################# +version: 5 ########## # Common # ########## @@ -10,32 +16,31 @@ common: case: "case_id" activity: "activity" resource: "resource" + enabled_time: "enabled_time" # If not present in the log, automatically computed start_time: "start_time" end_time: "end_time" # Use this process model and skip its discovery process_model_path: ../models/LoanApp_simplified.bpmn # Whether to discover case attributes or not - discover_case_attributes: false + discover_data_attributes: false ################# # Preprocessing # ################# preprocessing: - # Threshold to consider two activities as concurrent when computing the enabled time + # Threshold to consider two activities as concurrent when computing the enabled time (if necessary) enable_time_concurrency_threshold: 0.75 ################ # Control-flow # ################ control_flow: # Metric to guide the optimization process (loss function to minimize) - optimization_metric: n_gram_distance + optimization_metric: two_gram_distance # Number of optimization iterations over the search space - num_iterations: 5 + num_iterations: 1 # Number of times to evaluate each iteration (using the mean of all of them) num_evaluations_per_iteration: 3 # Methods for discovering gateway probabilities - gateway_probabilities: - - equiprobable - - discovery + gateway_probabilities: discovery ################## # Resource model # ################## @@ -54,24 +59,22 @@ resource_model: # Resource profile discovery type discovery_type: pool # Time granularity (in minutes) for the resource calendar (the higher the density of events 
in the log, the smaller the granularity can be) - granularity: - - 15 - - 60 + granularity: 60 # Minimum confidence of the intervals in the discovered calendar (of a resource or set of resources) confidence: - 0.5 - 0.85 # Minimum support of the intervals in the discovered calendar (of a resource or set of resources) support: - - 0.01 - - 0.3 + - 0.05 + - 0.5 # Participation of a resource in the process to discover a calendar for them (gathered together otherwise) participation: 0.4 ##################### # Extraneous delays # ##################### extraneous_activity_delays: - # Metric to guide the optimization process (loss function to minimize) - optimization_metric: relative_emd + # Method to compute the extraneous delay + discovery_method: eclipse-aware # Number of optimization iterations over the search space (1 = direct discovery, no optimization stage) num_iterations: 1 diff --git a/resources/config/configuration_one_shot.yml b/resources/config/configuration_one_shot.yml index b2e617c8..9c45b7e8 100644 --- a/resources/config/configuration_one_shot.yml +++ b/resources/config/configuration_one_shot.yml @@ -1,4 +1,10 @@ -version: 4 +################################################################################################################# +# Simple configuration example for running SIMOD without parameter optimization steps. The defined parameters # +# should be individual values and not intervals, as there is no optimization. # +################################################################################################################# +# - Visit 'complete_configuration.yml' example for a description of all configurable parameters # +################################################################################################################# +version: 5 ########## # Common # ########## @@ -10,6 +16,7 @@ common: case: "case_id" activity: "activity" resource: "resource" + enabled_time: "enabled_time" # If not present in the log, automatically computed start_time: "start_time" end_time: "end_time" ################ @@ -31,7 +38,7 @@ control_flow: # Whether to replace non-trivial OR joins or not replace_or_joins: false # Whether to prioritize parallelism over loops or not - prioritize_parallelism: false + prioritize_parallelism: true ################## # Resource model # ################## @@ -45,7 +52,7 @@ resource_model: # Resource profile discovery type discovery_type: differentiated # Time granularity (in minutes) for the resource calendar (the higher the density of events in the log, the smaller the granularity can be) - granularity: 30 + granularity: 60 # Minimum confidence of the intervals in the discovered calendar (of a resource or set of resources) confidence: 0.6 # Minimum support of the intervals in the discovered calendar (of a resource or set of resources) @@ -56,7 +63,7 @@ resource_model: # Extraneous delays # ##################### extraneous_activity_delays: - # Metric to guide the optimization process (loss function to minimize) - optimization_metric: relative_emd + # Method to compute the extraneous delay + discovery_method: eclipse-aware # Number of optimization iterations over the search space (1 = direct discovery, no optimization stage) num_iterations: 1 diff --git a/resources/config/sample.yml b/resources/config/sample.yml deleted file mode 100644 index f935a5a4..00000000 --- a/resources/config/sample.yml +++ /dev/null @@ -1,68 +0,0 @@ -version: 4 -common: - train_log_path: ../event_logs/LoanApp_simplified_train.csv.gz - test_log_path: 
../event_logs/LoanApp_simplified_test.csv.gz - # process_model_path: resources/models/LoanApp_simplified.bpmn # Uncomment to use this BPMN model as process model - num_final_evaluations: 10 # Number of evaluations of the discovered BPS model. - evaluation_metrics: # Metrics to evaluate the discovered BPS model with. - - 3_gram_distance - - 2_gram_distance - - absolute_event_distribution - - relative_event_distribution - - circadian_event_distribution - - arrival_event_distribution - - cycle_time_distribution - log_ids: - case: case_id - activity: activity - resource: resource - start_time: start_time - end_time: end_time - enabled_time: AssignedTime - clean_intermediate_files: false -preprocessing: - multitasking: false # Reassign activity durations when happening in multitasking. - enable_time_concurrency_threshold: 0.5 # Concurrency threshold for the enabled time computation. - concurrency_df: 0.75 # Concurrency thresholds for the start time (and enabled time) estimations when - concurrency_l2l: 0.9 # the start time is missing in the train event log. Using the Heuristics Miner - concurrency_l1l: 0.9 # concurrency oracle. -control_flow: - optimization_metric: n_gram_distance - num_iterations: 3 # Number of iterations to run the hyper-optimization process for control-flow discovery - num_evaluations_per_iteration: 5 # Number of times to evaluate each iteration (using the mean of all of them) - gateway_probabilities: # Methods to discover the probabilities of each gateway - - equiprobable - - discovery - discovery_algorithm: sm1 # Process model discovery algorithm: sm1 (Split Miner v1) or sm2 (Split Miner v2) - epsilon: - - 0.0 - - 1.0 - eta: - - 0.0 - - 1.0 - replace_or_joins: - - true - - false - prioritize_parallelism: - - true - - false -resource_model: - optimization_metric: circadian_event_distribution - num_iterations: 3 # Number of iterations to run the hyper-optimization process for control-flow discovery - num_evaluations_per_iteration: 5 # Number of times to evaluate each iteration (using the mean of all of them) - resource_profiles: - discovery_type: differentiated # Resource discovery type ('undifferentiated', 'pool', or 'differentiated') - granularity: - - 15 - - 60 - confidence: - - 0.5 - - 0.85 - support: - - 0.01 - - 0.3 - participation: 0.4 -extraneous_activity_delays: - optimization_metric: relative_event_distribution - num_iterations: 1 # Number of iterations of the optimization process (if 1, direct discovery without optimization) - num_evaluations_per_iteration: 3 # Number of times to evaluate each iteration (using the mean of all of them) diff --git a/resources/event_logs/LoanApp_simplified_test.csv.gz b/resources/event_logs/LoanApp_simplified_test.csv.gz index d3fad276..8174e0c1 100644 Binary files a/resources/event_logs/LoanApp_simplified_test.csv.gz and b/resources/event_logs/LoanApp_simplified_test.csv.gz differ diff --git a/resources/event_logs/LoanApp_simplified_train.csv.gz b/resources/event_logs/LoanApp_simplified_train.csv.gz index 8f90eac1..b250be6c 100644 Binary files a/resources/event_logs/LoanApp_simplified_train.csv.gz and b/resources/event_logs/LoanApp_simplified_train.csv.gz differ diff --git a/resources/models/LoanApp_simplified.bpmn b/resources/models/LoanApp_simplified.bpmn index efa9f207..8f0e87e8 100644 --- a/resources/models/LoanApp_simplified.bpmn +++ b/resources/models/LoanApp_simplified.bpmn @@ -1 +1,690 @@ 
[BPMN XML diff body omitted: the previous single-line serialization of LoanApp_simplified.bpmn is replaced by a reformatted, ~690-line BPMN 2.0 definition (activities, gateways, sequence flows, and diagram information); the raw XML markup is not reproduced here.]
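Since the model file is now a regular multi-line BPMN document, it can be inspected with standard XML tooling. A minimal sketch, assuming the repository's model path and an installed `lxml`, using the same namespace handling as `post_process_bpmn_self_loops()` introduced later in this diff:

```python
# Minimal sketch: parse the reformatted BPMN model and count its tasks and sequence flows.
# The default-namespace lookup mirrors post_process_bpmn_self_loops() later in this diff.
from lxml import etree

tree = etree.parse("resources/models/LoanApp_simplified.bpmn")
root = tree.getroot()
bpmn_ns = root.nsmap.get(None, "http://www.omg.org/spec/BPMN/20100524/MODEL")
ns = {"bpmn": bpmn_ns}

tasks = root.findall(".//bpmn:task", namespaces=ns)
flows = root.findall(".//bpmn:sequenceFlow", namespaces=ns)
print(f"{len(tasks)} tasks, {len(flows)} sequence flows")
```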
diff --git a/src/simod/__init__.py b/src/simod/__init__.py index e69de29b..e7b5ac10 100644 --- a/src/simod/__init__.py +++ b/src/simod/__init__.py @@ -0,0 +1 @@ +__all__ = ["simod"] diff --git a/src/simod/case_attributes/__init__.py b/src/simod/branch_rules/__init__.py similarity index 100% rename from src/simod/case_attributes/__init__.py rename to src/simod/branch_rules/__init__.py diff --git a/src/simod/branch_rules/discovery.py b/src/simod/branch_rules/discovery.py new file mode 100644 index 00000000..54d925a8 --- /dev/null +++ b/src/simod/branch_rules/discovery.py @@ -0,0 +1,30 @@ +import pandas as pd +from typing import List + +from simod.branch_rules.types import BranchRules + +from pix_framework.io.event_log import EventLogIDs +from pix_framework.discovery.gateway_probabilities import GatewayProbabilities +from pix_framework.discovery.gateway_conditions.gateway_conditions import discover_gateway_conditions + + +def discover_branch_rules(bpmn_graph, log: pd.DataFrame, log_ids: EventLogIDs, f_score=0.7) -> list[BranchRules]: + """ + Discover branch_rules from a log.
+ """ + rules = discover_gateway_conditions(bpmn_graph, log, log_ids, f_score_threshold=f_score) + + rules = list(map(lambda x: BranchRules.from_dict(x), rules)) + + return rules + + +def map_branch_rules_to_flows(gateway_probabilities: List[GatewayProbabilities], branch_rules: List[BranchRules]): + condition_lookup = {rule.id: rule for rule in branch_rules} + + for gateway in gateway_probabilities: + for path in gateway.outgoing_paths: + if path.path_id in condition_lookup: + path.condition_id = condition_lookup[path.path_id].id + + return gateway_probabilities diff --git a/src/simod/branch_rules/types.py b/src/simod/branch_rules/types.py new file mode 100644 index 00000000..faa8feec --- /dev/null +++ b/src/simod/branch_rules/types.py @@ -0,0 +1,45 @@ +from dataclasses import dataclass + + +@dataclass +class BranchRule: + attribute: str + comparison: str + value: str + + @staticmethod + def from_dict(data: dict) -> "BranchRule": + return BranchRule( + attribute=data["attribute"], + comparison=data["comparison"], + value=data["value"] + ) + + def to_dict(self): + return { + "attribute": self.attribute, + "comparison": self.comparison, + "value": self.value + } + + +@dataclass +class BranchRules: + id: str + rules: list[list[BranchRule]] + + @staticmethod + def from_dict(data: dict) -> "BranchRules": + return BranchRules( + id=data["id"], + rules=[ + [BranchRule.from_dict(rule) for rule in rule_set] + for rule_set in data["rules"] + ] + ) + + def to_dict(self): + return { + "id": self.id, + "rules": [[rule.to_dict() for rule in rule_set] for rule_set in self.rules] + } diff --git a/src/simod/case_attributes/discovery.py b/src/simod/case_attributes/discovery.py deleted file mode 100644 index 463cc81f..00000000 --- a/src/simod/case_attributes/discovery.py +++ /dev/null @@ -1,28 +0,0 @@ -import pandas as pd -from pix_framework.discovery.case_attribute.discovery import discover_case_attributes as discover_case_attributes_ -from pix_framework.io.event_log import EventLogIDs - -from simod.case_attributes.types import CaseAttribute - - -def discover_case_attributes(log: pd.DataFrame, log_ids: EventLogIDs) -> list[CaseAttribute]: - """ - Discover case attributes from a log ignoring common non-case columns. - """ - attributes = discover_case_attributes_( - event_log=log, - log_ids=log_ids, - avoid_columns=[ - log_ids.case, - log_ids.activity, - log_ids.enabled_time, - log_ids.start_time, - log_ids.end_time, - log_ids.resource, - ], - confidence_threshold=0.95, - ) - - attributes = list(map(CaseAttribute.from_dict, attributes)) - - return attributes diff --git a/src/simod/case_attributes/types.py b/src/simod/case_attributes/types.py deleted file mode 100644 index 7269a410..00000000 --- a/src/simod/case_attributes/types.py +++ /dev/null @@ -1,40 +0,0 @@ -from dataclasses import dataclass -from enum import Enum -from typing import Union - - -class CaseAttributeType(Enum): - DISCRETE = "discrete" - CONTINUOUS = "continuous" - - -@dataclass -class CaseAttribute: - name: str - type: CaseAttributeType - values: Union[list[dict], dict[str, float]] - - @staticmethod - def from_dict(case_attribute: dict) -> "CaseAttribute": - """ - Creates a CaseAttribute object from a dictionary returned by case_attribute_discovery.discovery. 
- """ - return CaseAttribute( - name=case_attribute["name"], - type=CaseAttributeType(case_attribute["type"]), - values=case_attribute["values"], - ) - - def to_prosimos(self) -> dict: - if self.type == CaseAttributeType.CONTINUOUS: - return { - "name": self.name, - "type": self.type.value, - "values": self.values, - } - else: - return { - "name": self.name, - "type": self.type.value, - "values": [{"key": value["key"], "value": value["probability"]} for value in self.values], - } diff --git a/src/simod/cli.py b/src/simod/cli.py index 670e6862..d161c13c 100644 --- a/src/simod/cli.py +++ b/src/simod/cli.py @@ -7,6 +7,7 @@ from pix_framework.filesystem.file_manager import get_random_folder_id from simod.event_log.event_log import EventLog +from simod.runtime_meter import RuntimeMeter from simod.settings.simod_settings import SimodSettings from simod.simod import Simod @@ -87,7 +88,11 @@ def main( output = output if output is not None else (Path.cwd() / "outputs" / get_random_folder_id()).absolute() + # To measure the runtime of each stage + runtimes = RuntimeMeter() + # Read and preprocess event log + runtimes.start(RuntimeMeter.PREPROCESSING) event_log = EventLog.from_path( log_ids=settings.common.log_ids, train_log_path=settings.common.train_log_path, @@ -95,10 +100,11 @@ def main( preprocessing_settings=settings.preprocessing, need_test_partition=settings.common.perform_final_evaluation, ) + runtimes.stop(RuntimeMeter.PREPROCESSING) # Instantiate and run Simod simod = Simod(settings, event_log=event_log, output_dir=output) - simod.run() + simod.run(runtimes=runtimes) if __name__ == "__main__": diff --git a/src/simod/control_flow/discovery.py b/src/simod/control_flow/discovery.py index 8d262001..4fda675e 100644 --- a/src/simod/control_flow/discovery.py +++ b/src/simod/control_flow/discovery.py @@ -1,7 +1,10 @@ +import uuid from dataclasses import dataclass from pathlib import Path from typing import List, Tuple +from lxml import etree + from simod.cli_formatter import print_step from simod.control_flow.settings import HyperoptIterationParams from simod.settings.control_flow_settings import ( @@ -9,18 +12,31 @@ ) from simod.utilities import execute_external_command, is_windows -split_miner_jar_path: Path = Path(__file__).parent / "lib/splitminer-1.6-all.jar" +split_miner_jar_path: Path = Path(__file__).parent / "lib/split-miner-1.7.1-all.jar" +bpmn_layout_jar_path: Path = Path(__file__).parent / "lib/bpmn-layout-1.0.6-jar-with-dependencies.jar" def discover_process_model(log_path: Path, output_model_path: Path, params: HyperoptIterationParams): """ - Run the process model discovery algorithm specified in the [params] to discover - a process model in [output_model_path] from the (XES) event log in [log_path]. + Runs the specified process model discovery algorithm to extract a process model + from an event log and save it to the given output path. - :param log_path: Path to the event log in XES format for the Split Miner algorithms. - :param output_model_path: Path to write the discovered process model. - :param params: configuration class specifying the process model discovery algorithm and its parameters. - """ + This function supports Split Miner V1 and Split Miner V2 as discovery algorithms. + + Parameters + ---------- + log_path : :class:`pathlib.Path` + Path to the event log in XES format, required for Split Miner algorithms. + output_model_path : :class:`pathlib.Path` + Path to save the discovered process model. 
+ params : :class:`~simod.resource_model.settings.HyperoptIterationParams` + Configuration containing the process model discovery algorithm and its parameters. + + Raises + ------ + ValueError + If the specified process model discovery algorithm is unknown. + """ if params.mining_algorithm is ProcessModelDiscoveryAlgorithm.SPLIT_MINER_V1: discover_process_model_with_split_miner_v1( SplitMinerV1Settings( @@ -36,8 +52,101 @@ def discover_process_model(log_path: Path, output_model_path: Path, params: Hype discover_process_model_with_split_miner_v2(SplitMinerV2Settings(log_path, output_model_path, params.epsilon)) else: raise ValueError(f"Unknown process model discovery algorithm: {params.mining_algorithm}") - + # Assert that model file was created assert output_model_path.exists(), f"Error trying to discover the process model in '{output_model_path}'." + # Post-process to transform implicit activity self-loops into explicit (modeled through gateways) + print(f"Post-processing discovered process model to explicitly model self-loops through gateways.") + post_process_bpmn_self_loops(output_model_path) + + +def _generate_node_id(): + return f"node_{uuid.uuid4()}" + + +def post_process_bpmn_self_loops(bpmn_model_path: Path): + tree = etree.parse(bpmn_model_path) + root = tree.getroot() + nsmap = root.nsmap + + bpmn_namespace = nsmap.get(None, "http://www.omg.org/spec/BPMN/20100524/MODEL") + ns = {"bpmn": bpmn_namespace} + + tasks = root.findall(".//bpmn:task", namespaces=ns) + sequence_flows = root.findall(".//bpmn:sequenceFlow", namespaces=ns) + process = root.find(".//bpmn:process", namespaces=ns) + + for task in tasks: + loop_characteristics = task.find("bpmn:standardLoopCharacteristics", namespaces=ns) + if loop_characteristics is not None: + # Task with self-loop + task_id = task.get("id") + # Remove loop characteristics + task.remove(loop_characteristics) + # Generate unique IDs + gt1_id = _generate_node_id() + gt2_id = _generate_node_id() + sf1_id = _generate_node_id() + sf2_id = _generate_node_id() + sf3_id = _generate_node_id() + # Create exclusive gateways with attributes + gt1 = etree.Element("{%s}exclusiveGateway" % bpmn_namespace, id=gt1_id, gatewayDirection="Converging") + gt2 = etree.Element("{%s}exclusiveGateway" % bpmn_namespace, id=gt2_id, gatewayDirection="Diverging") + process.append(gt1) + process.append(gt2) + # Modify existing sequence flows + incoming_gt1_1, outgoing_gt2_1 = None, None + for sf in sequence_flows: + if sf.get("targetRef") == task_id: + sf.set("targetRef", gt1_id) + incoming_gt1_1 = etree.Element("{%s}incoming" % bpmn_namespace) + incoming_gt1_1.text = sf.get("id") + if sf.get("sourceRef") == task_id: + sf.set("sourceRef", gt2_id) + outgoing_gt2_1 = etree.Element("{%s}outgoing" % bpmn_namespace) + outgoing_gt2_1.text = sf.get("id") + # Create new sequence flows + sf1 = etree.Element("{%s}sequenceFlow" % bpmn_namespace, id=sf1_id, sourceRef=gt1_id, targetRef=task_id) + process.append(sf1) + sf2 = etree.Element("{%s}sequenceFlow" % bpmn_namespace, id=sf2_id, sourceRef=task_id, targetRef=gt2_id) + process.append(sf2) + sf3 = etree.Element("{%s}sequenceFlow" % bpmn_namespace, id=sf3_id, sourceRef=gt2_id, targetRef=gt1_id) + process.append(sf3) + # Add incoming and outgoing elements for gateways + outgoing_gt1_1 = etree.Element("{%s}outgoing" % bpmn_namespace) + outgoing_gt1_1.text = sf1_id + incoming_gt1_2 = etree.Element("{%s}incoming" % bpmn_namespace) + incoming_gt1_2.text = sf3_id + incoming_gt2_1 = etree.Element("{%s}incoming" % bpmn_namespace) + 
incoming_gt2_1.text = sf2_id + outgoing_gt2_2 = etree.Element("{%s}outgoing" % bpmn_namespace) + outgoing_gt2_2.text = sf3_id + gt1.append(incoming_gt1_1) + gt1.append(incoming_gt1_2) + gt1.append(outgoing_gt1_1) + gt2.append(incoming_gt2_1) + gt2.append(outgoing_gt2_1) + gt2.append(outgoing_gt2_2) + # Write to file + tree.write(bpmn_model_path, xml_declaration=True, encoding="UTF-8", pretty_print=True) + + +def add_bpmn_diagram_to_model(bpmn_model_path: Path): + """ + Add BPMN diagram to the control flow of the existing BPMN model using the hierarchical layout algorithm. + This function overwrites the existing BPMN model file. + + :param bpmn_model_path: + :return: None + """ + global bpmn_layout_jar_path + + if is_windows(): + args = ["java", "-jar", '"' + str(bpmn_layout_jar_path) + '"', '"' + str(bpmn_model_path) + '"'] + else: + args = ["java", "-jar", str(bpmn_layout_jar_path), str(bpmn_model_path)] + + print_step(f"Adding BPMN diagram to the model: {args}") + execute_external_command(args) @dataclass @@ -122,11 +231,11 @@ def discover_process_model_with_split_miner_v2(settings: SplitMinerV2Settings): def _prepare_split_miner_params( - split_miner: Path, - log_path: Path, - output_model_path: Path, - strip_output_suffix: bool = True, - headless: bool = True, + split_miner: Path, + log_path: Path, + output_model_path: Path, + strip_output_suffix: bool = True, + headless: bool = True, ) -> Tuple[List[str], str, str, str]: if is_windows(): # Windows: ';' as separator and escape string with '"' diff --git a/src/simod/control_flow/lib/bpmn-layout-1.0.6-jar-with-dependencies.jar b/src/simod/control_flow/lib/bpmn-layout-1.0.6-jar-with-dependencies.jar new file mode 100644 index 00000000..fd8db4f6 Binary files /dev/null and b/src/simod/control_flow/lib/bpmn-layout-1.0.6-jar-with-dependencies.jar differ diff --git a/src/simod/control_flow/lib/splitminer-1.6-all.jar b/src/simod/control_flow/lib/split-miner-1.7.1-all.jar similarity index 87% rename from src/simod/control_flow/lib/splitminer-1.6-all.jar rename to src/simod/control_flow/lib/split-miner-1.7.1-all.jar index 2c8ce6a4..d0fdf1ac 100644 Binary files a/src/simod/control_flow/lib/splitminer-1.6-all.jar and b/src/simod/control_flow/lib/split-miner-1.7.1-all.jar differ diff --git a/src/simod/control_flow/optimizer.py b/src/simod/control_flow/optimizer.py index 43e15451..b71fec1b 100644 --- a/src/simod/control_flow/optimizer.py +++ b/src/simod/control_flow/optimizer.py @@ -12,6 +12,8 @@ GatewayProbabilitiesDiscoveryMethod, compute_gateway_probabilities, ) +from simod.branch_rules.discovery import discover_branch_rules, map_branch_rules_to_flows +from simod.branch_rules.types import BranchRules from pix_framework.filesystem.file_manager import create_folder, get_random_folder_id, remove_asset from pix_framework.io.bpm_graph import BPMNGraph @@ -26,6 +28,37 @@ class ControlFlowOptimizer: + """ + Optimizes the control-flow of a business process model using hyperparameter optimization. + + This class performs iterative optimization to refine the structure of a process model + and discover optimal gateway probabilities. It evaluates different configurations to + improve the process model based on a given metric. + + The search space is built based on the parameters ranges in [settings]. + + Attributes + ---------- + event_log : :class:`EventLog` + Event log containing train and validation partitions. 
+ initial_bps_model : :class:`BPSModel` + Business process simulation (BPS) model to use as a base, by replacing its control-flow model + with the discovered one in each iteration. + settings : :class:`ControlFlowSettings` + Configuration settings to build the search space for the optimization process. + base_directory : :class:`pathlib.Path` + Root directory where output files will be stored. + best_bps_model : :class:`BPSModel`, optional + Best discovered BPS model after the optimization process. + evaluation_measurements : :class:`pandas.DataFrame` + Quality measures recorded for each hyperopt iteration. + + Notes + ----- + - If no process model is provided, a discovery method will be used. + - Optimization is performed using TPE-hyperparameter optimization. + """ + # Event log with train/validation partitions event_log: EventLog # BPS model taken as starting point @@ -58,8 +91,10 @@ def __init__(self, event_log: EventLog, bps_model: BPSModel, settings: ControlFl # Not provided, create path to best discovered model self._need_to_discover_model = True # Export training log (XES format) for SplitMiner - self._xes_train_log_path = self.base_directory / (self.event_log.process_name + ".xes") - self.event_log.train_to_xes(self._xes_train_log_path) + self._xes_train_both_timestamps_log_path = self.base_directory / (self.event_log.process_name + ".xes") + self.event_log.train_to_xes(self._xes_train_both_timestamps_log_path) + self._xes_train_only_end_log_path = self.base_directory / (self.event_log.process_name + "_only_end.xes") + self.event_log.train_to_xes(self._xes_train_only_end_log_path, only_complete_events=True) else: # Process model provided self._need_to_discover_model = False @@ -75,15 +110,16 @@ def __init__(self, event_log: EventLog, bps_model: BPSModel, settings: ControlFl "prioritize_parallelism", "replace_or_joins", "output_dir", + "f_score" ] ) # Instantiate trials for hyper-optimization process self._bayes_trials = Trials() + self.iteration_index = 0 def _hyperopt_iteration(self, hyperopt_iteration_dict: dict): # Report new iteration - print_subsection("Control-flow optimization iteration") - + print_subsection(f"Control-flow optimization iteration {self.iteration_index}") # Initialize status status = STATUS_OK # Create folder for this iteration @@ -122,6 +158,20 @@ def _hyperopt_iteration(self, hyperopt_iteration_dict: dict): hyperopt_iteration_params.gateway_probabilities_method, ) + # Discover branch rules + if self.settings.discover_branch_rules: + status, current_bps_model.branch_rules = hyperopt_step( + status, + self._discover_branch_rules, + current_bps_model.process_model, + hyperopt_iteration_params + ) + + current_bps_model.gateway_probabilities = map_branch_rules_to_flows( + current_bps_model.gateway_probabilities, + current_bps_model.branch_rules + ) + # Simulate candidate and evaluate its quality status, evaluation_measurements = hyperopt_step( status, self._simulate_bps_model, current_bps_model, hyperopt_iteration_params.output_dir @@ -133,17 +183,33 @@ def _hyperopt_iteration(self, hyperopt_iteration_dict: dict): ) print(f"Control-flow optimization iteration response: {response}") - # Save the quality of this evaluation + # Save the quality of this evaluation and increase iteration index self._process_measurements(hyperopt_iteration_params, status, evaluation_measurements) + self.iteration_index += 1 return response def run(self) -> HyperoptIterationParams: """ - Run Control-Flow & Gateway Probabilities discovery - :return: The parameters of the best iteration 
of the optimization process. + Runs the control-flow optimization process. + + This method defines the hyperparameter search space and executes a + TPE-hyperparameter optimization process to discover the best control-flow model. + It evaluates multiple iterations and selects the best-performing set of parameters + for its discovery. + + Returns + ------- + :class:`~simod.control_flow.settings.HyperoptIterationParams` + The parameters of the best iteration of the optimization process. + + Raises + ------ + AssertionError + If the best discovered process model path does not exist after optimization. """ # Define search space + self.iteration_index = 0 search_space = self._define_search_space(settings=self.settings) # Launch optimization process @@ -240,6 +306,12 @@ def _define_search_space(self, settings: ControlFlowSettings) -> dict: else: space["epsilon"] = settings.epsilon + if settings.discover_branch_rules and settings.f_score: + if isinstance(settings.f_score, tuple): + space["f_score"] = hp.uniform("f_score", settings.f_score[0], settings.f_score[1]) + else: + space["f_score"] = settings.f_score + return space def cleanup(self): @@ -290,9 +362,22 @@ def _process_measurements(self, params: HyperoptIterationParams, status, evaluat def _discover_process_model(self, params: HyperoptIterationParams) -> Path: print_step(f"Discovering Process Model with {params.mining_algorithm.value}") output_model_path = get_process_model_path(params.output_dir, self.event_log.process_name) - discover_process_model(self._xes_train_log_path, output_model_path, params) + if params.mining_algorithm is ProcessModelDiscoveryAlgorithm.SPLIT_MINER_V1: + discover_process_model(self._xes_train_only_end_log_path, output_model_path, params) + else: + discover_process_model(self._xes_train_both_timestamps_log_path, output_model_path, params) return output_model_path + def _discover_branch_rules(self, process_model: Path, params: HyperoptIterationParams) -> List[BranchRules]: + print_step(f"Discovering branch rules with f_score {params.f_score}") + bpmn_graph = BPMNGraph.from_bpmn_path(process_model) + return discover_branch_rules( + bpmn_graph, + self.event_log.train_partition, + self.event_log.log_ids, + f_score=params.f_score + ) + def _discover_gateway_probabilities( self, process_model: Path, gateway_probabilities_method: GatewayProbabilitiesDiscoveryMethod ) -> List[GatewayProbabilities]: @@ -309,7 +394,6 @@ def _simulate_bps_model(self, bps_model: BPSModel, output_dir: Path) -> List[dic bps_model.replace_activity_names_with_ids() json_parameters_path = bps_model.to_json(output_dir, self.event_log.process_name) - evaluation_measures = simulate_and_evaluate( process_model_path=bps_model.process_model, parameters_path=json_parameters_path, diff --git a/src/simod/control_flow/settings.py b/src/simod/control_flow/settings.py index ca6e399d..a82c7d4f 100644 --- a/src/simod/control_flow/settings.py +++ b/src/simod/control_flow/settings.py @@ -10,7 +10,42 @@ @dataclass class HyperoptIterationParams: - """Parameters for a single iteration of the Control-Flow optimization process.""" + """ + Parameters for a single iteration of the Control-Flow optimization process. + + This class defines the configuration settings used during an iteration of the + optimization process, including process model discovery, optimization metric, + and gateway probability discovery. + + Attributes + ---------- + output_dir : :class:`pathlib.Path` + Directory where all output files for the current iteration will be stored. 
+ provided_model_path : :class:`pathlib.Path`, optional + Path to a provided BPMN model, if available (no discovery needed). + project_name : str + Name of the project, mainly used for file naming. + optimization_metric : :class:`Metric` + Metric used to evaluate the candidate process model in this iteration. + gateway_probabilities_method : :class:`GatewayProbabilitiesDiscoveryMethod` + Method for discovering gateway probabilities. + mining_algorithm : :class:`ProcessModelDiscoveryAlgorithm` + Algorithm used for process model discovery, if necessary. + epsilon : float, optional + Number of concurrent relations between events to be captured in the discovery algorithm (between 0.0 and 1.0). + eta : float, optional + Threshold for filtering the incoming and outgoing edges in the discovery algorithm (between 0.0 and 1.0). + replace_or_joins : bool, optional + Whether to replace non-trivial OR joins in the discovered model. + prioritize_parallelism : bool, optional + Whether to prioritize parallelism or loops for model discovery. + f_score : float], default=Non, optional + Minimum f-score value to consider the discovered data-aware branching rules. + + Notes + ----- + - If `provided_model_path` is specified, process model discovery will be skipped. + """ # General settings output_dir: Path # Directory where to output all the files of the current iteration @@ -26,9 +61,20 @@ class HyperoptIterationParams: eta: Optional[float] # Percentile for frequency threshold (eta) replace_or_joins: Optional[bool] # Should replace non-trivial OR joins prioritize_parallelism: Optional[bool] # Should prioritize parallelism on loops + f_score: Optional[float] = None # quality gateway for branch rules (f_score) def to_dict(self) -> dict: - """Returns a dictionary with the parameters for this run.""" + """ + Converts the instance into a dictionary representation of the optimization parameters. + + The returned dictionary is structured based on whether a process model needs + to be discovered or if a pre-existing model is provided. + + Returns + ------- + dict + A dictionary containing the optimization parameters for this iteration. 
+ """ optimization_parameters = { "output_dir": str(self.output_dir), "project_name": str(self.project_name), @@ -48,6 +94,10 @@ def to_dict(self) -> dict: else: optimization_parameters["provided_model_path"] = str(self.provided_model_path) + if self.f_score: + optimization_parameters["discover_branch_rules"] = True + optimization_parameters["f_score"] = self.f_score + return optimization_parameters @staticmethod @@ -75,6 +125,8 @@ def from_hyperopt_dict( elif mining_algorithm == ProcessModelDiscoveryAlgorithm.SPLIT_MINER_V2: epsilon = hyperopt_dict["epsilon"] + f_score = hyperopt_dict.get("f_score", None) + return HyperoptIterationParams( output_dir=output_dir, provided_model_path=provided_model_path, @@ -86,4 +138,5 @@ def from_hyperopt_dict( eta=eta, prioritize_parallelism=prioritize_parallelism, replace_or_joins=replace_or_joins, + f_score=f_score ) diff --git a/src/simod/data_attributes/__init__.py b/src/simod/data_attributes/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/src/simod/data_attributes/discovery.py b/src/simod/data_attributes/discovery.py new file mode 100644 index 00000000..9d7fe383 --- /dev/null +++ b/src/simod/data_attributes/discovery.py @@ -0,0 +1,31 @@ +import pandas as pd + +from simod.data_attributes.types import GlobalAttribute, CaseAttribute, EventAttribute + +from pix_framework.io.event_log import EventLogIDs +from pix_framework.discovery.attributes.attribute_discovery import discover_attributes + + +def discover_data_attributes(log: pd.DataFrame, log_ids: EventLogIDs) -> (list[CaseAttribute], list[GlobalAttribute], list[EventAttribute]): + """ + Discover data attributes from a log ignoring common non-case columns. + """ + attributes = discover_attributes( + event_log=log, + log_ids=log_ids, + avoid_columns=[ + log_ids.case, + log_ids.activity, + log_ids.enabled_time, + log_ids.start_time, + log_ids.end_time, + log_ids.resource, + ], + confidence_threshold=0.95, + ) + + global_attributes = list(map(GlobalAttribute.from_dict, attributes["global_attributes"])) + case_attributes = list(map(CaseAttribute.from_dict, attributes["case_attributes"])) + event_attributes = list(map(EventAttribute.from_dict, attributes["event_attributes"])) + + return global_attributes, case_attributes, event_attributes diff --git a/src/simod/data_attributes/types.py b/src/simod/data_attributes/types.py new file mode 100644 index 00000000..2bd1d094 --- /dev/null +++ b/src/simod/data_attributes/types.py @@ -0,0 +1,151 @@ +from dataclasses import dataclass +from enum import Enum +from typing import Union + + +class CaseAttributeType(Enum): + DISCRETE = "discrete" + CONTINUOUS = "continuous" + + +class GlobalAttributeType(Enum): + DISCRETE = "discrete" + CONTINUOUS = "continuous" + + +class EventAttributeType(Enum): + DISCRETE = "discrete" + CONTINUOUS = "continuous" + EXPRESSION = "expression" + DTREE = "dtree" + + +@dataclass +class CaseAttribute: + name: str + type: CaseAttributeType + values: Union[list[dict], dict[str, float]] + + @staticmethod + def from_dict(case_attribute: dict) -> "CaseAttribute": + """ + Creates a CaseAttribute object from a dictionary returned by data_attribute_discovery.discovery. 
+ """ + return CaseAttribute( + name=case_attribute["name"], + type=CaseAttributeType(case_attribute["type"]), + values=case_attribute["values"], + ) + + def to_prosimos(self) -> dict: + if self.type == CaseAttributeType.CONTINUOUS: + return { + "name": self.name, + "type": self.type.value, + "values": self.values, + } + else: + return { + "name": self.name, + "type": self.type.value, + "values": self.values + } + + +@dataclass +class GlobalAttribute: + name: str + type: GlobalAttributeType + values: Union[list[dict], dict[str, float]] + + @staticmethod + def from_dict(global_attribute: dict) -> "GlobalAttribute": + """ + Creates a GlobalAttribute object from a dictionary returned by data_attribute_discovery.discovery. + """ + return GlobalAttribute( + name=global_attribute["name"], + type=GlobalAttributeType(global_attribute["type"]), + values=global_attribute["values"], + ) + + def to_prosimos(self) -> dict: + if self.type == GlobalAttributeType.CONTINUOUS: + return { + "name": self.name, + "type": self.type.value, + "values": self.values, + } + else: + return { + "name": self.name, + "type": self.type.value, + "values": self.values + } + + +@dataclass +class EventAttributeDetails: + name: str + type: EventAttributeType + values: Union[list[dict[str, float]], dict[str, Union[str, list[dict[str, float]]]], str] + + @staticmethod + def from_dict(attribute: dict) -> "EventAttributeDetails": + """ + Creates an EventAttributeDetails object from a dictionary returned by data_attribute_discovery.discovery. + """ + return EventAttributeDetails( + name=attribute["name"], + type=EventAttributeType(attribute["type"]), + values=attribute["values"], + ) + + def to_prosimos(self) -> dict: + if self.type == EventAttributeType.CONTINUOUS: + return { + "name": self.name, + "type": self.type.value, + "values": self.values, + } + elif self.type == EventAttributeType.DISCRETE: + return { + "name": self.name, + "type": self.type.value, + "values": self.values + + } + elif self.type == EventAttributeType.EXPRESSION: + return { + "name": self.name, + "type": self.type.value, + "values": self.values, + } + elif self.type == EventAttributeType.DTREE: + return { + "name": self.name, + "type": self.type.value, + "values": self.values + } + + +@dataclass +class EventAttribute: + event_id: str + attributes: list[EventAttributeDetails] + + @staticmethod + def from_dict(event_attribute: dict) -> "EventAttribute": + """ + Creates an EventAttribute object from a dictionary. 
+ """ + return EventAttribute( + event_id=event_attribute["event_id"], + attributes=[EventAttributeDetails.from_dict(attr) for attr in event_attribute["attributes"]], + ) + + def to_prosimos(self) -> dict: + return { + "event_id": self.event_id, + "attributes": [attr.to_prosimos() for attr in self.attributes], + } diff --git a/src/simod/event_log/event_log.py b/src/simod/event_log/event_log.py index c15fb96c..87d0267d 100644 --- a/src/simod/event_log/event_log.py +++ b/src/simod/event_log/event_log.py @@ -2,20 +2,38 @@ from typing import Optional import pandas as pd +import pendulum +from openxes_cli.lib import csv_to_xes from pix_framework.io.event_log import DEFAULT_XES_IDS, EventLogIDs, read_csv_log from pix_framework.io.event_log import split_log_training_validation_trace_wise as split_log from .preprocessor import Preprocessor -from .utilities import convert_df_to_xes from ..settings.preprocessing_settings import PreprocessingSettings from ..utilities import get_process_name_from_log_path class EventLog: """ - Event log class that contains the log and its splits, and column names. - - Use static methods to create an EventLog from a path in other ways that are implemented. + Represents an event log containing process execution data and its partitioned subsets. + + This class provides functionality for storing and managing an event log, including + training, validation, and test partitions. It also supports exporting logs to XES format + and loading event logs from files. + + Attributes + ---------- + train_partition : :class:`pandas.DataFrame` + DataFrame containing the training partition of the event log. + validation_partition : :class:`pandas.DataFrame` + DataFrame containing the validation partition of the event log. + train_validation_partition : :class:`pandas.DataFrame` + DataFrame containing both training and validation data. + test_partition : :class:`pandas.DataFrame` + DataFrame containing the test partition of the event log, if available. + log_ids : :class:`EventLogIDs` + Identifiers for mapping column names in the event log. + process_name : str + The name of the business process associated with the event log, primarily used for file naming. """ train_partition: pd.DataFrame @@ -56,7 +74,34 @@ def from_path( split_ratio: float = 0.8, ) -> "EventLog": """ - Loads an event log from a file and does the log split for training, validation, and test. + Loads an event log from a file and performs partitioning into training, validation, and test subsets. + + Parameters + ---------- + train_log_path : :class:`pathlib.Path` + Path to the training event log file (CSV or CSV.GZ). + log_ids : :class:`EventLogIDs` + Identifiers for mapping column names in the event log. + preprocessing_settings : :class:`PreprocessingSettings`, optional + Settings for preprocessing the event log. + need_test_partition : bool, optional + Whether to create a test partition if a separate test log is not provided. + process_name : str, optional + Name of the business process. If not provided, it is inferred from the file name. + test_log_path : :class:`pathlib.Path`, optional + Path to the test event log file (CSV or CSV.GZ). If provided, the test log is loaded separately. + split_ratio : float, default=0.8 + Ratio for splitting training and validation partitions. + + Returns + ------- + :class:`EventLog` + An instance of :class:`EventLog` with training, validation, and test partitions. + + Raises + ------ + ValueError + If the specified training or test log has an unsupported file extension. 
""" # Check event log prerequisites if not train_log_path.name.endswith(".csv") and not train_log_path.name.endswith(".csv.gz"): @@ -106,40 +151,89 @@ def from_path( process_name=get_process_name_from_log_path(train_log_path) if process_name is None else process_name, ) - def train_to_xes(self, path: Path): + def train_to_xes(self, path: Path, only_complete_events: bool = False): """ - Saves the training log to a XES file. + Saves the training log to an XES file. + + Parameters + ---------- + path : :class:`pathlib.Path` + Destination path for the XES file. + only_complete_events : bool + If true, generate XES file containing only events corresponding to + the end of each activity instance. """ - write_xes(self.train_partition, self.log_ids, path) + write_xes(self.train_partition, self.log_ids, path, only_complete_events=only_complete_events) - def validation_to_xes(self, path: Path): + def validation_to_xes(self, path: Path, only_complete_events: bool = False): """ - Saves the validation log to a XES file. + Saves the validation log to an XES file. + + Parameters + ---------- + path : :class:`pathlib.Path` + Destination path for the XES file. + only_complete_events : bool + If true, generate XES file containing only events corresponding to + the end of each activity instance. """ - write_xes(self.validation_partition, self.log_ids, path) + write_xes(self.validation_partition, self.log_ids, path, only_complete_events=only_complete_events) - def train_validation_to_xes(self, path: Path): + def train_validation_to_xes(self, path: Path, only_complete_events: bool = False): """ - Saves the validation log to a XES file. + Saves the combined training and validation log to an XES file. + + Parameters + ---------- + path : :class:`pathlib.Path` + Destination path for the XES file. + only_complete_events : bool + If true, generate XES file containing only events corresponding to + the end of each activity instance. """ - write_xes(self.train_validation_partition, self.log_ids, path) + write_xes(self.train_validation_partition, self.log_ids, path, only_complete_events=only_complete_events) - def test_to_xes(self, path: Path): + def test_to_xes(self, path: Path, only_complete_events: bool = False): """ - Saves the test log to a XES file. + Saves the test log to an XES file. + + Parameters + ---------- + path : :class:`pathlib.Path` + Destination path for the XES file. + only_complete_events : bool + If true, generate XES file containing only events corresponding to + the end of each activity instance. """ - write_xes(self.test_partition, self.log_ids, path) + write_xes(self.test_partition, self.log_ids, path, only_complete_events=only_complete_events) def write_xes( - log: pd.DataFrame, + event_log: pd.DataFrame, log_ids: EventLogIDs, output_path: Path, + only_complete_events: bool = False, ): """ Writes the log to a file in XES format. 
""" - df = log.rename( + # Copy event log to modify + df = event_log.copy() + # Transform timestamps to expected format + xes_datetime_format = "YYYY-MM-DDTHH:mm:ss.SSSZ" + # Start time + if only_complete_events: + df[log_ids.start_time] = "" + else: + df[log_ids.start_time] = df[log_ids.start_time].apply( + lambda x: pendulum.parse(x.isoformat()).format(xes_datetime_format) + ) + # End time + df[log_ids.end_time] = df[log_ids.end_time].apply( + lambda x: pendulum.parse(x.isoformat()).format(xes_datetime_format) + ) + # Rename columns to XES expected + df = df.rename( columns={ log_ids.activity: "concept:name", log_ids.case: "case:concept:name", @@ -147,18 +241,9 @@ def write_xes( log_ids.start_time: "start_timestamp", log_ids.end_time: "time:timestamp", } - ) - - df = df[ - [ - "case:concept:name", - "concept:name", - "org:resource", - "start_timestamp", - "time:timestamp", - ] - ] - + )[["case:concept:name", "concept:name", "org:resource", "start_timestamp", "time:timestamp", ]] + # Fill null values df.fillna("UNDEFINED", inplace=True) - - convert_df_to_xes(df, DEFAULT_XES_IDS, output_path) + # Write and convert + df.to_csv(output_path, index=False) + csv_to_xes(output_path, output_path) diff --git a/src/simod/event_log/preprocessor.py b/src/simod/event_log/preprocessor.py index 721f73ec..803417e4 100644 --- a/src/simod/event_log/preprocessor.py +++ b/src/simod/event_log/preprocessor.py @@ -28,7 +28,18 @@ class Settings: class Preprocessor: """ - Preprocessor executes event log pre-processing according to the `run()` arguments and returns the modified log back. + Handles event log pre-processing by executing various transformations + to estimate missing timestamps and adjust data for multitasking. + + This class modifies an input event log based on the specified settings + and returns the pre-processed log. + + Attributes + ---------- + log : :class:`pandas.DataFrame` + The event log stored as a DataFrame. + log_ids : :class:`EventLogIDs` + Identifiers for mapping column names in the event log. """ _log: pd.DataFrame @@ -46,14 +57,24 @@ def run( enable_time_concurrency_threshold: float = 0.75, ) -> pd.DataFrame: """ - Executes all pre-processing steps and updates the configuration if necessary. - - Start times discovery is always executed if the log does not contain the start time column. - - :param multitasking: Whether to adjust the timestamps for multitasking. - :param concurrency_thresholds: Thresholds for the Heuristics Miner to estimate start/enabled times. - :param enable_time_concurrency_threshold: Threshold for the Heuristics Miner to estimate enabled times. - :return: The pre-processed event log. + Executes event log pre-processing steps based on the specified parameters. + + This includes estimating missing start times, adjusting timestamps + for multitasking scenarios, and computing enabled times. + + Parameters + ---------- + multitasking : bool + Whether to adjust the timestamps for multitasking. + concurrency_thresholds : :class:`ConcurrencyThresholds`, optional + Thresholds for the Heuristics Miner to estimate start times. + enable_time_concurrency_threshold : float + Threshold for estimating enabled times. + + Returns + ------- + :class:`pandas.DataFrame` + The pre-processed event log. 
""" print_section("Pre-processing") diff --git a/src/simod/event_log/utilities.py b/src/simod/event_log/utilities.py deleted file mode 100644 index ed55678c..00000000 --- a/src/simod/event_log/utilities.py +++ /dev/null @@ -1,18 +0,0 @@ -from pathlib import Path - -import pandas as pd -import pendulum -from openxes_cli.lib import csv_to_xes -from pix_framework.io.event_log import EventLogIDs - - -def convert_df_to_xes(df: pd.DataFrame, log_ids: EventLogIDs, output_path: Path): - xes_datetime_format = "YYYY-MM-DDTHH:mm:ss.SSSZ" - df[log_ids.start_time] = df[log_ids.start_time].apply( - lambda x: pendulum.parse(x.isoformat()).format(xes_datetime_format) - ) - df[log_ids.end_time] = df[log_ids.end_time].apply( - lambda x: pendulum.parse(x.isoformat()).format(xes_datetime_format) - ) - df.to_csv(output_path, index=False) - csv_to_xes(output_path, output_path) diff --git a/src/simod/extraneous_delays/optimizer.py b/src/simod/extraneous_delays/optimizer.py index 844bf870..363be4bb 100644 --- a/src/simod/extraneous_delays/optimizer.py +++ b/src/simod/extraneous_delays/optimizer.py @@ -4,7 +4,6 @@ from extraneous_activity_delays.config import ( Configuration as ExtraneousActivityDelaysConfiguration, - DiscoveryMethod, TimerPlacement, SimulationEngine, SimulationModel, @@ -21,6 +20,24 @@ class ExtraneousDelaysOptimizer: + """ + Optimizer for the discovery of the extraneous delays model. + + This class performs either a direct discovery of the extraneous delays of the process, or launches an iterative + optimization that first discovers the extraneous delays and then adjusts their size to better reflect reality. + + Attributes + ---------- + event_log : :class:`~simod.event_log.event_log.EventLog` + The event log containing the train and validation data. + bps_model : :class:`~simod.simulation.parameters.BPS_model.BPSModel` + The business process simulation model to enhance with extraneous delays, including the BPMN representation. + settings : :class:`~simod.settings.extraneous_delays_settings.ExtraneousDelaysSettings` + Configuration settings for extraneous delay discovery. + base_directory : :class:`pathlib.Path` + Directory where output files will be stored. + """ + def __init__( self, event_log: EventLog, @@ -36,6 +53,19 @@ def __init__( assert self.bps_model.process_model is not None, "BPMN model is not specified." def run(self) -> List[ExtraneousDelay]: + """ + Executes the extraneous delay discovery process. + + This method configures the optimization process, applies either a direct enhancement + or a hyperparameter optimization approach to identify delays, and returns the best + detected delays as a list of `ExtraneousDelay` objects. + + Returns + ------- + List[:class:`~simod.extraneous_delays.types.ExtraneousDelay`] + A list of detected extraneous delays, each containing activity names, delay IDs, + and their corresponding duration distributions. + """ # Set-up configuration for extraneous delay discovery configuration = ExtraneousActivityDelaysConfiguration( log_ids=self.event_log.log_ids, diff --git a/src/simod/extraneous_delays/types.py b/src/simod/extraneous_delays/types.py index 81e57427..d73e083e 100644 --- a/src/simod/extraneous_delays/types.py +++ b/src/simod/extraneous_delays/types.py @@ -5,11 +5,40 @@ @dataclass class ExtraneousDelay: + """ + Represents an extraneous delay within a business process activity. 
+ + This class encapsulates the details of an identified extraneous delay, + including the affected activity, a unique delay identifier, and the + duration distribution of the delay. + + Attributes + ---------- + activity_name : str + The name of the activity where the extraneous delay occurs. + delay_id : str + A unique identifier for the delay event. + duration_distribution : :class:`DurationDistribution` + The statistical distribution representing the delay duration. + """ + activity_name: str delay_id: str duration_distribution: DurationDistribution def to_dict(self) -> dict: + """ + Converts the extraneous delay into a dictionary format. + + The dictionary representation is compatible with the Prosimos simulation + engine, containing activity details, a unique event identifier, and the + delay duration distribution. + + Returns + ------- + dict + A dictionary representation of the extraneous delay. + """ return { "activity": self.activity_name, "event_id": self.delay_id, @@ -17,6 +46,22 @@ def to_dict(self) -> dict: @staticmethod def from_dict(delay: dict) -> "ExtraneousDelay": + """ + Creates an `ExtraneousDelay` instance from a dictionary. + + This method reconstructs an `ExtraneousDelay` object from a dictionary + containing activity name, delay identifier, and duration distribution. + + Parameters + ---------- + delay : dict + A dictionary representation of an extraneous delay. + + Returns + ------- + :class:`ExtraneousDelay` + An instance of `ExtraneousDelay` with the extracted attributes. + """ return ExtraneousDelay( activity_name=delay["activity"], delay_id=delay["event_id"], diff --git a/src/simod/extraneous_delays/utilities.py b/src/simod/extraneous_delays/utilities.py index 1258cd86..84dc1b10 100644 --- a/src/simod/extraneous_delays/utilities.py +++ b/src/simod/extraneous_delays/utilities.py @@ -15,14 +15,33 @@ def add_timers_to_bpmn_model( timer_placement: TimerPlacement = TimerPlacement.BEFORE, ): """ - Enhance the BPMN model received by adding a timer previous (or after) to each activity denoted by [timers]. + Enhances a BPMN model by adding timers before or after specified activities. - :param process_model: Path to the process model (in BPMN format) to enhance. - :param delays: Dict with the name of each activity as key, and the timer configuration as value. - :param timer_placement: Option to consider the placement of the timers either BEFORE (the extraneous delay - is considered to be happening previously to an activity instance) or AFTER (the - extraneous delay is considered to be happening afterward an activity instance) each - activity. + This function modifies a given BPMN process model by inserting timers + before or after activities that have identified extraneous delays. + + Parameters + ---------- + process_model : :class:`pathlib.Path` + Path to the BPMN process model file to enhance. + delays : List[:class:`~simod.extraneous_delays.types.ExtraneousDelay`] + A list of extraneous delays, where each delay specifies an activity + and the corresponding timer configuration. + timer_placement : :class:`TimerPlacement`, optional + Specifies whether the timers should be placed **BEFORE** (indicating the + delay happens before an activity instance) or **AFTER** (indicating the + delay happens afterward). Default is `TimerPlacement.BEFORE`. + + Notes + ----- + - This function modifies the BPMN file in place. + - The method searches for tasks within the BPMN model and inserts timers + based on the provided delays. 
+ + Raises + ------ + ValueError + If the BPMN model file does not contain any tasks. """ if len(delays) > 0: # Extract process diff --git a/src/simod/metrics.py b/src/simod/metrics.py index 9aeacfd4..f66d7bc2 100644 --- a/src/simod/metrics.py +++ b/src/simod/metrics.py @@ -9,6 +9,7 @@ from log_distance_measures.circadian_event_distribution import ( circadian_event_distribution_distance, ) +from log_distance_measures.circadian_workforce_distribution import circadian_workforce_distribution_distance from log_distance_measures.config import AbsoluteTimestampType from log_distance_measures.control_flow_log_distance import control_flow_log_distance from log_distance_measures.cycle_time_distribution import ( @@ -47,6 +48,8 @@ def compute_metric( result = get_n_grams_distribution_distance(original_log, original_log_ids, simulated_log, simulated_log_ids, 3) elif metric is Metric.CIRCADIAN_EMD: result = get_circadian_emd(original_log, original_log_ids, simulated_log, simulated_log_ids) + elif metric is Metric.CIRCADIAN_WORKFORCE_EMD: + result = get_circadian_workforce_emd(original_log, original_log_ids, simulated_log, simulated_log_ids) elif metric is Metric.ARRIVAL_EMD: result = get_arrival_emd(original_log, original_log_ids, simulated_log, simulated_log_ids) elif metric is Metric.RELATIVE_EMD: @@ -122,6 +125,25 @@ def get_circadian_emd( return emd +def get_circadian_workforce_emd( + original_log: pd.DataFrame, + original_log_ids: EventLogIDs, + simulated_log: pd.DataFrame, + simulated_log_ids: EventLogIDs, +) -> float: + """ + Distance measure computing how different the histograms of the active resources of two event logs are, comparing the + average number of active resources recorded each weekday at each hour (e.g., Monday 10am). + """ + emd = circadian_workforce_distribution_distance( + original_log, + original_log_ids, + simulated_log, + simulated_log_ids, + ) + return emd + + def get_arrival_emd( original_log: pd.DataFrame, original_log_ids: EventLogIDs, diff --git a/src/simod/prioritization/discovery.py b/src/simod/prioritization/discovery.py index fe6222a3..e1853bc9 100644 --- a/src/simod/prioritization/discovery.py +++ b/src/simod/prioritization/discovery.py @@ -2,7 +2,7 @@ from pix_framework.discovery.prioritization.discovery import discover_priority_rules from pix_framework.io.event_log import EventLogIDs -from ..case_attributes.types import CaseAttribute +from ..data_attributes.types import CaseAttribute from .types import PrioritizationRule diff --git a/src/simod/resource_model/optimizer.py b/src/simod/resource_model/optimizer.py index dc32c422..63182a43 100644 --- a/src/simod/resource_model/optimizer.py +++ b/src/simod/resource_model/optimizer.py @@ -28,6 +28,37 @@ class ResourceModelOptimizer: + """ + Optimizes the resource model of a business process model using hyperparameter optimization. + + This class performs iterative optimization to refine the resource model + and discover optimal resource profiles and availability calendars. It + evaluates different configurations to improve the process model based + on a given metric. + + The search space is built based on the parameters ranges in [settings]. + + Attributes + ---------- + event_log : :class:`~simod.event_log.event_log.EventLog` + Event log containing train and validation partitions. + initial_bps_model : :class:`~simod.simulation.parameters.BPS_model.BPSModel` + Business process simulation (BPS) model to use as a base, by replacing its resource model + with the discovered one in each iteration. 
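# Editorial sketch of the new workforce-based distance in isolation; the log paths are hypothetical,
# and a value of 0 means the two logs show identical average weekday/hour staffing histograms.
from pathlib import Path

from pix_framework.io.event_log import DEFAULT_XES_IDS, read_csv_log
from simod.metrics import get_circadian_workforce_emd

log_ids = DEFAULT_XES_IDS
original = read_csv_log(Path("logs/original.csv"), log_ids)
simulated = read_csv_log(Path("logs/simulated.csv"), log_ids)

print(get_circadian_workforce_emd(original, log_ids, simulated, log_ids))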
+ settings : :class:`~simod.settings.resource_model_settings.ResourceModelSettings` + Configuration settings to build the search space for the optimization process. + base_directory : :class:`pathlib.Path` + Root directory where output files will be stored. + best_bps_model : :class:`~simod.simulation.parameters.BPS_model.BPSModel`, optional + Best discovered BPS model after the optimization process. + evaluation_measurements : :class:`pandas.DataFrame` + Quality measures recorded for each hyperopt iteration. + + Notes + ----- + - Optimization is performed using TPE-hyperparameter optimization. + """ + # Event log with train/validation partitions event_log: EventLog # BPS model taken as starting point @@ -74,6 +105,7 @@ def __init__( ) # Instantiate trials for hyper-optimization process self._bayes_trials = Trials() + self.iteration_index = 0 # Discover resource pools (performance purposes) if needed if self.settings.discovery_type is CalendarType.DIFFERENTIATED_BY_POOL: self._resource_pools = discover_pool_resource_profiles( @@ -103,7 +135,7 @@ def __init__( def _hyperopt_iteration(self, hyperopt_iteration_dict: dict): # Report new iteration - print_subsection("Resource Model optimization iteration") + print_subsection(f"Resource Model optimization iteration {self.iteration_index}") # Initialize status status = STATUS_OK @@ -127,6 +159,7 @@ def _hyperopt_iteration(self, hyperopt_iteration_dict: dict): status, current_bps_model.resource_model = hyperopt_step( status, self._discover_resource_model, hyperopt_iteration_params.calendar_discovery_params ) + current_bps_model.calendar_granularity = hyperopt_iteration_params.calendar_discovery_params.granularity if self.model_activities is not None: repair_with_missing_activities( @@ -149,8 +182,7 @@ def _hyperopt_iteration(self, hyperopt_iteration_dict: dict): status, self._simulate_bps_model, current_bps_model, - hyperopt_iteration_params.output_dir, - hyperopt_iteration_params.calendar_discovery_params.granularity, + hyperopt_iteration_params.output_dir ) # Define the response of this iteration @@ -159,21 +191,32 @@ def _hyperopt_iteration(self, hyperopt_iteration_dict: dict): ) print(f"Resource Model optimization iteration response: {response}") - # Save the quality of this evaluation + # Save the quality of this evaluation and increase iteration index self._process_measurements(hyperopt_iteration_params, status, evaluation_measurements) + self.iteration_index += 1 return response def run(self) -> HyperoptIterationParams: """ - Run Resource Model (resource profiles, resource calendars and activity-resource performance) discovery. - :return: The parameters of the best iteration of the optimization process. + Runs the resource model optimization process. + + This method defines the hyperparameter search space and executes a + TPE-hyperparameter optimization process to discover the best resource model. + It evaluates multiple iterations and selects the best-performing set of parameters + for its discovery. + + Returns + ------- + :class:`~simod.resource_model.settings.HyperoptIterationParams` + The parameters of the best iteration of the optimization process. 
""" # Define search space + self.iteration_index = 0 search_space = self._define_search_space(settings=self.settings) # Launch optimization process - best_hyperopt_params = fmin( + params_best_iteration = fmin( fn=self._hyperopt_iteration, space=search_space, algo=tpe.suggest, @@ -181,7 +224,7 @@ def run(self) -> HyperoptIterationParams: trials=self._bayes_trials, show_progressbar=False, ) - best_hyperopt_params = hyperopt.space_eval(search_space, best_hyperopt_params) + params_best_iteration = hyperopt.space_eval(search_space, params_best_iteration) # Process best results results = pd.DataFrame(self._bayes_trials.results).sort_values("loss") @@ -189,7 +232,7 @@ def run(self) -> HyperoptIterationParams: # Re-build parameters of the best hyperopt iteration best_hyperopt_parameters = HyperoptIterationParams.from_hyperopt_dict( - hyperopt_dict=best_hyperopt_params, + hyperopt_dict=params_best_iteration, optimization_metric=self.settings.optimization_metric, discovery_type=self.settings.discovery_type, output_dir=best_result["output_dir"], @@ -209,6 +252,7 @@ def run(self) -> HyperoptIterationParams: ) # Update resource model self.best_bps_model.resource_model = ResourceModel.from_dict(json.load(open(best_parameters_path, "r"))) + self.best_bps_model.calendar_granularity = best_hyperopt_parameters.calendar_discovery_params.granularity # Save evaluation measurements self.evaluation_measurements.sort_values("distance", ascending=True, inplace=True) @@ -334,10 +378,10 @@ def _define_response( # Return updated status and processed response return status, response - def _simulate_bps_model(self, bps_model: BPSModel, output_dir: Path, granularity: int) -> List[dict]: + def _simulate_bps_model(self, bps_model: BPSModel, output_dir: Path) -> List[dict]: bps_model.replace_activity_names_with_ids() - json_parameters_path = bps_model.to_json(output_dir, self.event_log.process_name, granule_size=granularity) + json_parameters_path = bps_model.to_json(output_dir, self.event_log.process_name) evaluation_measures = simulate_and_evaluate( process_model_path=bps_model.process_model, diff --git a/src/simod/resource_model/settings.py b/src/simod/resource_model/settings.py index ee4cb48f..5fa6ba03 100644 --- a/src/simod/resource_model/settings.py +++ b/src/simod/resource_model/settings.py @@ -12,7 +12,29 @@ @dataclass class HyperoptIterationParams: - """Parameters for a single iteration of the Resource Model optimization process.""" + """ + Parameters for a single iteration of the Resource Model optimization process. + + This class defines the necessary parameters for optimizing the resource model of the BPS model. + It includes the parameter values for the discovery of resource profiles, calendars, etc. + + Attributes + ---------- + output_dir : :class:`pathlib.Path` + Directory where all files of the current iteration will be stored. + process_model_path : :class:`pathlib.Path` + Path to the BPMN process model used for optimization. + project_name : str + Name of the project for file naming purposes. + optimization_metric : :class:`~simod.settings.common_settings.Metric` + Metric used to evaluate the quality of the current iteration's candidate. + calendar_discovery_params : :class:`CalendarDiscoveryParameters` + Parameters for the resource calendar (i.e., working schedules) discovery. + discover_prioritization_rules : bool, optional + Whether to attempt discovering prioritization rules (default: False). + discover_batching_rules : bool, optional + Whether to attempt discovering batching rules (default: False). 
+ """ # General settings output_dir: Path # Directory where to output all the files of the current iteration @@ -25,7 +47,14 @@ class HyperoptIterationParams: discover_batching_rules: bool = False # Whether to try to add batching or not def to_dict(self) -> dict: - """Returns a dictionary with the parameters for this run.""" + """ + Converts the parameters of the current iteration into a dictionary format. + + Returns + ------- + dict + A dictionary containing the iteration parameters. + """ # Save common params optimization_parameters = { "output_dir": str(self.output_dir), diff --git a/src/simod/runtime_meter.py b/src/simod/runtime_meter.py new file mode 100644 index 00000000..bb4d38fb --- /dev/null +++ b/src/simod/runtime_meter.py @@ -0,0 +1,34 @@ +import json +import timeit + + +class RuntimeMeter: + + runtime_start: dict + runtime_stop: dict + runtimes: dict + + TOTAL: str = "SIMOD_TOTAL_RUNTIME" + PREPROCESSING: str = "preprocessing" + INITIAL_MODEL: str = "discover-initial-BPS-model" + CONTROL_FLOW_MODEL: str = "optimize-control-flow-model" + RESOURCE_MODEL: str = "optimize-resource-model" + DATA_ATTRIBUTES_MODEL: str = "discover-data-attributes" + EXTRANEOUS_DELAYS: str = "discover-extraneous-delays" + FINAL_MODEL: str = "discover-final-BPS-model" + EVALUATION: str = "evaluate-final-BPS-model" + + def __init__(self): + self.runtime_start = dict() + self.runtime_stop = dict() + self.runtimes = dict() + + def start(self, stage_name: str): + self.runtime_start[stage_name] = timeit.default_timer() + + def stop(self, stage_name: str): + self.runtime_stop[stage_name] = timeit.default_timer() + self.runtimes[stage_name] = self.runtime_stop[stage_name] - self.runtime_start[stage_name] + + def to_json(self) -> str: + return json.dumps(self.runtimes) diff --git a/src/simod/settings/__init__.py b/src/simod/settings/__init__.py index c09f0f03..9aab2dd8 100644 --- a/src/simod/settings/__init__.py +++ b/src/simod/settings/__init__.py @@ -1,4 +1,5 @@ __all__ = [ + "common_settings", "control_flow_settings", "extraneous_delays_settings", "simod_settings", diff --git a/src/simod/settings/common_settings.py b/src/simod/settings/common_settings.py index 78114dc4..484f21b2 100644 --- a/src/simod/settings/common_settings.py +++ b/src/simod/settings/common_settings.py @@ -14,10 +14,36 @@ class Metric(str, Enum): + """ + Enum class storing the metrics used to evaluate the quality of a BPS model. + + Attributes + ---------- + DL : str + Control-flow Log Distance metric based in the Damerau-Levenshtein distance. + TWO_GRAM_DISTANCE : str + Two-gram distance metric. + THREE_GRAM_DISTANCE : str + Three-gram distance metric. + CIRCADIAN_EMD : str + Earth Mover's Distance (EMD) for circadian event distribution. + CIRCADIAN_WORKFORCE_EMD : str + EMD for circadian workforce distribution. + ARRIVAL_EMD : str + EMD for arrival event distribution. + RELATIVE_EMD : str + EMD for relative event distribution. + ABSOLUTE_EMD : str + EMD for absolute event distribution. + CYCLE_TIME_EMD : str + EMD for cycle time distribution. 
+ """ + DL = "dl" TWO_GRAM_DISTANCE = "two_gram_distance" THREE_GRAM_DISTANCE = "three_gram_distance" CIRCADIAN_EMD = "circadian_event_distribution" + CIRCADIAN_WORKFORCE_EMD = "circadian_workforce_distribution" ARRIVAL_EMD = "arrival_event_distribution" RELATIVE_EMD = "relative_event_distribution" ABSOLUTE_EMD = "absolute_event_distribution" @@ -25,6 +51,26 @@ class Metric(str, Enum): @classmethod def from_str(cls, value: Union[str, List[str]]) -> "Union[Metric, List[Metric]]": + """ + Converts a string (or list of strings) representing metric names into an instance (or list of instances) + of the :class:`Metric` enum. + + Parameters + ---------- + value : Union[str, List[str]] + A string representing a metric name or a list of metric names. + + Returns + ------- + Union[:class:`Metric`, List[:class:`Metric`]] + An instance of :class:`Metric` if a single string is provided, + or a list of :class:`Metric` instances if a list of strings is provided. + + Raises + ------ + ValueError + If the provided string does not match any metric name. + """ if isinstance(value, str): return Metric._from_str(value) elif isinstance(value, list): @@ -40,6 +86,8 @@ def _from_str(cls, value: str) -> "Metric": return cls.THREE_GRAM_DISTANCE elif value.lower() in ["circadian_event_distribution", "circadian_emd"]: return cls.CIRCADIAN_EMD + elif value.lower() in ["circadian_workforce_distribution", "workforce_emd", "circadian_workforce"]: + return cls.CIRCADIAN_WORKFORCE_EMD elif value.lower() in ["arrival_event_distribution", "arrival_emd"]: return cls.ARRIVAL_EMD elif value.lower() in ["relative_event_distribution", "relative_emd"]: @@ -66,6 +114,8 @@ def __str__(self): return "THREE_GRAM_DISTANCE" elif self == Metric.CIRCADIAN_EMD: return "CIRCADIAN_EVENT_DISTRIBUTION" + elif self == Metric.CIRCADIAN_WORKFORCE_EMD: + return "CIRCADIAN_WORKFORCE_DISTRIBUTION" elif self == Metric.ARRIVAL_EMD: return "ARRIVAL_EVENT_DISTRIBUTION" elif self == Metric.RELATIVE_EMD: @@ -78,6 +128,36 @@ def __str__(self): class CommonSettings(BaseModel): + """ + General configuration parameters of SIMOD and parameters common to all pipeline stages + + Attributes + ---------- + train_log_path : :class:`~pathlib.Path` + Path to the training log (the one used to discover the BPS model). + log_ids : :class:`EventLogIDs` + Dataclass storing the mapping between the column names in the CSV and their role (case_id, activity, etc.). + test_log_path : :class:`~pathlib.Path`, optional + Path to the event log to perform the final evaluation of the discovered BPS model (if desired). + process_model_path : :class:`~pathlib.Path`, optional + Path to the BPMN model for the control-flow (skip its discovery and use this one). + perform_final_evaluation : bool + Boolean indicating whether to perform the final evaluation of the discovered BPS model. + If true, either use the event log in [test_log_path] if specified, or split the training log to obtain a + testing set. + num_final_evaluations : int + Number of replications of the final evaluation to perform. + evaluation_metrics : list + List of :class:`Metric` evaluation metrics to use in the final evaluation. + use_observed_arrival_distribution : bool + Boolean indicating whether to use the distribution of observed case arrival times (true), or to discover a + probability distribution function to model them (false). + clean_intermediate_files : bool + Boolean indicating whether to delete all intermediate created files. 
+ discover_data_attributes : bool + Boolean indicating whether to discover data attributes and their creation/update rules. + + """ # Log & Model parameters train_log_path: Path = Path("default_path.csv") log_ids: EventLogIDs = PROSIMOS_LOG_IDS @@ -90,10 +170,26 @@ class CommonSettings(BaseModel): # Common config use_observed_arrival_distribution: bool = False clean_intermediate_files: bool = True - discover_case_attributes: bool = False + discover_data_attributes: bool = False @staticmethod def from_dict(config: dict, config_dir: Optional[Path] = None) -> "CommonSettings": + """ + Instantiates the SIMOD common configuration from a dictionary. + + Parameters + ---------- + config : dict + Dictionary with the configuration values for the SIMOD common parameters. + config_dir : :class:`~pathlib.Path`, optional + If the path to the event log(s) is specified in a relative manner, ``[config_dir]`` is used to complete + such paths. If ``None``, relative paths are complemented with the current directory. + + Returns + ------- + :class:`CommonSettings` + Instance of the SIMOD common configuration for the specified dictionary values. + """ base_files_dir = config_dir or Path.cwd() # Training log path @@ -140,6 +236,7 @@ def from_dict(config: dict, config_dir: Optional[Path] = None) -> "CommonSetting Metric.TWO_GRAM_DISTANCE, Metric.THREE_GRAM_DISTANCE, Metric.CIRCADIAN_EMD, + Metric.CIRCADIAN_WORKFORCE_EMD, Metric.ARRIVAL_EMD, Metric.RELATIVE_EMD, Metric.ABSOLUTE_EMD, @@ -159,7 +256,7 @@ def from_dict(config: dict, config_dir: Optional[Path] = None) -> "CommonSetting use_observed_arrival_distribution = config.get("use_observed_arrival_distribution", False) clean_up = config.get("clean_intermediate_files", True) - discover_case_attributes = config.get("discover_case_attributes", False) + discover_data_attributes = config.get("discover_data_attributes", False) return CommonSettings( train_log_path=train_log_path, @@ -171,10 +268,18 @@ def from_dict(config: dict, config_dir: Optional[Path] = None) -> "CommonSetting evaluation_metrics=metrics, use_observed_arrival_distribution=use_observed_arrival_distribution, clean_intermediate_files=clean_up, - discover_case_attributes=discover_case_attributes, + discover_data_attributes=discover_data_attributes, ) def to_dict(self) -> dict: + """ + Translate the common configuration stored in this instance into a dictionary. + + Returns + ------- + dict + Python dictionary storing this configuration. + """ return { "train_log_path": str(self.train_log_path), "test_log_path": str(self.test_log_path) if self.test_log_path is not None else None, @@ -184,5 +289,5 @@ def to_dict(self) -> dict: "evaluation_metrics": [str(metric) for metric in self.evaluation_metrics], "use_observed_arrival_distribution": self.use_observed_arrival_distribution, "clean_intermediate_files": self.clean_intermediate_files, - "discover_case_attributes": self.discover_case_attributes, + "discover_data_attributes": self.discover_data_attributes, } diff --git a/src/simod/settings/control_flow_settings.py b/src/simod/settings/control_flow_settings.py index f5585b80..c2c5f948 100644 --- a/src/simod/settings/control_flow_settings.py +++ b/src/simod/settings/control_flow_settings.py @@ -9,11 +9,46 @@ class ProcessModelDiscoveryAlgorithm(str, Enum): + """ + Enumeration of process model discovery algorithms. + + This enum defines the available algorithms for discovering process models from event logs. 
+ + Attributes + ---------- + SPLIT_MINER_V1 : str + Represents the first version of the Split Miner algorithm (`"sm1"`). + SPLIT_MINER_V2 : str + Represents the second version of the Split Miner algorithm (`"sm2"`). + """ + SPLIT_MINER_V1 = "sm1" SPLIT_MINER_V2 = "sm2" @classmethod def from_str(cls, value: str) -> "ProcessModelDiscoveryAlgorithm": + """ + Converts a string representation of a process model discovery algorithm + into the corresponding :class:`ProcessModelDiscoveryAlgorithm` instance. + + This method allows flexible input formats for each algorithm, supporting + multiple variations of their names. + + Parameters + ---------- + value : str + A string representing a process model discovery algorithm. + + Returns + ------- + :class:`ProcessModelDiscoveryAlgorithm` + The corresponding enum instance for the given algorithm name. + + Raises + ------ + ValueError + If the provided string does not match any known algorithm. + """ if value.lower() in [ "sm2", "splitminer2", @@ -51,7 +86,40 @@ def __str__(self): class ControlFlowSettings(BaseModel): """ - Control-flow optimization settings. + Control-flow model configuration parameters. + + This class defines the ranges of the configurable parameters for optimizing the control-flow + structure of a discovered process model, including metric selection, iteration settings, + and various discovery algorithm parameters. In each iteration of the optimization process, the + parameters are sampled from these values or ranges. + + Attributes + ---------- + optimization_metric : :class:`~simod.settings.common_settings.Metric` + The metric used to evaluate process model quality at each iteration of the optimization process (i.e., + loss function). + num_iterations : int + The number of optimization iterations to perform. + num_evaluations_per_iteration : int + The number of replications for the evaluations of each iteration. + gateway_probabilities : Union[:class:`GatewayProbabilitiesDiscoveryMethod`, List[:class:`GatewayProbabilitiesDiscoveryMethod`]] + Fixed method or list of methods to use in each iteration to discover gateway probabilities. + mining_algorithm : :class:`ProcessModelDiscoveryAlgorithm`, optional + The process model discovery algorithm to use. + epsilon : Union[float, Tuple[float, float]], optional + Fixed number or range for the number of concurrent relations between events to be captured in the discovery + algorithm (between 0.0 and 1.0). + eta : Union[float, Tuple[float, float]], optional + Fixed number or range for the threshold for filtering the incoming and outgoing edges in the discovery + algorithm (between 0.0 and 1.0). + replace_or_joins : Union[bool, List[bool]], optional + Fixed value or list for whether to replace non-trivial OR joins. + prioritize_parallelism : Union[bool, List[bool]], optional + Fixed value or list for whether to prioritize parallelism over loops. + discover_branch_rules : bool, optional + Whether to discover branch rules for gateways. + f_score : Union[float, Tuple[float, float]], optional + Fixed value or range for the minimum f-score value to consider the discovered data-aware branching rules. 
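# Editorial sketch: the aliases listed in `from_str` above all resolve to the same enum member.
from simod.settings.control_flow_settings import ProcessModelDiscoveryAlgorithm

assert ProcessModelDiscoveryAlgorithm.from_str("splitminer2") is ProcessModelDiscoveryAlgorithm.SPLIT_MINER_V2
print(ProcessModelDiscoveryAlgorithm.from_str("sm1").value)  # "sm1"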
""" optimization_metric: Metric = Metric.THREE_GRAM_DISTANCE @@ -63,11 +131,22 @@ class ControlFlowSettings(BaseModel): mining_algorithm: Optional[ProcessModelDiscoveryAlgorithm] = ProcessModelDiscoveryAlgorithm.SPLIT_MINER_V1 epsilon: Optional[Union[float, Tuple[float, float]]] = (0.0, 1.0) # parallelism threshold (epsilon) eta: Optional[Union[float, Tuple[float, float]]] = (0.0, 1.0) # percentile for frequency threshold (eta) + discover_branch_rules: Optional[bool] = False + f_score: Optional[Union[float, Tuple[float, float]]] = 0.7 # quality gateway for branch rules (f_score) replace_or_joins: Optional[Union[bool, List[bool]]] = False # should replace non-trivial OR joins prioritize_parallelism: Optional[Union[bool, List[bool]]] = False # should prioritize parallelism on loops @staticmethod def one_shot() -> "ControlFlowSettings": + """ + Instantiates the control-flow model configuration for the one-shot mode (i.e., no optimization, one single + iteration). + + Returns + ------- + :class:`ControlFlowSettings` + Instance of the control-flow model configuration for the one-shot mode. + """ return ControlFlowSettings( optimization_metric=Metric.THREE_GRAM_DISTANCE, num_iterations=1, @@ -82,6 +161,19 @@ def one_shot() -> "ControlFlowSettings": @staticmethod def from_dict(config: dict) -> "ControlFlowSettings": + """ + Instantiates the control-flow model configuration from a dictionary. + + Parameters + ---------- + config : dict + Dictionary with the configuration values for the control-flow model parameters. + + Returns + ------- + :class:`ControlFlowSettings` + Instance of the control-flow model configuration for the specified dictionary values. + """ optimization_metric = Metric.from_str(config.get("optimization_metric", "n_gram_distance")) num_iterations = config.get("num_iterations", 10) num_evaluations_per_iteration = config.get("num_evaluations_per_iteration", 3) @@ -101,6 +193,11 @@ def from_dict(config: dict) -> "ControlFlowSettings": else: raise ValueError(f"Unknown process model discovery algorithm: {mining_algorithm}") + discover_branch_rules = config.get("discover_branch_rules", False) + f_score = None + if discover_branch_rules: + f_score = parse_single_value_or_interval(config.get("f_score", (0.0, 1.0))) + return ControlFlowSettings( optimization_metric=optimization_metric, num_iterations=num_iterations, @@ -111,9 +208,19 @@ def from_dict(config: dict) -> "ControlFlowSettings": eta=eta, replace_or_joins=replace_or_joins, prioritize_parallelism=prioritize_parallelism, + discover_branch_rules=discover_branch_rules, + f_score=f_score ) def to_dict(self) -> dict: + """ + Translate the control-flow model configuration stored in this instance into a dictionary. + + Returns + ------- + dict + Python dictionary storing this configuration. 
+ """ dictionary = { "optimization_metric": self.optimization_metric.value, "num_iterations": self.num_iterations, @@ -135,4 +242,7 @@ def to_dict(self) -> dict: dictionary["replace_or_joins"] = self.replace_or_joins dictionary["prioritize_parallelism"] = self.prioritize_parallelism + if self.discover_branch_rules and self.f_score is not None: + dictionary["f_score"] = self.f_score + return dictionary diff --git a/src/simod/settings/extraneous_delays_settings.py b/src/simod/settings/extraneous_delays_settings.py index 4d3d0a20..ffed2d20 100644 --- a/src/simod/settings/extraneous_delays_settings.py +++ b/src/simod/settings/extraneous_delays_settings.py @@ -8,6 +8,27 @@ class ExtraneousDelaysSettings(BaseModel): + """ + Configuration settings for extraneous delay optimization. + + This class defines parameters for discovering and optimizing extraneous + delays in process simulations, including optimization metrics, discovery + methods, and iteration settings. In each iteration of the optimization process, the + parameters are sampled from these values or ranges. + + Attributes + ---------- + optimization_metric : :class:`ExtraneousDelaysOptimizationMetric` + The metric used to evaluate process model quality at each iteration of the optimization process (i.e., + loss function). + num_iterations : int + The number of optimization iterations to perform. + num_evaluations_per_iteration : int + The number of replications for the evaluations of each iteration. + discovery_method : :class:`ExtraneousDelaysDiscoveryMethod` + The method used to discover extraneous delays. + """ + optimization_metric: ExtraneousDelaysOptimizationMetric = ExtraneousDelaysOptimizationMetric.RELATIVE_EMD discovery_method: ExtraneousDelaysDiscoveryMethod = ExtraneousDelaysDiscoveryMethod.COMPLEX num_iterations: int = 1 @@ -15,6 +36,19 @@ class ExtraneousDelaysSettings(BaseModel): @staticmethod def from_dict(config: dict) -> "ExtraneousDelaysSettings": + """ + Instantiates the extraneous delays model configuration from a dictionary. + + Parameters + ---------- + config : dict + Dictionary with the configuration values for the extraneous delays model parameters. + + Returns + ------- + :class:`ExtraneousDelaysSettings` + Instance of the extraneous delays model configuration for the specified dictionary values. + """ optimization_metric = ExtraneousDelaysSettings._match_metric( config.get("optimization_metric", "relative_event_distribution") ) @@ -30,6 +64,14 @@ def from_dict(config: dict) -> "ExtraneousDelaysSettings": ) def to_dict(self) -> dict: + """ + Translate the extraneous delays model configuration stored in this instance into a dictionary. + + Returns + ------- + dict + Python dictionary storing this configuration. + """ return { "optimization_metric": str(self.optimization_metric.name), "discovery_method": str(self.discovery_method.name), diff --git a/src/simod/settings/preprocessing_settings.py b/src/simod/settings/preprocessing_settings.py index 7d2811a9..9d593c45 100644 --- a/src/simod/settings/preprocessing_settings.py +++ b/src/simod/settings/preprocessing_settings.py @@ -3,12 +3,45 @@ class PreprocessingSettings(BaseModel): + """ + Configuration for event log preprocessing. + + This class defines parameters used to preprocess event logs before + SIMOD main pipeline, including concurrency threshold settings + and multitasking options. + + Attributes + ---------- + multitasking : bool + Whether to preprocess the event log to handle resources working in more than one activity at a time. 
+ enable_time_concurrency_threshold : float + Threshold for determining concurrent events (for computing enabled) time based on the ratio of overlapping + w.r.t. their occurrences. Ranges from 0 to 1 (0.3 means that two activities will be considered concurrent + when their execution overlaps in 30% or more of the cases). + concurrency_thresholds : :class:`ConcurrencyThresholds` + Thresholds for the computation of the start times (if missing) based on the Heuristics miner algorithm, + including direct-follows (df), length-2-loops (l2l), and length-1-loops (l1l). + """ + multitasking: bool = False enable_time_concurrency_threshold: float = 0.5 concurrency_thresholds: ConcurrencyThresholds = ConcurrencyThresholds(df=0.75, l2l=0.9, l1l=0.9) @staticmethod def from_dict(config: dict) -> "PreprocessingSettings": + """ + Instantiates SIMOD preprocessing configuration from a dictionary. + + Parameters + ---------- + config : dict + Dictionary with the configuration values for the preprocessing parameters. + + Returns + ------- + :class:`PreprocessingSettings` + Instance of SIMOD preprocessing configuration for the specified dictionary values. + """ return PreprocessingSettings( multitasking=config.get("multitasking", False), enable_time_concurrency_threshold=config.get("enable_time_concurrency_threshold", 0.5), @@ -20,6 +53,14 @@ def from_dict(config: dict) -> "PreprocessingSettings": ) def to_dict(self) -> dict: + """ + Translate the preprocessing configuration stored in this instance into a dictionary. + + Returns + ------- + dict + Python dictionary storing this configuration. + """ return { "multitasking": self.multitasking, "enable_time_concurrency_threshold": self.enable_time_concurrency_threshold, diff --git a/src/simod/settings/resource_model_settings.py b/src/simod/settings/resource_model_settings.py index 9ca95e9c..23b8a942 100644 --- a/src/simod/settings/resource_model_settings.py +++ b/src/simod/settings/resource_model_settings.py @@ -9,7 +9,43 @@ class ResourceModelSettings(BaseModel): """ - Resource Model optimization settings. + Configuration settings for resource model optimization. + + This class defines parameters for optimizing resource allocation and + scheduling in process simulations, including optimization metrics, + discovery methods, and statistical thresholds. In each iteration of the optimization process, the + parameters are sampled from these values or ranges. + + Attributes + ---------- + optimization_metric : :class:`Metric` + The metric used to evaluate the quality of resource model optimization in each iteration (i.e., loss function). + num_iterations : int + The number of optimization iterations to perform. + num_evaluations_per_iteration : int + The number of replications for the evaluations of each iteration. + discovery_type : :class:`CalendarType` + Type of calendar discovery method used for resource modeling. + granularity : Union[int, Tuple[int, int]], optional + Fixed value or range for the time granularity for calendar discovery, measured in minutes per granule (e.g., + 60 will imply discovering resource calendars with slots of 1 hour). Must be divisible by 1,440 (number of + minutes in a day). + confidence : Union[float, Tuple[float, float]], optional + Fixed value or range for the minimum confidence of the intervals in the discovered calendar of a resource + or set of resources (between 0.0 and 1.0). 
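# Editorial sketch: only the keys handled above are passed; anything omitted falls back to the
# defaults declared on the class.
from simod.settings.preprocessing_settings import PreprocessingSettings

preprocessing = PreprocessingSettings.from_dict({
    "multitasking": False,
    "enable_time_concurrency_threshold": 0.75,
})
print(preprocessing.to_dict())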
+ support : Union[float, Tuple[float, float]], optional + Fixed value or range for the minimum support of the intervals in the discovered calendar of a resource or + set of resources (between 0.0 and 1.0). + participation : Union[float, Tuple[float, float]], optional + Fixed value or range for the participation of a resource in the process to discover a calendar for them, + gathered together otherwise (between 0.0 and 1.0). + fuzzy_angle : Union[float, Tuple[float, float]], optional + Fixed value or range for the angle of the fuzzy trapezoid when computing the availability probability for an + activity (angle from start to end). + discover_prioritization_rules : bool + Whether to discover case prioritization rules. + discover_batching_rules : bool + Whether to discover batching rules for resource allocation. """ optimization_metric: Metric = Metric.CIRCADIAN_EMD @@ -26,6 +62,15 @@ class ResourceModelSettings(BaseModel): @staticmethod def one_shot() -> "ResourceModelSettings": + """ + Instantiates the resource model configuration for the one-shot mode (i.e., no optimization, one single + iteration). + + Returns + ------- + :class:`ResourceModelSettings` + Instance of the resource model configuration for the one-shot mode. + """ return ResourceModelSettings( optimization_metric=Metric.CIRCADIAN_EMD, num_iterations=1, @@ -42,6 +87,19 @@ def one_shot() -> "ResourceModelSettings": @staticmethod def from_dict(config: dict) -> "ResourceModelSettings": + """ + Instantiates the resource model configuration from a dictionary. + + Parameters + ---------- + config : dict + Dictionary with the configuration values for the resource model parameters. + + Returns + ------- + :class:`ResourceModelSettings` + Instance of the resource model configuration for the specified dictionary values. + """ optimization_metric = Metric.from_str(config.get("optimization_metric", "circadian_emd")) num_iterations = config.get("num_iterations", 10) num_evaluations_per_iteration = config.get("num_evaluations_per_iteration", 3) @@ -63,7 +121,7 @@ def from_dict(config: dict) -> "ResourceModelSettings": support = parse_single_value_or_interval(resource_profiles.get("support", (0.01, 0.3))) participation = parse_single_value_or_interval(resource_profiles.get("participation", 0.4)) elif discovery_type == CalendarType.DIFFERENTIATED_BY_RESOURCE_FUZZY: - granularity = parse_single_value_or_interval(resource_profiles.get("granularity", (15, 60))) + granularity = parse_single_value_or_interval(resource_profiles.get("granularity", (60, 120))) fuzzy_angle = parse_single_value_or_interval(resource_profiles.get("fuzzy_angle", (0.1, 1.0))) return ResourceModelSettings( @@ -81,6 +139,14 @@ def from_dict(config: dict) -> "ResourceModelSettings": ) def to_dict(self) -> dict: + """ + Translate the resource model configuration stored in this instance into a dictionary. + + Returns + ------- + dict + Python dictionary storing this configuration. + """ # Parse general settings dictionary = { "optimization_metric": self.optimization_metric.value, diff --git a/src/simod/settings/simod_settings.py b/src/simod/settings/simod_settings.py index 10e9bc51..bb7b531e 100644 --- a/src/simod/settings/simod_settings.py +++ b/src/simod/settings/simod_settings.py @@ -18,8 +18,24 @@ class SimodSettings(BaseModel): """ - Simod configuration v4 with the settings for all the stages and optimizations. - If configuration is provided in v2, is transformed to v4. + SIMOD configuration v5 with the settings for all the stages and optimizations. 
+ If configuration is provided in v2 or v4, it is automatically translated to v5. + + Attributes + ---------- + common : :class:`~simod.settings.common_settings.CommonSettings` + General configuration parameters of SIMOD and parameters common to all pipeline stages. + preprocessing : :class:`~simod.settings.preprocessing_settings.PreprocessingSettings` + Configuration parameters for the preprocessing stage of SIMOD. + control_flow : :class:`~simod.settings.control_flow_settings.ControlFlowSettings` + Configuration parameters for the control-flow model discovery stage. + resource_model : :class:`~simod.settings.resource_model_settings.ResourceModelSettings` + Configuration parameters for the resource model discovery stage. + extraneous_activity_delays : :class:`~simod.settings.extraneous_delays_settings.ExtraneousDelaysSettings` + Configuration parameters for the extraneous delays model discovery stage. If not provided, the extraneous + delays are not discovered. + version : int + Version of the SIMOD configuration. """ common: CommonSettings = CommonSettings() @@ -27,13 +43,17 @@ class SimodSettings(BaseModel): control_flow: ControlFlowSettings = ControlFlowSettings() resource_model: ResourceModelSettings = ResourceModelSettings() extraneous_activity_delays: Union[ExtraneousDelaysSettings, None] = None - version: int = 4 + version: int = 5 @staticmethod def default() -> "SimodSettings": """ - Default configuration for Simod. Used mostly for testing purposes. Most of those settings should be discovered - by Simod automatically. + Default configuration for SIMOD. + + Returns + ------- + :class:`SimodSettings` + Instance of the SIMOD configuration with the default values. """ return SimodSettings( @@ -46,6 +66,15 @@ def default() -> "SimodSettings": @staticmethod def one_shot() -> "SimodSettings": + """ + Configuration for SIMOD one-shot. This mode runs SIMOD without optimizing each BPS model component (i.e., + directly discovering each BPS model component with default parameters). + + Returns + ------- + :class:`SimodSettings` + Instance of the SIMOD configuration for one-shot mode. + """ return SimodSettings( common=CommonSettings(), preprocessing=PreprocessingSettings(), @@ -56,11 +85,29 @@ def one_shot() -> "SimodSettings": @staticmethod def from_yaml(config: dict, config_dir: Optional[Path] = None) -> "SimodSettings": - assert config["version"] in [2, 4], "Configuration version must be 2 or 4" + """ + Instantiates the SIMOD configuration from a dictionary following the expected YAML structure. + + Parameters + ---------- + config : dict + Dictionary with the configuration values for each of the SIMOD elements. + config_dir : :class:`~pathlib.Path`, optional + If the path to the event log(s) is specified in a relative manner, ``[config_dir]`` is used to resolve + such paths. If ``None``, relative paths are resolved against the current working directory. + + Returns + ------- + :class:`SimodSettings` + Instance of the SIMOD configuration for the specified dictionary values.
+ """ + assert config["version"] in [2, 4, 5], "Configuration version must be 2, 4, or 5" # Transform from previous version to the latest if needed if config["version"] == 2: - config = _parse_legacy_config(config) + config = _parse_legacy_config_2(config) + elif config["version"] == 4: + config = _parse_legacy_config_4(config) # Get each of the settings components if present, default otherwise if "common" in config: @@ -107,11 +154,32 @@ def from_yaml(config: dict, config_dir: Optional[Path] = None) -> "SimodSettings @staticmethod def from_path(file_path: Path) -> "SimodSettings": + """ + Instantiates the SIMOD configuration from a YAML file. + + Parameters + ---------- + file_path : :class:`~pathlib.Path` + Path to the YAML file storing the configuration. + + Returns + ------- + :class:`SimodSettings` + Instance of the SIMOD configuration for the specified YAML file. + """ with file_path.open() as f: config = yaml.safe_load(f) return SimodSettings.from_yaml(config, config_dir=file_path.parent) def to_dict(self) -> dict: + """ + Translate the SIMOD configuration stored in this instance into a dictionary. + + Returns + ------- + dict + Python dictionary storing this configuration. + """ dictionary = { "version": self.version, "common": self.common.to_dict(), @@ -126,8 +194,16 @@ def to_dict(self) -> dict: def to_yaml(self, output_dir: Path) -> Path: """ Saves the configuration to a YAML file in the provided output directory. - :param output_dir: Output directory. - :return: None. + + Parameters + ---------- + output_dir : :class:`~pathlib.Path` + Path to the output directory where to store the YAML file with the configuration. + + Returns + ------- + :class:`~pathlib.Path` + Path to the YAML file with the configuration. """ data = yaml.dump(self.to_dict(), sort_keys=False) output_path = output_dir / "configuration.yaml" @@ -136,11 +212,11 @@ def to_yaml(self, output_dir: Path) -> Path: return output_path -def _parse_legacy_config(config: dict) -> dict: +def _parse_legacy_config_2(config: dict) -> dict: parsed_config = copy.deepcopy(config) if config["version"] == 2: - # Transform dictionary from version 2 to 4 - parsed_config["version"] = 4 + # Transform dictionary from version 2 to 5 + parsed_config["version"] = 5 # Common elements if "log_path" in parsed_config["common"]: parsed_config["common"]["train_log_path"] = parsed_config["common"]["log_path"] @@ -174,3 +250,16 @@ def _parse_legacy_config(config: dict) -> dict: del parsed_config["resource_model"]["case_arrival"] # Return parsed configuration return parsed_config + + +def _parse_legacy_config_4(config: dict) -> dict: + parsed_config = copy.deepcopy(config) + if config["version"] == 4: + # Transform dictionary from version 4 to 5 + parsed_config["version"] = 5 + # Common elements + if "discover_case_attributes" in parsed_config["common"]: + parsed_config["common"]["discover_data_attributes"] = parsed_config["common"]["discover_case_attributes"] + del parsed_config["common"]["discover_case_attributes"] + # Return parsed configuration + return parsed_config diff --git a/src/simod/simod.py b/src/simod/simod.py index 37659b1c..5e5992f9 100644 --- a/src/simod/simod.py +++ b/src/simod/simod.py @@ -15,11 +15,12 @@ from pix_framework.io.bpmn import get_activities_names_from_bpmn from simod.batching.discovery import discover_batching_rules -from simod.case_attributes.discovery import discover_case_attributes +from simod.branch_rules.discovery import discover_branch_rules, map_branch_rules_to_flows from simod.cli_formatter import 
print_section, print_subsection -from simod.control_flow.discovery import discover_process_model +from simod.control_flow.discovery import discover_process_model, add_bpmn_diagram_to_model from simod.control_flow.optimizer import ControlFlowOptimizer from simod.control_flow.settings import HyperoptIterationParams as ControlFlowHyperoptIterationParams +from simod.data_attributes.discovery import discover_data_attributes from simod.event_log.event_log import EventLog from simod.extraneous_delays.optimizer import ExtraneousDelaysOptimizer from simod.extraneous_delays.types import ExtraneousDelay @@ -28,6 +29,8 @@ from simod.resource_model.optimizer import ResourceModelOptimizer from simod.resource_model.repair import repair_with_missing_activities from simod.resource_model.settings import HyperoptIterationParams as ResourceModelHyperoptIterationParams +from simod.runtime_meter import RuntimeMeter +from simod.settings.control_flow_settings import ProcessModelDiscoveryAlgorithm from simod.settings.simod_settings import SimodSettings from simod.simulation.parameters.BPS_model import BPSModel from simod.simulation.prosimos import simulate_and_evaluate @@ -36,7 +39,18 @@ class Simod: """ - SIMOD optimization. + Class to run the full pipeline of SIMOD in order to discover a BPS model from an event log. + + Attributes + ---------- + settings : :class:`~simod.settings.simod_settings.SimodSettings` + Configuration to run SIMOD and all its stages. + event_log : :class:`~simod.event_log.event_log.EventLog` + EventLog class storing the preprocessed training, validation, and (optionally) test partitions. + output_dir : :class:`~pathlib.Path` + Path to the folder where all the SIMOD outputs will be written. + final_bps_model : :class:`~simod.simulation.parameters.BPS_model.BPSModel` + Instance of the best BPS model discovered by SIMOD. """ # Event log with the train, validation and test logs. @@ -81,11 +95,34 @@ def __init__( self._best_result_dir = self._output_dir / "best_result" create_folder(self._best_result_dir) - def run(self): + def run(self, runtimes: Optional[RuntimeMeter] = None): """ - Optimizes the BPS model with the given event log and settings. + Executes the SIMOD pipeline to discover the BPS model that best reflects the behavior recorded in the input + event log based on the specified configuration. + + Parameters + ---------- + runtimes : :class:`~simod.runtime_meter.RuntimeMeter`, optional + Instance for tracking the runtime of the different stages in the SIMOD pipeline. When provided, SIMOD + pipeline stages will be tracked and reported along with stages previously tracked in the instance (e.g., + preprocessing). If not provided, the runtime tracking reported will only contain SIMOD stages. + + Returns + ------- + None + The method performs in-place execution of the pipeline and does not return a value. + + Notes + ----- + - This method generates all output files under the folder ``[output_dir]/best_result/``. + - This method updates internal attributes of the class, such as `final_bps_model`, with the best BPS model found + during the pipeline execution. """ + # Runtime object + runtimes = RuntimeMeter() if runtimes is None else runtimes + runtimes.start(RuntimeMeter.TOTAL) + # Model activities might be different from event log activities if the model has been provided, # because we split the event log into train, test, and validation partitions. # We use model_activities to repair resource_model later after its discovery from a reduced event log.
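The hunk above makes `run()` accept an optional `RuntimeMeter` and brackets each pipeline stage with `start`/`stop` calls so the per-stage runtimes can be exported at the end. As a rough, self-contained sketch of this timing pattern (a stand-in class, not SIMOD's actual `RuntimeMeter`; stage names are illustrative):

```python
import time


class StageTimer:
    """Minimal stand-in for the start/stop stage-timing pattern used in the pipeline."""

    def __init__(self):
        self._started = {}  # stage name -> start timestamp
        self.runtimes = {}  # stage name -> elapsed seconds

    def start(self, stage: str):
        self._started[stage] = time.perf_counter()

    def stop(self, stage: str):
        self.runtimes[stage] = time.perf_counter() - self._started.pop(stage)


timer = StageTimer()
timer.start("total")
timer.start("initial_model")
time.sleep(0.1)  # placeholder for discovering the initial BPS model
timer.stop("initial_model")
timer.stop("total")
print(timer.runtimes)
```

Accepting an externally created meter, as the new `run(runtimes=...)` signature does, lets callers report stages timed before the pipeline (e.g., preprocessing) together with the SIMOD stages.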
@@ -95,16 +132,19 @@ def run(self): # --- Discover Default Case Arrival and Resource Allocation models --- # print_section("Discovering initial BPS Model") + runtimes.start(RuntimeMeter.INITIAL_MODEL) self._best_bps_model.case_arrival_model = discover_case_arrival_model( self._event_log.train_validation_partition, # No optimization process here, use train + validation self._event_log.log_ids, use_observed_arrival_distribution=self._settings.common.use_observed_arrival_distribution, ) + calendar_discovery_parameters = CalendarDiscoveryParameters() self._best_bps_model.resource_model = discover_resource_model( self._event_log.train_partition, # Only train to not discover tasks that won't exist for control-flow opt. self._event_log.log_ids, - CalendarDiscoveryParameters(), + calendar_discovery_parameters, ) + self._best_bps_model.calendar_granularity = calendar_discovery_parameters.granularity if model_activities is not None: repair_with_missing_activities( resource_model=self._best_bps_model.resource_model, @@ -112,45 +152,59 @@ def run(self): event_log=self._event_log.train_validation_partition, log_ids=self._event_log.log_ids, ) + runtimes.stop(RuntimeMeter.INITIAL_MODEL) # --- Control-Flow Optimization --- # print_section("Optimizing control-flow parameters") + runtimes.start(RuntimeMeter.CONTROL_FLOW_MODEL) best_control_flow_params = self._optimize_control_flow() self._best_bps_model.process_model = self._control_flow_optimizer.best_bps_model.process_model self._best_bps_model.gateway_probabilities = self._control_flow_optimizer.best_bps_model.gateway_probabilities + self._best_bps_model.branch_rules = self._control_flow_optimizer.best_bps_model.branch_rules + runtimes.stop(RuntimeMeter.CONTROL_FLOW_MODEL) - # --- Case Attributes --- # - if ( - self._settings.common.discover_case_attributes - or self._settings.resource_model.discover_prioritization_rules - ): - print_section("Discovering case attributes") - case_attributes = discover_case_attributes( - self._event_log.train_validation_partition, # No optimization process here, use train + validation + # --- Data Attributes --- # + if (self._settings.common.discover_data_attributes or + self._settings.resource_model.discover_prioritization_rules): + print_section("Discovering data attributes") + runtimes.start(RuntimeMeter.DATA_ATTRIBUTES_MODEL) + global_attributes, case_attributes, event_attributes = discover_data_attributes( + self._event_log.train_validation_partition, self._event_log.log_ids, ) + self._best_bps_model.global_attributes = global_attributes self._best_bps_model.case_attributes = case_attributes + self._best_bps_model.event_attributes = event_attributes + runtimes.stop(RuntimeMeter.DATA_ATTRIBUTES_MODEL) # --- Resource Model Discovery --- # print_section("Optimizing resource model parameters") + runtimes.start(RuntimeMeter.RESOURCE_MODEL) best_resource_model_params = self._optimize_resource_model(model_activities) self._best_bps_model.resource_model = self._resource_model_optimizer.best_bps_model.resource_model + self._best_bps_model.calendar_granularity = self._resource_model_optimizer.best_bps_model.calendar_granularity self._best_bps_model.prioritization_rules = self._resource_model_optimizer.best_bps_model.prioritization_rules self._best_bps_model.batching_rules = self._resource_model_optimizer.best_bps_model.batching_rules + runtimes.stop(RuntimeMeter.RESOURCE_MODEL) # --- Extraneous Delays Discovery --- # if self._settings.extraneous_activity_delays is not None: print_section("Discovering extraneous delays") + 
runtimes.start(RuntimeMeter.EXTRANEOUS_DELAYS) timers = self._optimize_extraneous_activity_delays() self._best_bps_model.extraneous_delays = timers add_timers_to_bpmn_model(self._best_bps_model.process_model, timers) # Update BPMN model on disk + runtimes.stop(RuntimeMeter.EXTRANEOUS_DELAYS) # --- Discover final BPS model --- # print_section("Discovering final BPS model") + runtimes.start(RuntimeMeter.FINAL_MODEL) self.final_bps_model = BPSModel( # Bypass all models already discovered with train+validation process_model=get_process_model_path(self._best_result_dir, self._event_log.process_name), case_arrival_model=self._best_bps_model.case_arrival_model, case_attributes=self._best_bps_model.case_attributes, + global_attributes=self._best_bps_model.global_attributes, + event_attributes=self._best_bps_model.event_attributes, ) # Process model if self._settings.common.process_model_path is None: @@ -160,7 +214,10 @@ def run(self): ) # Instantiate event log to discover the process model with xes_log_path = self._best_result_dir / f"{self._event_log.process_name}_train_val.xes" - self._event_log.train_validation_to_xes(xes_log_path) + if best_control_flow_params.mining_algorithm is ProcessModelDiscoveryAlgorithm.SPLIT_MINER_V1: + self._event_log.train_validation_to_xes(xes_log_path, only_complete_events=True) + else: + self._event_log.train_validation_to_xes(xes_log_path) # Discover the process model discover_process_model( log_path=xes_log_path, @@ -180,6 +237,17 @@ def run(self): bpmn_graph=best_bpmn_graph, discovery_method=best_control_flow_params.gateway_probabilities_method, ) + # Branch Rules + if self._settings.control_flow.discover_branch_rules: + print_section("Discovering branch conditions") + self.final_bps_model.branch_rules = discover_branch_rules( + best_bpmn_graph, + self._event_log.train_validation_partition, + self._event_log.log_ids, + f_score=best_control_flow_params.f_score + ) + self.final_bps_model.gateway_probabilities = \ + map_branch_rules_to_flows(self.final_bps_model.gateway_probabilities, self.final_bps_model.branch_rules) # Resource model print_subsection("Discovering best resource model") self.final_bps_model.resource_model = discover_resource_model( @@ -187,6 +255,7 @@ def run(self): log_ids=self._event_log.log_ids, params=best_resource_model_params.calendar_discovery_params, ) + self.final_bps_model.calendar_granularity = best_resource_model_params.calendar_discovery_params.granularity if model_activities is not None: repair_with_missing_activities( resource_model=self.final_bps_model.resource_model, @@ -214,6 +283,9 @@ def run(self): self.final_bps_model.extraneous_delays = self._best_bps_model.extraneous_delays add_timers_to_bpmn_model(self.final_bps_model.process_model, self._best_bps_model.extraneous_delays) self.final_bps_model.replace_activity_names_with_ids() + runtimes.stop(RuntimeMeter.FINAL_MODEL) + runtimes.stop(RuntimeMeter.TOTAL) + # Write JSON parameters to file json_parameters_path = get_simulation_parameters_path(self._best_result_dir, self._event_log.process_name) with json_parameters_path.open("w") as f: @@ -222,18 +294,25 @@ def run(self): # --- Evaluate final BPS model --- # if self._settings.common.perform_final_evaluation: print_subsection("Evaluate") + runtimes.start(RuntimeMeter.EVALUATION) simulation_dir = self._best_result_dir / "evaluation" simulation_dir.mkdir(parents=True, exist_ok=True) self._evaluate_model(self.final_bps_model.process_model, json_parameters_path, simulation_dir) + runtimes.stop(RuntimeMeter.EVALUATION) # --- Export 
settings and clean temporal files --- # + print_section(f"Exporting canonical model, runtimes, settings and cleaning up intermediate files") canonical_model_path = self._best_result_dir / "canonical_model.json" - print_section(f"Exporting canonical model to {canonical_model_path}") _export_canonical_model(canonical_model_path, best_control_flow_params, best_resource_model_params) + runtimes_model_path = self._best_result_dir / "runtimes.json" + _export_runtimes(runtimes_model_path, runtimes) if self._settings.common.clean_intermediate_files: self._clean_up() self._settings.to_yaml(self._best_result_dir) + # --- Add BPMN diagram to the model --- # + add_bpmn_diagram_to_model(self.final_bps_model.process_model) + def _optimize_control_flow(self) -> ControlFlowHyperoptIterationParams: """ Control-flow and Gateway Probabilities discovery. @@ -318,14 +397,24 @@ def _export_canonical_model( control_flow_settings: ControlFlowHyperoptIterationParams, calendar_settings: ResourceModelHyperoptIterationParams, ): - structure = control_flow_settings.to_dict() - - calendars = calendar_settings.to_dict() - canon = { - "control_flow": structure, - "calendars": calendars, + "control_flow": control_flow_settings.to_dict(), + "calendars": calendar_settings.to_dict(), } - with open(file_path, "w") as f: json.dump(canon, f) + + +def _export_runtimes( + file_path: Path, + runtimes: RuntimeMeter +): + with open(file_path, "w") as file: + json.dump( + runtimes.runtimes | {'explanation': f"Add '{RuntimeMeter.PREPROCESSING}' to '{RuntimeMeter.TOTAL}' " f"to obtain the runtime of the entire SIMOD pipeline including the " f"preprocessing stage. '{RuntimeMeter.EVALUATION}', if reported, should be left out " f"as it measures the quality assessment of the final BPS model (i.e., " f"it is not part of the discovery process)."}, + file + ) diff --git a/src/simod/simulation/parameters/BPS_model.py b/src/simod/simulation/parameters/BPS_model.py index a1a38219..308146f3 100644 --- a/src/simod/simulation/parameters/BPS_model.py +++ b/src/simod/simulation/parameters/BPS_model.py @@ -8,10 +8,12 @@ from pix_framework.discovery.gateway_probabilities import GatewayProbabilities from pix_framework.discovery.resource_calendar_and_performance.fuzzy.resource_calendar import FuzzyResourceCalendar from pix_framework.discovery.resource_model import ResourceModel + from pix_framework.io.bpmn import get_activities_ids_by_name_from_bpmn from simod.batching.types import BatchingRule -from simod.case_attributes.types import CaseAttribute +from simod.branch_rules.types import BranchRules +from simod.data_attributes.types import CaseAttribute, GlobalAttribute, EventAttribute from simod.extraneous_delays.types import ExtraneousDelay from simod.prioritization.types import PrioritizationRule from simod.utilities import get_simulation_parameters_path @@ -26,14 +28,54 @@ RESOURCE_ACTIVITY_PERFORMANCE_KEY = "task_resource_distribution" EXTRANEOUS_DELAYS_KEY = "event_distribution" CASE_ATTRIBUTES_KEY = "case_attributes" +GLOBAL_ATTRIBUTES_KEY = "global_attributes" +EVENT_ATTRIBUTES_KEY = "event_attributes" PRIORITIZATION_RULES_KEY = "prioritisation_rules" BATCHING_RULES_KEY = "batch_processing" +BRANCH_RULES_KEY = "branch_rules" @dataclass class BPSModel: """ - BPS model class containing all the components to simulate a business process model. + Represents a Business Process Simulation (BPS) model containing all necessary components + to simulate a business process.
+ + This class manages various elements such as the BPMN process model, resource configurations, + extraneous delays, case attributes, and prioritization/batching rules. It provides methods + to convert the model into a format compatible with Prosimos and handle activity ID mappings. + + Attributes + ---------- + process_model : :class:`pathlib.Path`, optional + Path to the BPMN process model file. + gateway_probabilities : List[:class:`GatewayProbabilities`], optional + Probabilities for gateway-based process routing. + case_arrival_model : :class:`CaseArrivalModel`, optional + Model for the arrival of new cases in the simulation. + resource_model : :class:`ResourceModel`, optional + Model for the resources involved in the process, their working schedules, etc. + extraneous_delays : List[:class:`~simod.extraneous_delays.types.ExtraneousDelay`], optional + A list of delays representing extraneous waiting times before/after activities. + case_attributes : List[:class:`CaseAttribute`], optional + Case-level attributes and their update rules. + global_attributes : List[:class:`GlobalAttribute`], optional + Global attributes and their update rules. + event_attributes : List[:class:`EventAttribute`], optional + Event-level attributes and their update rules. + prioritization_rules : List[:class:`PrioritizationRule`], optional + A set of case prioritization rules for process execution. + batching_rules : List[:class:`BatchingRule`], optional + Rules defining how activities are batched together. + branch_rules : List[:class:`BranchRules`], optional + Branching rules defining conditional flow behavior in decision points. + calendar_granularity : int, optional + Granularity of the resource calendar, expressed in minutes. + + Notes + ----- + - `to_prosimos_format` transforms the model into a dictionary format used by Prosimos. + - `replace_activity_names_with_ids` modifies activity references to use BPMN IDs instead of names. """ process_model: Optional[Path] = None # A path to the model for now, in future the loaded BPMN model @@ -42,10 +84,33 @@ class BPSModel: resource_model: Optional[ResourceModel] = None extraneous_delays: Optional[List[ExtraneousDelay]] = None case_attributes: Optional[List[CaseAttribute]] = None + global_attributes: Optional[List[GlobalAttribute]] = None + event_attributes: Optional[List[EventAttribute]] = None prioritization_rules: Optional[List[PrioritizationRule]] = None batching_rules: Optional[List[BatchingRule]] = None + branch_rules: Optional[List[BranchRules]] = None + calendar_granularity: Optional[int] = None + + def to_prosimos_format(self) -> dict: + """ + Converts the BPS model into a dictionary format compatible with the Prosimos simulation engine. + + This method extracts all relevant process simulation attributes, including resource models, + delays, prioritization rules, and activity mappings, and structures them in a format + understood by Prosimos. + + Returns + ------- + dict + A dictionary representation of the BPS model, ready for simulation in Prosimos. + + Notes + ----- + - If the resource model contains a fuzzy calendar, the model type is set to "FUZZY"; + otherwise, it defaults to "CRISP". + - The function ensures activity labels are properly linked to their respective BPMN IDs. 
+ """ - def to_prosimos_format(self, granule_size: int = 15) -> dict: # Get map activity label -> node ID activity_label_to_id = get_activities_ids_by_name_from_bpmn(self.process_model) @@ -66,7 +131,11 @@ def to_prosimos_format(self, granule_size: int = 15) -> dict: ] if self.case_attributes is not None: attributes[CASE_ATTRIBUTES_KEY] = [case_attribute.to_prosimos() for case_attribute in self.case_attributes] - if self.prioritization_rules is not None: + if self.global_attributes is not None: + attributes[GLOBAL_ATTRIBUTES_KEY] = [global_attribute.to_prosimos() for global_attribute in self.global_attributes] + if self.event_attributes is not None: + attributes[EVENT_ATTRIBUTES_KEY] = [event_attribute.to_prosimos() for event_attribute in self.event_attributes] + if self.case_attributes is not None and self.prioritization_rules is not None: attributes[PRIORITIZATION_RULES_KEY] = [ priority_rule.to_prosimos() for priority_rule in self.prioritization_rules ] @@ -74,22 +143,53 @@ def to_prosimos_format(self, granule_size: int = 15) -> dict: attributes[BATCHING_RULES_KEY] = [ batching_rule.to_prosimos(activity_label_to_id) for batching_rule in self.batching_rules ] + if self.branch_rules is not None: + attributes[BRANCH_RULES_KEY] = [branch_rules.to_dict() for branch_rules in self.branch_rules] if isinstance(self.resource_model.resource_calendars[0], FuzzyResourceCalendar): attributes["model_type"] = "FUZZY" else: attributes["model_type"] = "CRISP" - attributes["granule_size"] = {"value": granule_size, "time_unit": "MINUTES"} + attributes["granule_size"] = {"value": self.calendar_granularity, "time_unit": "MINUTES"} return attributes def deep_copy(self) -> "BPSModel": + """ + Creates a deep copy of the current BPSModel instance. + + This ensures that modifying the copied instance does not affect the original. + + Returns + ------- + :class:`BPSModel` + A new, independent copy of the current BPSModel instance. + + Notes + ----- + This method uses Python's `copy.deepcopy()` to create a full recursive copy of the model. + """ return copy.deepcopy(self) def replace_activity_names_with_ids(self): """ - Updates activity labels with activity IDs from the current (BPMN) process model. + Replaces activity names with their corresponding IDs from the BPMN process model. + + Prosimos requires activity references to be identified by their BPMN node IDs instead of + activity labels. This method updates: - In BPSModel, the activities are referenced by their name, Prosimos uses IDs instead from the BPMN model. + - Resource associations in the resource profiles. + - Activity-resource distributions. + - Event attributes referencing activity names. + + Raises + ------ + KeyError + If an activity name does not exist in the BPMN model. + + Notes + ----- + - This method modifies the model in place. + - It ensures compatibility with Prosimos by aligning activity references with BPMN IDs. 
""" # Get map activity label -> node ID activity_label_to_id = get_activities_ids_by_name_from_bpmn(self.process_model) @@ -107,10 +207,39 @@ def replace_activity_names_with_ids(self): activity_resource_distributions.activity_id ] - def to_json(self, output_dir: Path, process_name: str, granule_size: int = 15) -> Path: + # Update activity label in event attributes + if self.event_attributes is not None: + for event_attribute in self.event_attributes: + event_attribute.event_id = activity_label_to_id[event_attribute.event_id] + + def to_json(self, output_dir: Path, process_name: str) -> Path: + """ + Saves the BPS model in a Prosimos-compatible JSON format. + + This method generates a structured JSON file containing all necessary simulation parameters, + ensuring that the model can be directly used by the Prosimos engine. + + Parameters + ---------- + output_dir : :class:`pathlib.Path` + The directory where the JSON file should be saved. + process_name : str + The name of the process, used for naming the output file. + + Returns + ------- + :class:`pathlib.Path` + The full path to the generated JSON file. + + Notes + ----- + - The JSON file is created in `output_dir` with a filename based on `process_name`. + - Uses `json.dump()` to serialize the model into a structured format. + - Ensures all attributes are converted into a valid Prosimos format before writing. + """ json_parameters_path = get_simulation_parameters_path(output_dir, process_name) with json_parameters_path.open("w") as f: - json.dump(self.to_prosimos_format(granule_size=granule_size), f) + json.dump(self.to_prosimos_format(), f) return json_parameters_path diff --git a/src/simod/simulation/prosimos.py b/src/simod/simulation/prosimos.py index e77f7bea..47337752 100644 --- a/src/simod/simulation/prosimos.py +++ b/src/simod/simulation/prosimos.py @@ -1,18 +1,11 @@ import itertools -import json import multiprocessing from concurrent.futures import ProcessPoolExecutor as Pool from dataclasses import dataclass from pathlib import Path -from typing import Dict, List, Optional, Tuple +from typing import List, Tuple import pandas as pd -from pix_framework.discovery.gateway_probabilities import GatewayProbabilities -from pix_framework.discovery.resource_calendar_and_performance.crisp.resource_calendar import RCalendar -from pix_framework.discovery.resource_calendar_and_performance.resource_activity_performance import ( - ActivityResourceDistribution, -) -from pix_framework.discovery.resource_profiles import ResourceProfile from pix_framework.io.event_log import PROSIMOS_LOG_IDS, EventLogIDs, read_csv_log from prosimos.simulation_engine import run_simulation @@ -24,56 +17,24 @@ @dataclass -class SimulationParameters: +class ProsimosSettings: """ - Prosimos simulation parameters. + Configuration settings for running a Prosimos simulation. + + Attributes + ---------- + bpmn_path : :class:`pathlib.Path` + Path to the BPMN process model. + parameters_path : :class:`pathlib.Path` + Path to the Prosimos simulation parameters JSON file. + output_log_path : :class:`pathlib.Path` + Path to store the generated simulation log. + num_simulation_cases : int + Number of cases to simulate. + simulation_start : :class:`pandas.Timestamp` + Start timestamp for the simulation. 
""" - resource_profiles: List[ResourceProfile] - resource_calendars: Dict[str, RCalendar] - task_resource_distributions: List[ActivityResourceDistribution] - arrival_distribution: dict - arrival_calendar: RCalendar - gateway_branching_probabilities: List[GatewayProbabilities] - event_distribution: Optional[dict] - - def to_dict(self) -> dict: - """Dictionary compatible with Prosimos.""" - parameters = { - "resource_profiles": [resource_profile.to_dict() for resource_profile in self.resource_profiles], - "resource_calendars": [ - { - "id": self.resource_calendars[calendar_id].calendar_id, - "name": self.resource_calendars[calendar_id].calendar_id, - "time_periods": self.resource_calendars[calendar_id].to_json(), - } - for calendar_id in self.resource_calendars - ], - "task_resource_distribution": [ - activity_resources.to_dict() for activity_resources in self.task_resource_distributions - ], - "arrival_time_distribution": self.arrival_distribution, - "arrival_time_calendar": self.arrival_calendar.to_json(), - "gateway_branching_probabilities": [ - gateway_probabilities.to_dict() for gateway_probabilities in self.gateway_branching_probabilities - ], - } - - if self.event_distribution: - parameters["event_distribution"] = self.event_distribution - - return parameters - - def to_json_file(self, file_path: Path) -> None: - """JSON compatible with Prosimos.""" - with file_path.open("w") as f: - json.dump(self.to_dict(), f) - - -@dataclass -class ProsimosSettings: - """Prosimos simulator settings.""" - bpmn_path: Path parameters_path: Path output_log_path: Path @@ -83,9 +44,18 @@ class ProsimosSettings: def simulate(settings: ProsimosSettings): """ - Simulates a process model using Prosimos. - :param settings: Prosimos settings. - :return: None. + Runs a Prosimos simulation with the provided settings. + + Parameters + ---------- + settings : :class:`ProsimosSettings` + Configuration settings containing paths and parameters for the simulation. + + Notes + ----- + - The function prints the simulation settings and invokes `run_simulation()`. + - The labels of the start event, end event, and event timers are**not** recorded to the output log. + - The simulation generates a process log stored in `settings.output_log_path`. """ print_message(f"Simulation settings: {settings}") @@ -112,19 +82,41 @@ def simulate_and_evaluate( num_simulations: int = 1, ) -> List[dict]: """ - Simulates a process model using Prosimos num_simulations times in parallel. - - :param process_model_path: Path to the BPMN model. - :param parameters_path: Path to the Prosimos parameters. - :param output_dir: Path to the output directory for simulated logs. - :param simulation_cases: Number of cases to simulate. - :param simulation_start_time: Start time of the simulation. - :param validation_log: Validation log. - :param validation_log_ids: Validation log IDs. - :param metrics: Metrics to evaluate the simulated logs with. - :param num_simulations: Number of simulations to run in parallel. Default: 1. More simulations increase - the accuracy of evaluation metrics. - :return: Evaluation metrics. + Simulates a process model using Prosimos multiple times and evaluates the results. + + This function runs the simulation `num_simulations` times in parallel, + compares the generated logs with a validation log, and evaluates them using provided metrics. + + Parameters + ---------- + process_model_path : :class:`pathlib.Path` + Path to the BPMN process model. 
+ parameters_path : :class:`pathlib.Path` + Path to the Prosimos simulation parameters JSON file. + output_dir : :class:`pathlib.Path` + Directory where simulated logs will be stored. + simulation_cases : int + Number of cases to simulate per run. + simulation_start_time : :class:`pandas.Timestamp` + Start timestamp for the simulation. + validation_log : :class:`pandas.DataFrame` + The actual event log to compare against. + validation_log_ids : :class:`EventLogIDs` + Column mappings for identifying events in the validation log. + metrics : List[:class:`~simod.settings.common_settings.Metric`] + A list of metrics used to evaluate the simulated logs. + num_simulations : int, optional + Number of parallel simulation runs (default is 1). + + Returns + ------- + List[dict] + A list of evaluation results, one for each simulated log. + + Notes + ----- + - Uses multiprocessing to speed up simulation when `num_simulations > 1`. + - Simulated logs are automatically compared with `validation_log`. """ simulation_log_paths = simulate_in_parallel( diff --git a/tests/assets/bpic15/bpic15_1_with_model_v4.yml b/tests/assets/bpic15/bpic15_1_with_model_v4.yml index 244ce801..0068bcd4 100644 --- a/tests/assets/bpic15/bpic15_1_with_model_v4.yml +++ b/tests/assets/bpic15/bpic15_1_with_model_v4.yml @@ -1,7 +1,7 @@ version: 4 common: - train_log_path: tests/assets/bpic15/BPIC15_1.csv.gz - process_model_path: tests/assets/bpic15/BPIC15_1.bpmn + train_log_path: BPIC15_1.csv.gz + process_model_path: BPIC15_1.bpmn num_final_evaluations: 1 # Number of evaluations of the discovered BPS model. evaluation_metrics: # Metrics to evaluate the discovered BPS model with. - 3_gram_distance @@ -12,6 +12,13 @@ common: - arrival_event_distribution - cycle_time_distribution clean_intermediate_files: false + log_ids: + case: case:concept:name + activity: concept:name + resource: org:resource + start_time: start_timestamp + end_time: time:timestamp + enabled_time: enabled_time preprocessing: multitasking: false # Reassign activity durations when happening in multitasking. enable_time_concurrency_threshold: 0.5 # Concurrency threshold for the enabled time computation. 
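The test configurations above are bumped from version 4 to 5 and rename `discover_case_attributes` to `discover_data_attributes`, which is exactly the translation `_parse_legacy_config_4` applies when loading an older file. A minimal sketch of that rename (the example dictionary is illustrative, not taken from the repository):

```python
import copy


def parse_legacy_v4(config: dict) -> dict:
    """Translate a v4 configuration dict to v5 by renaming the case-attribute discovery flag."""
    parsed = copy.deepcopy(config)
    if parsed.get("version") == 4:
        parsed["version"] = 5
        common = parsed.get("common", {})
        if "discover_case_attributes" in common:
            common["discover_data_attributes"] = common.pop("discover_case_attributes")
    return parsed


legacy = {"version": 4, "common": {"train_log_path": "LoanApp_simplified.csv.gz", "discover_case_attributes": True}}
print(parse_legacy_v4(legacy))
# {'version': 5, 'common': {'train_log_path': 'LoanApp_simplified.csv.gz', 'discover_data_attributes': True}}
```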
diff --git a/tests/assets/branch_rules/or.bpmn b/tests/assets/branch_rules/or.bpmn new file mode 100644 index 00000000..84b0e497 --- /dev/null +++ b/tests/assets/branch_rules/or.bpmn @@ -0,0 +1,453 @@
[New BPMN test model with inclusive (OR) gateways for branch-rule discovery; the 453 lines of XML markup were stripped during extraction and are omitted here.]
diff --git a/tests/assets/branch_rules/or_1.csv.gz b/tests/assets/branch_rules/or_1.csv.gz new file mode 100644 index 00000000..b0be5487 Binary files /dev/null and b/tests/assets/branch_rules/or_1.csv.gz differ
diff --git a/tests/assets/branch_rules/or_2.csv.gz b/tests/assets/branch_rules/or_2.csv.gz new file mode 100644 index 00000000..b8951f1a Binary files /dev/null and b/tests/assets/branch_rules/or_2.csv.gz differ
diff --git a/tests/assets/branch_rules/or_3.csv.gz b/tests/assets/branch_rules/or_3.csv.gz new file mode 100644 index 00000000..9afee70a Binary files /dev/null and b/tests/assets/branch_rules/or_3.csv.gz differ
diff --git a/tests/assets/branch_rules/or_4.csv.gz b/tests/assets/branch_rules/or_4.csv.gz new file mode 100644 index 00000000..22fdf0c7 Binary files /dev/null and b/tests/assets/branch_rules/or_4.csv.gz differ
diff --git a/tests/assets/branch_rules/or_5.csv.gz b/tests/assets/branch_rules/or_5.csv.gz new file mode 100644 index 00000000..c8007305 Binary files /dev/null and b/tests/assets/branch_rules/or_5.csv.gz differ
diff --git a/tests/assets/branch_rules/or_6.csv.gz b/tests/assets/branch_rules/or_6.csv.gz new file mode 100644 index 00000000..c6cc1c5c Binary files /dev/null and b/tests/assets/branch_rules/or_6.csv.gz differ
diff --git a/tests/assets/branch_rules/or_7.csv.gz b/tests/assets/branch_rules/or_7.csv.gz new file mode 100644 index 00000000..5d969dd0 Binary files /dev/null and b/tests/assets/branch_rules/or_7.csv.gz differ
diff --git a/tests/assets/branch_rules/or_8.csv.gz b/tests/assets/branch_rules/or_8.csv.gz new file mode 100644 index 00000000..056b1bdd Binary files /dev/null and b/tests/assets/branch_rules/or_8.csv.gz differ
diff --git a/tests/assets/branch_rules/xor.bpmn b/tests/assets/branch_rules/xor.bpmn new file mode 100644 index 00000000..9b420e0a --- /dev/null +++ b/tests/assets/branch_rules/xor.bpmn @@ -0,0 +1,453 @@
[New BPMN test model with exclusive (XOR) gateways for branch-rule discovery; the 453 lines of XML markup were stripped during extraction and are omitted here.]
diff --git a/tests/assets/branch_rules/xor_1.csv.gz b/tests/assets/branch_rules/xor_1.csv.gz new file mode 100644 index 00000000..4bf5515a Binary files /dev/null and b/tests/assets/branch_rules/xor_1.csv.gz differ
diff --git a/tests/assets/branch_rules/xor_2.csv.gz b/tests/assets/branch_rules/xor_2.csv.gz new file mode 100644 index 00000000..34d0b4db Binary files /dev/null and b/tests/assets/branch_rules/xor_2.csv.gz differ
diff --git a/tests/assets/branch_rules/xor_3.csv.gz b/tests/assets/branch_rules/xor_3.csv.gz new file mode 100644 index 00000000..519b2607 Binary files /dev/null and b/tests/assets/branch_rules/xor_3.csv.gz differ
diff --git a/tests/assets/branch_rules/xor_5.csv.gz b/tests/assets/branch_rules/xor_5.csv.gz new file mode 100644 index 00000000..661a033e Binary files /dev/null and b/tests/assets/branch_rules/xor_5.csv.gz differ
diff --git a/tests/assets/branch_rules/xor_6.csv.gz b/tests/assets/branch_rules/xor_6.csv.gz new file mode 100644 index 00000000..2ce57033 Binary files /dev/null and b/tests/assets/branch_rules/xor_6.csv.gz differ
diff --git a/tests/assets/branch_rules/xor_7.csv.gz b/tests/assets/branch_rules/xor_7.csv.gz new file mode 100644 index
00000000..615e26c9 Binary files /dev/null and b/tests/assets/branch_rules/xor_7.csv.gz differ diff --git a/tests/assets/configuration_simod_basic.yml b/tests/assets/configuration_simod_basic.yml index eb6b1c44..ceb269c6 100644 --- a/tests/assets/configuration_simod_basic.yml +++ b/tests/assets/configuration_simod_basic.yml @@ -1,9 +1,9 @@ -version: 4 +version: 5 common: train_log_path: LoanApp_simplified.csv.gz test_log_path: LoanApp_simplified.csv.gz num_final_evaluations: 1 - discover_case_attributes: true + discover_data_attributes: true evaluation_metrics: - absolute_hourly_emd log_ids: diff --git a/tests/assets/configuration_simod_with_extraneous.yml b/tests/assets/configuration_simod_with_extraneous.yml index 8d5dbf3e..835da3b0 100644 --- a/tests/assets/configuration_simod_with_extraneous.yml +++ b/tests/assets/configuration_simod_with_extraneous.yml @@ -1,7 +1,7 @@ -version: 4 +version: 5 common: train_log_path: LoanApp_simplified.csv.gz - discover_case_attributes: true + discover_data_attributes: true log_ids: case: case:concept:name activity: concept:name diff --git a/tests/assets/configuration_simod_with_model.yml b/tests/assets/configuration_simod_with_model.yml index 77bf9f9a..bd205710 100644 --- a/tests/assets/configuration_simod_with_model.yml +++ b/tests/assets/configuration_simod_with_model.yml @@ -1,8 +1,8 @@ -version: 4 +version: 5 common: train_log_path: LoanApp_simplified.csv.gz process_model_path: LoanApp_simplified.bpmn - discover_case_attributes: true + discover_data_attributes: true perform_final_evaluation: true num_final_evaluations: 1 evaluation_metrics: diff --git a/tests/assets/configuration_simod_with_model_and_batching.yml b/tests/assets/configuration_simod_with_model_and_batching.yml index 43dedbf8..70eed272 100644 --- a/tests/assets/configuration_simod_with_model_and_batching.yml +++ b/tests/assets/configuration_simod_with_model_and_batching.yml @@ -1,11 +1,11 @@ -version: 4 +version: 5 common: train_log_path: LoanApp_simplified.csv.gz test_log_path: LoanApp_simplified.csv.gz process_model_path: LoanApp_simplified.bpmn num_final_evaluations: 0 # On purpose so it is corrected to 10 clean_intermediate_files: false - discover_case_attributes: true + discover_data_attributes: true evaluation_metrics: - absolute_hourly_emd log_ids: diff --git a/tests/assets/configuration_simod_with_model_and_extraneous.yml b/tests/assets/configuration_simod_with_model_and_extraneous.yml index 918eff93..e6a43cd8 100644 --- a/tests/assets/configuration_simod_with_model_and_extraneous.yml +++ b/tests/assets/configuration_simod_with_model_and_extraneous.yml @@ -1,8 +1,8 @@ -version: 4 +version: 5 common: train_log_path: LoanApp_simplified.csv.gz process_model_path: LoanApp_simplified.bpmn - discover_case_attributes: true + discover_data_attributes: true num_final_evaluations: 1 evaluation_metrics: - absolute_hourly_emd diff --git a/tests/assets/configuration_simod_with_model_and_prioritization.yml b/tests/assets/configuration_simod_with_model_and_prioritization.yml index 098db3cf..cbd84653 100644 --- a/tests/assets/configuration_simod_with_model_and_prioritization.yml +++ b/tests/assets/configuration_simod_with_model_and_prioritization.yml @@ -1,4 +1,4 @@ -version: 4 +version: 5 common: train_log_path: LoanApp_simplified.csv.gz test_log_path: LoanApp_simplified.csv.gz diff --git a/tests/assets/data_attributes/case_attributes.csv.gz b/tests/assets/data_attributes/case_attributes.csv.gz new file mode 100644 index 00000000..506af7f0 Binary files /dev/null and 
b/tests/assets/data_attributes/case_attributes.csv.gz differ
diff --git a/tests/assets/data_attributes/event_attribute_1.csv.gz b/tests/assets/data_attributes/event_attribute_1.csv.gz new file mode 100644 index 00000000..b52afcb4 Binary files /dev/null and b/tests/assets/data_attributes/event_attribute_1.csv.gz differ
diff --git a/tests/assets/data_attributes/event_attribute_15.csv.gz b/tests/assets/data_attributes/event_attribute_15.csv.gz new file mode 100644 index 00000000..8e905c46 Binary files /dev/null and b/tests/assets/data_attributes/event_attribute_15.csv.gz differ
diff --git a/tests/assets/data_attributes/event_attribute_3.csv.gz b/tests/assets/data_attributes/event_attribute_3.csv.gz new file mode 100644 index 00000000..f8ac3e96 Binary files /dev/null and b/tests/assets/data_attributes/event_attribute_3.csv.gz differ
diff --git a/tests/assets/data_attributes/event_attribute_5.csv.gz b/tests/assets/data_attributes/event_attribute_5.csv.gz new file mode 100644 index 00000000..704c5996 Binary files /dev/null and b/tests/assets/data_attributes/event_attribute_5.csv.gz differ
diff --git a/tests/assets/data_attributes/event_attribute_7.csv.gz b/tests/assets/data_attributes/event_attribute_7.csv.gz new file mode 100644 index 00000000..1ab1a58b Binary files /dev/null and b/tests/assets/data_attributes/event_attribute_7.csv.gz differ
diff --git a/tests/assets/data_attributes/event_attribute_9.csv.gz b/tests/assets/data_attributes/event_attribute_9.csv.gz new file mode 100644 index 00000000..8ecd8206 Binary files /dev/null and b/tests/assets/data_attributes/event_attribute_9.csv.gz differ
diff --git a/tests/assets/data_attributes/global_attribute_1.csv.gz b/tests/assets/data_attributes/global_attribute_1.csv.gz new file mode 100644 index 00000000..fc51c8ed Binary files /dev/null and b/tests/assets/data_attributes/global_attribute_1.csv.gz differ
diff --git a/tests/assets/data_attributes/global_attribute_15.csv.gz b/tests/assets/data_attributes/global_attribute_15.csv.gz new file mode 100644 index 00000000..b8520b6a Binary files /dev/null and b/tests/assets/data_attributes/global_attribute_15.csv.gz differ
diff --git a/tests/assets/data_attributes/global_attribute_3.csv.gz b/tests/assets/data_attributes/global_attribute_3.csv.gz new file mode 100644 index 00000000..5c68fe2e Binary files /dev/null and b/tests/assets/data_attributes/global_attribute_3.csv.gz differ
diff --git a/tests/assets/data_attributes/global_attribute_5.csv.gz b/tests/assets/data_attributes/global_attribute_5.csv.gz new file mode 100644 index 00000000..13357513 Binary files /dev/null and b/tests/assets/data_attributes/global_attribute_5.csv.gz differ
diff --git a/tests/assets/data_attributes/global_attribute_7.csv.gz b/tests/assets/data_attributes/global_attribute_7.csv.gz new file mode 100644 index 00000000..b1fad3da Binary files /dev/null and b/tests/assets/data_attributes/global_attribute_7.csv.gz differ
diff --git a/tests/assets/data_attributes/global_attribute_9.csv.gz b/tests/assets/data_attributes/global_attribute_9.csv.gz new file mode 100644 index 00000000..afaeab79 Binary files /dev/null and b/tests/assets/data_attributes/global_attribute_9.csv.gz differ
diff --git a/tests/assets/model_sequence_self_loop.xes b/tests/assets/model_sequence_self_loop.xes new file mode 100644 index 00000000..4e8eda31 --- /dev/null +++ b/tests/assets/model_sequence_self_loop.xes @@ -0,0 +1,7044 @@
[New XES test log `model_sequence_self_loop.xes` (7,044 lines); the XML markup was stripped during extraction and its content is omitted here.]
diff --git a/tests/assets/model_sequence_self_loop_only_end.xes b/tests/assets/model_sequence_self_loop_only_end.xes
new file mode 100644
index 00000000..293e6136
--- /dev/null
+++ b/tests/assets/model_sequence_self_loop_only_end.xes
@@ -0,0 +1,3648 @@
+[XES event log test asset, 3648 added lines; the XML markup was stripped during text extraction and is not reproduced here]
diff --git a/tests/assets/process_model_with_SplitMiner_self_loops.bpmn b/tests/assets/process_model_with_SplitMiner_self_loops.bpmn
new file mode 100644
index 00000000..c71274c4
--- /dev/null
+++ b/tests/assets/process_model_with_SplitMiner_self_loops.bpmn
@@ -0,0 +1,99 @@
+[BPMN 2.0 process model test asset, 99 added lines; the XML markup was stripped during text extraction, leaving only the flow-node identifiers it references]
\ No newline at end of file
diff --git a/tests/test_branch_rules/__init__.py b/tests/test_branch_rules/__init__.py
new file mode 100644
index 00000000..e69de29b
diff --git a/tests/test_branch_rules/test_discovery.py b/tests/test_branch_rules/test_discovery.py
new file mode 100644
index 00000000..586eec78
--- /dev/null
+++ b/tests/test_branch_rules/test_discovery.py
@@ -0,0 +1,94 @@
+import glob
+import os
+
+import pandas as pd
+import pytest
+from pathlib import Path
+from pix_framework.io.event_log import EventLogIDs
+from simod.branch_rules.discovery import discover_branch_rules
+from pix_framework.io.bpm_graph import BPMNGraph
+
+LOG_IDS = EventLogIDs(case="case_id",
+                      activity="activity",
+                      start_time="start_time",
+                      end_time="end_time",
+                      resource="resource"
+                      )
+
+ASSET_DIR = "branch_rules"
+XOR_BPMN = "xor.bpmn"
+OR_BPMN = "or.bpmn"
+XOR_LOG_PATHS = "xor_*.csv.gz"
+OR_LOG_PATHS = "or_8.csv.gz"
+
+# total_branch_rules -> how many branches should get rules
+# rules_per_branch -> how many single rules should be on that branch (exact number or range)
+xor_expected_conditions = {
+    "xor_1.csv.gz": {"total_branch_rules": 15, "rules_per_branch": 1},  # Categorical equal probs
+    "xor_2.csv.gz": {"total_branch_rules": 3, "rules_per_branch": 1},  # Categorical unbalanced
+    "xor_3.csv.gz": {"total_branch_rules": 15, "rules_per_branch": 1},  # Categorical with different probs
+    "xor_5.csv.gz": {"total_branch_rules": 15, "rules_per_branch": (1, 3)},  # Numerical intervals
+    "xor_6.csv.gz": {"total_branch_rules": 15, "rules_per_branch": (1, 2)},  # Conditions
+    "xor_7.csv.gz": {"total_branch_rules": 15, "rules_per_branch": (1, 3)},  # Complex AND and OR conditions
+}
+
+or_expected_conditions = {
+    "or_1.csv.gz": {"total_branch_rules": 15, "rules_per_branch": 1},  # Categorical equal probs 1 flow only
+    "or_2.csv.gz": {"total_branch_rules": 15, "rules_per_branch": (1, 2)},  # Categorical equal probs 2 flows
+    "or_3.csv.gz": {"total_branch_rules": 15, "rules_per_branch": 1},  # Categorical equal probs all flows (warning)
+    "or_4.csv.gz": {"total_branch_rules": 3, "rules_per_branch": 1},  # Categorical unbalanced 1 flow only (warning)
+    "or_5.csv.gz": {"total_branch_rules": 6, "rules_per_branch": 1},  # Categorical unbalanced 2 flows (warning)
+    "or_6.csv.gz": {"total_branch_rules": 15, "rules_per_branch": (1, 3)},  # Categorical unbalanced all flows (warning)
+    "or_7.csv.gz": {"total_branch_rules": 15, "rules_per_branch": (1, 2)},  # Numerical with AND operator
+    "or_8.csv.gz": {"total_branch_rules": 15, "rules_per_branch": 1},  # Numerical with full range
+}
+
+
+@pytest.fixture(scope="module")
+def xor_log_files(entry_point):
+    """Fixture to generate full paths for XOR branch condition log files."""
+    xor_log_pattern = os.path.join(entry_point, ASSET_DIR, XOR_LOG_PATHS)
+    files = glob.glob(xor_log_pattern)
+    return [(file, xor_expected_conditions[os.path.basename(file)]) for file in files]
+
+
+@pytest.fixture(scope="module")
+def or_log_files(entry_point):
+    or_log_pattern = os.path.join(entry_point, ASSET_DIR, OR_LOG_PATHS)
+    files = glob.glob(or_log_pattern)
+    return [(file, or_expected_conditions[os.path.basename(file)]) for file in files]
+
+
+def assert_branch_rules(bpmn_graph, log, log_ids, expected_conditions):
+    branch_rules = discover_branch_rules(bpmn_graph, log, log_ids)
+
+    assert len(branch_rules) == expected_conditions["total_branch_rules"], \
+        f"Expected {expected_conditions['total_branch_rules']} BranchRules, found {len(branch_rules)}"
+
+    for branch_rule in branch_rules:
+        rule_count = len(branch_rule.rules)
+
+        if isinstance(expected_conditions["rules_per_branch"], tuple):
+            min_rules, max_rules = expected_conditions["rules_per_branch"]
+            assert min_rules <= rule_count <= max_rules, \
+                f"Expected between {min_rules} and {max_rules} rules, found {rule_count}"
+        else:
+            assert rule_count == expected_conditions["rules_per_branch"], \
+                f"Expected {expected_conditions['rules_per_branch']} rules, found {rule_count}"
+
+
+def test_discover_xor_branch_rules(entry_point, xor_log_files):
+    bpmn_path = os.path.join(entry_point, ASSET_DIR, XOR_BPMN)
+    for log_path, expected_conditions in xor_log_files:
+        log = pd.read_csv(log_path, compression="gzip")
+        bpmn_graph = BPMNGraph.from_bpmn_path(Path(bpmn_path))
+        assert_branch_rules(bpmn_graph, log, LOG_IDS, expected_conditions)
+
+
+def test_discover_or_branch_rules(entry_point, or_log_files):
+    bpmn_path = os.path.join(entry_point, ASSET_DIR, OR_BPMN)
+    for log_path, expected_conditions in or_log_files:
+        log = pd.read_csv(log_path, compression="gzip")
+        bpmn_graph = BPMNGraph.from_bpmn_path(Path(bpmn_path))
+        assert_branch_rules(bpmn_graph, log, LOG_IDS, expected_conditions)
diff --git a/tests/test_case_attributes/test_discovery.py b/tests/test_case_attributes/test_discovery.py
index f07e7afe..82aa1687 100644
--- a/tests/test_case_attributes/test_discovery.py
+++ b/tests/test_case_attributes/test_discovery.py
@@ -1,15 +1,15 @@
 from pix_framework.io.event_log import EventLogIDs, read_csv_log
 
-from simod.case_attributes.discovery import discover_case_attributes
+from simod.data_attributes.discovery import discover_data_attributes
 
 
 def test_discover_case_attributes(entry_point):
     log_path = entry_point / "Insurance_Claims_train.csv"
     log_ids = EventLogIDs(
-        case="case_id", activity="Activity", start_time="start_time", end_time="end_time", resource="Resource"
+        case="case_id", activity="activity", start_time="start_time", end_time="end_time", resource="Resource"
     )
     log = read_csv_log(log_path, log_ids)
 
-    case_attributes = discover_case_attributes(log, log_ids)
+    global_attributes, case_attributes, event_attributes = discover_data_attributes(log, log_ids)
 
     assert len(case_attributes) > 0
     assert "extraneous_delay" in map(lambda x: x.name, case_attributes)
diff --git a/tests/test_control_flow/test_discovery.py b/tests/test_control_flow/test_discovery.py
index ed98a3c2..159a789e 100644
--- a/tests/test_control_flow/test_discovery.py
+++ b/tests/test_control_flow/test_discovery.py
@@ -1,27 +1,29 @@
+import shutil
 import tempfile
 from pathlib import Path
 
 import pytest
+from lxml import etree
 from pix_framework.discovery.gateway_probabilities import GatewayProbabilitiesDiscoveryMethod
 from pix_framework.io.bpmn import get_activities_names_from_bpmn
 
-from simod.control_flow.discovery import discover_process_model
+from simod.control_flow.discovery import discover_process_model, post_process_bpmn_self_loops
 from simod.control_flow.settings import HyperoptIterationParams
 from simod.settings.common_settings import Metric
 from simod.settings.control_flow_settings import ProcessModelDiscoveryAlgorithm
 
 control_flow_config_sm2 = {
     "mining_algorithm": "sm2",
-    "epsilon": 0.15,
-    "eta": 0.87,
+    "epsilon": 0.3,
+    "eta": 0.5,
     "replace_or_joins": True,
     "prioritize_parallelism": True,
 }
 
 control_flow_config_sm1 = {
     "mining_algorithm": "sm1",
-    "epsilon": 0.15,
-    "eta": 0.87,
+    "epsilon": 0.3,
+    "eta": 0.5,
     "replace_or_joins": True,
     "prioritize_parallelism": True,
 }
@@ -61,3 +63,89 @@ def test_discover_process_model(entry_point, test_data):
         # Assert is BPMN readable and has activities
         activities = get_activities_names_from_bpmn(output_path)
         assert len(activities) > 0
+
+
+@pytest.mark.parametrize(
+    "test_data", structure_optimizer_test_data, ids=[test_data["name"] for test_data in structure_optimizer_test_data]
+)
+def test_discover_process_model_explicit_self_loops(entry_point, test_data):
+    if test_data["config_data"]["mining_algorithm"] == "sm1":
+        log_path = entry_point / "model_sequence_self_loop_only_end.xes"
+    else:
+        log_path = entry_point / "model_sequence_self_loop.xes"
+    with tempfile.TemporaryDirectory() as tmp_dir:
+        output_path = Path(tmp_dir) / "model.bpmn"
+        params = HyperoptIterationParams(
+            output_dir=Path(tmp_dir),
+            provided_model_path=None,
+            project_name="SelfLoopTest",
+            optimization_metric=Metric.TWO_GRAM_DISTANCE,
+            gateway_probabilities_method=GatewayProbabilitiesDiscoveryMethod.DISCOVERY,
+            mining_algorithm=ProcessModelDiscoveryAlgorithm.from_str(test_data["config_data"]["mining_algorithm"]),
+            epsilon=test_data["config_data"]["epsilon"],
+            eta=test_data["config_data"]["eta"],
+            replace_or_joins=test_data["config_data"]["replace_or_joins"],
+            prioritize_parallelism=test_data["config_data"]["prioritize_parallelism"],
+        )
+        discover_process_model(log_path, output_path, params)
+        # Assert that no implicit self-loops are there
+        tree = etree.parse(output_path)
+        root = tree.getroot()
+        ns = {"bpmn": root.nsmap.get(None, "http://www.omg.org/spec/BPMN/20100524/MODEL")}
+
+        tasks = root.findall(".//bpmn:task", namespaces=ns)
+        for task in tasks:
+            assert task.find(
+                "bpmn:standardLoopCharacteristics",
+                namespaces=ns
+            ) is None, f"Task '{task.get('name')}' has an implicit self loop"
+        exclusive_gateways = root.findall(".//bpmn:exclusiveGateway", namespaces=ns)
+        assert len(exclusive_gateways) == 2, "There should only be two exclusive gateways in this model"
+        # Commented because SM2 doesn't sort the events, thus no parallelism
+        # parallel_gateways = root.findall(".//bpmn:parallelGateway", namespaces=ns)
+        # assert len(parallel_gateways) == 2, "There should only be two parallel gateways in this model"
+
+
+def test_transform_process_model_explicit_self_loops(entry_point):
+    with tempfile.TemporaryDirectory() as tmp_dir:
+        # Copy source model with self-loops
+        original_model_path = entry_point / "process_model_with_SplitMiner_self_loops.bpmn"
+        model_path = Path(tmp_dir) / "process_model_with_SplitMiner_self_loops.bpmn"
+        shutil.copy(original_model_path, model_path)
+        # Fix process model with self-loops in all activities except Start and End
+        post_process_bpmn_self_loops(model_path)
+        # Assert that no implicit self-loops are there
+        tree = etree.parse(model_path)
+        root = tree.getroot()
+        ns = {"bpmn": root.nsmap.get(None, "http://www.omg.org/spec/BPMN/20100524/MODEL")}
+        tasks = root.findall(".//bpmn:task", namespaces=ns)
+        for task in tasks:
+            assert task.find(
+                "bpmn:standardLoopCharacteristics",
+                namespaces=ns
+            ) is None, f"Task '{task.get('name')}' has an implicit self loop"
+            if task.get("name") == "Start":
+                # Find the incoming flow of the "Start" task
+                task_id = task.get("id")
+                sequence_flows = root.findall(".//bpmn:sequenceFlow", namespaces=ns)
+                incoming_flows = [flow for flow in sequence_flows if flow.get("targetRef") == task_id]
+                assert len(incoming_flows) == 1, "Task 'Start' should have exactly one incoming flow"
+                # Assert that the source element of the incoming flow is the start event
+                incoming_flow_source = incoming_flows[0].get("sourceRef")
+                start_events = root.findall(".//bpmn:startEvent", namespaces=ns)
+                start_event_ids = {event.get("id") for event in start_events}
+                assert incoming_flow_source in start_event_ids, "'Start' task was modified."
+            elif task.get("name") == "End":
+                # Find the outgoing flow of the "End" task
+                task_id = task.get("id")
+                sequence_flows = root.findall(".//bpmn:sequenceFlow", namespaces=ns)
+                outgoing_flows = [flow for flow in sequence_flows if flow.get("sourceRef") == task_id]
+                assert len(outgoing_flows) == 1, "Task 'End' should have exactly one outgoing flow"
+                # Assert that the target element of the outgoing flow is the end event
+                outgoing_flow_target = outgoing_flows[0].get("targetRef")
+                end_events = root.findall(".//bpmn:endEvent", namespaces=ns)
+                end_event_ids = {event.get("id") for event in end_events}
+                assert outgoing_flow_target in end_event_ids, "'End' task was modified."
+        # Verify number of gateways is original + 2 per self-loop activity
+        exclusive_gateways = root.findall(".//bpmn:exclusiveGateway", namespaces=ns)
+        assert len(exclusive_gateways) == 18, "There should only be 18 exclusive gateways in this model"
diff --git a/tests/test_data_attributes/__init__.py b/tests/test_data_attributes/__init__.py
new file mode 100644
index 00000000..e69de29b
diff --git a/tests/test_data_attributes/test_discovery.py b/tests/test_data_attributes/test_discovery.py
new file mode 100644
index 00000000..553c1a27
--- /dev/null
+++ b/tests/test_data_attributes/test_discovery.py
@@ -0,0 +1,76 @@
+import glob
+import os
+
+import pandas as pd
+import pytest
+from pix_framework.io.event_log import EventLogIDs
+from simod.data_attributes.discovery import discover_data_attributes
+
+LOG_IDS = EventLogIDs(case="case_id",
+                      activity="activity",
+                      start_time="start_time",
+                      end_time="end_time",
+                      resource="resource"
+                      )
+
+ASSET_DIR = "data_attributes"
+GLOBAL_ATTRIBUTE_LOG_PATHS = "global_attribute_*.csv.gz"
+CASE_ATTRIBUTE_LOG_PATHS = "case_attribute*.csv.gz"
+EVENT_ATTRIBUTE_LOG_PATHS = "event_attribute*.csv.gz"
+
+
+@pytest.fixture(scope="module")
+def global_log_files(entry_point):
+    log_pattern = os.path.join(entry_point, ASSET_DIR, GLOBAL_ATTRIBUTE_LOG_PATHS)
+    return glob.glob(log_pattern)
+
+
+@pytest.fixture(scope="module")
+def case_log_files(entry_point):
+    log_pattern = os.path.join(entry_point, ASSET_DIR, CASE_ATTRIBUTE_LOG_PATHS)
+    return glob.glob(log_pattern)
+
+
+@pytest.fixture(scope="module")
+def event_log_files(entry_point):
+    log_pattern = os.path.join(entry_point, ASSET_DIR, EVENT_ATTRIBUTE_LOG_PATHS)
+    return glob.glob(log_pattern)
+
+
+def assert_attributes(log, log_ids, expected_case_attrs, expected_event_attrs, expected_global_attrs, runs=5):
+    # Discovery is stochastic, so repeat it and require a majority of successful runs
+    success_count = 0
+
+    for _ in range(runs):
+        global_attributes, case_attributes, event_attributes = discover_data_attributes(log, log_ids)
+        try:
+            assert len(global_attributes) == expected_global_attrs, \
+                f"Expected {expected_global_attrs} global attributes, found {len(global_attributes)}"
+            assert len(case_attributes) == expected_case_attrs, \
+                f"Expected {expected_case_attrs} case attributes, found {len(case_attributes)}"
+            assert len(event_attributes) == expected_event_attrs, \
+                f"Expected {expected_event_attrs} event attributes, found {len(event_attributes)}"
+            success_count += 1
+        except AssertionError as e:
+            print(f"Assertion failed: {e}")
+
+    if success_count < runs // 2:
+        raise AssertionError("Majority of runs failed")
+
+
+def test_discover_global_attributes(entry_point, global_log_files):
+    for log_path in global_log_files:
+        log = pd.read_csv(log_path, compression="gzip")
+        assert_attributes(log, LOG_IDS, expected_case_attrs=0, expected_event_attrs=16, expected_global_attrs=1)
+
+
+def test_discover_case_attributes(entry_point, case_log_files):
+    for log_path in case_log_files:
+        log = pd.read_csv(log_path, compression="gzip")
+        assert_attributes(log, LOG_IDS, expected_case_attrs=5, expected_event_attrs=0, expected_global_attrs=0)
+
+
+def test_discover_event_attributes(entry_point, event_log_files):
+    for log_path in event_log_files:
+        log = pd.read_csv(log_path, compression="gzip")
+        assert_attributes(log, LOG_IDS, expected_case_attrs=0, expected_event_attrs=1, expected_global_attrs=0)
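Note: the data-attribute tests above exercise `discover_data_attributes`, which replaces the old `discover_case_attributes` and returns global, case, and event attributes as a triple. Purely as an illustration (the log path below is a placeholder, not part of the changeset), a direct call looks like this:

import pandas as pd
from pix_framework.io.event_log import EventLogIDs

from simod.data_attributes.discovery import discover_data_attributes

# Placeholder column mapping and path; adapt them to your own event log
log_ids = EventLogIDs(
    case="case_id", activity="activity", start_time="start_time", end_time="end_time", resource="resource"
)
log = pd.read_csv("my_event_log.csv.gz", compression="gzip")

global_attributes, case_attributes, event_attributes = discover_data_attributes(log, log_ids)
# Each discovered attribute exposes a name, as used in the case-attribute test above
print([attribute.name for attribute in case_attributes])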
diff --git a/tests/test_prioritization/test_prioritization_discovery.py b/tests/test_prioritization/test_prioritization_discovery.py
index bf6dd023..00a82ae6 100644
--- a/tests/test_prioritization/test_prioritization_discovery.py
+++ b/tests/test_prioritization/test_prioritization_discovery.py
@@ -1,5 +1,5 @@
 from pix_framework.io.event_log import DEFAULT_XES_IDS, read_csv_log
-from simod.case_attributes.discovery import discover_case_attributes
+from simod.data_attributes.discovery import discover_data_attributes
 from simod.prioritization.discovery import (
     discover_prioritization_rules,
 )
@@ -35,7 +35,7 @@ def test_discover_prioritization_rules(entry_point):
     log_ids = DEFAULT_XES_IDS
     log = read_csv_log(log_path, log_ids)
 
-    case_attributes = discover_case_attributes(log, log_ids)
+    global_attributes, case_attributes, event_attributes = discover_data_attributes(log, log_ids)
 
     rules = discover_prioritization_rules(log, log_ids, case_attributes)
 
diff --git a/tests/test_settings/test_simod_settings.py b/tests/test_settings/test_simod_settings.py
index c1881d4e..2af91f68 100644
--- a/tests/test_settings/test_simod_settings.py
+++ b/tests/test_settings/test_simod_settings.py
@@ -1,11 +1,55 @@
 from pathlib import Path
 
-import pytest
 import yaml
 
 from simod.settings.simod_settings import SimodSettings
 
-settings = """
+settings_5 = """
+version: 5
+common:
+  train_log_path: assets/LoanApp_simplified.csv.gz
+  perform_final_evaluation: true
+  num_final_evaluations: 1
+  evaluation_metrics:
+    - dl
+    - absolute_event_distribution
+  discover_data_attributes: true
+preprocessing:
+  multitasking: false
+control_flow:
+  num_iterations: 2
+  mining_algorithm: sm1
+  epsilon:
+    - 0.0
+    - 1.0
+  eta:
+    - 0.0
+    - 1.0
+  gateway_probabilities:
+    - equiprobable
+    - discovery
+  replace_or_joins:
+    - true
+    - false
+  prioritize_parallelism:
+    - true
+    - false
+resource_model:
+  num_iterations: 2
+  discover_prioritization_rules: true
+  resource_profiles:
+    discovery_type: differentiated_by_pool
+    granularity: 60
+    confidence:
+      - 0.5
+      - 0.85
+    support:
+      - 0.01
+      - 0.3
+    participation: 0.4
+"""
+
+settings_4 = """
 version: 4
 common:
   train_log_path: assets/LoanApp_simplified.csv.gz
@@ -14,6 +58,7 @@
   evaluation_metrics:
     - dl
     - absolute_event_distribution
+  discover_case_attributes: true
 preprocessing:
   multitasking: false
 control_flow:
@@ -50,9 +95,8 @@
 """
 
-@pytest.mark.parametrize("test_case", [settings], ids=["default"])
-def test_configuration(test_case):
-    config = yaml.safe_load(test_case)
+def test_configuration():
+    config = yaml.safe_load(settings_5)
     result = SimodSettings.from_yaml(config)
 
     assert result is not None
@@ -62,6 +106,14 @@
     assert_resource_model(config, result)
 
 
+def test_configuration_legacy():
+    ground_truth = SimodSettings.from_yaml(yaml.safe_load(settings_5))
+    legacy = SimodSettings.from_yaml(yaml.safe_load(settings_4))
+
+    assert legacy is not None
+    assert ground_truth.to_dict() == legacy.to_dict()
+
+
 def assert_common(config: dict, result: SimodSettings):
     config_common = config["common"]
     result_common = result.common